Closed: ahjing99 closed this issue 1 year ago.
➜ ~ k get sts pulsarcluster-bookies -o yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
annotations:
config.kubeblocks.io/tpl-agamotto-configuration: pulsarcluster-bookies-agamotto-configuration
config.kubeblocks.io/tpl-bookies-config: pulsarcluster-bookies-bookies-config
config.kubeblocks.io/tpl-bookies-env: pulsarcluster-bookies-bookies-env
config.kubeblocks.io/tpl-pulsar-scripts: pulsarcluster-bookies-pulsar-scripts
kubeblocks.io/generation: "1"
creationTimestamp: "2023-08-02T06:26:15Z"
finalizers:
- cluster.kubeblocks.io/finalizer
generation: 1
labels:
app.kubernetes.io/component: bookies
app.kubernetes.io/instance: pulsarcluster
app.kubernetes.io/managed-by: kubeblocks
app.kubernetes.io/name: pulsar
apps.kubeblocks.io/component-name: bookies
name: pulsarcluster-bookies
namespace: default
ownerReferences:
- apiVersion: apps.kubeblocks.io/v1alpha1
blockOwnerDeletion: true
controller: true
kind: Cluster
name: pulsarcluster
uid: e2f7cab2-f8ea-4706-b069-0076788c322f
resourceVersion: "1796231"
uid: 120b6a9f-07dc-4590-abf4-18584253e887
spec:
persistentVolumeClaimRetentionPolicy:
whenDeleted: Retain
whenScaled: Retain
podManagementPolicy: OrderedReady
replicas: 3
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/instance: pulsarcluster
app.kubernetes.io/managed-by: kubeblocks
app.kubernetes.io/name: pulsar
apps.kubeblocks.io/component-name: bookies
serviceName: pulsarcluster-bookies-headless
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: bookies
app.kubernetes.io/instance: pulsarcluster
app.kubernetes.io/managed-by: kubeblocks
app.kubernetes.io/name: pulsar
app.kubernetes.io/version: pulsar-2.11.2
apps.kubeblocks.io/component-name: bookies
apps.kubeblocks.io/workload-type: Stateful
spec:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: kb-data
operator: In
values:
- "true"
weight: 100
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/instance: pulsarcluster
apps.kubeblocks.io/component-name: bookies
topologyKey: kubernetes.io/hostname
weight: 100
containers:
- command:
- /kb-scripts/start-bookies.sh
env:
- name: KB_POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: KB_POD_UID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.uid
- name: KB_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KB_SA_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.serviceAccountName
- name: KB_NODENAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: KB_HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: KB_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: KB_POD_IPS
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIPs
- name: KB_HOSTIP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: KB_PODIP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: KB_PODIPS
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIPs
- name: KB_CLUSTER_NAME
value: pulsarcluster
- name: KB_COMP_NAME
value: bookies
- name: KB_CLUSTER_COMP_NAME
value: pulsarcluster-bookies
- name: KB_CLUSTER_UID_POSTFIX_8
value: 788c322f
- name: KB_POD_FQDN
value: $(KB_POD_NAME).$(KB_CLUSTER_COMP_NAME)-headless.$(KB_NAMESPACE).svc
- name: cluster_domain
value: .cluster.local
envFrom:
- configMapRef:
name: pulsarcluster-bookies-env
image: docker.io/apecloud/pulsar:2.11.2
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /kb-scripts/prestop-bookies.sh
livenessProbe:
failureThreshold: 60
httpGet:
path: /api/v1/bookie/state
port: http
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
name: bookies
ports:
- containerPort: 8000
name: http
protocol: TCP
- containerPort: 3181
name: bookie
protocol: TCP
readinessProbe:
failureThreshold: 60
httpGet:
path: /api/v1/bookie/is_ready
port: http
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
resources:
requests:
cpu: 100m
memory: 512Mi
securityContext:
runAsGroup: 10000
runAsUser: 0
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /pulsar/data/bookkeeper/journal
name: journal
- mountPath: /pulsar/data/bookkeeper/ledgers
name: ledgers
- mountPath: /kb-scripts
name: scripts
- mountPath: /etc/annotations
name: annotations
- mountPath: /opt/pulsar/none
name: bookies-env
- mountPath: /opt/pulsar/conf
name: pulsar-bookies-config
- command:
- /bin/agamotto
- --config=/opt/agamotto/agamotto-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
env:
- name: KB_POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: KB_POD_UID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.uid
- name: KB_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KB_SA_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.serviceAccountName
- name: KB_NODENAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: KB_HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: KB_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: KB_POD_IPS
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIPs
- name: KB_HOSTIP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: KB_PODIP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: KB_PODIPS
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIPs
- name: KB_CLUSTER_NAME
value: pulsarcluster
- name: KB_COMP_NAME
value: bookies
- name: KB_CLUSTER_COMP_NAME
value: pulsarcluster-bookies
- name: KB_CLUSTER_UID_POSTFIX_8
value: 788c322f
- name: KB_POD_FQDN
value: $(KB_POD_NAME).$(KB_CLUSTER_COMP_NAME)-headless.$(KB_NAMESPACE).svc
- name: JOB_NAME
value: bookie
- name: SERVICE_PORT
value: "8000"
- name: POD_NAME
value: $(KB_POD_NAME)
- name: POD_NAMESPACE
value: $(KB_NAMESPACE)
- name: LOG_LEVEL
value: info
envFrom:
- configMapRef:
name: pulsarcluster-bookies-env
image: registry.cn-hangzhou.aliyuncs.com/apecloud/agamotto:0.1.2-beta.2
imagePullPolicy: IfNotPresent
name: metrics
ports:
- containerPort: 1234
name: http-metrics
protocol: TCP
resources:
limits:
cpu: "0"
memory: "0"
securityContext:
runAsNonRoot: false
runAsUser: 0
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /opt/agamotto
name: agamotto-configuration
dnsPolicy: ClusterFirst
enableServiceLinks: false
initContainers:
- command:
- /kb-scripts/init-bookies.sh
env:
- name: KB_POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: KB_POD_UID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.uid
- name: KB_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KB_SA_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.serviceAccountName
- name: KB_NODENAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: KB_HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: KB_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: KB_POD_IPS
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIPs
- name: KB_HOSTIP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: KB_PODIP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: KB_PODIPS
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIPs
- name: KB_CLUSTER_NAME
value: pulsarcluster
- name: KB_COMP_NAME
value: bookies
- name: KB_CLUSTER_COMP_NAME
value: pulsarcluster-bookies
- name: KB_CLUSTER_UID_POSTFIX_8
value: 788c322f
- name: KB_POD_FQDN
value: $(KB_POD_NAME).$(KB_CLUSTER_COMP_NAME)-headless.$(KB_NAMESPACE).svc
envFrom:
- configMapRef:
name: pulsarcluster-bookies-env
image: docker.io/apecloud/pulsar:2.11.2
imagePullPolicy: IfNotPresent
name: init-bookies
resources:
limits:
cpu: "0"
memory: "0"
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /kb-scripts
name: scripts
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoSchedule
key: kb-data
operator: Equal
value: "true"
topologySpreadConstraints:
- labelSelector:
matchLabels:
app.kubernetes.io/instance: pulsarcluster
apps.kubeblocks.io/component-name: bookies
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- downwardAPI:
defaultMode: 420
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.annotations['apps.kubeblocks.io/component-replicas']
path: component-replicas
name: annotations
- configMap:
defaultMode: 420
name: pulsarcluster-bookies-agamotto-configuration
name: agamotto-configuration
- configMap:
defaultMode: 420
name: pulsarcluster-bookies-bookies-config
name: pulsar-bookies-config
- configMap:
defaultMode: 420
name: pulsarcluster-bookies-bookies-env
name: bookies-env
- configMap:
defaultMode: 511
name: pulsarcluster-bookies-pulsar-scripts
name: scripts
- emptyDir: {}
name: journal
- emptyDir: {}
name: ledgers
updateStrategy:
rollingUpdate:
partition: 0
type: RollingUpdate
volumeClaimTemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
apps.kubeblocks.io/vct-name: ledgers
name: ledgers
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: kb-default-sc
volumeMode: Filesystem
status:
phase: Pending
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
apps.kubeblocks.io/vct-name: journal
name: journal
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: kb-default-sc
volumeMode: Filesystem
status:
phase: Pending
status:
availableReplicas: 0
collisionCount: 0
currentReplicas: 1
currentRevision: pulsarcluster-bookies-75f8f9498b
observedGeneration: 1
replicas: 1
updateRevision: pulsarcluster-bookies-75f8f9498b
updatedReplicas: 1
➜ ~ k get cluster pulsarcluster -o yaml
apiVersion: apps.kubeblocks.io/v1alpha1
kind: Cluster
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"apps.kubeblocks.io/v1alpha1","kind":"Cluster","metadata":{"annotations":{},"name":"pulsarcluster","namespace":"default"},"spec":{"affinity":{"topologyKeys":["kubernetes.io/hostname"]},"clusterDefinitionRef":"pulsar","clusterVersionRef":"pulsar-2.11.2","componentSpecs":[{"componentDefRef":"pulsar-broker","monitor":false,"name":"pulsar-broker","replicas":1,"resources":{"requests":{"cpu":"100m","memory":"0.5Gi"}},"volumeClaimTemplates":[{"name":"data","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}}}]},{"componentDefRef":"bookies","monitor":false,"name":"bookies","replicas":3,"resources":{"requests":{"cpu":"100m","memory":"0.5Gi"}},"volumeClaimTemplates":[{"name":"ledgers","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}}},{"name":"journal","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}}}]},{"componentDefRef":"zookeeper","monitor":false,"name":"zookeeper","replicas":3,"resources":{"requests":{"cpu":"100m","memory":"0.5Gi"}},"volumeClaimTemplates":[{"name":"data","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}}}]},{"componentDefRef":"pulsar-proxy","monitor":false,"name":"pulsar-proxy","replicas":1,"resources":{"requests":{"cpu":"100m","memory":"0.5Gi"}}}],"terminationPolicy":"DoNotTerminate"}}
creationTimestamp: "2023-08-02T06:26:13Z"
finalizers:
- cluster.kubeblocks.io/finalizer
generation: 1
labels:
clusterdefinition.kubeblocks.io/name: pulsar
clusterversion.kubeblocks.io/name: pulsar-2.11.2
name: pulsarcluster
namespace: default
resourceVersion: "1796789"
uid: e2f7cab2-f8ea-4706-b069-0076788c322f
spec:
affinity:
podAntiAffinity: Preferred
tenancy: SharedNode
topologyKeys:
- kubernetes.io/hostname
clusterDefinitionRef: pulsar
clusterVersionRef: pulsar-2.11.2
componentSpecs:
- componentDefRef: pulsar-broker
monitor: false
name: pulsar-broker
noCreatePDB: false
replicas: 1
resources:
requests:
cpu: 100m
memory: 0.5Gi
volumeClaimTemplates:
- name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
- componentDefRef: bookies
monitor: false
name: bookies
noCreatePDB: false
replicas: 3
resources:
requests:
cpu: 100m
memory: 0.5Gi
volumeClaimTemplates:
- name: ledgers
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
- name: journal
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
- componentDefRef: zookeeper
monitor: false
name: zookeeper
noCreatePDB: false
replicas: 3
resources:
requests:
cpu: 100m
memory: 0.5Gi
volumeClaimTemplates:
- name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
- componentDefRef: pulsar-proxy
monitor: false
name: pulsar-proxy
noCreatePDB: false
replicas: 1
resources:
requests:
cpu: 100m
memory: 0.5Gi
terminationPolicy: DoNotTerminate
status:
clusterDefGeneration: 2
components:
bookies:
phase: Creating
podsReady: false
pulsar-broker:
phase: Creating
podsReady: false
pulsar-proxy:
phase: Creating
podsReady: false
zookeeper:
phase: Running
podsReady: true
podsReadyTime: "2023-08-02T06:27:03Z"
conditions:
- lastTransitionTime: "2023-08-02T06:26:13Z"
message: 'The operator has started the provisioning of Cluster: pulsarcluster'
observedGeneration: 1
reason: PreCheckSucceed
status: "True"
type: ProvisioningStarted
- lastTransitionTime: "2023-08-02T06:26:15Z"
message: Successfully applied for resources
observedGeneration: 1
reason: ApplyResourcesSucceed
status: "True"
type: ApplyResources
- lastTransitionTime: "2023-08-02T06:26:17Z"
message: 'pods are not ready in Components: [bookies pulsar-broker pulsar-proxy],
refer to related component message in Cluster.status.components'
reason: ReplicasNotReady
status: "False"
type: ReplicasReady
- lastTransitionTime: "2023-08-02T06:26:17Z"
message: 'pods are unavailable in Components: [bookies pulsar-broker pulsar-proxy],
refer to related component message in Cluster.status.components'
reason: ComponentsNotReady
status: "False"
type: Ready
observedGeneration: 1
phase: Creating
k logs pulsarcluster-bookies-0 -c init-bookies | less
waiting zookeeper ready...
nc: missing port number
nc: missing port number
nc: missing port number
nc: missing port number
nc: missing port number
nc: missing port number
nc: missing port number
nc: missing port number
nc: missing port number
k logs pulsarcluster-pulsar-broker-0 -c init-broker-cluster | less
+ echo INFO: wait for zookeeper ready...
+ zkDomain=$(zookeeperSVC)
+ zkURL=$(zookeeperSVC):2181 python3 /kb-scripts/zookeeper.py get /
INFO: wait for zookeeper ready...
Wed, 02 Aug 2023 06:26:20 connection.py[line:518] WARNING Cannot resolve $(zookeepersvc): [Errno -2] Name or service not known
Traceback (most recent call last):
File "/kb-scripts/zookeeper.py", line 71, in <module>
zk_client = ZKClient(zk_url)
File "/kb-scripts/zookeeper.py", line 17, in __init__
self.client.start()
File "/usr/lib/python3/dist-packages/kazoo/client.py", line 635, in start
raise self.handler.timeout_exception("Connection time-out")
kazoo.handlers.threading.KazooTimeoutError: Connection time-out
➜ ~ k get cd pulsar -o yaml
apiVersion: apps.kubeblocks.io/v1alpha1
kind: ClusterDefinition
metadata:
annotations:
meta.helm.sh/release-name: kb-addon-pulsar
meta.helm.sh/release-namespace: kb-system
creationTimestamp: "2023-08-02T05:05:33Z"
finalizers:
- clusterdefinition.kubeblocks.io/finalizer
generation: 2
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 2.11.2
config.kubeblocks.io/constraints-brokers-config-constraints: brokers-config-constraints
config.kubeblocks.io/constraints-pulsar-common-constraints: pulsar-common-constraints
config.kubeblocks.io/constraints-pulsar-env-constraints: pulsar-env-constraints
config.kubeblocks.io/tpl-agamotto-configuration: pulsar-agamotto-conf-tpl
config.kubeblocks.io/tpl-bookies-config: pulsar-bookies-config-tpl
config.kubeblocks.io/tpl-bookies-env: pulsar-bookies-env-tpl
config.kubeblocks.io/tpl-bookies-recovery-env: pulsar-recovery-env-tpl
config.kubeblocks.io/tpl-broker-config: pulsar-broker-config-tpl
config.kubeblocks.io/tpl-broker-env: pulsar-broker-env-tpl
config.kubeblocks.io/tpl-client-config: pulsar-client-config-tpl
config.kubeblocks.io/tpl-proxy-config: pulsar-proxy-config-tpl
config.kubeblocks.io/tpl-proxy-env: pulsar-proxy-env-tpl
config.kubeblocks.io/tpl-pulsar-scripts: pulsar-scripts
config.kubeblocks.io/tpl-zookeeper-env: pulsar-zookeeper-env-tpl
helm.sh/chart: pulsar-0.6.0-beta.21
name: pulsar
resourceVersion: "1747704"
uid: e4654248-b28b-401a-abeb-66d7665aeb4d
spec:
componentDefs:
- characterType: pulsar-broker
configSpecs:
- name: agamotto-configuration
namespace: kb-system
templateRef: pulsar-agamotto-conf-tpl
volumeName: agamotto-configuration
- constraintRef: pulsar-env-constraints
keys:
- conf
name: broker-env
namespace: kb-system
templateRef: pulsar-broker-env-tpl
volumeName: broker-env
- constraintRef: brokers-config-constraints
name: broker-config
namespace: kb-system
templateRef: pulsar-broker-config-tpl
volumeName: pulsar-broker-config
- constraintRef: pulsar-common-constraints
name: client-config
namespace: kb-system
templateRef: pulsar-client-config-tpl
volumeName: pulsar-client-config
monitor:
builtIn: false
exporterConfig:
scrapePath: /metrics
scrapePort: 1234
name: pulsar-broker
podSpec:
containers:
- args:
- |
set -x
/kb-scripts/merge_pulsar_config.py conf/client.conf /opt/pulsar/client-conf/client.conf && \
cp /opt/pulsar/conf/broker.conf conf/broker.conf && \
bin/apply-config-from-env.py conf/broker.conf && \
bin/apply-config-from-env.py conf/client.conf && \
echo 'OK' > status;exec bin/pulsar broker
command:
- sh
- -c
env:
- name: POD_NAME
value: $(KB_POD_NAME)
- name: PULSAR_PREFIX_brokerServicePort
value: "6650"
- name: PULSAR_PREFIX_internalListenerName
value: cluster
- name: PULSAR_PREFIX_advertisedListeners
value: cluster:pulsar://$(POD_NAME).$(KB_CLUSTER_COMP_NAME)-headless.$(KB_NAMESPACE).svc.cluster.local:6650
- name: PULSAR_PREFIX_allowAutoTopicCreationType
value: partitioned
- name: PULSAR_PREFIX_kafkaListeners
value: PLAINTEXT://0.0.0.0:9092
- name: PULSAR_PREFIX_kafkaAdvertisedListeners
value: PLAINTEXT://$(POD_NAME).$(KB_CLUSTER_COMP_NAME)-headless.$(KB_NAMESPACE).svc.cluster.local:9092
- name: brokerServiceUrl
value: pulsar://$(KB_CLUSTER_COMP_NAME).$(KB_NAMESPACE).svc.cluster.local:6650
- name: clusterName
value: $(KB_NAMESPACE)-$(KB_CLUSTER_COMP_NAME)
- name: webServiceUrl
value: http://$(KB_CLUSTER_COMP_NAME).$(KB_NAMESPACE).svc.cluster.local:80
- name: zookeeperServers
value: $(zookeeperSVC):2181
- name: configurationStoreServers
value: $(zookeeperSVC):2181
livenessProbe:
failureThreshold: 30
httpGet:
path: /status.html
port: http
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 100
name: broker
ports:
- containerPort: 6650
name: pulsar
protocol: TCP
- containerPort: 8080
name: http
protocol: TCP
- containerPort: 9092
name: kafka-client
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /status.html
port: http
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 100
resources:
requests:
cpu: 200m
memory: 512Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsGroup: 0
runAsNonRoot: true
runAsUser: 10000
startupProbe:
failureThreshold: 30
httpGet:
path: /status.html
port: http
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 100
volumeMounts:
- mountPath: /kb-scripts
name: scripts
- mountPath: /opt/pulsar/conf
name: pulsar-broker-config
- mountPath: /opt/pulsar/none
name: broker-env
- mountPath: /opt/pulsar/client-conf
name: pulsar-client-config
- command:
- /bin/agamotto
- --config=/opt/agamotto/agamotto-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
env:
- name: JOB_NAME
value: broker
- name: SERVICE_PORT
value: "8080"
- name: POD_NAME
value: $(KB_POD_NAME)
- name: POD_NAMESPACE
value: $(KB_NAMESPACE)
- name: LOG_LEVEL
value: info
image: registry.cn-hangzhou.aliyuncs.com/apecloud/agamotto:0.1.2-beta.2
imagePullPolicy: IfNotPresent
name: metrics
ports:
- containerPort: 1234
name: http-metrics
protocol: TCP
resources: {}
securityContext:
runAsNonRoot: false
runAsUser: 0
volumeMounts:
- mountPath: /opt/agamotto
name: agamotto-configuration
initContainers:
- command:
- /kb-scripts/init-broker.sh
env:
- name: brokerServiceUrl
value: pulsar://$(KB_CLUSTER_COMP_NAME).$(KB_NAMESPACE).svc.cluster.local:6650
- name: clusterName
value: $(KB_NAMESPACE)-$(KB_CLUSTER_COMP_NAME)
- name: webServiceUrl
value: http://$(KB_CLUSTER_COMP_NAME).$(KB_NAMESPACE).svc.cluster.local:80
- name: zookeeperServers
value: $(zookeeperSVC):2181
- name: configurationStoreServers
value: $(zookeeperSVC):2181
name: init-broker-cluster
resources: {}
volumeMounts:
- mountPath: /kb-scripts
name: scripts
- command:
- /kb-scripts/init-broker-sysctl.sh
name: init-sysctl
resources: {}
securityContext:
privileged: true
runAsNonRoot: false
runAsUser: 0
volumeMounts:
- mountPath: /kb-scripts
name: scripts
securityContext:
fsGroup: 0
runAsGroup: 0
runAsNonRoot: true
runAsUser: 10000
scriptSpecs:
- defaultMode: 511
name: pulsar-scripts
namespace: kb-system
templateRef: pulsar-scripts
volumeName: scripts
service:
ports:
- name: pulsar
port: 6650
protocol: TCP
targetPort: pulsar
- name: http
port: 80
protocol: TCP
targetPort: http
- name: http-alt
port: 8080
protocol: TCP
targetPort: http
- name: kafka-client
port: 9092
protocol: TCP
targetPort: kafka-client
workloadType: Stateful
- characterType: pulsar-proxy
configSpecs:
- name: agamotto-configuration
namespace: kb-system
templateRef: pulsar-agamotto-conf-tpl
volumeName: agamotto-configuration
- constraintRef: pulsar-env-constraints
keys:
- conf
name: proxy-env
namespace: kb-system
templateRef: pulsar-proxy-env-tpl
volumeName: proxy-env
- constraintRef: pulsar-common-constraints
name: proxy-config
namespace: kb-system
templateRef: pulsar-proxy-config-tpl
volumeName: pulsar-proxy-config
monitor:
builtIn: false
exporterConfig:
scrapePath: /metrics
scrapePort: 1234
name: pulsar-proxy
podSpec:
containers:
- command:
- /kb-scripts/start-proxy.sh
env:
- name: brokerWebServiceURL
value: http://$(brokerSVC):80
- name: brokerServiceURL
value: pulsar://$(brokerSVC):6650
- name: clusterName
value: $(KB_NAMESPACE)-$(KB_CLUSTER_COMP_NAME)
- name: webServicePort
value: "8080"
livenessProbe:
failureThreshold: 3
httpGet:
path: /status.html
port: http
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 100
name: proxy
ports:
- containerPort: 6650
name: pulsar
protocol: TCP
- containerPort: 8080
name: http
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /status.html
port: http
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 100
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsGroup: 0
runAsNonRoot: true
runAsUser: 10000
startupProbe:
failureThreshold: 20
httpGet:
path: /status.html
port: http
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 100
volumeMounts:
- mountPath: /kb-scripts
name: scripts
- mountPath: /opt/pulsar/none
name: proxy-env
- mountPath: /opt/pulsar/conf
name: pulsar-proxy-config
- command:
- /bin/agamotto
- --config=/opt/agamotto/agamotto-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
env:
- name: JOB_NAME
value: proxy
- name: SERVICE_PORT
value: "8080"
- name: POD_NAME
value: $(KB_POD_NAME)
- name: POD_NAMESPACE
value: $(KB_NAMESPACE)
- name: LOG_LEVEL
value: info
image: registry.cn-hangzhou.aliyuncs.com/apecloud/agamotto:0.1.2-beta.2
imagePullPolicy: IfNotPresent
name: metrics
ports:
- containerPort: 1234
name: http-metrics
protocol: TCP
resources: {}
securityContext:
runAsNonRoot: false
runAsUser: 0
volumeMounts:
- mountPath: /opt/agamotto
name: agamotto-configuration
initContainers:
- command:
- /kb-scripts/init-proxy.sh
name: check-broker
resources: {}
volumeMounts:
- mountPath: /kb-scripts
name: scripts
securityContext:
fsGroup: 0
runAsGroup: 0
runAsNonRoot: true
runAsUser: 10000
scriptSpecs:
- defaultMode: 511
name: pulsar-scripts
namespace: kb-system
templateRef: pulsar-scripts
volumeName: scripts
service:
ports:
- name: pulsar
port: 6650
protocol: TCP
targetPort: pulsar
- name: http
port: 80
protocol: TCP
targetPort: http
workloadType: Stateful
- characterType: bookkeeper
configSpecs:
- name: agamotto-configuration
namespace: kb-system
templateRef: pulsar-agamotto-conf-tpl
volumeName: agamotto-configuration
- constraintRef: pulsar-env-constraints
keys:
- conf
name: bookies-env
namespace: kb-system
templateRef: pulsar-bookies-env-tpl
volumeName: bookies-env
- constraintRef: pulsar-common-constraints
name: bookies-config
namespace: kb-system
templateRef: pulsar-bookies-config-tpl
volumeName: pulsar-bookies-config
monitor:
builtIn: false
exporterConfig:
scrapePath: /metrics
scrapePort: 1234
name: bookies
podSpec:
containers:
- command:
- /kb-scripts/start-bookies.sh
env:
- name: cluster_domain
value: .cluster.local
lifecycle:
preStop:
exec:
command:
- /kb-scripts/prestop-bookies.sh
livenessProbe:
failureThreshold: 60
httpGet:
path: /api/v1/bookie/state
port: http
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
name: bookies
ports:
- containerPort: 8000
name: http
protocol: TCP
- containerPort: 3181
name: bookie
protocol: TCP
readinessProbe:
failureThreshold: 60
httpGet:
path: /api/v1/bookie/is_ready
port: http
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
resources:
requests:
cpu: 200m
memory: 512Mi
securityContext:
runAsGroup: 10000
runAsUser: 0
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /pulsar/data/bookkeeper/journal
name: journal
- mountPath: /pulsar/data/bookkeeper/ledgers
name: ledgers
- mountPath: /kb-scripts
name: scripts
- mountPath: /etc/annotations
name: annotations
- mountPath: /opt/pulsar/none
name: bookies-env
- mountPath: /opt/pulsar/conf
name: pulsar-bookies-config
- command:
- /bin/agamotto
- --config=/opt/agamotto/agamotto-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
env:
- name: JOB_NAME
value: bookie
- name: SERVICE_PORT
value: "8000"
- name: POD_NAME
value: $(KB_POD_NAME)
- name: POD_NAMESPACE
value: $(KB_NAMESPACE)
- name: LOG_LEVEL
value: info
image: registry.cn-hangzhou.aliyuncs.com/apecloud/agamotto:0.1.2-beta.2
imagePullPolicy: IfNotPresent
name: metrics
ports:
- containerPort: 1234
name: http-metrics
protocol: TCP
resources: {}
securityContext:
runAsNonRoot: false
runAsUser: 0
volumeMounts:
- mountPath: /opt/agamotto
name: agamotto-configuration
initContainers:
- command:
- /kb-scripts/init-bookies.sh
name: init-bookies
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /kb-scripts
name: scripts
volumes:
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.annotations['apps.kubeblocks.io/component-replicas']
path: component-replicas
name: annotations
scriptSpecs:
- defaultMode: 511
name: pulsar-scripts
namespace: kb-system
templateRef: pulsar-scripts
volumeName: scripts
workloadType: Stateful
- characterType: pulsar-bookie-recovery
configSpecs:
- name: agamotto-configuration
namespace: kb-system
templateRef: pulsar-agamotto-conf-tpl
volumeName: agamotto-configuration
- constraintRef: pulsar-env-constraints
keys:
- conf
name: bookies-recovery-env
namespace: kb-system
templateRef: pulsar-recovery-env-tpl
volumeName: recovery-config-env
monitor:
builtIn: false
exporterConfig:
scrapePath: /metrics
scrapePort: 1234
name: bookies-recovery
podSpec:
containers:
- command:
- /kb-scripts/start-bookies-recovery.sh
env:
- name: httpServerEnabled
value: "true"
- name: httpServerPort
value: "8000"
- name: prometheusStatsHttpPort
value: "8000"
- name: useHostNameAsBookieID
value: "true"
name: bookies-recovery
ports:
- containerPort: 8000
name: http
protocol: TCP
- containerPort: 3181
name: bookie
protocol: TCP
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsGroup: 0
runAsNonRoot: true
runAsUser: 10000
volumeMounts:
- mountPath: /kb-scripts
name: scripts
- mountPath: /opt/pulsar/none
name: recovery-config-env
- command:
- /bin/agamotto
- --config=/opt/agamotto/agamotto-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
env:
- name: JOB_NAME
value: recovery
- name: SERVICE_PORT
value: "8000"
- name: POD_NAME
value: $(KB_POD_NAME)
- name: POD_NAMESPACE
value: $(KB_NAMESPACE)
- name: LOG_LEVEL
value: info
image: registry.cn-hangzhou.aliyuncs.com/apecloud/agamotto:0.1.2-beta.2
imagePullPolicy: IfNotPresent
name: metrics
ports:
- containerPort: 1234
name: http-metrics
protocol: TCP
resources: {}
securityContext:
runAsNonRoot: false
runAsUser: 0
volumeMounts:
- mountPath: /opt/agamotto
name: agamotto-configuration
initContainers:
- command:
- /kb-scripts/check-bookies.sh
name: check-bookies
resources: {}
securityContext:
privileged: true
runAsNonRoot: false
runAsUser: 0
volumeMounts:
- mountPath: /kb-scripts
name: scripts
securityContext:
fsGroup: 0
runAsGroup: 0
runAsNonRoot: true
runAsUser: 10000
scriptSpecs:
- defaultMode: 511
name: pulsar-scripts
namespace: kb-system
templateRef: pulsar-scripts
volumeName: scripts
workloadType: Stateful
- characterType: zookeeper
configSpecs:
- name: agamotto-configuration
namespace: kb-system
templateRef: pulsar-agamotto-conf-tpl
volumeName: agamotto-configuration
- constraintRef: pulsar-env-constraints
keys:
- conf
name: zookeeper-env
namespace: kb-system
templateRef: pulsar-zookeeper-env-tpl
volumeName: zookeeper-config-env
monitor:
builtIn: false
exporterConfig:
scrapePath: /metrics
scrapePort: 1234
name: zookeeper
podSpec:
containers:
- command:
- /kb-scripts/start-zookeeper.sh
env:
- name: EXTERNAL_PROVIDED_SERVERS
value: "false"
- name: OPTS
value: -Dlog4j2.formatMsgNoLookups=true
livenessProbe:
exec:
command:
- bash
- -c
- echo ruok | nc -q 1 localhost 2181 | grep imok
failureThreshold: 10
initialDelaySeconds: 5
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 30
name: zookeeper
ports:
- containerPort: 2181
name: client
protocol: TCP
- containerPort: 2888
name: tcp-quorum
protocol: TCP
- containerPort: 3888
name: tcp-election
protocol: TCP
- containerPort: 8000
name: http
protocol: TCP
resources:
requests:
cpu: 50m
memory: 256Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsGroup: 0
runAsNonRoot: true
runAsUser: 10000
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /pulsar/data
name: data
- mountPath: /pulsar/data-log
name: data-log
- mountPath: /kb-scripts
name: scripts
- mountPath: /opt/pulsar/none
name: zookeeper-config-env
- command:
- /bin/agamotto
- --config=/opt/agamotto/agamotto-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
env:
- name: JOB_NAME
value: zookeeper
- name: SERVICE_PORT
value: "8000"
- name: POD_NAME
value: $(KB_POD_NAME)
- name: POD_NAMESPACE
value: $(KB_NAMESPACE)
- name: LOG_LEVEL
value: info
image: registry.cn-hangzhou.aliyuncs.com/apecloud/agamotto:0.1.2-beta.2
imagePullPolicy: IfNotPresent
name: metrics
ports:
- containerPort: 1234
name: http-metrics
protocol: TCP
resources: {}
securityContext:
runAsNonRoot: false
runAsUser: 0
volumeMounts:
- mountPath: /opt/agamotto
name: agamotto-configuration
securityContext:
fsGroup: 0
runAsGroup: 0
runAsNonRoot: true
runAsUser: 10000
scriptSpecs:
- defaultMode: 511
name: pulsar-scripts
namespace: kb-system
templateRef: pulsar-scripts
volumeName: scripts
service:
ports:
- name: client
port: 2181
protocol: TCP
targetPort: client
workloadType: Stateful
connectionCredential:
httpEndpoint: http://$(SVC_FQDN):$(SVC_PORT_http)
kafkaEndpoint: $(SVC_FQDN):$(SVC_PORT_kafka-client)
password: ""
pulsarEndpoint: pulsar://$(SVC_FQDN):$(SVC_PORT_pulsar)
username: admin
type: pulsar
status:
observedGeneration: 2
phase: Available
Sorry, @Y-Rookie - This pull request can only be merged by the owner (@apecloud).
The cd (ClusterDefinition) version is incorrect: its configSpecs lacks asEnvFrom (a fuller sketch of the expected entry follows the list below):
asEnvFrom:
- init-broker-cluster
- broker
- init-pulsar-client-config
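For context, here is a minimal sketch of what such an entry could look like with asEnvFrom populated. The field values other than the asEnvFrom list are copied from the broker-env configSpec in the ClusterDefinition dump above; the exact shape in the fixed chart version may differ.
# Hypothetical corrected configSpecs entry for the pulsar-broker component.
# asEnvFrom is assumed to inject the rendered env template into the listed
# containers, so variables such as zookeeperSVC resolve instead of the
# literal "$(zookeeperSVC)" seen in the init-broker-cluster log above.
configSpecs:
  - name: broker-env
    constraintRef: pulsar-env-constraints
    keys:
      - conf
    namespace: kb-system
    templateRef: pulsar-broker-env-tpl
    volumeName: broker-env
    asEnvFrom:
      - init-broker-cluster
      - broker
      - init-pulsar-client-config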
The upgrade order of the addon and the KubeBlocks operator matters: the KubeBlocks operator deployment must be upgraded first, and the addon only afterwards. If the addon is upgraded first, the old KubeBlocks operator has trouble reconciling the new addon: it reads the new addon CR, deserializes it into memory with the old types, loses the fields that were added in the new version, and then patches the stripped object back to the API server.
Currently kbcli uses the helm upgrade command, so the order of the updates cannot be controlled.
I will fix it. Before the upgrade, kbcli will scale the KubeBlocks deployment replicas to 0, which prevents the old KubeBlocks version from reconciling the new CRDs and CRs.
The KubeBlocks upgrade documentation should also be updated.
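As a stopgap until kbcli enforces this ordering, the same effect can be reproduced manually. This is only a sketch: it assumes the operator Deployment is named kubeblocks in the kb-system namespace (the namespace used by the addon templates above), and the chart references are left as placeholders.
# Stop the old operator so it cannot reconcile addon CRs with outdated types.
kubectl scale deployment kubeblocks -n kb-system --replicas=0
# Upgrade the KubeBlocks operator (and its CRDs) first...
helm upgrade kubeblocks <kubeblocks-chart> -n kb-system
# ...then upgrade the addon chart (release name from the annotations above).
helm upgrade kb-addon-pulsar <pulsar-addon-chart> -n kb-system
# Bring the operator back up.
kubectl scale deployment kubeblocks -n kb-system --replicas=1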
After upgrading KubeBlocks from 0.5.3 to 0.6.0-beta.21, creating a Pulsar cluster fails because some pods are stuck initializing.
1. Install KubeBlocks 0.5.3, upgrade to 0.6.0, then create the Pulsar cluster with the following commands