Open xujiongzi opened 1 week ago
Hi @xujiongzi ,
I was not able to reproduce the issue using the values below (these are the values you provided, just removing persistence and node-selector):
image:
registry: docker.io
repository: bitnami/mysql
tag: 8.4.3-debian-12-r0
digest: ""
pullPolicy: IfNotPresent
pullSecrets: []
debug: false
architecture: replication
auth:
rootPassword: "123456"
createDatabase: false
database: "my_database"
username: ""
password: ""
replicationUser: replicator
replicationPassword: "replicator"
primary:
name: master
command: []
args: []
lifecycleHooks: {}
automountServiceAccountToken: false
hostAliases: []
enableMySQLX: true
configuration: |-
[mysqld]
authentication_policy='{{- .Values.auth.authenticationPolicy | default "* ,," }}'
skip-name-resolve
explicit_defaults_for_timestamp
basedir=/opt/bitnami/mysql
plugin_dir=/opt/bitnami/mysql/lib/plugin
port={{ .Values.primary.containerPorts.mysql }}
mysqlx={{ ternary 1 0 .Values.primary.enableMySQLX }}
mysqlx_port={{ .Values.primary.containerPorts.mysqlx }}
socket=/opt/bitnami/mysql/tmp/mysql.sock
datadir=/bitnami/mysql/data
tmpdir=/opt/bitnami/mysql/tmp
max_allowed_packet=16M
bind-address=*
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
log-error=/opt/bitnami/mysql/logs/mysqld.log
character-set-server=UTF8
slow_query_log=0
long_query_time=10.0
[client]
port={{ .Values.primary.containerPorts.mysql }}
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/lib/plugin
[manager]
port={{ .Values.primary.containerPorts.mysql }}
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
containerPorts:
mysql: 3306
mysqlx: 33060
livenessProbe:
enabled: true
initialDelaySeconds: 45
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 3
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 45
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 3
successThreshold: 1
startupProbe:
enabled: true
initialDelaySeconds: 55
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 10
successThreshold: 1
service:
type: NodePort
ports:
mysql: 3306
mysqlx: 33060
nodePorts:
mysql: "30360"
mysqlx: "30361"
secondary:
name: slave
replicaCount: 1
enableMySQLX: true
configuration: |-
[mysqld]
authentication_policy='{{- .Values.auth.authenticationPolicy | default "* ,," }}'
skip-name-resolve
explicit_defaults_for_timestamp
basedir=/opt/bitnami/mysql
plugin_dir=/opt/bitnami/mysql/lib/plugin
port={{ .Values.secondary.containerPorts.mysql }}
mysqlx={{ ternary 1 0 .Values.secondary.enableMySQLX }}
mysqlx_port={{ .Values.secondary.containerPorts.mysqlx }}
socket=/opt/bitnami/mysql/tmp/mysql.sock
datadir=/bitnami/mysql/data
tmpdir=/opt/bitnami/mysql/tmp
max_allowed_packet=16M
bind-address=0.0.0.0
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
log-error=/opt/bitnami/mysql/logs/mysqld.log
character-set-server=UTF8
slow_query_log=0
long_query_time=10.0
[client]
port={{ .Values.secondary.containerPorts.mysql }}
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/lib/plugin
[manager]
port={{ .Values.secondary.containerPorts.mysql }}
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
containerPorts:
mysql: 3306
mysqlx: 33060
livenessProbe:
enabled: true
initialDelaySeconds: 45
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 3
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 45
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 3
successThreshold: 1
startupProbe:
enabled: true
initialDelaySeconds: 55
periodSeconds: 10
timeoutSeconds: 1
failureThreshold: 15
successThreshold: 1
service:
type: NodePort
ports:
mysql: 3306
mysqlx: 33060
nodePorts:
mysql: "30370"
mysqlx: "30371"
The secondary pod was successfully initiated, and I could access it with mysql -u root -p123456
.
Could you double-check that you do not have leftover PVCs from a previous installation?
Hi @xujiongzi ,
I was not able to reproduce the issue using the values below (these are the values you provided, just removing persistence and node-selector):
image: registry: docker.io repository: bitnami/mysql tag: 8.4.3-debian-12-r0 digest: "" pullPolicy: IfNotPresent pullSecrets: [] debug: false architecture: replication auth: rootPassword: "123456" createDatabase: false database: "my_database" username: "" password: "" replicationUser: replicator replicationPassword: "replicator" primary: name: master command: [] args: [] lifecycleHooks: {} automountServiceAccountToken: false hostAliases: [] enableMySQLX: true configuration: |- [mysqld] authentication_policy='{{- .Values.auth.authenticationPolicy | default "* ,," }}' skip-name-resolve explicit_defaults_for_timestamp basedir=/opt/bitnami/mysql plugin_dir=/opt/bitnami/mysql/lib/plugin port={{ .Values.primary.containerPorts.mysql }} mysqlx={{ ternary 1 0 .Values.primary.enableMySQLX }} mysqlx_port={{ .Values.primary.containerPorts.mysqlx }} socket=/opt/bitnami/mysql/tmp/mysql.sock datadir=/bitnami/mysql/data tmpdir=/opt/bitnami/mysql/tmp max_allowed_packet=16M bind-address=* pid-file=/opt/bitnami/mysql/tmp/mysqld.pid log-error=/opt/bitnami/mysql/logs/mysqld.log character-set-server=UTF8 slow_query_log=0 long_query_time=10.0 [client] port={{ .Values.primary.containerPorts.mysql }} socket=/opt/bitnami/mysql/tmp/mysql.sock default-character-set=UTF8 plugin_dir=/opt/bitnami/mysql/lib/plugin [manager] port={{ .Values.primary.containerPorts.mysql }} socket=/opt/bitnami/mysql/tmp/mysql.sock pid-file=/opt/bitnami/mysql/tmp/mysqld.pid containerPorts: mysql: 3306 mysqlx: 33060 livenessProbe: enabled: true initialDelaySeconds: 45 periodSeconds: 10 timeoutSeconds: 1 failureThreshold: 3 successThreshold: 1 readinessProbe: enabled: true initialDelaySeconds: 45 periodSeconds: 10 timeoutSeconds: 1 failureThreshold: 3 successThreshold: 1 startupProbe: enabled: true initialDelaySeconds: 55 periodSeconds: 10 timeoutSeconds: 1 failureThreshold: 10 successThreshold: 1 service: type: NodePort ports: mysql: 3306 mysqlx: 33060 nodePorts: mysql: "30360" mysqlx: "30361" secondary: name: slave 
replicaCount: 1 enableMySQLX: true configuration: |- [mysqld] authentication_policy='{{- .Values.auth.authenticationPolicy | default "* ,," }}' skip-name-resolve explicit_defaults_for_timestamp basedir=/opt/bitnami/mysql plugin_dir=/opt/bitnami/mysql/lib/plugin port={{ .Values.secondary.containerPorts.mysql }} mysqlx={{ ternary 1 0 .Values.secondary.enableMySQLX }} mysqlx_port={{ .Values.secondary.containerPorts.mysqlx }} socket=/opt/bitnami/mysql/tmp/mysql.sock datadir=/bitnami/mysql/data tmpdir=/opt/bitnami/mysql/tmp max_allowed_packet=16M bind-address=0.0.0.0 pid-file=/opt/bitnami/mysql/tmp/mysqld.pid log-error=/opt/bitnami/mysql/logs/mysqld.log character-set-server=UTF8 slow_query_log=0 long_query_time=10.0 [client] port={{ .Values.secondary.containerPorts.mysql }} socket=/opt/bitnami/mysql/tmp/mysql.sock default-character-set=UTF8 plugin_dir=/opt/bitnami/mysql/lib/plugin [manager] port={{ .Values.secondary.containerPorts.mysql }} socket=/opt/bitnami/mysql/tmp/mysql.sock pid-file=/opt/bitnami/mysql/tmp/mysqld.pid containerPorts: mysql: 3306 mysqlx: 33060 livenessProbe: enabled: true initialDelaySeconds: 45 periodSeconds: 10 timeoutSeconds: 1 failureThreshold: 3 successThreshold: 1 readinessProbe: enabled: true initialDelaySeconds: 45 periodSeconds: 10 timeoutSeconds: 1 failureThreshold: 3 successThreshold: 1 startupProbe: enabled: true initialDelaySeconds: 55 periodSeconds: 10 timeoutSeconds: 1 failureThreshold: 15 successThreshold: 1 service: type: NodePort ports: mysql: 3306 mysqlx: 33060 nodePorts: mysql: "30370" mysqlx: "30371"
The secondary pod was successfully initiated, and I could access it with
mysql -u root -p123456
. Could you double-check that you do not have leftover PVCs from a previous installation?
No, every time I try again, I run kubectl delete ns mysql to ensure that the PVC and PV are deleted, and I also delete the persistent files on the NFS server. Perhaps you should try using the same persistence setup as me? Before the issue I reported here, I had encountered other problems caused by NFS, such as primary and secondary startup errors like "I/O cannot be written, xxx bytes need to be written but only 0 bytes have been written, please check if the disk is full". It was only after I replaced the NFS provisioner image that this problem was resolved.
Hi @xujiongzi ,
It is true that I remember some permission issues with NFS in the past. However, I have tried the following without any issues:
$ helm install --name nfs-server-provisioner stable/nfs-server-provisioner --set persistence.enabled=true,persistence.size=10Gi
...
persistence:
enabled: true
storageClass: "nfs"
accessModes:
- ReadWriteMany
size: 8Gi
...
Hi @dgomezleon , I don't know why I'm the only one who can't succeed, but I see that you seem to have installed stable/nfs-server-provisioner as an NFS server, while my NFS server is installed using yum install nfs-utils rpcbind on each node before using chainguard/nfs-subdir-external-provisioner as my NFS client (this is how I understand my installation, I don't know if it's correct?) Do you have any other good troubleshooting suggestions? At present, I can't think of how to troubleshoot, even though I used different versions of charts
Hi @xujiongzi
The issue may not be directly related to the Bitnami Helm chart, but rather to how the application is being configured in your specific environment, or tied to a particular scenario that is not easy to reproduce on our side.
With that said, we'll keep this ticket open until the stale bot automatically closes it, in case someone from the community contributes valuable insights.
Name and Version
bitnami/mysql 11.1.19
What architecture are you using?
arm64
What steps will reproduce the bug?
helm install mysql ./values.yaml -n mysql --create-namespace
Are you using any custom parameters or values?
What is the expected behavior?
What do you see instead?
primary pod livenessProbe, readinessProbe, and startupProbe succeed
secondary pod livenessProbe fails
When I modify password_aux="" in templates/secondary/statefulset.yaml, the secondary pod livenessProbe, readinessProbe, and startupProbe can succeed, and the secondary pod starts successfully (I don't know why it didn't use the primary root password)
[root@server mysql]# kubectl exec -it mysql-slave-0 -n mysql /bin/bash kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. Defaulted container "mysql" out of: mysql, preserve-logs-symlinks (init) I have no name!@mysql-slave-0:/$ mysql -u root -p Enter password: Welcome to the MySQL monitor. Commands end with ; or \g. Your MySQL connection id is 94 Server version: 8.4.3 Source distribution
Copyright (c) 2000, 2024, Oracle and/or its affiliates.
Oracle is a registered trademark of Oracle Corporation and/or its affiliates. Other names may be trademarks of their respective owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> use mysql; Reading table information for completion of table and column names You can turn off this feature to get a quicker startup with -A
Database changed mysql> select host,user from user; +-----------+------------------+ | host | user | +-----------+------------------+ | localhost | mysql.infoschema | | localhost | mysql.session | | localhost | mysql.sys | +-----------+------------------+ 3 rows in set (0.00 sec)
mysql> show replica status\G; Empty set (0.01 sec)
ERROR: No query specified
[root@server nfs-provisioner]# cat /etc/centos-release CentOS Stream release 9
[root@server mysql]# helm version version.BuildInfo{Version:"v3.16.2", GitCommit:"13654a52f7c70a143b1dd51416d633e1071faffb", GitTreeState:"clean", GoVersion:"go1.22.7"}
[root@server mysql]# k3s --version k3s version v1.30.5+k3s1 (9b586704) go version go1.22.6
apiVersion: apps/v1 kind: Deployment metadata: name: nfs-client-provisioner labels: app: nfs-client-provisioner namespace: kube-system spec: replicas: 1 selector: matchLabels: app: nfs-client-provisioner strategy: type: Recreate template: metadata: labels: app: nfs-client-provisioner spec: serviceAccountName: nfs-provisioner nodeSelector: node-type: server containers:
imagePullPolicy: IfNotPresent volumeMounts:
value: /nfs volumes:
apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: nfs-storage provisioner: nfs-provisioner parameters:
archiveOnDelete: "true" reclaimPolicy: Delete allowVolumeExpansion: true volumeBindingMode: Immediate mountOptions: