Dear developers,

Unsetting a Helm chart default from values.yaml does not work in Fleet the way it does in native Helm. In this example I want to unset the controller image digest via `digest: null`. It works when I convert the fleet.yaml values to a regular Helm values file and deploy via `helm upgrade -n nginx-extern nginx-extern repo/ingress-nginx -f manual_install.yaml`; the default digest from the chart's values.yaml is properly unset there.
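For reference, native Helm treats an explicit `null` in user-supplied values as an instruction to delete that key when coalescing with the chart defaults, so the pinned digest never reaches the templates. A minimal sketch of the expected native behavior (using the `repo` chart alias and values file from above):

```sh
# The chart's default values.yaml pins the controller image by digest;
# an explicit "digest: null" in manual_install.yaml deletes that key
# during Helm's value coalescing.
helm template nginx-extern repo/ingress-nginx -f manual_install.yaml | grep 'image:'
# expected: image: "asdf1s:5000/k8s.gcr.io/ingress-nginx/controller:v0.43.0"
#           (no @sha256:... suffix)
```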
To reproduce:
```yaml
---
defaultNamespace: nginx-extern
helm:
  releaseName: nginx-extern
  chart: "http://1.1.1.111:7070/charts/ingress-nginx-3.21.0.tgz"
  version: 3.21.0
  values:
    controller:
      name: controller
      image:
        repository: asdf1s:5000/k8s.gcr.io/ingress-nginx/controller
        tag: "v0.43.0"
        digest: null
      # Name of the ingress class to route through this controller
      ingressClass: nginx-extern
      extraArgs:
        default-ssl-certificate: "nginx-extern/nginx-extern-tls"
      # The update strategy to apply to the Deployment or DaemonSet
      updateStrategy:
        rollingUpdate:
          maxUnavailable: 1
        type: RollingUpdate
      replicaCount: 3
      admissionWebhooks:
        patch:
          image:
            repository: asdf1s:5000/docker.io/jettech/kube-webhook-certgen
            tag: v1.5.1
      affinity:
        # don't schedule multiple nginx-ingress pods on the same node if possible
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - nginx-extern
                topologyKey: kubernetes.io/hostname
      resources:
        limits:
          cpu: 1
          memory: 500Mi
        requests:
          cpu: 100m
          memory: 500Mi
      service:
        enableHttp: false
        type: NodePort
        nodePorts:
          http: 33080
          https: 33443
      metrics:
        enabled: true
        serviceMonitor:
          enabled: true
          namespace: prometheus-operator
        prometheusRule:
          enabled: true
          namespace: prometheus-operator
          rules:
            - alert: NGINXConfigFailed
              expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
              for: 1s
              labels:
                severity: critical
              annotations:
                description: bad ingress config - nginx config test failed
                summary: uninstall the latest ingress changes to allow config reloads to resume
            - alert: NGINXCertificateExpiry
              expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
              for: 1s
              labels:
                severity: critical
              annotations:
                description: ssl certificate(s) will expire in less than a week
                summary: renew expiring certificates to avoid downtime
            - alert: NGINXTooMany500s
              expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
              for: 1m
              labels:
                severity: warning
              annotations:
                description: Too many 5XXs
                summary: More than 5% of all requests returned 5XX, this requires your attention
            - alert: NGINXTooMany400s
              expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
              for: 1m
              labels:
                severity: warning
              annotations:
                description: Too many 4XXs
                summary: More than 5% of all requests returned 4XX, this requires your attention
    # configuration for default 404 backend
    defaultBackend:
      enabled: true
      name: default-backend
      image:
        repository: asdf1s:5000/k8s.gcr.io/defaultbackend-amd64
        tag: "1.5"
      resources:
        limits:
          cpu: 1
          memory: 128Mi
        requests:
          cpu: 10m
          memory: 50Mi
    # Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266
    rbac:
      create: true
    podSecurityPolicy:
      enabled: false
    #serviceAccount:
    #  create: false
    #  name: nginx-extern
```
With Fleet, this results in:

`Image: asdf1s:5000/k8s.gcr.io/ingress-nginx/controller:v0.43.0@sha256:9bba603b99bf25f6d117cf1235b6598c16033ad027b143c90fa5b3cc583c5713`

i.e. the chart's default digest is still applied despite `digest: null`.
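(For completeness, this is how the deployed image can be checked; the deployment name below assumes the chart's default naming for this release:)

```sh
# Inspect the controller image Fleet actually deployed
# (deployment name is an assumption based on the chart's default fullname
# for release "nginx-extern"):
kubectl -n nginx-extern get deployment nginx-extern-ingress-nginx-controller \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
# -> asdf1s:5000/k8s.gcr.io/ingress-nginx/controller:v0.43.0@sha256:9bba603b...
```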
Deploying the same values via native Helm (manual_install.yaml):
```yaml
---
controller:
  name: controller
  image:
    repository: asdf1s:5000/k8s.gcr.io/ingress-nginx/controller
    tag: "v0.43.0"
    digest: null
  # Name of the ingress class to route through this controller
  ingressClass: nginx-extern
  extraArgs:
    default-ssl-certificate: "nginx-extern/nginx-extern-tls"
  # The update strategy to apply to the Deployment or DaemonSet
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  replicaCount: 3
  admissionWebhooks:
    patch:
      image:
        repository: asdf1s:5000/docker.io/jettech/kube-webhook-certgen
        tag: v1.5.1
  affinity:
    # don't schedule multiple nginx-ingress pods on the same node if possible
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchExpressions:
                - key: app
                  operator: In
                  values:
                    - nginx-extern
            topologyKey: kubernetes.io/hostname
  resources:
    limits:
      cpu: 1
      memory: 500Mi
    requests:
      cpu: 100m
      memory: 500Mi
  service:
    enableHttp: false
    type: NodePort
    nodePorts:
      http: 33080
      https: 33443
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
      namespace: prometheus-operator
    prometheusRule:
      enabled: true
      namespace: prometheus-operator
      rules:
        - alert: NGINXConfigFailed
          expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
          for: 1s
          labels:
            severity: critical
          annotations:
            description: bad ingress config - nginx config test failed
            summary: uninstall the latest ingress changes to allow config reloads to resume
        - alert: NGINXCertificateExpiry
          expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
          for: 1s
          labels:
            severity: critical
          annotations:
            description: ssl certificate(s) will expire in less than a week
            summary: renew expiring certificates to avoid downtime
        - alert: NGINXTooMany500s
          expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
          for: 1m
          labels:
            severity: warning
          annotations:
            description: Too many 5XXs
            summary: More than 5% of all requests returned 5XX, this requires your attention
        - alert: NGINXTooMany400s
          expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
          for: 1m
          labels:
            severity: warning
          annotations:
            description: Too many 4XXs
            summary: More than 5% of all requests returned 4XX, this requires your attention
# configuration for default 404 backend
defaultBackend:
  enabled: true
  name: default-backend
  image:
    repository: asdf1s:5000/k8s.gcr.io/defaultbackend-amd64
    tag: "1.5"
  resources:
    limits:
      cpu: 1
      memory: 128Mi
    requests:
      cpu: 10m
      memory: 50Mi
# Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266
rbac:
  create: true
podSecurityPolicy:
  enabled: false
#serviceAccount:
#  create: false
#  name: nginx-extern
```
With native Helm, this results in:

`Image: asdf1s:5000/k8s.gcr.io/ingress-nginx/controller:v0.43.0`

Here the default digest is properly unset.
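The difference comes down to whether the chart still sees a digest at render time. The ingress-nginx templates build the controller image reference roughly like this (a simplified paraphrase of the chart's deployment template, not verbatim):

```yaml
# controller deployment (simplified): the @digest suffix is only emitted
# when .Values.controller.image.digest is truthy
image: "{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }}{{- if .Values.controller.image.digest }}@{{ .Values.controller.image.digest }}{{- end }}"
```

Native Helm deletes the key when it coalesces an explicit `null`, so the `if` evaluates false; Fleet apparently drops or ignores the `null` before handing the values to Helm, so the chart's default digest survives and gets appended.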
Environment: Rancher 2.5.5, Fleet 0.3.3
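Until Fleet passes explicit nulls through to Helm, a possible workaround (untested on my side, and assuming the truthiness check sketched above) would be to set the digest to an empty string instead:

```yaml
# fleet.yaml excerpt: an empty string is also falsy for the template's
# "if .Values.controller.image.digest" guard, so no digest is appended
helm:
  values:
    controller:
      image:
        digest: ""
```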