hashicorp/vault-helm

Helm chart to install Vault and other associated components.

OpenShift route has servicePort and TLS hardcoded, and TLS is incorrect if the endpoint is not secure #490

Open rgordill opened 3 years ago

rgordill commented 3 years ago

Describe the bug
server-route.yaml has servicePort hardcoded (8200); it should be configurable, equivalent to server-ingress.yaml. Additionally, passthrough is selected for TLS termination, but by default 8200 is not a secure port, so the same logic as for the ingress should apply.
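The relevant part of the template (a simplified sketch reconstructed from the chart, with helper logic omitted) looks roughly like this; both the port and the TLS termination are fixed:

kind: Route
apiVersion: route.openshift.io/v1
metadata:
  name: {{ template "vault.fullname" . }}
spec:
  host: {{ .Values.server.route.host }}
  to:
    kind: Service
    name: {{ template "vault.fullname" . }}
  port:
    targetPort: 8200          # hardcoded; ignores server.service.port
  tls:
    termination: passthrough  # hardcoded; wrong when global.tlsDisable=true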

To Reproduce
Steps to reproduce the behavior:

  1. Install the chart on OpenShift with server.route.enabled=true and ui.enabled=true
  2. Try to access the URL

When the route's tls stanza is deleted, the UI is accessible without any issues.
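As a temporary workaround, the tls stanza can be stripped from the generated Route; a sketch, assuming the Route is named vault in the release namespace:

oc patch route vault --type=json -p '[{"op": "remove", "path": "/spec/tls"}]'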

Expected behavior
Route configuration consistent with the ingress.
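One possible shape for that, mirroring server.ingress (the servicePort and tls keys below are hypothetical and not supported by the chart today):

server:
  route:
    enabled: true
    host: vault-vault.apps-crc.testing
    servicePort: 8200     # hypothetical: instead of the hardcoded targetPort
    tls:
      termination: edge   # hypothetical: instead of hardcoded passthrough; omit for plain HTTP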

Environment

Chart values:

csi:
  daemonSet:
    annotations: {}
    updateStrategy:
      maxUnavailable: ""
      type: RollingUpdate
  debug: false
  enabled: false
  image:
    pullPolicy: IfNotPresent
    repository: hashicorp/vault-csi-provider
    tag: 0.1.0
  livenessProbe:
    failureThreshold: 2
    initialDelaySeconds: 5
    periodSeconds: 5
    successThreshold: 1
    timeoutSeconds: 3
  pod:
    annotations: {}
  readinessProbe:
    failureThreshold: 2
    initialDelaySeconds: 5
    periodSeconds: 5
    successThreshold: 1
    timeoutSeconds: 3
  resources: {}
  serviceAccount:
    annotations: {}
  volumeMounts: null
  volumes: null
global:
  enabled: true
  imagePullSecrets: []
  openshift: true
  psp:
    annotations: |
      seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default
      apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
      seccomp.security.alpha.kubernetes.io/defaultProfileName:  runtime/default
      apparmor.security.beta.kubernetes.io/defaultProfileName:  runtime/default
    enable: false
  tlsDisable: true
injector:
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app.kubernetes.io/name: {{ template "vault.name" . }}-agent-injector
              app.kubernetes.io/instance: "{{ .Release.Name }}"
              component: webhook
          topologyKey: kubernetes.io/hostname
  agentImage:
    repository: vault
    tag: 1.7.0
  annotations: {}
  authPath: auth/kubernetes
  certs:
    caBundle: ""
    certName: tls.crt
    keyName: tls.key
    secretName: null
  enabled: true
  externalVaultAddr: ""
  extraEnvironmentVars: {}
  extraLabels: {}
  failurePolicy: Ignore
  image:
    pullPolicy: IfNotPresent
    repository: hashicorp/vault-k8s
    tag: 0.9.0
  leaderElector:
    enabled: true
    image:
      repository: gcr.io/google_containers/leader-elector
      tag: "0.4"
    ttl: 60s
  logFormat: standard
  logLevel: info
  metrics:
    enabled: false
  namespaceSelector: {}
  nodeSelector: null
  objectSelector: {}
  priorityClassName: ""
  replicas: 1
  resources: {}
  revokeOnShutdown: false
  service:
    annotations: {}
  tolerations: null
server:
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app.kubernetes.io/name: {{ template "vault.name" . }}
              app.kubernetes.io/instance: "{{ .Release.Name }}"
              component: server
          topologyKey: kubernetes.io/hostname
  annotations: {}
  auditStorage:
    accessMode: ReadWriteOnce
    annotations: {}
    enabled: false
    mountPath: /vault/audit
    size: 10Gi
    storageClass: null
  authDelegator:
    enabled: true
  dataStorage:
    accessMode: ReadWriteOnce
    annotations: {}
    enabled: true
    mountPath: /vault/data
    size: 10Gi
    storageClass: null
  dev:
    devRootToken: root
    enabled: false
  extraArgs: ""
  extraContainers: null
  extraEnvironmentVars: {}
  extraInitContainers: null
  extraLabels: {}
  extraSecretEnvironmentVars: []
  extraVolumes: []
  ha:
    apiAddr: null
    config: |
      ui = true

      listener "tcp" {
        tls_disable = 1
        address = "[::]:8200"
        cluster_address = "[::]:8201"
      }
      storage "consul" {
        path = "vault"
        address = "HOST_IP:8500"
      }

      service_registration "kubernetes" {}

      # Example configuration for using auto-unseal, using Google Cloud KMS. The
      # GKMS keys must already exist, and the cluster must have a service account
      # that is authorized to access GCP KMS.
      #seal "gcpckms" {
      #   project     = "vault-helm-dev-246514"
      #   region      = "global"
      #   key_ring    = "vault-helm-unseal-kr"
      #   crypto_key  = "vault-helm-unseal-key"
      #}
    disruptionBudget:
      enabled: true
      maxUnavailable: null
    enabled: false
    raft:
      config: |
        ui = true

        listener "tcp" {
          tls_disable = 1
          address = "[::]:8200"
          cluster_address = "[::]:8201"
        }

        storage "raft" {
          path = "/vault/data"
        }

        service_registration "kubernetes" {}
      enabled: false
      setNodeId: false
    replicas: 3
  image:
    pullPolicy: IfNotPresent
    repository: vault
    tag: 1.7.0
  ingress:
    annotations: {}
    enabled: false
    hosts:
    - host: chart-example.local
      paths: []
    labels: {}
    tls: []
  livenessProbe:
    enabled: false
    failureThreshold: 2
    initialDelaySeconds: 60
    path: /v1/sys/health?standbyok=true
    periodSeconds: 5
    successThreshold: 1
    timeoutSeconds: 3
  networkPolicy:
    egress: []
    enabled: false
  nodeSelector: null
  postStart: []
  preStopSleepSeconds: 5
  priorityClassName: ""
  readinessProbe:
    enabled: true
    failureThreshold: 2
    initialDelaySeconds: 5
    periodSeconds: 5
    successThreshold: 1
    timeoutSeconds: 3
  resources: {}
  route:
    annotations: {}
    enabled: true
    host: vault-vault.apps-crc.testing
    labels: {}
  service:
    annotations: {}
    enabled: true
    port: 8200
    targetPort: 8200
  serviceAccount:
    annotations: {}
    create: true
    name: ""
  shareProcessNamespace: false
  standalone:
    config: |
      ui = true

      listener "tcp" {
        tls_disable = 1
        address = "[::]:8200"
        cluster_address = "[::]:8201"
      }
      storage "file" {
        path = "/vault/data"
      }

      # Example configuration for using auto-unseal, using Google Cloud KMS. The
      # GKMS keys must already exist, and the cluster must have a service account
      # that is authorized to access GCP KMS.
      #seal "gcpckms" {
      #   project     = "vault-helm-dev"
      #   region      = "global"
      #   key_ring    = "vault-helm-unseal-kr"
      #   crypto_key  = "vault-helm-unseal-key"
      #}
    enabled: '-'
  statefulSet:
    annotations: {}
  tolerations: null
  updateStrategyType: OnDelete
  volumeMounts: null
  volumes: null
ui:
  activeVaultPodOnly: false
  annotations: {}
  enabled: true
  externalPort: 8200
  publishNotReadyAddresses: true
  serviceNodePort: null
  serviceType: ClusterIP


slauger commented 3 years ago

In general, the Helm chart lacks the ability to customize the ingress/route for OpenShift; for example, it is currently not possible to switch to a reencrypt route.

Also, I would prefer to use an Ingress object, because it has some advantages over the Route object (e.g. reading the TLS certificate from an external secret).
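For illustration, the chart's standard Ingress values can already express this; the secret name below is hypothetical:

server:
  ingress:
    enabled: true
    hosts:
      - host: chart-example.local
        paths: []
    tls:
      - secretName: vault-tls   # hypothetical TLS secret, managed outside the chart
        hosts:
          - chart-example.local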

OpenShift 4 automatically creates Route objects when you create an Ingress object, but the chart currently ignores the Ingress configuration when global.openshift is true.

https://docs.openshift.com/container-platform/4.6/networking/routes/route-configuration.html#nw-ingress-creating-a-route-via-an-ingress_route-configuration

This would allow something like the following:

global:
  openshift: true

server:
  ingress:
    enabled: true
    annotations:
      route.openshift.io/termination: "reencrypt" 
    hosts:
      - host: chart-example.local
        paths: []
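OpenShift's ingress-to-route controller would then derive roughly this Route from the Ingress (a sketch; the generated name and owner references are omitted):

apiVersion: route.openshift.io/v1
kind: Route
metadata:
  name: vault-example   # actual name is generated from the owning Ingress
spec:
  host: chart-example.local
  to:
    kind: Service
    name: vault
  tls:
    termination: reencrypt   # taken from the route.openshift.io/termination annotation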
slauger commented 3 years ago

So maybe an override to enable the Ingress object would be a solution?
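For example, a hypothetical override (not in the chart today) could render the Ingress even on OpenShift:

global:
  openshift: true

server:
  route:
    enabled: false
  ingress:
    enabled: true   # hypothetical: the chart would need to stop gating the Ingress on global.openshift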