temporalio/helm-charts: Temporal Helm charts

Helm installation failure with opensearch as visibilitystore and postgres as defaultstore #471

Closed. prashanthzen closed this issue 1 month ago.

prashanthzen commented 4 months ago

Is it possible to deploy Temporal with Postgres as the primary datastore and OpenSearch as the visibility datastore? I'm trying to deploy the Helm chart (version 1.22.4) with the values.yaml included below, but I keep running into the following error:

Error: INSTALLATION FAILED: execution error at (temporal/charts/temporal/templates/server-deployment.yaml:37:28): Please specify cassandra port for visibility store
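The message mentions cassandra even though no cassandra settings appear in the values because the chart resolves each persistence store to a driver, and when none of the top-level store flags is enabled the visibility store falls back to cassandra, whose guard then demands a port. A simplified sketch of that kind of guard in server-deployment.yaml (illustrative only, not the chart's literal template; the "temporal.persistence.driver" helper name is hypothetical):

{{- $driver := include "temporal.persistence.driver" (list $ "visibility") }}
{{- if eq $driver "cassandra" }}
  {{- /* visibility resolved to the cassandra fallback, so a port is mandatory */ -}}
  {{- $port := dig "visibility" "cassandra" "port" "" $.Values.server.config.persistence }}
  {{- if not $port }}
    {{- fail "Please specify cassandra port for visibility store" }}
  {{- end }}
{{- end }}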

values.yaml:

cassandra:
  enabled: false
elasticsearch:
  enabled: false
  external: true
  host: "es.mgmt.xxx.com"
  port: "443"
  version: "v7"
  scheme: "https"
  logLevel: "error"
  username: "temporal_visibility"
  password: "<placeholder>"
  visibilityIndex: temporal-visibility
schema:
  setup:
    enabled: true
  update:
    enabled: false
debug: true
server:
  replicaCount: 3
  podAnnotations:
    linkerd.io/inject: enabled
    config.linkerd.io/proxy-cpu-request: "10m"
    config.linkerd.io/proxy-memory-limit: 1Gi
    config.linkerd.io/proxy-memory-request: 25Mi
  metrics:
    annotations:
      enabled: true
    serviceMonitor:
      enabled: true
  dynamicConfig:
    limit.blobSize.error:
    - value: 268435456
    limit.historySize.error:
    - value: 268435456
    system.transactionSizeLimit:
    - value: 268435456
    frontend.namespaceCount:
    - value: 10000
    frontend.keepAliveMaxConnectionAge:
    - value: 72h
    frontend.keepAliveMaxConnectionAgeGrace:
    - value: 60s
    frontend.keepAliveMaxConnectionIdle:
    - value: 72h
    frontend.shutdownDrainDuration:
    - value: 30s
    matching.shutdownDrainDuration:
    - value: 30s
    history.shutdownDrainDuration:
    - value: 30s
    matching.longPollExpirationInterval:
    - value: 10s
    system.enableReadVisibilityFromES:
    - value: true
      constraints: {}
  config:
    logLevel: "debug,info"
    numHistoryShards: 512
    persistence:
      defaultStore: default
      default:
        driver: sql
        sql:
          driver: postgres12
          host: db-console-pg.xxx.com
          port: 5432
          database: temporal
          user: temporal
          existingSecret: pg-temporal
          maxConns: 20
          maxConnLifetime: "1h"
  frontend:
    metrics:
      annotations:
        enabled: false
      serviceMonitor:
        enabled: true
    service:
      membershipPort: 7933
    podAnnotations:
      linkerd.io/inject: enabled
      config.linkerd.io/proxy-cpu-request: "10m"
      config.linkerd.io/proxy-memory-limit: 1Gi
      config.linkerd.io/proxy-memory-request: 25Mi
      config.linkerd.io/skip-outbound-ports: 7933,7934,7935,7936,7939
      config.linkerd.io/skip-inbound-ports: "7933"
    resources:
      requests:
        cpu: 100m
        memory: 256Mi
      limits:
        memory: 1Gi
  history:
    metrics:
      annotations:
        enabled: false
      serviceMonitor:
        enabled: true
    service:
      membershipPort: 7934
    podAnnotations:
      linkerd.io/inject: enabled
      config.linkerd.io/proxy-cpu-request: "10m"
      config.linkerd.io/proxy-memory-limit: 1Gi
      config.linkerd.io/proxy-memory-request: 25Mi
      config.linkerd.io/skip-outbound-ports: 7933,7934,7935,7936,7939
      config.linkerd.io/skip-inbound-ports: "7934"
    resources:
      requests:
        cpu: 100m
        memory: 256Mi
      limits:
        memory: 1Gi
  matching:
    metrics:
      annotations:
        enabled: false
      serviceMonitor:
        enabled: true
    service:
      membershipPort: 7935
    podAnnotations:
      linkerd.io/inject: enabled
      config.linkerd.io/proxy-cpu-request: "10m"
      config.linkerd.io/proxy-memory-limit: 1Gi
      config.linkerd.io/proxy-memory-request: 25Mi
      config.linkerd.io/skip-outbound-ports: 7933,7934,7935,7936,7939
      config.linkerd.io/skip-inbound-ports: "7935"
    resources:
      requests:
        cpu: 100m
        memory: 256Mi
      limits:
        memory: 1Gi
  worker:
    metrics:
      annotations:
        enabled: false
      serviceMonitor:
        enabled: true
    service:
      membershipPort: 7939
    podAnnotations:
      linkerd.io/inject: enabled
      config.linkerd.io/proxy-cpu-request: "10m"
      config.linkerd.io/proxy-memory-limit: 1Gi
      config.linkerd.io/proxy-memory-request: 25Mi
      config.linkerd.io/skip-outbound-ports: 7933,7934,7935,7936,7939
      config.linkerd.io/skip-inbound-ports: "7939"
    resources:
      requests:
        cpu: 100m
        memory: 256Mi
      limits:
        memory: 1Gi
web:
  metrics:
    annotations:
      enabled: false
    serviceMonitor:
      enabled: true
  replicaCount: 3
  podAnnotations:
    linkerd.io/inject: enabled
    config.linkerd.io/proxy-cpu-request: "10m"
    config.linkerd.io/proxy-memory-limit: 1Gi
    config.linkerd.io/proxy-memory-request: 25Mi
  resources:
    requests:
      cpu: 10m
      memory: 256Mi
    limits:
      memory: 1Gi
  ingress:
    className: "ingress-zero"
    enabled: true
    hosts:
      - temporal.xxx.com
    tls:
      - hosts:
        - temporal.xxx.com
        secretName: ingress-self-signed-tls
prometheus:
  enabled: false
grafana:
  enabled: false
robholland commented 1 month ago

You need to set:

postgresql:
  enabled: true
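For anyone hitting the same wall, here is a minimal sketch of the full combination for this setup (Postgres as the default store, an external OpenSearch-compatible cluster for visibility), reusing the placeholder hosts from the report. The enabled: false plus external: true pattern for an existing Elasticsearch-compatible cluster follows the chart's README:

cassandra:
  enabled: false
postgresql:
  enabled: true          # without this, store resolution falls back to cassandra
elasticsearch:
  enabled: false         # don't deploy the bundled Elasticsearch chart
  external: true         # point the server at the existing cluster instead
  host: "es.mgmt.xxx.com"
  port: "443"
  scheme: "https"
  version: "v7"
  visibilityIndex: temporal-visibility
server:
  config:
    persistence:
      default:
        driver: sql
        sql:
          driver: postgres12
          host: db-console-pg.xxx.com
          port: 5432
          database: temporal
          user: temporal
          existingSecret: pg-temporal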
VLZZZ commented 3 weeks ago

@robholland I'm not sure, but I believe my issue #508 is about the same thing.

BTW, I've tried adding

postgresql:
  enabled: true

as proposed, but it doesn't help; I believe the problem affects a different part of the chart's mechanics.
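When a values change like this doesn't seem to take effect, one way to see what the chart actually resolved is to render the manifests locally (for example with helm template -f values.yaml --show-only on the server configmap template) and check which datastore visibilityStore points at. A sketch of the fragment you would hope to see in the rendered server config, assuming the standard Temporal server config layout and the chart's es-visibility store name:

persistence:
  defaultStore: default
  visibilityStore: es-visibility    # should reference the elasticsearch datastore,
                                    # not a cassandra-backed one
  datastores:
    default:
      sql:
        pluginName: "postgres12"
        databaseName: "temporal"
        connectAddr: "db-console-pg.xxx.com:5432"
    es-visibility:
      elasticsearch:
        version: "v7"
        url:
          scheme: "https"
          host: "es.mgmt.xxx.com:443"
        indices:
          visibility: temporal-visibility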