Closed: JeromeLiuLly closed this issue 1 year ago.
helm template dify --set 'externalWeaviate.enabled=true' --set 'externalWeaviate.endpoint=http://weaviate.dify.svc.cluster.local' --set 'api.url.api=https://xy-dify.xingyuxinchuan.com' --set 'api.url.console=https://xy-dify.xingyuxinchuan.com' --set 'api.url.app=https://xy-dify.xingyuxinchuan.com' --set 'redis.replica.replicaCount=1' -n dify . > log.yaml
log.yaml
---
# Source: dify/charts/redis/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
  name: dify-redis
  namespace: "dify"
  labels:
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-16.13.2
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
---
# Source: dify/charts/postgresql/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
  name: dify-postgresql
  namespace: "dify"
  labels:
    app.kubernetes.io/name: postgresql
    helm.sh/chart: postgresql-12.5.6
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
type: Opaque
data:
  postgres-password: "ZGlmeWFpMTIzNDU2"
  replication-password: "N1JieVRucEh0cA=="
  # We don't auto-generate LDAP password when it's not provided as we do for other passwords
---
# Source: dify/charts/redis/templates/secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: dify-redis
  namespace: "dify"
  labels:
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-16.13.2
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
type: Opaque
data:
  redis-password: "ZGlmeWFpMTIzNDU2"
---
# Source: dify/charts/redis/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: dify-redis-configuration
  namespace: "dify"
  labels:
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-16.13.2
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
data:
  redis.conf: |-
    # User-supplied common configuration:
    # Enable AOF https://redis.io/topics/persistence#append-only-file
    appendonly yes
    # Disable RDB persistence, AOF persistence already enabled.
    save ""
    # End of common configuration
  master.conf: |-
    dir /data
    # User-supplied master configuration:
    rename-command FLUSHDB ""
    rename-command FLUSHALL ""
    # End of master configuration
  replica.conf: |-
    dir /data
    # User-supplied replica configuration:
    rename-command FLUSHDB ""
    rename-command FLUSHALL ""
    # End of replica configuration
---
# Source: dify/charts/redis/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: dify-redis-health
  namespace: "dify"
  labels:
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-16.13.2
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
data:
  ping_readiness_local.sh: |-
    #!/bin/bash
    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -h localhost \
        -p $REDIS_PORT \
        ping
    )
    if [ "$?" -eq "124" ]; then
      echo "Timed out"
      exit 1
    fi
    if [ "$response" != "PONG" ]; then
      echo "$response"
      exit 1
    fi
  ping_liveness_local.sh: |-
    #!/bin/bash
    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -h localhost \
        -p $REDIS_PORT \
        ping
    )
    if [ "$?" -eq "124" ]; then
      echo "Timed out"
      exit 1
    fi
    responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}')
    if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ] && [ "$responseFirstWord" != "MASTERDOWN" ]; then
      echo "$response"
      exit 1
    fi
  ping_readiness_master.sh: |-
    #!/bin/bash
    [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
    [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -h $REDIS_MASTER_HOST \
        -p $REDIS_MASTER_PORT_NUMBER \
        ping
    )
    if [ "$?" -eq "124" ]; then
      echo "Timed out"
      exit 1
    fi
    if [ "$response" != "PONG" ]; then
      echo "$response"
      exit 1
    fi
  ping_liveness_master.sh: |-
    #!/bin/bash
    [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
    [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -h $REDIS_MASTER_HOST \
        -p $REDIS_MASTER_PORT_NUMBER \
        ping
    )
    if [ "$?" -eq "124" ]; then
      echo "Timed out"
      exit 1
    fi
    responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}')
    if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ]; then
      echo "$response"
      exit 1
    fi
  ping_readiness_local_and_master.sh: |-
    script_dir="$(dirname "$0")"
    exit_status=0
    "$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
    "$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
    exit $exit_status
  ping_liveness_local_and_master.sh: |-
    script_dir="$(dirname "$0")"
    exit_status=0
    "$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
    "$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
    exit $exit_status
---
# Source: dify/charts/redis/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: dify-redis-scripts
  namespace: "dify"
  labels:
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-16.13.2
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
data:
  start-master.sh: |
    #!/bin/bash
    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
      cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
    fi
    if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
      cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
    fi
    ARGS=("--port" "${REDIS_PORT}")
    ARGS+=("--requirepass" "${REDIS_PASSWORD}")
    ARGS+=("--masterauth" "${REDIS_PASSWORD}")
    ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
    ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
    exec redis-server "${ARGS[@]}"
  start-replica.sh: |
    #!/bin/bash
    get_port() {
      hostname="$1"
      type="$2"
      port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
      port=${!port_var}
      if [ -z "$port" ]; then
        case $type in
          "SENTINEL")
            echo 26379
            ;;
          "REDIS")
            echo 6379
            ;;
        esac
      else
        echo $port
      fi
    }
    get_full_hostname() {
      hostname="$1"
      echo "${hostname}.${HEADLESS_SERVICE}"
    }
    REDISPORT=$(get_port "$HOSTNAME" "REDIS")
    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
    if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
      cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
    fi
    if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
      cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
    fi
    echo "" >> /opt/bitnami/redis/etc/replica.conf
    echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
    echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
    ARGS=("--port" "${REDIS_PORT}")
    ARGS+=("--replicaof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
    ARGS+=("--requirepass" "${REDIS_PASSWORD}")
    ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
    ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
    ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
    exec redis-server "${ARGS[@]}"
---
# Source: dify/charts/weaviate/templates/weaviateConfigMap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: weaviate-config
  labels:
    app.kubernetes.io/name: weaviate
    app.kubernetes.io/managed-by: helm
data:
  conf.yaml: |-
    ---
    authentication:
      anonymous_access:
        enabled: false
      apikey:
        allowed_keys:
          - WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
        enabled: true
        users:
          - hello@dify.ai
      oidc:
        enabled: false
    authorization:
      admin_list:
        enabled: true
        read_only_users: null
        users:
          - hello@dify.ai
    query_defaults:
      limit: 100
    debug: false
---
# Source: dify/templates/api-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: dify-api
data:
  # Startup mode, 'api' starts the API server.
  MODE: api
  # The log level for the application. Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
  LOG_LEVEL: INFO
  # A secret key that is used for securely signing the session cookie and encrypting sensitive information on the database. You can generate a strong key using `openssl rand -base64 42`.
  SECRET_KEY: sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
  # The base URL of console application, refers to the Console base URL of WEB service if console domain is
  # different from api or web app domain.
  # example: http://cloud.dify.ai
  CONSOLE_URL: https://xy-dify.xingyuxinchuan.com
  # The URL for Service API endpoints,refers to the base URL of the current API service if api domain is
  # different from console domain.
  # example: http://api.dify.ai
  API_URL: https://xy-dify.xingyuxinchuan.com
  # The URL for Web APP, refers to the Web App base URL of WEB service if web app domain is different from
  # console or api domain.
  # example: http://udify.app
  APP_URL: https://xy-dify.xingyuxinchuan.com
  # When enabled, migrations will be executed prior to application startup and the application will start after the migrations have completed.
  MIGRATION_ENABLED: "true"
  # The configurations of postgres database connection.
  # It is consistent with the configuration in the 'db' service below.
  DB_USERNAME: postgres
  DB_PASSWORD: difyai123456
  DB_HOST: dify-postgresql-primary
  DB_PORT: "5432"
  DB_DATABASE: dify
  # The configurations of redis connection.
  # It is consistent with the configuration in the 'redis' service below.
  REDIS_HOST: dify-redis-master
  REDIS_PORT: "6379"
  REDIS_USERNAME: ""
  REDIS_PASSWORD: "difyai123456"
  REDIS_USE_SSL: "false"
  # use redis db 0 for redis cache
  REDIS_DB: "0"
  # The configurations of session, Supported values are `sqlalchemy`. `redis`
  SESSION_TYPE: redis
  SESSION_REDIS_HOST: dify-redis-master
  SESSION_REDIS_PORT: "6379"
  SESSION_REDIS_USERNAME: ""
  SESSION_REDIS_PASSWORD: "difyai123456"
  SESSION_REDIS_USE_SSL: "false"
  # use redis db 2 for session store
  SESSION_REDIS_DB: "2"
  # The configurations of celery broker.# Use redis as the broker, and redis db 1 for celery broker.
  CELERY_BROKER_URL: redis://:difyai123456@dify-redis-master:6379/1
  # Specifies the allowed origins for cross-origin requests to the Web API, e.g. https://dify.app or * for all origins.
  WEB_API_CORS_ALLOW_ORIGINS: '*'
  # Specifies the allowed origins for cross-origin requests to the console API, e.g. https://cloud.dify.ai or * for all origins.
  CONSOLE_CORS_ALLOW_ORIGINS: '*'
  # CSRF Cookie settings
  # Controls whether a cookie is sent with cross-site requests,
  # providing some protection against cross-site request forgery attacks
  #
  # Default: `SameSite=Lax, Secure=false, HttpOnly=true`
  # This default configuration supports same-origin requests using either HTTP or HTTPS,
  # but does not support cross-origin requests. It is suitable for local debugging purposes.
  #
  # If you want to enable cross-origin support,
  # you must use the HTTPS protocol and set the configuration to `SameSite=None, Secure=true, HttpOnly=true`.
  #
  # For **production** purposes, please set `SameSite=Lax, Secure=true, HttpOnly=true`.
  COOKIE_HTTPONLY: 'true'
  COOKIE_SAMESITE: 'Lax'
  COOKIE_SECURE: 'false'
  # The type of storage to use for storing user files. Supported values are `local` and `s3`, Default: `local`
  STORAGE_TYPE: local
  # The path to the local storage directory, the directory relative the root path of API service codes or absolute path. Default: `storage` or `/home/john/storage`.
  # only available when STORAGE_TYPE is `local`.
  STORAGE_LOCAL_PATH: /app/api/storage
  # The type of vector store to use. Supported values are `weaviate`, `qdrant`.
  VECTOR_STORE: weaviate
  # The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
  WEAVIATE_ENDPOINT: "http://weaviate.dify.svc.cluster.local"
  # The Weaviate API key.
  WEAVIATE_API_KEY: WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
  # The DSN for Sentry error reporting. If not set, Sentry error reporting will be disabled.
  SENTRY_DSN: ''
  # The sample rate for Sentry events. Default: `1.0`
  SENTRY_TRACES_SAMPLE_RATE: "1.0"
  # The sample rate for Sentry profiles. Default: `1.0`
  SENTRY_PROFILES_SAMPLE_RATE: "1.0"
---
# Source: dify/templates/proxy-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: dify-proxy
data:
  proxy.conf: |-
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_http_version 1.1;
    proxy_set_header Connection "";
    proxy_buffering off;
    proxy_read_timeout 3600s;
    proxy_send_timeout 3600s;
  nginx.conf: |-
    user nginx;
    worker_processes auto;
    pid /var/run/nginx.pid;
    events {
      worker_connections 1024;
    }
    http {
      include /etc/nginx/mime.types;
      default_type application/octet-stream;
      log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
      sendfile on;
      #tcp_nopush on;
      keepalive_timeout 65;
      #gzip on;
      client_max_body_size 15M;
      include /etc/nginx/conf.d/*.conf;
    }
  default.conf: |-
    server {
      listen 80;
      server_name _;
      location /console/api {
        proxy_pass http://dify-api:5001;
        include proxy.conf;
      }
      location /api {
        proxy_pass http://dify-api:5001;
        include proxy.conf;
      }
      location /v1 {
        proxy_pass http://dify-api:5001;
        include proxy.conf;
      }
      location / {
        proxy_pass http://dify-web:3000;
        include proxy.conf;
      }
    }
---
# Source: dify/templates/worker-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: dify-worker
data:
  # worker service
  # The Celery worker for processing the queue.
  # Startup mode, 'worker' starts the Celery worker for processing the queue.
  MODE: worker
  # --- All the configurations below are the same as those in the 'api' service. ---
  # The log level for the application. Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
  LOG_LEVEL: INFO
  # A secret key that is used for securely signing the session cookie and encrypting sensitive information on the database. You can generate a strong key using `openssl rand -base64 42`.
  # same as the API service
  SECRET_KEY: sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
  # The configurations of postgres database connection.
  # It is consistent with the configuration in the 'db' service below.
  DB_USERNAME: postgres
  DB_PASSWORD: difyai123456
  DB_HOST: dify-postgresql-primary
  DB_PORT: "5432"
  DB_DATABASE: dify
  # The configurations of redis cache connection.
  REDIS_HOST: dify-redis-master
  REDIS_PORT: "6379"
  REDIS_USERNAME: ""
  REDIS_PASSWORD: "difyai123456"
  REDIS_USE_SSL: "false"
  # use redis db 0 for redis cache
  REDIS_DB: "0"
  # The configurations of celery broker.# Use redis as the broker, and redis db 1 for celery broker.
  CELERY_BROKER_URL: redis://:difyai123456@dify-redis-master:6379/1
  # The type of storage to use for storing user files. Supported values are `local` and `s3`, Default: `local`
  STORAGE_TYPE: local
  # The path to the local storage directory, the directory relative the root path of API service codes or absolute path. Default: `storage` or `/home/john/storage`.
  # only available when STORAGE_TYPE is `local`.
  STORAGE_LOCAL_PATH: /app/api/storage
  # The Vector store configurations.
  # The type of vector store to use. Supported values are `weaviate`, `qdrant`.
  VECTOR_STORE: weaviate
  # The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
  WEAVIATE_ENDPOINT: "http://weaviate.dify.svc.cluster.local"
  # The Weaviate API key.
  WEAVIATE_API_KEY: WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
---
# Source: dify/templates/pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: dify
  annotations:
    helm.sh/resource-policy: keep
  labels:
    helm.sh/chart: dify-0.14.0
    app.kubernetes.io/name: dify
    app.kubernetes.io/instance: dify
    app.kubernetes.io/version: "0.3.1"
    app.kubernetes.io/managed-by: Helm
spec:
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: 5Gi
---
# Source: dify/charts/postgresql/templates/primary/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
  name: dify-postgresql-primary-hl
  namespace: "dify"
  labels:
    app.kubernetes.io/name: postgresql
    helm.sh/chart: postgresql-12.5.6
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: primary
  annotations:
    # Use this annotation in addition to the actual publishNotReadyAddresses
    # field below because the annotation will stop being respected soon but the
    # field is broken in some versions of Kubernetes:
    # https://github.com/kubernetes/kubernetes/issues/58662
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  type: ClusterIP
  clusterIP: None
  # We want all pods in the StatefulSet to have their addresses published for
  # the sake of the other Postgresql pods even before they're ready, since they
  # have to be able to talk to each other in order to become ready.
  publishNotReadyAddresses: true
  ports:
    - name: tcp-postgresql
      port: 5432
      targetPort: tcp-postgresql
  selector:
    app.kubernetes.io/name: postgresql
    app.kubernetes.io/instance: dify
    app.kubernetes.io/component: primary
---
# Source: dify/charts/postgresql/templates/primary/svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: dify-postgresql-primary
  namespace: "dify"
  labels:
    app.kubernetes.io/name: postgresql
    helm.sh/chart: postgresql-12.5.6
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: primary
spec:
  type: ClusterIP
  sessionAffinity: None
  ports:
    - name: tcp-postgresql
      port: 5432
      targetPort: tcp-postgresql
      nodePort: null
  selector:
    app.kubernetes.io/name: postgresql
    app.kubernetes.io/instance: dify
    app.kubernetes.io/component: primary
---
# Source: dify/charts/postgresql/templates/read/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
  name: dify-postgresql-read-hl
  namespace: "dify"
  labels:
    app.kubernetes.io/name: postgresql
    helm.sh/chart: postgresql-12.5.6
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: read
  annotations:
    # Use this annotation in addition to the actual publishNotReadyAddresses
    # field below because the annotation will stop being respected soon but the
    # field is broken in some versions of Kubernetes:
    # https://github.com/kubernetes/kubernetes/issues/58662
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  type: ClusterIP
  clusterIP: None
  # We want all pods in the StatefulSet to have their addresses published for
  # the sake of the other Postgresql pods even before they're ready, since they
  # have to be able to talk to each other in order to become ready.
  publishNotReadyAddresses: true
  ports:
    - name: tcp-postgresql
      port: 5432
      targetPort: tcp-postgresql
  selector:
    app.kubernetes.io/name: postgresql
    app.kubernetes.io/instance: dify
    app.kubernetes.io/component: read
---
# Source: dify/charts/postgresql/templates/read/svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: dify-postgresql-read
  namespace: "dify"
  labels:
    app.kubernetes.io/name: postgresql
    helm.sh/chart: postgresql-12.5.6
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: read
spec:
  type: ClusterIP
  sessionAffinity: None
  ports:
    - name: tcp-postgresql
      port: 5432
      targetPort: tcp-postgresql
      nodePort: null
  selector:
    app.kubernetes.io/name: postgresql
    app.kubernetes.io/instance: dify
    app.kubernetes.io/component: read
---
# Source: dify/charts/redis/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: dify-redis-headless
  namespace: "dify"
  labels:
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-16.13.2
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
  annotations:
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: tcp-redis
      port: 6379
      targetPort: redis
  selector:
    app.kubernetes.io/name: redis
    app.kubernetes.io/instance: dify
---
# Source: dify/charts/redis/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: dify-redis-master
  namespace: "dify"
  labels:
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-16.13.2
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: master
spec:
  type: ClusterIP
  sessionAffinity: None
  ports:
    - name: tcp-redis
      port: 6379
      targetPort: redis
      nodePort: null
  selector:
    app.kubernetes.io/name: redis
    app.kubernetes.io/instance: dify
    app.kubernetes.io/component: master
---
# Source: dify/charts/redis/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: dify-redis-replicas
  namespace: "dify"
  labels:
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-16.13.2
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: replica
spec:
  type: ClusterIP
  sessionAffinity: None
  ports:
    - name: tcp-redis
      port: 6379
      targetPort: redis
      nodePort: null
  selector:
    app.kubernetes.io/name: redis
    app.kubernetes.io/instance: dify
    app.kubernetes.io/component: replica
---
# Source: dify/charts/weaviate/templates/weaviateHeadlessService.yaml
apiVersion: v1
kind: Service
metadata:
  name: weaviate-headless
  labels:
    app.kubernetes.io/name: weaviate
    app.kubernetes.io/managed-by: helm
spec:
  type: ClusterIP
  clusterIP: None
  selector:
    app: weaviate
  ports:
    - protocol: TCP
      port: 80
      targetPort: 7000
  publishNotReadyAddresses: true
---
# Source: dify/charts/weaviate/templates/weaviateService.yaml
apiVersion: v1
kind: Service
metadata:
  name: weaviate
  labels:
    app.kubernetes.io/name: weaviate
    app.kubernetes.io/managed-by: helm
spec:
  type: ClusterIP
  selector:
    app: weaviate
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 8080
---
# Source: dify/templates/api-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: dify-api
  labels:
    helm.sh/chart: dify-0.14.0
    app.kubernetes.io/name: dify
    app.kubernetes.io/instance: dify
    app.kubernetes.io/version: "0.3.1"
    app.kubernetes.io/managed-by: Helm
    component: "api"
spec:
  type: ClusterIP
  ports:
    - name: api
      port: 5001
      protocol: TCP
      targetPort: api
  selector:
    app.kubernetes.io/name: dify
    app.kubernetes.io/instance: dify
    component: "api"
---
# Source: dify/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: dify
  labels:
    helm.sh/chart: dify-0.14.0
    app.kubernetes.io/name: dify
    app.kubernetes.io/instance: dify
    app.kubernetes.io/version: "0.3.1"
    app.kubernetes.io/managed-by: Helm
    component: "proxy"
spec:
  type: ClusterIP
  ports:
    - name: dify
      port: 80
      protocol: TCP
      targetPort: dify
  selector:
    app.kubernetes.io/name: dify
    app.kubernetes.io/instance: dify
    component: "proxy"
---
# Source: dify/templates/web-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: dify-web
  labels:
    helm.sh/chart: dify-0.14.0
    app.kubernetes.io/name: dify
    app.kubernetes.io/instance: dify
    app.kubernetes.io/version: "0.3.1"
    app.kubernetes.io/managed-by: Helm
    component: "web"
spec:
  type: ClusterIP
  ports:
    - name: web
      port: 3000
      protocol: TCP
      targetPort: web
  selector:
    app.kubernetes.io/name: dify
    app.kubernetes.io/instance: dify
    component: "web"
---
# Source: dify/templates/api-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    descriptions: api
  labels:
    helm.sh/chart: dify-0.14.0
    app.kubernetes.io/name: dify
    app.kubernetes.io/instance: dify
    app.kubernetes.io/version: "0.3.1"
    app.kubernetes.io/managed-by: Helm
    component: api
    # app: dify-api
  name: dify-api
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: dify
      app.kubernetes.io/instance: dify
      component: api
  template:
    metadata:
      annotations:
      labels:
        app.kubernetes.io/name: dify
        app.kubernetes.io/instance: dify
        component: api
    spec:
      containers:
        - image: "langgenius/dify-api:0.3.1"
          imagePullPolicy: "IfNotPresent"
          name: api
          env:
            - name: CHECK_UPDATE_URL
              value: https://updates.dify.ai
          envFrom:
            - configMapRef:
                name: dify-api
          ports:
            - name: api
              containerPort: 5001
              protocol: TCP
          resources:
            {}
          volumeMounts:
            - name: app-data
              mountPath: "/app/api/storage"
              subPath:
      volumes:
        - name: app-data
          persistentVolumeClaim:
            claimName: dify
---
# Source: dify/templates/proxy-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    descriptions: nginx proxy
  labels:
    helm.sh/chart: dify-0.14.0
    app.kubernetes.io/name: dify
    app.kubernetes.io/instance: dify
    app.kubernetes.io/version: "0.3.1"
    app.kubernetes.io/managed-by: Helm
    component: proxy
    # app: dify-proxy
  name: dify-proxy
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: dify
      app.kubernetes.io/instance: dify
      component: proxy
  template:
    metadata:
      annotations:
      labels:
        app.kubernetes.io/name: dify
        app.kubernetes.io/instance: dify
        component: proxy
    spec:
      containers:
        - image: "nginx:latest"
          imagePullPolicy: "IfNotPresent"
          name: nginx
          env:
          ports:
            - name: dify
              containerPort: 80
              protocol: TCP
          resources:
            {}
          volumeMounts:
            - name: nginx
              mountPath: /etc/nginx/nginx.conf
              readOnly: true
              subPath: nginx.conf
            - name: nginx
              mountPath: /etc/nginx/proxy.conf
              readOnly: true
              subPath: proxy.conf
            - name: nginx
              mountPath: /etc/nginx/conf.d/default.conf
              readOnly: true
              subPath: default.conf
      volumes:
        - name: nginx
          configMap:
            defaultMode: 420
            name: dify-proxy
---
# Source: dify/templates/web-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    descriptions: web server
  labels:
    helm.sh/chart: dify-0.14.0
    app.kubernetes.io/name: dify
    app.kubernetes.io/instance: dify
    app.kubernetes.io/version: "0.3.1"
    app.kubernetes.io/managed-by: Helm
    component: web
    # app: dify-web
  name: dify-web
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: dify
      app.kubernetes.io/instance: dify
      component: web
  template:
    metadata:
      annotations:
      labels:
        app.kubernetes.io/name: dify
        app.kubernetes.io/instance: dify
        component: web
    spec:
      containers:
        - image: "langgenius/dify-web:0.3.1"
          imagePullPolicy: "IfNotPresent"
          name: web
          env:
            - name: EDITION
              value: SELF_HOSTED
            - name: CONSOLE_URL
              value: ""
            - name: APP_URL
              value: ""
          ports:
            - name: web
              containerPort: 3000
              protocol: TCP
          resources:
            {}
---
# Source: dify/templates/worker-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    descriptions: worker
  labels:
    helm.sh/chart: dify-0.14.0
    app.kubernetes.io/name: dify
    app.kubernetes.io/instance: dify
    app.kubernetes.io/version: "0.3.1"
    app.kubernetes.io/managed-by: Helm
    component: api
    # app: dify-worker
  name: dify-worker
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: dify
      app.kubernetes.io/instance: dify
      component: api
  template:
    metadata:
      annotations:
      labels:
        app.kubernetes.io/name: dify
        app.kubernetes.io/instance: dify
        component: api
    spec:
      containers:
        - image: "langgenius/dify-api:0.3.1"
          imagePullPolicy: "IfNotPresent"
          name: worker
          env:
          envFrom:
            - configMapRef:
                name: dify-worker
          resources:
            {}
          volumeMounts:
            - name: app-data
              mountPath: "/app/api/storage"
              subPath:
      volumes:
        - name: app-data
          persistentVolumeClaim:
            claimName: dify
---
# Source: dify/charts/postgresql/templates/primary/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: dify-postgresql-primary
  namespace: "dify"
  labels:
    app.kubernetes.io/name: postgresql
    helm.sh/chart: postgresql-12.5.6
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: primary
spec:
  replicas: 1
  serviceName: dify-postgresql-primary-hl
  updateStrategy:
    rollingUpdate: {}
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: postgresql
      app.kubernetes.io/instance: dify
      app.kubernetes.io/component: primary
  template:
    metadata:
      name: dify-postgresql-primary
      labels:
        app.kubernetes.io/name: postgresql
        helm.sh/chart: postgresql-12.5.6
        app.kubernetes.io/instance: dify
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: primary
    spec:
      serviceAccountName: default
      affinity:
        podAffinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: postgresql
                    app.kubernetes.io/instance: dify
                    app.kubernetes.io/component: primary
                topologyKey: kubernetes.io/hostname
              weight: 1
        nodeAffinity:
      securityContext:
        fsGroup: 1001
      hostNetwork: false
      hostIPC: false
      containers:
        - name: postgresql
          image: docker.io/bitnami/postgresql:15.3.0-debian-11-r7
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsUser: 1001
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: POSTGRESQL_PORT_NUMBER
              value: "5432"
            - name: POSTGRESQL_VOLUME_DIR
              value: "/bitnami/postgresql"
            - name: PGDATA
              value: "/bitnami/postgresql/data"
            # Authentication
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: dify-postgresql
                  key: postgres-password
            - name: POSTGRES_DB
              value: "dify"
            # Replication
            - name: POSTGRES_REPLICATION_MODE
              value: "master"
            - name: POSTGRES_REPLICATION_USER
              value: "repl_user"
            - name: POSTGRES_REPLICATION_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: dify-postgresql
                  key: replication-password
            - name: POSTGRES_CLUSTER_APP_NAME
              value: my_application
            # Initdb
            # Standby
            # LDAP
            - name: POSTGRESQL_ENABLE_LDAP
              value: "no"
            # TLS
            - name: POSTGRESQL_ENABLE_TLS
              value: "no"
            # Audit
            - name: POSTGRESQL_LOG_HOSTNAME
              value: "false"
            - name: POSTGRESQL_LOG_CONNECTIONS
              value: "false"
            - name: POSTGRESQL_LOG_DISCONNECTIONS
              value: "false"
            - name: POSTGRESQL_PGAUDIT_LOG_CATALOG
              value: "off"
            # Others
            - name: POSTGRESQL_CLIENT_MIN_MESSAGES
              value: "error"
            - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
              value: "pgaudit"
          ports:
            - name: tcp-postgresql
              containerPort: 5432
          livenessProbe:
            failureThreshold: 6
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            exec:
              command:
                - /bin/sh
                - -c
                - exec pg_isready -U "postgres" -d "dbname=dify" -h 127.0.0.1 -p 5432
          readinessProbe:
            failureThreshold: 6
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            exec:
              command:
                - /bin/sh
                - -c
                - -e
                - |
                  exec pg_isready -U "postgres" -d "dbname=dify" -h 127.0.0.1 -p 5432
                  [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]
          resources:
            limits: {}
            requests:
              cpu: 250m
              memory: 256Mi
          volumeMounts:
            - name: dshm
              mountPath: /dev/shm
            - name: data
              mountPath: /bitnami/postgresql
      volumes:
        - name: dshm
          emptyDir:
            medium: Memory
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: "8Gi"
# Source: dify/charts/postgresql/templates/read/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: dify-postgresql-read
namespace: "dify"
labels:
app.kubernetes.io/name: postgresql
helm.sh/chart: postgresql-12.5.6
app.kubernetes.io/instance: dify
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: read
spec:
replicas: 1
serviceName: dify-postgresql-read-hl
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: postgresql
app.kubernetes.io/instance: dify
app.kubernetes.io/component: read
template:
metadata:
name: dify-postgresql-read
labels:
app.kubernetes.io/name: postgresql
helm.sh/chart: postgresql-12.5.6
app.kubernetes.io/instance: dify
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: read
spec:
serviceAccountName: default
affinity:
podAffinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: postgresql
app.kubernetes.io/instance: dify
app.kubernetes.io/component: read
topologyKey: kubernetes.io/hostname
weight: 1
nodeAffinity:
securityContext:
fsGroup: 1001
hostNetwork: false
hostIPC: false
containers:
- name: postgresql
image: docker.io/bitnami/postgresql:15.3.0-debian-11-r7
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "false"
- name: POSTGRESQL_PORT_NUMBER
value: "5432"
- name: POSTGRESQL_VOLUME_DIR
value: "/bitnami/postgresql"
- name: PGDATA
value: "/bitnami/postgresql/data"
# Authentication
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: dify-postgresql
key: postgres-password
# Replication
- name: POSTGRES_REPLICATION_MODE
value: "slave"
- name: POSTGRES_REPLICATION_USER
value: "repl_user"
- name: POSTGRES_REPLICATION_PASSWORD
valueFrom:
secretKeyRef:
name: dify-postgresql
key: replication-password
- name: POSTGRES_CLUSTER_APP_NAME
value: my_application
- name: POSTGRES_MASTER_HOST
value: dify-postgresql-primary
- name: POSTGRES_MASTER_PORT_NUMBER
value: "5432"
# TLS
- name: POSTGRESQL_ENABLE_TLS
value: "no"
# Audit
- name: POSTGRESQL_LOG_HOSTNAME
value: "false"
- name: POSTGRESQL_LOG_CONNECTIONS
value: "false"
- name: POSTGRESQL_LOG_DISCONNECTIONS
value: "false"
- name: POSTGRESQL_PGAUDIT_LOG_CATALOG
value: "off"
# Others
- name: POSTGRESQL_CLIENT_MIN_MESSAGES
value: "error"
- name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
value: "pgaudit"
ports:
- name: tcp-postgresql
containerPort: 5432
livenessProbe:
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
exec:
command:
- /bin/sh
- -c
- exec pg_isready -U "postgres" -d "dbname=dify" -h 127.0.0.1 -p 5432
readinessProbe:
failureThreshold: 6
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
exec:
command:
- /bin/sh
- -c
- -e
- |
exec pg_isready -U "postgres" -d "dbname=dify" -h 127.0.0.1 -p 5432
[ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]
resources:
limits: {}
requests:
cpu: 250m
memory: 256Mi
volumeMounts:
- name: dshm
mountPath: /dev/shm
- name: data
mountPath: /bitnami/postgresql
volumes:
- name: dshm
emptyDir:
medium: Memory
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "8Gi"
---
# Source: dify/charts/redis/templates/master/application.yaml
# Redis master StatefulSet (Bitnami chart).
# NOTE(review): indentation was lost in the paste; restored to canonical
# layout. The bare `podAffinity:` / `nodeAffinity:` keys and the empty
# `subPath:` on the redis-data mount (all of which parse as YAML null,
# equivalent to unset) have been dropped.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: dify-redis-master
  namespace: "dify"
  labels:
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-16.13.2
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: master
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: redis
      app.kubernetes.io/instance: dify
      app.kubernetes.io/component: master
  serviceName: dify-redis-headless
  updateStrategy:
    rollingUpdate: {}
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/name: redis
        helm.sh/chart: redis-16.13.2
        app.kubernetes.io/instance: dify
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: master
      annotations:
        # Checksums trigger a rolling restart when the referenced
        # ConfigMaps/Secret change.
        checksum/configmap: f81e19b391aaaabd2365d40a1c9a9bc3b3e6f9f6ec6bb3f02874b7d9cceca99d
        checksum/health: ec832880b9813234da96cedcc3334291cc5b1b535009a5cb8ac42fc3c830d909
        checksum/scripts: 85f6ffe23a75e5ab45fafa44085c32da1e501a2b14df122447f79dacdd50e0ee
        checksum/secret: 91cc62b50ec1eea381c24512ce5528d17bf41b02f8f2052b14e886cbae9caed8
    spec:
      securityContext:
        fsGroup: 1001
      serviceAccountName: dify-redis
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: redis
                    app.kubernetes.io/instance: dify
                    app.kubernetes.io/component: master
                topologyKey: kubernetes.io/hostname
              weight: 1
      terminationGracePeriodSeconds: 30
      containers:
        - name: redis
          image: docker.io/bitnami/redis:7.0.11-debian-11-r12
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsUser: 1001
          command:
            - /bin/bash
          args:
            - -c
            - /opt/bitnami/scripts/start-scripts/start-master.sh
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: REDIS_REPLICATION_MODE
              value: master
            - name: ALLOW_EMPTY_PASSWORD
              value: "no"
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: dify-redis
                  key: redis-password
            - name: REDIS_TLS_ENABLED
              value: "no"
            - name: REDIS_PORT
              value: "6379"
          ports:
            - name: redis
              containerPort: 6379
          livenessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            # One second longer than command timeout should prevent generation of zombie processes.
            timeoutSeconds: 6
            successThreshold: 1
            failureThreshold: 5
            exec:
              command:
                - sh
                - -c
                - /health/ping_liveness_local.sh 5
          readinessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            timeoutSeconds: 2
            successThreshold: 1
            failureThreshold: 5
            exec:
              command:
                - sh
                - -c
                - /health/ping_readiness_local.sh 1
          resources:
            limits: {}
            requests: {}
          volumeMounts:
            - name: start-scripts
              mountPath: /opt/bitnami/scripts/start-scripts
            - name: health
              mountPath: /health
            - name: redis-data
              mountPath: /data
            - name: config
              mountPath: /opt/bitnami/redis/mounted-etc
            - name: redis-tmp-conf
              mountPath: /opt/bitnami/redis/etc/
            - name: tmp
              mountPath: /tmp
      volumes:
        - name: start-scripts
          configMap:
            name: dify-redis-scripts
            defaultMode: 0755
        - name: health
          configMap:
            name: dify-redis-health
            defaultMode: 0755
        - name: config
          configMap:
            name: dify-redis-configuration
        - name: redis-tmp-conf
          emptyDir: {}
        - name: tmp
          emptyDir: {}
  volumeClaimTemplates:
    - metadata:
        name: redis-data
        labels:
          app.kubernetes.io/name: redis
          app.kubernetes.io/instance: dify
          app.kubernetes.io/component: master
      spec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: "8Gi"
---
# Source: dify/charts/redis/templates/replicas/statefulset.yaml
# Redis replica StatefulSet (Bitnami chart); replicates from the master pod.
# NOTE(review): indentation was lost in the paste; restored to canonical
# layout. The bare `podAffinity:` / `nodeAffinity:` keys and the empty
# `subPath:` on the redis-data mount (all parsing as YAML null, i.e. unset)
# have been dropped.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: dify-redis-replicas
  namespace: "dify"
  labels:
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-16.13.2
    app.kubernetes.io/instance: dify
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: replica
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: redis
      app.kubernetes.io/instance: dify
      app.kubernetes.io/component: replica
  serviceName: dify-redis-headless
  updateStrategy:
    rollingUpdate: {}
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/name: redis
        helm.sh/chart: redis-16.13.2
        app.kubernetes.io/instance: dify
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: replica
      annotations:
        # Checksums trigger a rolling restart when the referenced
        # ConfigMaps/Secret change.
        checksum/configmap: f81e19b391aaaabd2365d40a1c9a9bc3b3e6f9f6ec6bb3f02874b7d9cceca99d
        checksum/health: ec832880b9813234da96cedcc3334291cc5b1b535009a5cb8ac42fc3c830d909
        checksum/scripts: 85f6ffe23a75e5ab45fafa44085c32da1e501a2b14df122447f79dacdd50e0ee
        checksum/secret: 91cc62b50ec1eea381c24512ce5528d17bf41b02f8f2052b14e886cbae9caed8
    spec:
      securityContext:
        fsGroup: 1001
      serviceAccountName: dify-redis
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: redis
                    app.kubernetes.io/instance: dify
                    app.kubernetes.io/component: replica
                topologyKey: kubernetes.io/hostname
              weight: 1
      terminationGracePeriodSeconds: 30
      containers:
        - name: redis
          image: docker.io/bitnami/redis:7.0.11-debian-11-r12
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsUser: 1001
          command:
            - /bin/bash
          args:
            - -c
            - /opt/bitnami/scripts/start-scripts/start-replica.sh
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: REDIS_REPLICATION_MODE
              value: slave
            - name: REDIS_MASTER_HOST
              value: dify-redis-master-0.dify-redis-headless.dify.svc.cluster.local
            - name: REDIS_MASTER_PORT_NUMBER
              value: "6379"
            - name: ALLOW_EMPTY_PASSWORD
              value: "no"
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: dify-redis
                  key: redis-password
            - name: REDIS_MASTER_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: dify-redis
                  key: redis-password
            - name: REDIS_TLS_ENABLED
              value: "no"
            - name: REDIS_PORT
              value: "6379"
          ports:
            - name: redis
              containerPort: 6379
          startupProbe:
            failureThreshold: 22
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            tcpSocket:
              port: redis
          livenessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            timeoutSeconds: 6
            successThreshold: 1
            failureThreshold: 5
            exec:
              command:
                - sh
                - -c
                - /health/ping_liveness_local_and_master.sh 5
          readinessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            timeoutSeconds: 2
            successThreshold: 1
            failureThreshold: 5
            exec:
              command:
                - sh
                - -c
                - /health/ping_readiness_local_and_master.sh 1
          resources:
            limits: {}
            requests: {}
          volumeMounts:
            - name: start-scripts
              mountPath: /opt/bitnami/scripts/start-scripts
            - name: health
              mountPath: /health
            - name: redis-data
              mountPath: /data
            - name: config
              mountPath: /opt/bitnami/redis/mounted-etc
            - name: redis-tmp-conf
              mountPath: /opt/bitnami/redis/etc
      volumes:
        - name: start-scripts
          configMap:
            name: dify-redis-scripts
            defaultMode: 0755
        - name: health
          configMap:
            name: dify-redis-health
            defaultMode: 0755
        - name: config
          configMap:
            name: dify-redis-configuration
        - name: redis-tmp-conf
          emptyDir: {}
  volumeClaimTemplates:
    - metadata:
        name: redis-data
        labels:
          app.kubernetes.io/name: redis
          app.kubernetes.io/instance: dify
          app.kubernetes.io/component: replica
      spec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: "8Gi"
---
# Source: dify/charts/weaviate/templates/weaviateStatefulset.yaml
# Weaviate vector database StatefulSet.
# NOTE(review): indentation was lost in the paste; restored to canonical
# layout. The bare `storageClassName:` key was dropped — a bare key parses
# as YAML null, which Kubernetes treats as "unset" (use the cluster default
# StorageClass). Do NOT replace it with "" — an empty string explicitly
# disables dynamic provisioning, which would change behavior.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: weaviate
  labels:
    name: weaviate
    app: weaviate
    app.kubernetes.io/name: weaviate
    app.kubernetes.io/managed-by: helm
spec:
  replicas: 1
  serviceName: weaviate-headless
  selector:
    matchLabels:
      app: weaviate
  template:
    metadata:
      labels:
        app: weaviate
        app.kubernetes.io/name: weaviate
        app.kubernetes.io/managed-by: helm
    spec:
      terminationGracePeriodSeconds: 600
      containers:
        - name: weaviate
          image: 'docker.io/semitechnologies/weaviate:1.19.1'
          imagePullPolicy: Always
          command:
            - /bin/weaviate
          args:
            - --host
            - 0.0.0.0
            - --port
            - "8080"
            - --scheme
            - http
            - --config-file
            - /weaviate-config/conf.yaml
            - --read-timeout=60s
            - --write-timeout=60s
          resources: {}
          env:
            # NOTE(review): plaintext API key embedded in the manifest; prefer a
            # secretKeyRef so the credential is not stored in VCS or visible in
            # rendered output.
            - name: AUTHENTICATION_APIKEY_ALLOWED_KEYS
              value: "WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih"
            - name: AUTHENTICATION_APIKEY_ENABLED
              value: "true"
            - name: AUTHENTICATION_APIKEY_USERS
              value: "hello@dify.ai"
            - name: AUTHORIZATION_ADMINLIST_ENABLED
              value: "true"
            - name: AUTHORIZATION_ADMINLIST_USERS
              value: "hello@dify.ai"
            - name: CLUSTER_DATA_BIND_PORT
              value: "7001"
            - name: CLUSTER_GOSSIP_BIND_PORT
              value: "7000"
            - name: GOGC
              value: "100"
            - name: PROMETHEUS_MONITORING_ENABLED
              value: "false"
            - name: QUERY_MAXIMUM_RESULTS
              value: "100000"
            - name: REINDEX_VECTOR_DIMENSIONS_AT_STARTUP
              value: "false"
            - name: TRACK_VECTOR_DIMENSIONS
              value: "false"
            - name: STANDALONE_MODE
              value: 'true'
            - name: PERSISTENCE_DATA_PATH
              value: '/var/lib/weaviate'
            - name: DEFAULT_VECTORIZER_MODULE
              value: none
            - name: CLUSTER_JOIN
              value: weaviate-headless.dify.svc.cluster.local
          ports:
            - containerPort: 8080
          volumeMounts:
            - name: weaviate-config
              mountPath: /weaviate-config
            - name: weaviate-data
              mountPath: /var/lib/weaviate
          livenessProbe:
            httpGet:
              path: /v1/.well-known/live
              port: 8080
            initialDelaySeconds: 900
            periodSeconds: 10
            failureThreshold: 30
            successThreshold: 1
            timeoutSeconds: 3
          readinessProbe:
            httpGet:
              path: /v1/.well-known/ready
              port: 8080
            initialDelaySeconds: 3
            periodSeconds: 10
            failureThreshold: 3
            successThreshold: 1
            timeoutSeconds: 3
      volumes:
        - name: weaviate-config
          configMap:
            name: weaviate-config
        # - name: weaviate-persistence-data-vol
        #   persistentVolumeClaim:
        #     claimName: weaviate-persistence-data
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - weaviate
                topologyKey: kubernetes.io/hostname
              weight: 1
  volumeClaimTemplates:
    - metadata:
        name: weaviate-data
        labels:
          app.kubernetes.io/name: weaviate
          app.kubernetes.io/managed-by: helm
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 32Gi
---
# Source: dify/templates/tests/test-connection.yaml
# Helm test hook: a one-shot busybox pod that wgets the `dify` Service to
# verify in-cluster connectivity.
# NOTE(review): indentation was lost in the paste; restored to canonical
# layout. Content is unchanged.
apiVersion: v1
kind: Pod
metadata:
  name: "dify-test-connection"
  labels:
    helm.sh/chart: dify-0.14.0
    app.kubernetes.io/name: dify
    app.kubernetes.io/instance: dify
    app.kubernetes.io/version: "0.3.1"
    app.kubernetes.io/managed-by: Helm
  annotations:
    "helm.sh/hook": test
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['dify:80']
  restartPolicy: Never
helm install dify --set 'externalWeaviate.enabled=true' --set 'externalWeaviate.endpoint=http://weaviate.dify.svc.cluster.local' --set 'api.url.api=https://xy-dify.xingyuxinchuan.com' --set 'api.url.console=https://xy-dify.xingyuxinchuan.com' --set 'api.url.app=https://xy-dify.xingyuxinchuan.com' --set 'redis.replica.replicaCount=1' -n dify . --create-namespace WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /home/polonsky/.kube/config WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /home/polonsky/.kube/config NAME: dify LAST DEPLOYED: Mon Jul 10 08:26:53 2023 NAMESPACE: dify STATUS: deployed REVISION: 1 NOTES:
dify-api setup Err [root@VM-0-10-centos ~]# kubectl logs -f -n dify dify-api-5549c8b5d9-ggtkj Running migrations INFO [alembic.runtime.migration] Context impl PostgresqlImpl. INFO [alembic.runtime.migration] Will assume transactional DDL.
Error: 'tcp' is not a valid port number.
how to config this "tcp"
Thanks for your reply. We have successfully replicated the issue on our side. We are still looking into it.
dify-api setup Err [root@VM-0-10-centos ~]# kubectl logs -f -n dify dify-api-5549c8b5d9-ggtkj Running migrations INFO [alembic.runtime.migration] Context impl PostgresqlImpl. INFO [alembic.runtime.migration] Will assume transactional DDL.
Error: 'tcp' is not a valid port number.
how to config this "tcp"
Hello there. We have confirmed that this issue can be replicated only if the release name is set to dify
. The current workaround is to specify another value, say my-release
for release name:
helm install my-release --set 'externalWeaviate.enabled=true' --set 'externalWeaviate.endpoint=http://weaviate.dify.svc.cluster.local' --set 'api.url.api=https://xy-dify.xingyuxinchuan.com' --set 'api.url.console=https://xy-dify.xingyuxinchuan.com' --set 'api.url.app=https://xy-dify.xingyuxinchuan.com' --set 'redis.replica.replicaCount=1' -n dify .
We are still investigating what causes the difference. Feel free to share if you could come up with your fix. @JeromeLiuLly
We have figured out the cause of the issue. For every defined Service, Kubernetes injects environment variables into all pods in the same namespace, e.g.
DIFY_PORT=tcp://10.105.104.43:80
DIFY_PORT_80_TCP=tcp://10.105.104.43:80
DIFY_PORT_80_TCP_ADDR=10.105.104.43
DIFY_PORT_80_TCP_PORT=80
DIFY_PORT_80_TCP_PROTO=tcp
In this case {RELEASE_NAME}_PORT=tcp://**.***.***.**:80
would be injected for api
.
According to entrypoint.sh, DIFY_PORT
serves as a reserved environment variable. Hence specifying dify
as the release name will mess up the startup script.
You may choose a different release name for now as workaround.
We've already made a patch for this problem. It will be included in the next patch release unless further side effects emerge. Closing this issue.
dify-api setup Err [root@VM-0-10-centos ~]# kubectl logs -f -n dify dify-api-5549c8b5d9-ggtkj Running migrations INFO [alembic.runtime.migration] Context impl PostgresqlImpl. INFO [alembic.runtime.migration] Will assume transactional DDL.
Error: 'tcp' is not a valid port number.
how to config this "tcp"