TeamPiped / Piped-Kubernetes

GNU Affero General Public License v3.0

Piped "Failed to construct URL" in Frontend #64

Closed: 98jan closed this issue 1 year ago

98jan commented 1 year ago

Helm chart name

piped

Helm chart version

3.0.2

Container name

1337kavin/piped-frontend

Container tag

3.0.2

Description

I configured domains for the frontend, API and ytproxy in the Helm chart. When I open the frontend, I get an infinite loading animation and the browser console shows the following error messages (see image):

[screenshot: browser console showing "Failed to construct URL" errors]

Expected result

Application routing works as expected and no error is thrown in the browser console.

Helm values to reproduce

    global:
      # -- Set an override for the prefix of the fullname
      nameOverride:
      # -- Set the entire name definition
      fullnameOverride:
      # -- Set additional global labels. Helm templates can be used.
      labels: { }
      # -- Set additional global annotations. Helm templates can be used.
      annotations: { }

    controller:
      # -- enable the controller.
      enabled: false

    serviceAccount:
      create: false

    frontend:
      enabled: true
      service:
        main:
          enabled: true
          primary: true
          type: ClusterIP
          ports:
            http:
              enabled: true
              primary: true
              port: 80
              protocol: HTTP

      image:
        # -- image repository
        repository: 1337kavin/piped-frontend
        # -- image tag
        # @chart.appVersion
        tag:
        # -- image pull policy
        pullPolicy: IfNotPresent

      env:
        BACKEND_HOSTNAME: pipedapi.<domain>

      command: "/bin/ash"
      args:
        - -c
        - "sed -i s/pipedapi.kavin.rocks/$BACKEND_HOSTNAME/g /usr/share/nginx/html/assets/* && /docker-entrypoint.sh nginx -g 'daemon off;'"

      # resources:
      #  requests:
      #    memory: 32Mi
      #  limits:
      #    memory: 128Mi

    backend:
      enabled: true
      service:
        main:
          enabled: true
          primary: true
          type: ClusterIP
          ports:
            http:
              enabled: true
              primary: true
              port: 8080
              protocol: HTTP

      #  command: "/bin/sh"
      #  args:
      #    - -c
      #    - sleep infinity

      # If the hostnames are not set for backend, proxy and API, they will be automatically fetched from their ingresses.
      config:
        PORT: 8080
        HTTP_WORKERS: 2
        # PROXY_PART: https://PROXY_HOSTNAME
        # Outgoing HTTP Proxy - eg: 127.0.0.1:8118
        # HTTP_PROXY: 127.0.0.1:8118
        # Captcha Parameters
        # CAPTCHA_BASE_URL: https://api.capmonster.cloud/
        # CAPTCHA_API_KEY: INSERT_HERE
        API_URL: https://pipedapi.<domain>
        FRONTEND_URL: https://piped.<domain>
        # Enable haveibeenpwned compromised password API
        # COMPROMISED_PASSWORD_CHECK: true
        # Disable Registration
        # DISABLE_REGISTRATION: false
        # Feed Retention Time in Days
        # FEED_RETENTION: 30
        # database:
        # connection_url: jdbc:postgresql://postgres:5432/piped
        # driver_class: org.postgresql.Driver
        # dialect: org.hibernate.dialect.PostgreSQLDialect
        # username: piped
        # password: changeme
        # Please only provide the secret name (it should already exist),
        # it should only include database.username and database.password as others will be taken from above.
        # secret: secret-name

      image:
        # -- image repository
        repository: 1337kavin/piped
        # -- image tag
        # @chart.appVersion
        tag:
        # -- image pull policy
        pullPolicy: IfNotPresent

      # resources:
      #  requests:
      #    memory: 500Mi
      #  limits:
      #    memory: 1500Mi

    ytproxy:
      enabled: true
      service:
        main:
          enabled: true
          primary: true
          type: ClusterIP
          ports:
            http:
              enabled: true
              primary: true
              port: 8080
              protocol: HTTP

      command: "/app/piped-proxy"

      image:
        # -- image repository
        repository: 1337kavin/piped-proxy
        # -- image tag
        # @chart.appVersion
        tag:
        # -- image pull policy
        pullPolicy: IfNotPresent

      # resources:
      #  requests:
      #    memory: 32Mi
      #  limits:
      #    memory: 500Mi

    ingress:
      main:
        enabled: true
        primary: true
        ingressClassName: nginx
        annotations:
          cert-manager.io/cluster-issuer: letsencrypt-prod
        hosts:
          - host: piped.<domain>
            paths:
              - path: "/"
        tls:
        - hosts:
          - piped.<domain>
          secretName: tls-piped

      backend:
        enabled: true
        ingressClassName: nginx
        primary: false
        annotations:
          cert-manager.io/cluster-issuer: letsencrypt-prod
        hosts:
          - host: pipedapi.<domain>
            paths:
              - path: "/"
        tls:
        - hosts:
          - pipedapi.<domain>
          secretName: tls-pipedapi

      ytproxy:
        enabled: true
        ingressClassName: nginx
        primary: false
        annotations:
          cert-manager.io/cluster-issuer: letsencrypt-prod
        hosts:
          - host: ytproxy.<domain>
            paths:
              - path: "/"
        tls:
        - hosts:
          - ytproxy.<domain>
          secretName: tls-piped-ytproxy

    # See options from https://artifacthub.io/packages/helm/bitnami/postgresql#parameters
    postgresql:
      enabled: true
      image:
        tag: 13.12.0-debian-11-r58
      auth:
        database: piped
        username: piped
        password: changemepiped

    # -- Probe configuration
    # -- [[ref]](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/)
    # @default -- See below
    probes:
      # -- Liveness probe configuration
      # @default -- See below
      liveness:
        # -- Enable the liveness probe
        enabled: true
        # -- Set this to `true` if you wish to specify your own livenessProbe
        custom: false
        # -- The spec field contains the values for the default livenessProbe.
        # If you selected `custom: true`, this field holds the definition of the livenessProbe.
        # @default -- See below
        spec:
          initialDelaySeconds: 0
          periodSeconds: 10
          timeoutSeconds: 1
          failureThreshold: 3

      # -- Readiness probe configuration
      # @default -- See below
      readiness:
        # -- Enable the readiness probe
        enabled: true
        # -- Set this to `true` if you wish to specify your own readinessProbe
        custom: false
        # -- The spec field contains the values for the default readinessProbe.
        # If you selected `custom: true`, this field holds the definition of the readinessProbe.
        # @default -- See below
        spec:
          initialDelaySeconds: 0
          periodSeconds: 10
          timeoutSeconds: 1
          failureThreshold: 3

      # -- Startup probe configuration
      # @default -- See below
      startup:
        # -- Enable the startup probe
        enabled: true
        # -- Set this to `true` if you wish to specify your own startupProbe
        custom: false
        # -- The spec field contains the values for the default startupProbe.
        # If you selected `custom: true`, this field holds the definition of the startupProbe.
        # @default -- See below
        spec:
          initialDelaySeconds: 0
          timeoutSeconds: 1
          ## This means it has a maximum of 5*30=150 seconds to start up before it fails
          periodSeconds: 5
          failureThreshold: 30

    termination:
      gracePeriodSeconds:

Additional Information

Repo link

no public repo available

samip5 commented 1 year ago

The 2nd and 3rd requests in the browser console suggest that the API endpoint is not set correctly.

I think setting the API_URL and FRONTEND_URL in the backend config is screwing it up. I have it working perfectly fine when I have just:

config:
  PORT: 8080
  NUM_WORKERS: 2
  PROXY_PART: https://proxy.<snip>
  #DISABLE_REGISTRATION: true
  database:
    connection_url: jdbc:postgresql://postgres-default-rw.database.svc.cluster.local/piped
    secret: *secret

samip5 commented 1 year ago

You're also using an out-of-date chart. The latest is currently 4.0.0.
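
Assuming you already have the TeamPiped chart repository added locally under the alias piped (the alias is an assumption, adjust to your setup), you can list the published versions with:

helm repo update
helm search repo piped --versions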

98jan commented 1 year ago

I will try it with your configuration and come back to you. I didn't know that version 4.0.0 of the Helm chart is available; the GitHub badge suggested that 3.0.2 is the newest version. I will try it and report back if everything works.

samip5 commented 1 year ago

Keep in mind that 4.0.0 includes a major Postgres update if you're not using an external Postgres. The reason it hasn't updated on ArtifactHub is that it had issues fetching the latest metadata, but that should hopefully complete soon.
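
If you do use the bundled PostgreSQL, taking a dump before the upgrade is a reasonable precaution. A minimal sketch, assuming the Bitnami pod is named piped-postgresql-0 (hypothetical, check your release) and the credentials from the values above:

# dump the piped database to a local file before upgrading the chart
kubectl exec piped-postgresql-0 -- env PGPASSWORD=changemepiped pg_dump -U piped piped > piped-backup.sql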

98jan commented 1 year ago

I updated values.yaml to match your entries. Which is the correct entry: HTTP_WORKERS or NUM_WORKERS?

I tried both setups and still have the same issue (URL missing in the call to config) with version 4.1.0, after a fresh installation with the PostgreSQL volumes removed. [screenshot: browser console errors]
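
(For reference, a truly fresh install of the bundled PostgreSQL also means deleting its PersistentVolumeClaim; the PVC name below is hypothetical and depends on the release name:)

kubectl get pvc
kubectl delete pvc data-piped-postgresql-0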

samip5 commented 1 year ago

That suggests that the frontend didn't get the right environment, so please check the frontend container's environment variables.
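
For example, assuming the frontend Deployment is named piped-frontend and lives in your current namespace:

kubectl exec deploy/piped-frontend -- printenv | grep BACKEND_HOSTNAME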

98jan commented 1 year ago

Using the command printenv I printed all available environment variables in the frontend container. Can you see whether any environment variable is missing?

PIPED_BACKEND_PORT=tcp://10.103.40.160:8080
KUBERNETES_PORT=tcp://10.96.0.1:443
PIPED_BACKEND_SERVICE_PORT=8080
KUBERNETES_SERVICE_PORT=443
HOSTNAME=piped-frontend-69b58d7fc4-h6gw6
PIPED_FRONTEND_PORT=tcp://10.111.181.61:80
PIPED_FRONTEND_SERVICE_PORT=80
SHLVL=1
PIPED_BACKEND_PORT_8080_TCP=tcp://10.103.40.160:8080
HOME=/root
PKG_RELEASE=1
PIPED_FRONTEND_PORT_80_TCP_ADDR=10.111.181.61
PIPED_YTPROXY_SERVICE_PORT_HTTP=8080
PIPED_FRONTEND_PORT_80_TCP_PORT=80
PIPED_FRONTEND_PORT_80_TCP_PROTO=tcp
TERM=xterm
NGINX_VERSION=1.25.3
PIPED_YTPROXY_PORT_8080_TCP_ADDR=10.110.15.44
PIPED_POSTGRESQL_PORT_5432_TCP_ADDR=10.107.107.175
PIPED_YTPROXY_SERVICE_HOST=10.110.15.44
KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PIPED_POSTGRESQL_SERVICE_HOST=10.107.107.175
NJS_VERSION=0.8.2
KUBERNETES_PORT_443_TCP_PORT=443
PIPED_YTPROXY_PORT_8080_TCP_PORT=8080
PIPED_POSTGRESQL_PORT_5432_TCP_PORT=5432
PIPED_FRONTEND_PORT_80_TCP=tcp://10.111.181.61:80
BACKEND_HOSTNAME=pipedapi.<domain>
PIPED_YTPROXY_PORT_8080_TCP_PROTO=tcp
PIPED_POSTGRESQL_PORT_5432_TCP_PROTO=tcp
KUBERNETES_PORT_443_TCP_PROTO=tcp
PIPED_YTPROXY_PORT=tcp://10.110.15.44:8080
PIPED_YTPROXY_SERVICE_PORT=8080
PIPED_BACKEND_SERVICE_PORT_HTTP=8080
PIPED_POSTGRESQL_PORT=tcp://10.107.107.175:5432
PIPED_POSTGRESQL_SERVICE_PORT=5432
PIPED_POSTGRESQL_SERVICE_PORT_TCP_POSTGRESQL=5432
PIPED_FRONTEND_SERVICE_PORT_HTTP=80
PIPED_YTPROXY_PORT_8080_TCP=tcp://10.110.15.44:8080
KUBERNETES_SERVICE_PORT_HTTPS=443
KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
PIPED_POSTGRESQL_PORT_5432_TCP=tcp://10.107.107.175:5432
PIPED_BACKEND_SERVICE_HOST=10.103.40.160
PIPED_BACKEND_PORT_8080_TCP_ADDR=10.103.40.160
KUBERNETES_SERVICE_HOST=10.96.0.1
PWD=/
PIPED_BACKEND_PORT_8080_TCP_PORT=8080
PIPED_FRONTEND_SERVICE_HOST=10.111.181.61
PIPED_BACKEND_PORT_8080_TCP_PROTO=tcp
samip5 commented 1 year ago

  • HTTP_WORKERS or NUM_WORKERS?

HTTP_WORKERS is correct. Please also change your pullPolicy: IfNotPresent to Always: that is the chart default, because none of the containers follow semver, so with IfNotPresent an image that already exists on the node will never be updated.
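
One way to apply that without editing values.yaml is via --set on upgrade; a sketch, where the release name piped and chart reference piped/piped are assumptions (adjust to your setup):

helm upgrade piped piped/piped -f values.yaml \
  --set frontend.image.pullPolicy=Always \
  --set backend.image.pullPolicy=Always \
  --set ytproxy.image.pullPolicy=Always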

I cannot reproduce this. You can try executing the sed inside the container and check whether it does anything.
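
Something along these lines should show whether the substitution ran; the Deployment name piped-frontend is taken from the pod name in the env dump above:

# list assets that still contain the default hostname (no output means the sed worked)
kubectl exec deploy/piped-frontend -- grep -rl pipedapi.kavin.rocks /usr/share/nginx/html/assets/
# re-run the substitution manually if any files are listed
kubectl exec deploy/piped-frontend -- /bin/ash -c 'sed -i "s/pipedapi.kavin.rocks/$BACKEND_HOSTNAME/g" /usr/share/nginx/html/assets/*'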

98jan commented 1 year ago

Thanks, the change from IfNotPresent to Always fixed my issue!