@tunaman @Subv @octopyth @drivard
Below is my values file:
replicaCount: 3

image:
  repository: apache/nifi
  tag: "1.16.3"
  pullPolicy: "IfNotPresent"

securityContext:
  runAsUser: 1000
  fsGroup: 1000

sts:
  podManagementPolicy: Parallel
  AntiAffinity: soft
  useHostNetwork: null
  hostPort: null
  pod:
    annotations:
      security.alpha.kubernetes.io/sysctls: net.ipv4.ip_local_port_range=10000 65000
  serviceAccount:
    create: false
    annotations: {}
  hostAliases: []
  startupProbe:
    enabled: false
    failureThreshold: 60
    periodSeconds: 10

properties:
  sensitiveKey: changeMechangeMe # Must have at least 12 characters
  algorithm: NIFI_PBKDF2_AES_GCM_256
  externalSecure: true
  isNode: true
  httpsPort: 8443
  webProxyHost: #
  safetyValve:
    nifi.web.http.network.interface.default: eth0
    # listen to loopback interface so "kubectl port-forward ..." works
    nifi.web.http.network.interface.lo: lo

auth:
  admin: CN=admin, OU=NIFI
  SSL:
    keystorePasswd: changeMe
    truststorePasswd: changeMe
  singleUser:
    username: admin
    password: admin@hcl1234 # Must to have at least 12 characters
  clientAuth:
    enabled: false
  ldap:
    enabled: false
    host: #ldap://
  oidc:
    enabled: false
    discoveryUrl: #http://
    additionalScopes:

openldap:
  enabled: false
  persistence:
    enabled: true
  env:
    LDAP_ORGANISATION: # name of your organization e.g. "Example"
    LDAP_DOMAIN: # your domain e.g. "ldap.example.be"
    LDAP_BACKEND: "hdb"
    LDAP_TLS: "true"
    LDAP_TLS_ENFORCE: "false"
    LDAP_REMOVE_CONFIG_AFTER_SETUP: "false"
  adminPassword: #ChengeMe
  configPassword: #ChangeMe
  customLdifFiles:
    1-default-users.ldif: |-

headless:
  type: ClusterIP
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"

service:
  type: ClusterIP
  httpsPort: 8443
  annotations: {}
  ## Load Balancer sources
  ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
  ##
  # loadBalancerSourceRanges:
  # - 10.10.10.0/24
  ## OIDC authentication requires "sticky" session on the LoadBalancer for JWT to work properly...but AWS doesn't like it on creation
  # sessionAffinity: ClientIP
  # sessionAffinityConfig:
  #   clientIP:
  processors:
    enabled: true
    ports:

containerPorts: #[]

ingress:
  enabled: true
  className: nginx
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: HTTPS
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header 'X-ProxyScheme' 'https';
      proxy_set_header 'X-ProxyPort' '443';
    nginx.ingress.kubernetes.io/upstream-vhost: localhost:8443
    nginx.ingress.kubernetes.io/proxy-redirect-from: https://localhost:8443
    nginx.ingress.kubernetes.io/proxy-redirect-to: https://intdev.dryice-aws.com
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/affinity: cookie
  tls: []
  hosts: ["intdev.dryice-aws.com"]
  path: /

jvmMemory: 2g

sidecar:
  image: busybox
  tag: "1.32.0"
  imagePullPolicy: "IfNotPresent"

persistence:
  enabled: true
  accessModes: [ReadWriteOnce]
  subPath:
    enabled: false
    name: data
    size: 30Gi
  configStorage:
    size: 100Mi
  authconfStorage:
    size: 100Mi
  dataStorage:
    size: 1Gi
  flowfileRepoStorage:
    size: 10Gi
  contentRepoStorage:
    size: 10Gi
  provenanceRepoStorage:
    size: 10Gi
  logStorage:
    size: 5Gi

resources: {}

logresources:
  requests:
    cpu: 10m
    memory: 10Mi
  limits:
    cpu: 50m
    memory: 50Mi

affinity: {}
nodeSelector: {}
tolerations: []
initContainers: {}
extraVolumeMounts: []
extraVolumes: []
extraContainers: []
terminationGracePeriodSeconds: 30
env: []
envFrom: []
extraOptions: []

openshift:
  scc:
    enabled: false
  route:
    enabled: false
    #path: /nifi

ca:
  enabled: false
  persistence:
    enabled: true
  server: ""
  service:
    port: 9090
  token: sixteenCharacters
  admin:
    cn: admin
  serviceAccount:
    create: false
  openshift:
    scc:
      enabled: false

certManager:
  enabled: true
  clusterDomain: cluster.local
  keystorePasswd: changeme
  truststorePasswd: changeme
  replaceDefaultTrustStore: false
  additionalDnsNames:
  certDuration: 2160h
  caDuration: 87660h

zookeeper:
  enabled: true
  url: ""
  port: 2181
  replicaCount: 3

registry:
  enabled: false
  url: ""
  port: 80

metrics:
  prometheus:
    enabled: false
    # Port used to expose Prometheus metrics
    port: 9092
    serviceMonitor:
      # Enable deployment of Prometheus Operator ServiceMonitor resource
      enabled: false
      # namespace: monitoring
      # Additional labels for the ServiceMonitor
      labels: {}
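(The `service.processors.ports` and `containerPorts` entries above appear empty in the paste. For reference, a minimal sketch of how they are typically filled in for this chart is below; the name `webhook` and port `9091` are placeholder values for illustration, not the actual ones.)

```yaml
# Sketch only - "webhook" and 9091 are placeholder values
service:
  processors:
    enabled: true
    ports:
      - name: webhook
        port: 9091
        targetPort: 9091

containerPorts:
  - name: webhook
    containerPort: 9091
    protocol: TCP
```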
I have deployed a 3-node cluster on AWS EKS and am exposing the application through an ingress controller. I have opened some additional ports on the NiFi pods and in the NiFi service, and I have verified this within the cluster: the custom ports are open and I am able to use them with the HandleHttp processors. But if I send data to the ingress host on a custom port, the ingress rejects the request with a 502 error.
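For reference, this is the kind of configuration I understand ingress-nginx needs to carry a raw TCP port, sketched with placeholder values (the `ingress-nginx` namespace, the `nifi/nifi` service reference and port 9091 are assumptions, and the controller would also need `--tcp-services-configmap` set and port 9091 exposed on its own Service):

```yaml
# Sketch only - namespace, service name and port are placeholders
apiVersion: v1
kind: ConfigMap
metadata:
  name: tcp-services
  namespace: ingress-nginx
data:
  # external port 9091 -> port 9091 of the "nifi" service in the "nifi" namespace
  "9091": "nifi/nifi:9091"
```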
Is there any way I can access the UI on port 8443 and also use the custom ports with the same host?
Please guide me here.
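For illustration, this is roughly the kind of extra Ingress rule I have in mind for the custom port, assuming the HandleHttpRequest listener speaks plain HTTP (service name, path and port 9091 are placeholders):

```yaml
# Sketch only - path, service name and port are placeholders
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nifi-custom-port
  annotations:
    # the custom listener is assumed to be plain HTTP, unlike the UI ingress which uses backend-protocol: HTTPS
    nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
spec:
  ingressClassName: nginx
  rules:
    - host: intdev.dryice-aws.com
      http:
        paths:
          - path: /webhook
            pathType: Prefix
            backend:
              service:
                name: nifi
                port:
                  number: 9091
```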