open-telemetry / opentelemetry-helm-charts

OpenTelemetry Helm Charts
https://opentelemetry.io
Apache License 2.0

opentelemetry collector prometheus exporter configuration #339

Open oxeye-schmil opened 2 years ago

oxeye-schmil commented 2 years ago

Hi,

Below is the full configuration of my collector. To export OpenTelemetry metrics to Prometheus, I've added the following prometheus exporter:

  exporters:
    prometheus:
      endpoint: "0.0.0.0:8889"
      namespace: "data-pipeline"
      resource_to_telemetry_conversion:
        enabled: true
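
For completeness, the exporter is also wired into the metrics pipeline (see the full config below); the relevant part boils down to:

  service:
    pipelines:
      metrics:
        receivers:
          - otlp
        exporters:
          - logging
          - prometheus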

added port 8889 to the ports section:

  dp-metrics:
    enabled: true
    containerPort: 8889
    servicePort: 8889
    protocol: TCP
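
If I read the chart templates correctly, this should render both a containerPort on the collector pod and a matching port on the Service, roughly:

  # sketch of the rendered Service port (exact naming depends on the release)
  - name: dp-metrics
    port: 8889
    targetPort: 8889
    protocol: TCP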

and added the dp-metrics port to the serviceMonitor endpoints:

  serviceMonitor:
    enabled: true
    metricsEndpoints:
    - port: metrics
    - port: dp-metrics
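
As far as I can tell, the chart maps metricsEndpoints straight onto the ServiceMonitor, so the rendered object should contain roughly:

  # sketch of the rendered ServiceMonitor spec
  endpoints:
    - port: metrics
    - port: dp-metrics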

I do see the otel-collector metrics but not the exported metrics.
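
To narrow it down I can think of two checks (sketches, not verified). First, the logging exporter is already in the metrics pipeline, so raising its verbosity should print any incoming metrics in the collector logs:

  exporters:
    logging:
      loglevel: debug   # older collector versions; newer ones use `verbosity: detailed`

Second, a kubectl port-forward to the collector pod on 8889 followed by curl localhost:8889/metrics would show whether the prometheus exporter itself is serving the data-pipeline metrics, before Prometheus even scrapes it.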

# https://github.com/open-telemetry/opentelemetry-helm-charts/blob/main/charts/opentelemetry-collector/values.yaml
---
# Default values for opentelemetry-collector.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

nameOverride: ""
fullnameOverride: ${fullnameOverride}

# Valid values are "daemonset" and "deployment".
mode: "deployment"

# Handles basic configuration of components that
# also require k8s modifications to work correctly.
# .Values.config can be used to modify/add to a preset
# component configuration, but CANNOT be used to remove
# preset configuration. If you require removal of any
# sections of a preset configuration, you cannot use
# the preset. Instead, configure the component manually in
# .Values.config and use the other fields supplied in the
# values.yaml to configure k8s as necessary.
presets:
  # Configures the collector to collect logs.
  # Adds the filelog receiver to the logs pipeline
  # and adds the necessary volumes and volume mounts.
  # Best used with mode = daemonset.
  logsCollection:
    enabled: false
    includeCollectorLogs: false
  # Configures the collector to collect host metrics.
  # Adds the hostmetrics receiver to the metrics pipeline
  # and adds the necessary volumes and volume mounts.
  # Best used with mode = daemonset.
  hostMetrics:
    enabled: false

configMap:
  # Specifies whether a configMap should be created (true by default)
  create: true

config:
  exporters:
    logging: {}
    prometheus:
      endpoint: "0.0.0.0:8889"
      namespace: "data-pipeline"
      resource_to_telemetry_conversion:
        enabled: true
  extensions:
    # The health_check extension is mandatory for this chart.
    # Without the health_check extension the collector will fail the readiness and liveness probes.
    # The health_check extension can be modified, but should never be removed.
    health_check: {}
    memory_ballast: null
  processors:
    batch: null
    # If set to null, will be overridden with values based on k8s resource limits
    memory_limiter: null
  receivers:
    jaeger: null
    otlp:
      protocols:
        grpc:
          endpoint: 0.0.0.0:${otlp_grpc_port}
        http: null
    prometheus: null
    zipkin: null
  service:
    telemetry: null
    extensions:
      - health_check
    pipelines:
      logs: null
      metrics:
        exporters:
          - logging
          - prometheus
        processors:
        receivers:
          - otlp
      traces: null
#        exporters:
#          - logging
#        processors:
#        receivers:
#          - otlp

image:
  # If you want to use the core image `otel/opentelemetry-collector`, you also need to change `command.name` value to `otelcol`.
  repository: otel/opentelemetry-collector-contrib
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""
imagePullSecrets: []

# OpenTelemetry Collector executable
command:
  name: otelcol-contrib
  extraArgs: []

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

clusterRole:
  # Specifies whether a clusterRole should be created
  create: false
  # Annotations to add to the clusterRole
  annotations: {}
  # The name of the clusterRole to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""
  # A set of rules as documented here : https://kubernetes.io/docs/reference/access-authn-authz/rbac/
  rules: []
  # - apiGroups:
  #   - ''
  #   resources:
  #   - 'pods'
  #   - 'nodes'
  #   verbs:
  #   - 'get'
  #   - 'list'
  #   - 'watch'

  clusterRoleBinding:
    # Annotations to add to the clusterRoleBinding
    annotations: {}
    # The name of the clusterRoleBinding to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""

podSecurityContext: {}
securityContext: {}

nodeSelector:
  service: management
tolerations: []
affinity: {}

# Allows for pod scheduler prioritisation
priorityClassName: ""

extraEnvs: []
extraVolumes: []
extraVolumeMounts: []

# Configuration for ports
ports:
  otlp:
    enabled: true
    containerPort: ${otlp_grpc_port}
    servicePort: ${otlp_grpc_port}
    hostPort: ${otlp_grpc_port}
    protocol: TCP
  otlp-http:
    enabled: true
    containerPort: 4318
    servicePort: 4318
    hostPort: 4318
    protocol: TCP
  jaeger-compact:
    enabled: false
    containerPort: 6831
    servicePort: 6831
    hostPort: 6831
    protocol: UDP
  jaeger-thrift:
    enabled: false
    containerPort: 14268
    servicePort: 14268
    hostPort: 14268
    protocol: TCP
  jaeger-grpc:
    enabled: false
    containerPort: 14250
    servicePort: 14250
    hostPort: 14250
    protocol: TCP
  zipkin:
    enabled: false
    containerPort: 9411
    servicePort: 9411
    hostPort: 9411
    protocol: TCP
  metrics:
    # The metrics port is disabled by default. However you need to enable the port
    # in order to use the ServiceMonitor (serviceMonitor.enabled) or PodMonitor (podMonitor.enabled).
    enabled: true
    containerPort: 8888
    servicePort: 8888
    protocol: TCP
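  # Custom port for the prometheus exporter configured above (endpoint 0.0.0.0:8889).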
  dp-metrics:
    enabled: true
    containerPort: 8889
    servicePort: 8889
    protocol: TCP

# Deprecated.  Use presets.logsCollection instead.
containerLogs:
  enabled: false

resources:
  limits:
    cpu: 1
    memory: 2Gi

podAnnotations: {}

podLabels: {}

# Host networking requested for this pod. Use the host's network namespace.
hostNetwork: false

# Pod DNS policy: ClusterFirst, ClusterFirstWithHostNet, Default, or None
dnsPolicy: ""

# only used with deployment mode
replicaCount: 1

annotations: {}

# List of init container specs, e.g. for copying a binary to be executed as a lifecycle hook.
initContainers: []
# initContainers:
#   - name: test
#     command:
#       - cp
#     args:
#       - /bin/sleep
#       - /test/sleep
#     image: busybox:latest
#     volumeMounts:
#       - name: test
#         mountPath: /test

# Pod lifecycle policies.
lifecycleHooks: {}
# lifecycleHooks:
#   preStop:
#     exec:
#       command:
#       - /test/sleep
#       - "5"

service:
  type: ClusterIP
  annotations: {}

ingress:
  enabled: false
  annotations: {}
#  ingressClassName: nginx
#  hosts:
#    - host: collector.example.com
#      paths:
#        - path: /
#          pathType: Prefix
#          port: 4318
#  tls:
#    - secretName: collector-tls
#      hosts:
#        - collector.example.com

podMonitor:
  # The pod monitor by default scrapes the metrics port.
  # The metrics port needs to be enabled as well.
  enabled: false
  metricsEndpoints:
  - port: metrics
    # interval: 15s

  # additional labels for the PodMonitor
  extraLabels: {}
  #   release: kube-prometheus-stack

serviceMonitor:
  # The service monitor by default scrapes the metrics port.
  # The metrics port needs to be enabled as well.
  enabled: true
  metricsEndpoints:
  - port: metrics
    # interval: 15s
  - port: dp-metrics
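    # scrapes the prometheus exporter port (8889) defined under ports above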

  # additional labels for the ServiceMonitor
  extraLabels:
    release: prometheus
  #  release: kube-prometheus-stack

# PodDisruptionBudget is used only if deployment enabled
podDisruptionBudget:
  enabled: false
#   minAvailable: 2
#   maxUnavailable: 1

# autoscaling is used only if deployment enabled
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

prometheusRule:
  enabled: false
  groups: []
  # Create default rules for monitoring the collector
  defaultRules:
    enabled: false

  # additional labels for the PrometheusRule
  extraLabels: {}
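
For what it's worth, with namespace: "data-pipeline" set I would expect the exported series to show up in Prometheus with a data_pipeline_ prefix (if I remember right, the exporter sanitizes the dash to an underscore), so that's the name pattern I'm searching for.
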
seany89 commented 1 year ago

@oxeye-schmil Could this be related to an issue I am having as well https://github.com/open-telemetry/opentelemetry-helm-charts/issues/538 ?