prometheus-operator / kube-prometheus

Use Prometheus to monitor Kubernetes and applications running on Kubernetes
https://prometheus-operator.dev/
Apache License 2.0

After enabling the Thanos sidecar, will the retention parameter be invalid? #1950

Open Wyifei opened 1 year ago

Wyifei commented 1 year ago

The Prometheus config is as follows:

apiVersion: v1
items:

After enabling the Thanos sidecar, Prometheus was configured with the following parameters (I'm not sure how they are set by the Prometheus Operator):

  --storage.tsdb.max-block-duration=2h
  --storage.tsdb.min-block-duration=2h
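
As far as I can tell, the operator pins both flags to 2h whenever spec.thanos is set, so that local compaction is disabled and the sidecar can upload immutable 2-hour blocks. A quick way to confirm which flags were actually generated is to inspect the StatefulSet the operator manages (the monitoring namespace and the prometheus-k8s name below are assumptions based on a default kube-prometheus install):

  kubectl -n monitoring get statefulset prometheus-k8s -o yaml | grep block-duration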

I found that the retention parameter doesn't work after Thanos is enabled: I can only query metrics from the past 4 hours in the Grafana dashboard if the datasource is Prometheus. (Of course, if the datasource is Thanos, I can get all the data.)

I have two questions about Prometheus's retention mechanism:

  1. Do Thanos and Prometheus work this way by design? If so, that would mean that after enabling the Thanos sidecar the retention parameter is effectively ignored, local storage only keeps about 4 hours of data, and older data has to be fetched from the remote object store (see the sketch after these questions).
  2. Why can I query metrics from the past 4 hours rather than only the past 2 hours? According to the parameter storage.tsdb.min-block-duration=2h, I would expect to be able to query only the past 2 hours of metrics.
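
For context, a minimal sketch of the spec.thanos section that makes the sidecar upload completed blocks to a remote object store (the secret name thanos-objstore-config and its key thanos.yaml are illustrative assumptions, not taken from this thread):

spec:
  thanos:
    # Assumed image tag; matches the version used later in this thread.
    image: 'thanosio/thanos:v0.25.2'
    # Secret holding the Thanos object storage configuration (bucket, credentials).
    objectStorageConfig:
      key: thanos.yaml
      name: thanos-objstore-config

As far as I know, without objectStorageConfig the sidecar only serves the local TSDB over the Store API and does not upload anything to object storage.
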
xuanyuanaosheng commented 7 months ago

This is the KubeSphere Prometheus config:

apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  labels:
    app.kubernetes.io/component: prometheus
    app.kubernetes.io/instance: k8s
    app.kubernetes.io/name: prometheus
    app.kubernetes.io/part-of: kube-prometheus
    app.kubernetes.io/version: 2.39.1
  name: k8s
  namespace: kubesphere-monitoring-system
spec:
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - preference:
            matchExpressions:
              - key: node-role.kubernetes.io/monitoring
                operator: Exists
          weight: 100
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - podAffinityTerm:
            labelSelector:
              matchLabels:
                app.kubernetes.io/component: prometheus
                app.kubernetes.io/instance: k8s
                app.kubernetes.io/name: prometheus
                app.kubernetes.io/part-of: kube-prometheus
            namespaces:
              - kubesphere-monitoring-system
            topologyKey: kubernetes.io/hostname
          weight: 100
  alerting:
    alertmanagers:
      - name: alertmanager-main
        namespace: kubesphere-monitoring-system
        port: web
  evaluationInterval: 1m
  image: 'prom/prometheus:v2.39.1'
  nodeSelector:
    kubernetes.io/os: linux
  podMetadata:
    labels:
      app.kubernetes.io/component: prometheus
      app.kubernetes.io/instance: k8s
      app.kubernetes.io/name: prometheus
      app.kubernetes.io/part-of: kube-prometheus
      app.kubernetes.io/version: 2.39.1
  podMonitorNamespaceSelector: {}
  podMonitorSelector: {}
  query:
    maxConcurrency: 1000
  replicas: 1
  resources:
    limits:
      cpu: '4'
      memory: 16Gi
    requests:
      cpu: 200m
      memory: 400Mi
  retention: 7d
  ruleNamespaceSelector: {}
  ruleSelector:
    matchLabels:
      prometheus: k8s
      role: alert-rules
  scrapeInterval: 1m
  securityContext:
    fsGroup: 0
    runAsNonRoot: false
    runAsUser: 0
  serviceAccountName: prometheus-k8s
  serviceMonitorNamespaceSelector: {}
  serviceMonitorSelector: {}
  storage:
    volumeClaimTemplate:
      spec:
        resources:
          requests:
            storage: 30Gi

#### This section was added by me
  thanos:
    image: 'thanosio/thanos:v0.25.2'
    version: v0.25.2
################

  tolerations:
    - effect: NoSchedule
      key: dedicated
      operator: Equal
      value: monitoring
  version: v2.39.1

But I cannot find the thanos-sidecar container?

Any ideas?
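
One way to check whether the operator actually injected the sidecar is to list the containers of the Prometheus pod (the pod name prometheus-k8s-0 is an assumption based on the CR name k8s above):

  kubectl -n kubesphere-monitoring-system get pod prometheus-k8s-0 \
    -o jsonpath='{range .spec.containers[*]}{.name}{"\n"}{end}'

If thanos-sidecar does not show up in that list, checking the prometheus-operator logs for reconciliation errors on this Prometheus object would be the next step.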