Closed JulienOrain closed 2 weeks ago
@JulienOrain for some reason I cannot modify the PR — could you try this? values:
## @section Logstash configuration
##
logstash:
## @param logstash.url Logstash instance URL
##
url: "http://logstash:9600"
## @param logstash.httpTimeout http timeout
##
httpTimeout: "3s"
## @param logstash.httpInsecure http insecure
##
httpInsecure: false
## @section Web settings
##
web:
## @param web.path Path under which to expose metrics.
##
path: /
## @section PodMonitor settings
##
podMonitor:
## If true, a PodMonitor CRD is created for a Prometheus Operator
## https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor
##
## @param podMonitor.enabled Enable pod monitor creation
##
enabled: false
## @param podMonitor.apiVersion Set pod monitor apiVersion
##
apiVersion: "monitoring.coreos.com/v1"
## @param podMonitor.namespace Set pod monitor namespace
##
namespace: ""
## @param podMonitor.labels Set pod monitor labels
##
labels: {}
## @param podMonitor.interval Set pod monitor interval
##
interval: 60s
## @param podMonitor.scrapeTimeout Set pod monitor scrapeTimeout
##
scrapeTimeout: 10s
## @param podMonitor.honorLabels Set pod monitor honorLabels
##
honorLabels: true
## @param podMonitor.scheme Set pod monitor scheme
##
scheme: http
## @param podMonitor.relabelings Set pod monitor relabelings
##
relabelings: []
## @param podMonitor.metricRelabelings Set pod monitor metricRelabelings
##
metricRelabelings: []
## @section Image settings
##
image:
## @param image.repository Image repository
##
repository: "kuskoman/logstash-exporter"
## @param image.tag Image tag, if not set the appVersion is used
##
tag: ""
## @param image.pullPolicy Image pull policy
## Options: Always, Never, IfNotPresent
##
pullPolicy: IfNotPresent
## @param fullnameOverride Override the fullname of the chart
##
fullnameOverride: ""
## @section Deployment settings
##
deployment:
## @param deployment.replicas Number of replicas for the deployment
##
replicas: 1
## @param deployment.restartPolicy Restart policy for the deployment.
## Options: Always, OnFailure, Never
##
restartPolicy: Always
## @param deployment.annotations Additional deployment annotations
##
annotations: {}
## @param deployment.labels Additional deployment labels
##
labels: {}
## @param deployment.pullSecret Kubernetes secret for pulling the image
##
pullSecret: []
## @param deployment.resources Resource requests and limits
##
resources: {}
## @param deployment.nodeSelector Node selector for the deployment
##
nodeSelector: {}
## @param deployment.tolerations Tolerations for the deployment
##
tolerations: []
## @param deployment.podAnnotations Additional pod annotations
##
podAnnotations: {}
## @param deployment.podLabels Additional pod labels
##
podLabels: {}
## @param deployment.affinity Affinity for the deployment
##
affinity: {}
## @param deployment.env Additional environment variables
##
env: {}
## @param deployment.envFrom Additional environment variables from config maps or secrets
##
envFrom: []
## @param deployment.priorityClassName Priority class name for the deployment
##
priorityClassName: ""
## @param deployment.dnsConfig DNS configuration for the deployment
##
dnsConfig: {}
## @param deployment.securityContext Security context for the deployment
##
securityContext: {}
## @section Liveness probe settings
##
livenessProbe:
httpGet:
## @param deployment.livenessProbe.httpGet.path Path for liveness probe
##
path: /health
## @param deployment.livenessProbe.httpGet.port Port for liveness probe
##
port: 9198
## @param deployment.livenessProbe.initialDelaySeconds Initial delay for liveness probe
##
initialDelaySeconds: 30
## @param deployment.livenessProbe.periodSeconds Period for liveness probe
##
periodSeconds: 10
## @param deployment.livenessProbe.timeoutSeconds Timeout for liveness probe
##
timeoutSeconds: 5
## @param deployment.livenessProbe.successThreshold Success threshold for liveness probe
##
successThreshold: 1
## @param deployment.livenessProbe.failureThreshold Failure threshold for liveness probe
##
failureThreshold: 3
## @param deployment.readinessProbe Readiness probe configuration
##
readinessProbe: {}
## @section Rolling update settings
##
rollingUpdate:
## @param deployment.rollingUpdate.maxSurge Maximum surge for rolling update
##
maxSurge: 1
## @param deployment.rollingUpdate.maxUnavailable Maximum unavailable for rolling update
##
maxUnavailable: 0
## @section metricsPort settings
##
metricsPort:
## @param deployment.metricsPort.name Name of the port
##
name: http
## @section Service settings
##
service:
## @param service.type Service type
##
type: ClusterIP
## @param service.port Service port
##
port: 9198
## @param service.annotations Additional service annotations
##
annotations: {}
## @param service.labels Additional service labels
##
labels: {}
## @section ServiceAccount settings
##
serviceAccount:
## @param serviceAccount.enabled Enable service account creation
##
enabled: false
## @param serviceAccount.create Create service account
##
create: false
## @param serviceAccount.name Service account name
##
name: ""
## @param serviceAccount.annotations Additional service account annotations
##
annotations: {}
README:
# Logstash-exporter
[![codecov](https://codecov.io/gh/kuskoman/logstash-exporter/branch/master/graph/badge.svg?token=ISIVB93OC6)](https://codecov.io/gh/kuskoman/logstash-exporter)
Export metrics from Logstash to Prometheus.
The project was created as a rewrite of an existing awesome application
[logstash_exporter](https://github.com/BonnierNews/logstash_exporter),
which was also written in Go, but it was not maintained for a long time.
A lot of code was reused from the original project.
**Important:** Because of a limited workforce, this project is tested only against a single Logstash version.
You can check the tested version in [docker-compose.yml](./docker-compose.yml) file.
Using this exporter with other versions of Logstash may not work properly (although most of the metrics should work).
## Usage
### Running the app
The application can be run in two ways:
- using the binary executable
- using the Docker image
Additionally [Helm chart](./chart/) is provided for easy deployment to Kubernetes.
#### Binary Executable
The binary executable can be downloaded from the [releases page](https://github.com/kuskoman/logstash-exporter/releases).
Linux binary is available under `https://github.com/kuskoman/logstash-exporter/releases/download/v${VERSION}/logstash-exporter-linux`.
The binary can be run without additional arguments, as the configuration is loaded from the `.env` file and environment variables.
Each binary should contain a SHA256 checksum file, which can be used to verify the integrity of the binary.
VERSION="test-tag" \
OS="linux" \
wget "https://github.com/kuskoman/logstash-exporter/releases/download/${VERSION}/logstash-exporter-${OS}" && \
wget "https://github.com/kuskoman/logstash-exporter/releases/download/${VERSION}/logstash-exporter-${OS}.sha256" && \
sha256sum -c logstash-exporter-${OS}.sha256
It is recommended to use the binary executable in combination with the [systemd](https://systemd.io/) service.
The application should not require any root privileges, so it is recommended to run it as a non-root user.
##### Unstable (master) version
The unstable version of the application can be downloaded from the
[GitHub Actions](https://github.com/kuskoman/logstash-exporter/actions?query=branch%3Amaster+workflow%3A%22Go+application+CI%2FCD%22).
The latest successful build can be found under the `Go application CI/CD` workflow (already selected in the link).
To download the binary, simply go to the link location, click on the latest successful build, and download the binary
from the `Artifacts` section on the bottom of the page.
You can download an artifact from any workflow run, not only from the master branch. To do that, go to
[GitHub Actions without master filter](https://github.com/kuskoman/logstash-exporter/actions?query=workflow%3A%22Go+application+CI%2FCD%22),
select the workflow run you want to download the artifact from, and download the binary from the `Artifacts` section.
#### Docker Image
The Docker image is available under `kuskoman/logstash-exporter:<tag>`.
You can pull the image using the following command:
docker pull kuskoman/logstash-exporter:<tag>
You can browse tags on the [Docker Hub](https://hub.docker.com/r/kuskoman/logstash-exporter/tags).
The Docker image can be run using the following command:
docker run -d \
-p 9198:9198 \
-e LOGSTASH_URL=http://logstash:9600 \
kuskoman/logstash-exporter:<tag>
##### Unstable (master) image
The unstable version of the Docker image can be downloaded from the
[GitHub Container Registry](https://github.com/users/kuskoman/packages/container/package/logstash-exporter).
To pull the image from the command line, simply use:
docker pull ghcr.io/kuskoman/logstash-exporter:master
#### Helm Chart
The Helm chart has its own [README](./chart/README.md).
### Endpoints
- `/metrics`: Exposes metrics in Prometheus format.
- `/health`: Returns 200 if app runs properly.
### Configuration
The application can be configured using the following environment variables, which are also loaded from `.env` file:
| Variable Name | Description | Default Value |
|---------------|-----------------------------------------------------------------------------------------------|-------------------------|
| `LOGSTASH_URL`| URL to Logstash API | `http://localhost:9600` |
| `PORT` | Port on which the application will be exposed | `9198` |
| `HOST` | Host on which the application will be exposed | `""` (empty string) |
| `LOG_LEVEL` | [Log level](https://pkg.go.dev/golang.org/x/exp/slog#Level) (defaults to "info" if not set) | `""` (empty string) |
| `HTTP_TIMEOUT`| Timeout for HTTP requests to Logstash API in [Go duration format](https://golang.org/pkg/time/#ParseDuration) | `2s` |
All configuration variables can be checked in the [config directory](./config/).
## Building
### Makefile
#### Available Commands
<!--- GENERATED by ./scripts/add_descriptions_to_readme.sh --->
- `make all`: Builds binary executables for all OS (Win, Darwin, Linux).
- `make run`: Runs the Go Exporter application.
- `make build-linux`: Builds a binary executable for Linux.
- `make build-darwin`: Builds a binary executable for Darwin.
- `make build-windows`: Builds a binary executable for Windows.
- `make build-linux-arm`: Builds a binary executable for Linux ARM.
- `make build-docker`: Builds a Docker image for the Go Exporter application.
- `make build-docker-multi`: Builds a multi-arch Docker image (`amd64` and `arm64`).
- `make clean`: Deletes all binary executables in the out directory.
- `make test`: Runs all tests.
- `make test-coverage`: Displays test coverage report.
- `make compose`: Starts a Docker-compose configuration.
- `make wait-for-compose`: Starts a Docker-compose configuration until it's ready.
- `make compose-down`: Stops a Docker-compose configuration.
- `make verify-metrics`: Verifies the metrics from the Go Exporter application.
- `make pull`: Pulls the Docker image from the registry.
- `make logs`: Shows logs from the Docker-compose configuration.
- `make minify`: Minifies the binary executables.
- `make install-helm-readme`: Installs readme-generator-for-helm tool.
- `make helm-readme`: Generates Helm chart README.md file.
- `make clean-elasticsearch`: Cleans Elasticsearch data, works only with default ES port. The command may take a very long time to complete.
- `make update-readme-descriptions`: Update Makefile descriptions in main README.md.
- `make update-snapshots`: Updates snapshot for test data and runs tests.
- `make upgrade-dependencies`: Upgrades all dependencies.
- `make help`: Shows info about available commands.
<!--- **************************************************** --->
#### File Structure
The main Go Exporter application is located in the cmd/exporter/main.go file.
The binary executables are saved in the out directory.
#### Example Usage
<!--- GENERATED by ./scripts/add_descriptions_to_readme.sh --->
Builds binary executables for all OS (Win, Darwin, Linux):
make all
Runs the Go Exporter application:
make run
Builds a binary executable for Linux:
make build-linux
Builds a binary executable for Darwin:
make build-darwin
Builds a binary executable for Windows:
make build-windows
Builds a Docker image for the Go Exporter application:
make build-docker
Builds a multi-arch Docker image (`amd64` and `arm64`):
make build-docker-multi
Deletes all binary executables in the out directory:
make clean
Runs all tests:
make test
Displays test coverage report:
make test-coverage
Starts a Docker-compose configuration:
make compose
Starts a Docker-compose configuration until it's ready:
make wait-for-compose
Stops a Docker-compose configuration:
make compose-down
Verifies the metrics from the Go Exporter application:
make verify-metrics
Pulls the Docker image from the registry:
make pull
Shows logs from the Docker-compose configuration:
make logs
Minifies the binary executables:
make minify
Installs readme-generator-for-helm tool:
make install-helm-readme
Generates Helm chart README.md file:
make helm-readme
Cleans Elasticsearch data, works only with default ES port. The command may take a very long time to complete:
make clean-elasticsearch
Shows info about available commands:
make help
<!--- **************************************************** --->
## Helper Scripts
The application repository contains some helper scripts, which can be used to improve the process
of building, testing, and running the application. These scripts are not useful for the end user,
but they can be useful for all potential contributors.
The helper scripts are located in the [scripts](./scripts/) directory.
### add_metrics_to_readme.sh
This [script](./scripts/add_metrics_to_readme.sh) is used to add metrics table to the README.md file.
Usage:
./scripts/add_metrics_to_readme.sh
### create_release_notes.sh
This [script](./scripts/create_release_notes.sh) is used to create release notes for the GitHub release.
Used primarily by the [CI workflow](./.github/workflows/go-application.yml).
### generate_helm_readme.sh
This [script](./scripts/generate_helm_readme.sh) is used to generate Helm chart [README.md](./chart/README.md) file.
The readme contains all the configuration variables from the [values.yaml](./chart/values.yaml) file.
### install_helm_readme_generator.sh
This [script](./scripts/install_helm_readme_generator.sh) is used to install
[readme-generator-for-helm](https://github.com/bitnami-labs/readme-generator-for-helm) tool.
The tool is used to generate Helm chart [README.md](./chart/README.md) file.
The script installs the tool under [helm-generator](./helm-generator) directory.
### verify_metrics.sh
This [script](./scripts/verify_metrics.sh) is used to verify the metrics from the Go Exporter application.
Can be used both locally and in the CI workflow.
./scripts/verify_metrics.sh
## Testing process
The application contains both unit and integration tests. All the tests are executed in the CI workflow.
### Unit Tests
Unit tests are located in the same directories as the tested files.
To run all unit tests, use the following command:
make test
### Integration Tests
Integration tests check whether Prometheus metrics are exposed properly.
To run them, you must set up the development [docker-compose](./docker-compose.yml) file.
make wait-for-compose
Then you can run the tests:
make verify-metrics
## Grafana Dashboard
A Grafana Dashboard designed for metrics from Logstash-exporter on Kubernetes is available at
[grafana.com/grafana/dashboards/18628-logstash-on-kubernetes-dashboard/](https://grafana.com/grafana/dashboards/18628-logstash-on-kubernetes-dashboard/).
This dashboard's JSON source is at [excalq/grafana-logstash-kubernetes](https://github.com/excalq/grafana-logstash-kubernetes).
(If not using Kubernetes, change `$pod` to `$instance` in the JSON.)
![Grafana Dashboard](https://grafana.com/api/dashboards/18628/images/14184/image)
## Additional Information
This project's code was reviewed by [Boldly Go](https://www.youtube.com/@boldlygo)
in an awesome [video](https://www.youtube.com/watch?v=Oe6L5ZmqCDE), which in
a huge way helped me to improve the code quality.
## Contributing
If you want to contribute to this project, please read the [CONTRIBUTING.md](./CONTRIBUTING.md) file.
## Metrics
Table of exported metrics:
<!-- METRICS_TABLE_START -->
| Name | Type | Description |
| ----------- | ----------- | ----------- |
| logstash_exporter_build_info | gauge | A metric with a constant '1' value labeled by version, revision, branch, goversion from which logstash_exporter was built, and the goos and goarch for the build. |
| logstash_info_build | counter | A metric with a constant '1' value labeled by build date, sha, and snapshot of the logstash instance. |
| logstash_info_node | counter | A metric with a constant '1' value labeled by node name, version, host, http_address, and id of the logstash instance. |
| logstash_info_pipeline_batch_delay | counter | Amount of time to wait for events to fill the batch before sending to the filter and output stages. |
| logstash_info_pipeline_batch_size | counter | Number of events to retrieve from the input queue before sending to the filter and output stages. |
| logstash_info_pipeline_workers | counter | Number of worker threads that will process pipeline events. |
| logstash_info_status | counter | A metric with a constant '1' value labeled by status. |
| logstash_info_up | gauge | A metric that returns 1 if the node is up, 0 otherwise. |
| logstash_stats_events_duration_millis | gauge | Duration of events processing in milliseconds. |
| logstash_stats_events_filtered | gauge | Number of events filtered out. |
| logstash_stats_events_in | gauge | Number of events received. |
| logstash_stats_events_out | gauge | Number of events out. |
| logstash_stats_events_queue_push_duration_millis | gauge | Duration of events push to queue in milliseconds. |
| logstash_stats_flow_filter_current | gauge | Current number of events in the filter queue. |
| logstash_stats_flow_filter_lifetime | gauge | Lifetime number of events in the filter queue. |
| logstash_stats_flow_input_current | gauge | Current number of events in the input queue. |
| logstash_stats_flow_input_lifetime | gauge | Lifetime number of events in the input queue. |
| logstash_stats_flow_output_current | gauge | Current number of events in the output queue. |
| logstash_stats_flow_output_lifetime | gauge | Lifetime number of events in the output queue. |
| logstash_stats_flow_queue_backpressure_current | gauge | Current number of events in the backpressure queue. |
| logstash_stats_flow_queue_backpressure_lifetime | gauge | Lifetime number of events in the backpressure queue. |
| logstash_stats_flow_worker_concurrency_current | gauge | Current number of workers. |
| logstash_stats_flow_worker_concurrency_lifetime | gauge | Lifetime number of workers. |
| logstash_stats_jvm_mem_heap_committed_bytes | gauge | Amount of heap memory in bytes that is committed for the Java virtual machine to use. |
| logstash_stats_jvm_mem_heap_max_bytes | gauge | Maximum amount of heap memory in bytes that can be used for memory management. |
| logstash_stats_jvm_mem_heap_used_bytes | gauge | Amount of used heap memory in bytes. |
| logstash_stats_jvm_mem_heap_used_percent | gauge | Percentage of the heap memory that is used. |
| logstash_stats_jvm_mem_non_heap_committed_bytes | gauge | Amount of non-heap memory in bytes that is committed for the Java virtual machine to use. |
| logstash_stats_jvm_mem_pool_committed_bytes | gauge | Amount of bytes that are committed for the Java virtual machine to use in a given JVM memory pool. |
| logstash_stats_jvm_mem_pool_max_bytes | gauge | Maximum amount of bytes that can be used in a given JVM memory pool. |
| logstash_stats_jvm_mem_pool_peak_max_bytes | gauge | Highest value of bytes that were used in a given JVM memory pool. |
| logstash_stats_jvm_mem_pool_peak_used_bytes | gauge | Peak used bytes of a given JVM memory pool. |
| logstash_stats_jvm_mem_pool_used_bytes | gauge | Currently used bytes of a given JVM memory pool. |
| logstash_stats_jvm_threads_count | gauge | Number of live threads including both daemon and non-daemon threads. |
| logstash_stats_jvm_threads_peak_count | gauge | Peak live thread count since the Java virtual machine started or peak was reset. |
| logstash_stats_jvm_uptime_millis | gauge | Uptime of the JVM in milliseconds. |
| logstash_stats_pipeline_dead_letter_queue_dropped_events | counter | Number of events dropped by the dead letter queue. |
| logstash_stats_pipeline_dead_letter_queue_expired_events | counter | Number of events expired in the dead letter queue. |
| logstash_stats_pipeline_dead_letter_queue_max_size_in_bytes | gauge | Maximum size of the dead letter queue in bytes. |
| logstash_stats_pipeline_dead_letter_queue_size_in_bytes | gauge | Current size of the dead letter queue in bytes. |
| logstash_stats_pipeline_events_duration | counter | Time needed to process event. |
| logstash_stats_pipeline_events_filtered | counter | Number of events that have been filtered out by this pipeline. |
| logstash_stats_pipeline_events_in | counter | Number of events that have been inputted into this pipeline. |
| logstash_stats_pipeline_events_out | counter | Number of events that have been processed by this pipeline. |
| logstash_stats_pipeline_events_queue_push_duration | counter | Time needed to push event to queue. |
| logstash_stats_pipeline_flow_filter_current | gauge | Current number of events in the filter queue. |
| logstash_stats_pipeline_flow_filter_lifetime | counter | Lifetime number of events in the filter queue. |
| logstash_stats_pipeline_flow_input_current | gauge | Current number of events in the input queue. |
| logstash_stats_pipeline_flow_input_lifetime | counter | Lifetime number of events in the input queue. |
| logstash_stats_pipeline_flow_output_current | gauge | Current number of events in the output queue. |
| logstash_stats_pipeline_flow_output_lifetime | counter | Lifetime number of events in the output queue. |
| logstash_stats_pipeline_flow_queue_backpressure_current | gauge | Current number of events in the backpressure queue. |
| logstash_stats_pipeline_flow_queue_backpressure_lifetime | counter | Lifetime number of events in the backpressure queue. |
| logstash_stats_pipeline_flow_worker_concurrency_current | gauge | Current number of workers. |
| logstash_stats_pipeline_flow_worker_concurrency_lifetime | counter | Lifetime number of workers. |
| logstash_stats_pipeline_plugin_bulk_requests_errors | counter | Number of bulk request errors. |
| logstash_stats_pipeline_plugin_bulk_requests_responses | counter | Bulk request HTTP response counts by code. |
| logstash_stats_pipeline_plugin_documents_non_retryable_failures | counter | Number of output events with non-retryable failures. |
| logstash_stats_pipeline_plugin_documents_successes | counter | Number of successful bulk requests. |
| logstash_stats_pipeline_plugin_events_duration | counter | Time spent processing events in this plugin. |
| logstash_stats_pipeline_plugin_events_in | counter | Number of events received this pipeline. |
| logstash_stats_pipeline_plugin_events_out | counter | Number of events output by this pipeline. |
| logstash_stats_pipeline_plugin_events_queue_push_duration | counter | Time spent pushing events into the input queue. |
| logstash_stats_pipeline_queue_events_count | counter | Number of events in the queue. |
| logstash_stats_pipeline_queue_events_queue_size | counter | Number of events that the queue can accommodate |
| logstash_stats_pipeline_queue_max_size_in_bytes | counter | Maximum size of given queue in bytes. |
| logstash_stats_pipeline_reloads_failures | counter | Number of failed pipeline reloads. |
| logstash_stats_pipeline_reloads_successes | counter | Number of successful pipeline reloads. |
| logstash_stats_pipeline_reloads_last_failure_timestamp | gauge | Timestamp of last failed pipeline reload. |
| logstash_stats_pipeline_reloads_last_success_timestamp | gauge | Timestamp of last successful pipeline reload. |
| logstash_stats_pipeline_up | gauge | Whether the pipeline is up or not. |
| logstash_stats_process_cpu_load_average_1m | gauge | Total 1m system load average. |
| logstash_stats_process_cpu_load_average_5m | gauge | Total 5m system load average. |
| logstash_stats_process_cpu_load_average_15m | gauge | Total 15m system load average. |
| logstash_stats_process_cpu_percent | gauge | CPU usage of the process. |
| logstash_stats_process_cpu_total_millis | gauge | Total CPU time used by the process. |
| logstash_stats_process_max_file_descriptors | gauge | Limit of open file descriptors. |
| logstash_stats_process_mem_total_virtual | gauge | Total virtual memory used by the process. |
| logstash_stats_process_open_file_descriptors | gauge | Number of currently open file descriptors. |
| logstash_stats_queue_events_count | gauge | Number of events in the queue. |
| logstash_stats_reload_failures | gauge | Number of failed reloads. |
| logstash_stats_reload_successes | gauge | Number of successful reloads. |
<!-- METRICS_TABLE_END -->
schema.json:
{
"title": "Chart Values",
"type": "object",
"properties": {
"logstash": {
"type": "object",
"properties": {
"url": {
"type": "string",
"description": "Logstash instance URL",
"default": "http://logstash:9600"
},
"httpTimeout": {
"type": "string",
"description": "http timeout",
"default": "3s"
},
"httpInsecure": {
"type": "boolean",
"description": "http insecure",
"default": false
}
}
},
"web": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path under which to expose metrics.",
"default": "/"
}
}
},
"podMonitor": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable pod monitor creation",
"default": false
},
"apiVersion": {
"type": "string",
"description": "Set pod monitor apiVersion",
"default": "monitoring.coreos.com/v1"
},
"namespace": {
"type": "string",
"description": "Set pod monitor namespace",
"default": ""
},
"labels": {
"type": "object",
"description": "Set pod monitor labels",
"default": {}
},
"interval": {
"type": "string",
"description": "Set pod monitor interval",
"default": "60s"
},
"scrapeTimeout": {
"type": "string",
"description": "Set pod monitor scrapeTimeout",
"default": "10s"
},
"honorLabels": {
"type": "boolean",
"description": "Set pod monitor honorLabels",
"default": true
},
"scheme": {
"type": "string",
"description": "Set pod monitor scheme",
"default": "http"
},
"relabelings": {
"type": "array",
"description": "Set pod monitor relabelings",
"default": [],
"items": {}
},
"metricRelabelings": {
"type": "array",
"description": "Set pod monitor metricRelabelings",
"default": [],
"items": {}
}
}
},
"image": {
"type": "object",
"properties": {
"repository": {
"type": "string",
"description": "Image repository",
"default": "kuskoman/logstash-exporter"
},
"tag": {
"type": "string",
"description": "Image tag, if not set the appVersion is used",
"default": ""
},
"pullPolicy": {
"type": "string",
"description": "Image pull policy",
"default": "IfNotPresent"
}
}
},
"fullnameOverride": {
"type": "string",
"description": "Override the fullname of the chart",
"default": ""
},
"deployment": {
"type": "object",
"properties": {
"replicas": {
"type": "number",
"description": "Number of replicas for the deployment",
"default": 1
},
"restartPolicy": {
"type": "string",
"description": "Restart policy for the deployment.",
"default": "Always"
},
"annotations": {
"type": "object",
"description": "Additional deployment annotations",
"default": {}
},
"labels": {
"type": "object",
"description": "Additional deployment labels",
"default": {}
},
"pullSecret": {
"type": "array",
"description": "Kubernetes secret for pulling the image",
"default": [],
"items": {}
},
"resources": {
"type": "object",
"description": "Resource requests and limits",
"default": {}
},
"nodeSelector": {
"type": "object",
"description": "Node selector for the deployment",
"default": {}
},
"tolerations": {
"type": "array",
"description": "Tolerations for the deployment",
"default": [],
"items": {}
},
"podAnnotations": {
"type": "object",
"description": "Additional pod annotations",
"default": {}
},
"podLabels": {
"type": "object",
"description": "Additional pod labels",
"default": {}
},
"affinity": {
"type": "object",
"description": "Affinity for the deployment",
"default": {}
},
"env": {
"type": "object",
"description": "Additional environment variables",
"default": {}
},
"envFrom": {
"type": "array",
"description": "Additional environment variables from config maps or secrets",
"default": [],
"items": {}
},
"priorityClassName": {
"type": "string",
"description": "Priority class name for the deployment",
"default": ""
},
"dnsConfig": {
"type": "object",
"description": "DNS configuration for the deployment",
"default": {}
},
"securityContext": {
"type": "object",
"description": "Security context for the deployment",
"default": {}
},
"livenessProbe": {
"type": "object",
"properties": {
"httpGet": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path for liveness probe",
"default": "/health"
},
"port": {
"type": "number",
"description": "Port for liveness probe",
"default": 9198
}
}
},
"initialDelaySeconds": {
"type": "number",
"description": "Initial delay for liveness probe",
"default": 30
},
"periodSeconds": {
"type": "number",
"description": "Period for liveness probe",
"default": 10
},
"timeoutSeconds": {
"type": "number",
"description": "Timeout for liveness probe",
"default": 5
},
"successThreshold": {
"type": "number",
"description": "Success threshold for liveness probe",
"default": 1
},
"failureThreshold": {
"type": "number",
"description": "Failure threshold for liveness probe",
"default": 3
}
}
},
"readinessProbe": {
"type": "object",
"description": "Readiness probe configuration",
"default": {}
},
"rollingUpdate": {
"type": "object",
"properties": {
"maxSurge": {
"type": "number",
"description": "Maximum surge for rolling update",
"default": 1
},
"maxUnavailable": {
"type": "number",
"description": "Maximum unavailable for rolling update",
"default": 0
}
}
},
"metricsPort": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Name of the port",
"default": "http"
}
}
}
}
},
"service": {
"type": "object",
"properties": {
"type": {
"type": "string",
"description": "Service type",
"default": "ClusterIP"
},
"port": {
"type": "number",
"description": "Service port",
"default": 9198
},
"annotations": {
"type": "object",
"description": "Additional service annotations",
"default": {}
},
"labels": {
"type": "object",
"description": "Additional service labels",
"default": {}
}
}
},
"serviceAccount": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable service account creation",
"default": false
},
"create": {
"type": "boolean",
"description": "Create service account",
"default": false
},
"name": {
"type": "string",
"description": "Service account name",
"default": ""
},
"annotations": {
"type": "object",
"description": "Additional service account annotations",
"default": {}
}
}
}
}
}
@JulienOrain I created branch pr-384 so you can copy changes
hello @kuskoman ! thanks for your answer. This MR is now up-to-date ;)
seems like pipeline is complaining about newline, could you add/remove it?
All modified and coverable lines are covered by tests :white_check_mark:
Project coverage is 93.14%. Comparing base (`198ad36`) to head (`bb4cd00`). Report is 15 commits behind head on v1.
:umbrella: View full report in Codecov by Sentry.
:loudspeaker: Have feedback on the report? Share it here.
Hello @kuskoman!
Thanks for the merge!
Do you plan to release a new v1.8.3 version?
Thanks!
Regards,
Julien Orain
Hello, I can do it today. Please remind me if I forget about that.
Hello @kuskoman, if by any chance you get any time this week, I'd love to use the new release! Thanks!
@JulienOrain done
Hello
Some enhancements for the helm chart V1 :