Yeah, 100% agree. While I don't think there are any breaking changes in this update, there are enough changes to the configuration on upgrade that it probably warrants a major version bump. I like to think I'm overly cautious about making changes without the user's awareness, so I'm open to any feedback.
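For anyone consuming this module, a version constraint is the standard guard against picking up a major bump unannounced. A minimal sketch, assuming a registry-style module source (the address and version below are illustrative, not the exact values for this module):

```hcl
# Pin to the current major line; a future major release (like the one
# discussed above) then requires an explicit version change to adopt.
module "lacework_k8s_datacollector" {
  source  = "lacework/agent/kubernetes" # illustrative source address
  version = "~> 1.0"                    # allows any 1.x, blocks 2.0.0 and later
}
```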
Here's the plan produced when I upgraded a relatively standard config:
```
Terraform will perform the following actions:

  # module.k8s_services.module.lacework_k8s_datacollector.kubernetes_daemonset.lacework_datacollector will be updated in-place
  ~ resource "kubernetes_daemonset" "lacework_datacollector" {
        id = "lacework/tf-module-test"
        # (1 unchanged attribute hidden)

      ~ spec {
            # (2 unchanged attributes hidden)

          ~ template {
              ~ metadata {
                  ~ annotations = {
                      - "lacework_config_version" = "38942621"
                    } -> (known after apply)
                    # (2 unchanged attributes hidden)
                }

              ~ spec {
                  ~ dns_policy                       = "ClusterFirst" -> "ClusterFirstWithHostNet"
                  ~ termination_grace_period_seconds = 30 -> 20
                    # (9 unchanged attributes hidden)

                  + affinity {
                      + node_affinity {
                          + required_during_scheduling_ignored_during_execution {
                              + node_selector_term {
                                  + match_expressions {
                                      + key      = "kubernetes.io/arch"
                                      + operator = "In"
                                      + values   = [
                                          + "amd64",
                                          + "arm64",
                                        ]
                                    }
                                  + match_expressions {
                                      + key      = "kubernetes.io/os"
                                      + operator = "In"
                                      + values   = [
                                          + "linux",
                                        ]
                                    }
                                }
                            }
                        }
                    }

                  ~ container {
                        name = "lacework"
                        # (9 unchanged attributes hidden)

                      + env {
                          + name = "LaceworkAccessToken"

                          + value_from {
                              + secret_key_ref {
                                  + key  = "agent-access-token"
                                  + name = "tf-module-test-access-token"
                                }
                            }
                        }

                      ~ security_context {
                          + run_as_user = "0"
                            # (4 unchanged attributes hidden)
                        }

                      ~ volume_mount {
                          ~ mount_path = "/var/run/docker.sock" -> "/sys"
                          ~ name       = "run-sock" -> "sys"
                            # (2 unchanged attributes hidden)
                        }
                      ~ volume_mount {
                          ~ mount_path = "/var/run/docker.pid" -> "/var/log"
                          ~ name       = "run-pid" -> "log"
                            # (2 unchanged attributes hidden)
                        }
                      ~ volume_mount {
                          ~ mount_path = "/sys" -> "/etc/passwd"
                          ~ name       = "sys" -> "passwd"
                          ~ read_only  = false -> true
                            # (1 unchanged attribute hidden)
                        }
                      ~ volume_mount {
                          ~ mount_path = "/var/log" -> "/etc/group"
                          ~ name       = "log" -> "group"
                          ~ read_only  = false -> true
                            # (1 unchanged attribute hidden)
                        }
                      ~ volume_mount {
                          ~ mount_path = "/etc/passwd" -> "/var/lib/lacework/collector"
                          ~ name       = "passwd" -> "hostlacework"
                          ~ read_only  = true -> false
                            # (1 unchanged attribute hidden)
                        }
                      ~ volume_mount {
                          ~ mount_path = "/etc/group" -> "/var/lib/lacework/controller"
                          ~ name       = "group" -> "hostlaceworkcontroller"
                          ~ read_only  = true -> false
                            # (1 unchanged attribute hidden)
                        }
                      ~ volume_mount {
                          ~ mount_path = "/var/lib/lacework/collector" -> "/laceworkfim"
                          ~ name       = "hostlacework" -> "hostroot"
                          ~ read_only  = false -> true
                            # (1 unchanged attribute hidden)
                        }
                      ~ volume_mount {
                          ~ mount_path = "/laceworkfim" -> "/etc/podinfo"
                          ~ name       = "hostroot" -> "podinfo"
                          ~ read_only  = true -> false
                            # (1 unchanged attribute hidden)
                        }
                      - volume_mount {
                          - mount_path        = "/etc/podinfo" -> null
                          - mount_propagation = "None" -> null
                          - name              = "podinfo" -> null
                          - read_only         = false -> null
                        }
                        # (4 unchanged blocks hidden)
                    }

                  ~ toleration {
                      ~ key = "node-role.kubernetes.io/master" -> "node-role.kubernetes.io/infra"
                        # (2 unchanged attributes hidden)
                    }
                  + toleration {
                      + effect   = "NoSchedule"
                      + key      = "node-role.kubernetes.io/master"
                      + operator = "Equal"
                    }

                  ~ volume {
                      ~ name = "run-sock" -> "sys"

                      ~ host_path {
                          ~ path = "/var/run/docker.sock" -> "/sys"
                        }
                    }
                  ~ volume {
                      ~ name = "run-pid" -> "log"

                      ~ host_path {
                          ~ path = "/var/run/docker.pid" -> "/var/log"
                        }
                    }
                  ~ volume {
                      ~ name = "sys" -> "passwd"

                      ~ host_path {
                          ~ path = "/sys" -> "/etc/passwd"
                        }
                    }
                  ~ volume {
                      ~ name = "log" -> "group"

                      ~ host_path {
                          ~ path = "/var/log" -> "/etc/group"
                        }
                    }
                  ~ volume {
                      ~ name = "passwd" -> "hostroot"

                      ~ host_path {
                          ~ path = "/etc/passwd" -> "/"
                        }
                    }
                  ~ volume {
                      ~ name = "group" -> "hostlacework"

                      ~ host_path {
                          ~ path = "/etc/group" -> "/var/lib/lacework/collector"
                        }
                    }
                  ~ volume {
                      ~ name = "hostroot" -> "hostlaceworkcontroller"

                      ~ host_path {
                          ~ path = "/" -> "/var/lib/lacework/controller"
                        }
                    }
                  ~ volume {
                      ~ name = "hostlacework" -> "config"

                      - host_path {
                          - path = "/var/lib/lacework/collector" -> null
                        }
                      + secret {
                          + default_mode = "0644"
                          + secret_name  = (known after apply)

                          + items {
                              + key  = "config.json"
                              + path = "config.json"
                            }
                        }
                    }
                  ~ volume {
                      ~ name = "config" -> "podinfo"

                      + downward_api {
                          + default_mode = "0644"

                          + items {
                              + path = "labels"

                              + field_ref {
                                  + api_version = "v1"
                                  + field_path  = "metadata.labels"
                                }
                            }
                          + items {
                              + path = "annotations"

                              + field_ref {
                                  + api_version = "v1"
                                  + field_path  = "metadata.annotations"
                                }
                            }
                          + items {
                              + path = "name"

                              + field_ref {
                                  + api_version = "v1"
                                  + field_path  = "metadata.name"
                                }
                            }
                          + items {
                              + path = "poduid"

                              + field_ref {
                                  + api_version = "v1"
                                  + field_path  = "metadata.uid"
                                }
                            }
                          + items {
                              + path = "namespace"

                              + field_ref {
                                  + api_version = "v1"
                                  + field_path  = "metadata.namespace"
                                }
                            }
                        }
                      - secret {
                          - default_mode = "0644" -> null
                          - optional     = false -> null
                          - secret_name  = "lacework-config-e39a0210529dd4ee" -> null

                          - items {
                              - key  = "config.json" -> null
                              - path = "config.json" -> null
                            }
                        }
                    }
                  - volume {
                      - name = "podinfo" -> null

                      - downward_api {
                          - default_mode = "0644" -> null

                          - items {
                              - path = "labels" -> null

                              - field_ref {
                                  - api_version = "v1" -> null
                                  - field_path  = "metadata.labels" -> null
                                }
                            }
                          - items {
                              - path = "annotations" -> null

                              - field_ref {
                                  - api_version = "v1" -> null
                                  - field_path  = "metadata.annotations" -> null
                                }
                            }
                        }
                    }
                    # (1 unchanged block hidden)
                }
            }
            # (2 unchanged blocks hidden)
        }
        # (1 unchanged block hidden)
    }

  # module.k8s_services.module.lacework_k8s_datacollector.kubernetes_secret.lacework_access_token will be created
  + resource "kubernetes_secret" "lacework_access_token" {
      + data                           = (sensitive value)
      + id                             = (known after apply)
      + type                           = "Opaque"
      + wait_for_service_account_token = true

      + metadata {
          + generation       = (known after apply)
          + labels           = {
              + "app"  = "tf-module-test"
              + "tier" = "monitoring"
            }
          + name             = "tf-module-test-access-token"
          + namespace        = "lacework"
          + resource_version = (known after apply)
          + uid              = (known after apply)
        }
    }

  # module.k8s_services.module.lacework_k8s_datacollector.kubernetes_secret.lacework_config must be replaced
-/+ resource "kubernetes_secret" "lacework_config" {
      ~ data             = (sensitive value)
      ~ id               = "lacework/lacework-config-e39a0210529dd4ee" -> (known after apply)
      - immutable        = false -> null
        # (2 unchanged attributes hidden)

      ~ metadata {
          - annotations      = {} -> null
          ~ generation       = 0 -> (known after apply)
          ~ labels           = {
              + "app"  = "tf-module-test-cluster"
              + "tier" = "monitoring"
            }
          ~ name             = "lacework-config-e39a0210529dd4ee" -> (known after apply) # forces replacement
          ~ resource_version = "38942621" -> (known after apply)
          ~ uid              = "ed9513cf-86aa-4a57-84b5-08570f525ad1" -> (known after apply)
            # (1 unchanged attribute hidden)
        }
    }

  # module.k8s_services.module.lacework_k8s_datacollector.random_id.cluster_config_name_tail will be created
  + resource "random_id" "cluster_config_name_tail" {
      + b64_std     = (known after apply)
      + b64_url     = (known after apply)
      + byte_length = 8
      + dec         = (known after apply)
      + hex         = (known after apply)
      + id          = (known after apply)
      + keepers     = {
          + "data" = ""
        }
    }

  # module.k8s_services.module.lacework_k8s_datacollector.random_id.config_name_tail will be destroyed
  # (because random_id.config_name_tail is not in configuration)
  - resource "random_id" "config_name_tail" {
      - b64_std     = "45oCEFKd1O4=" -> null
      - b64_url     = "45oCEFKd1O4" -> null
      - byte_length = 8 -> null
      - dec         = "16400423262151890158" -> null
      - hex         = "e39a0210529dd4ee" -> null
      - id          = "45oCEFKd1O4" -> null
      - keepers     = {
          - "data" = jsonencode(
                {
                  - ebpf   = {
                      - externalshortlivedattribution = "enabled"
                    }
                  - fim    = {
                      - notify = "true"
                    }
                  - tags   = {
                      - Env = "k8s"
                    }
                  - tokens = {
                      - AccessToken = "<redacted>"
                    }
                }
            )
        } -> null
    }

  # module.k8s_services.module.lacework_k8s_datacollector.random_id.node_config_name_tail will be created
  + resource "random_id" "node_config_name_tail" {
      + b64_std     = (known after apply)
      + b64_url     = (known after apply)
      + byte_length = 8
      + dec         = (known after apply)
      + hex         = (known after apply)
      + id          = (known after apply)
      + keepers     = {
          + "data" = jsonencode(
                {
                  + ebpf = {
                      + externalshortlivedattribution = "enabled"
                    }
                  + fim  = {
                      + notify = "true"
                    }
                  + tags = {
                      + Env = "k8s"
                    }
                }
            )
        }
    }

Plan: 4 to add, 1 to change, 2 to destroy.
```
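One detail worth calling out from the plan: the config secret must be replaced because its name embeds a `random_id` suffix, and that suffix is tied to the rendered agent config through `keepers`. A minimal sketch of the pattern, with the local value name assumed for illustration:

```hcl
# Hypothetical local mirroring the config shown in the plan's keepers.
locals {
  node_config = {
    ebpf = { externalshortlivedattribution = "enabled" }
    fim  = { notify = "true" }
    tags = { Env = "k8s" }
  }
}

# Any change to the config JSON regenerates the suffix, which rotates
# the derived secret name and forces Terraform to replace the secret.
resource "random_id" "node_config_name_tail" {
  byte_length = 8
  keepers = {
    data = jsonencode(local.node_config)
  }
}
```

That also lines up with the access token dropping out of the keepers data entirely; it now lives in the dedicated `tf-module-test-access-token` secret instead of the rendered config.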
## Summary
This PR implements the components needed to deploy the Lacework K8s Collector agent, enabling Kubernetes compliance analysis within the Lacework platform.
The configuration for this functionality was derived from the Lacework agent Helm chart located here.
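For reviewers skimming the plan, the credential wiring reduces to a dedicated secret plus a `secret_key_ref` environment variable on the collector container. A rough sketch using the names from the plan (the input variable and any omitted attributes are assumptions):

```hcl
# Dedicated secret holding the agent token; names match the plan output.
resource "kubernetes_secret" "lacework_access_token" {
  type = "Opaque"

  metadata {
    name      = "tf-module-test-access-token"
    namespace = "lacework"
  }

  data = {
    "agent-access-token" = var.lacework_access_token # assumed variable name
  }
}

# Inside the daemonset's container block, the token is then injected as
# an environment variable rather than baked into the config secret:
#
#   env {
#     name = "LaceworkAccessToken"
#     value_from {
#       secret_key_ref {
#         name = kubernetes_secret.lacework_access_token.metadata[0].name
#         key  = "agent-access-token"
#       }
#     }
#   }
```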
## How did you test this change?
Implemented example configurations for CI testing and deployed the module in a lab environment.
## Issue
N/A