adrianchifor closed this issue 6 years ago.
Same thing on sysdig/falco:dev
Silly me, I was trying to load the module on GKE Container-Optimized OS. Apologies for the noise.
If you set the env variable SYSDIG_BPF_PROBE="", you can leverage the eBPF probe instead of the kernel module. Just FYI.
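In the example daemonset that boils down to a fragment like this (a minimal sketch of just the container env entry; everything else is elided):

    # Container spec fragment (sketch) -- the empty value is intentional.
    # As far as I can tell the loader only checks that the variable is set,
    # and treats a non-empty value as a path to a pre-built probe object.
    env:
      - name: SYSDIG_BPF_PROBE
        value: ""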
Oh nice, I didn't know about this; it would be a good idea to include it in the deployment/configuration docs.
@mfdii Just tried to enable the eBPF probe, but I'm getting the following on 0.12.1:
* Trying to download precompiled BPF probe from https://s3.amazonaws.com/download.draios.com/stable/sysdig-probe-binaries/falco-probe-bpf-0.12.1-x86_64-4.4.111%2B-64959a6932af29adc303ff522e9dcf2e.o
curl: (22) The requested URL returned error: 404 Not Found
* Failure to find a BPF probe
@mstemm ^ FYI
How are you deploying Falco (daemonset or helm chart)?
If using the daemonset you need to mount the host's /etc directory to /host/etc in the Falco container.
The example daemonset doesn't have this as it's not needed for every platform.
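For reference, the fragments to add look roughly like this (a sketch; the volume name etc-fs is illustrative):

    # hostPath volume exposing the host's /etc (sketch; name is illustrative)
    volumes:
      - name: etc-fs
        hostPath:
          path: /etc

    # ...and the matching read-only mount in the Falco container
    volumeMounts:
      - name: etc-fs
        mountPath: /host/etc
        readOnly: true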
We are going to have a sprint towards the end of October to clean up and document the eBPF support.
I'm having the same issue with the host's /etc mounted and with the SYSDIG_BPF_PROBE environment variable set.
I've converted the YAML files from the examples to Terraform:
resource "kubernetes_service_account" "falco_sa" {
metadata {
name = "falco-account"
labels = {
app = "falco"
role = "security"
}
}
}
resource "kubernetes_cluster_role" "falco_cr" {
metadata {
name = "falco-cluster-role"
labels = {
app = "falco"
role = "security"
}
}
rule {
api_groups = ["extensions", ""]
resources = ["nodes", "namespaces", "pods", "replicationcontrollers", "replicasets", "services", "daemonsets", "deployments", "events", "configmaps"]
verbs = ["get", "list", "watch"]
}
rule {
non_resource_urls = ["/healthz", "/healthz/*"]
verbs = ["get"]
}
}
resource "kubernetes_cluster_role_binding" "falco_crb" {
metadata {
name = "falco-cluster-role-bind"
labels = {
app = "falco"
role = "security"
}
}
subject {
kind = "ServiceAccount"
name = kubernetes_service_account.falco_sa.metadata.0.name
namespace = "default"
}
role_ref {
kind = "ClusterRole"
name = kubernetes_cluster_role.falco_cr.metadata.0.name
api_group = "rbac.authorization.k8s.io"
}
}
resource "kubernetes_config_map" "falco_cfgmap" {
metadata {
name = "falco-cfgmap"
labels = {
app = "falco"
role = "security"
}
}
data = {
"application_rules.yaml" = file("configs/falco/application_rules.yaml")
"falco_rules.local.yaml" = file("configs/falco/falco_rules.local.yaml")
"falco_rules.yaml" = file("configs/falco/falco_rules.yaml")
"k8s_audit_rules.yaml" = file("configs/falco/k8s_audit_rules.yaml")
}
}
resource "kubernetes_daemonset" "falco_ds" {
metadata {
name = "falco-daemonset"
labels = {
app = "falco"
role = "security"
}
}
spec {
selector {
match_labels = {
app = "falco"
role = "security"
}
}
template {
metadata {
labels = {
app = "falco"
role = "security"
}
}
spec {
service_account_name = kubernetes_service_account.falco_sa.metadata.0.name
volume {
name = "docker-socket"
host_path {
path = "/var/run/docker.socket"
}
}
volume {
name = "containerd-socket"
host_path {
path = "/run/containerd/containerd.sock"
}
}
volume {
name = "dev-fs"
host_path {
path = "/dev"
}
}
volume {
name = "proc-fs"
host_path {
path = "/proc"
}
}
volume {
name = "boot-fs"
host_path {
path = "/boot"
}
}
volume {
name = "lib-modules"
host_path {
path = "/lib/modules"
}
}
volume {
name = "usr-fs"
host_path {
path = "/usr"
}
}
volume {
name = "etc-fs"
host_path {
path = "/etc"
}
}
volume {
name = "falco-config"
config_map {
name = kubernetes_config_map.falco_cfgmap.metadata.0.name
}
}
container {
name = "falco"
image = "falcosecurity/falco:latest"
args = [
"/usr/bin/falco",
"--cri", "/host/run/containerd/containerd.sock",
"-K", "/var/run/secrets/kubernetes.io/serviceaccount/token",
"-k", "https://$(KUBERNETES_SERVICE_HOST)",
"-pk",
]
security_context {
privileged = true
}
env {
name = "SYSDIG_BPF_PROBE"
value = ""
}
volume_mount {
name = "docker-socket"
mount_path = "/host/var/run/docker.sock"
}
volume_mount {
name = "containerd-socket"
mount_path = "/host/run/containerd/containerd.sock"
}
volume_mount {
name = "dev-fs"
mount_path = "/host/dev"
}
volume_mount {
name = "proc-fs"
mount_path = "/host/proc"
read_only = true
}
volume_mount {
name = "boot-fs"
mount_path = "/host/boot"
read_only = true
}
volume_mount {
name = "lib-modules"
mount_path = "/host/lib/modules"
read_only = true
}
volume_mount {
name = "usr-fs"
mount_path = "/host/usr"
read_only = true
}
volume_mount {
name = "etc-fs"
mount_path = "/host/etc"
read_only = true
}
volume_mount {
name = "falco-config"
mount_path = "/etc/falco"
}
}
}
}
}
}
resource "kubernetes_service" "falco_svc" {
metadata {
name = kubernetes_daemonset.falco_ds.metadata.0.name
labels = {
app = "falco"
role = "security"
}
}
spec {
type = "ClusterIP"
port {
protocol = "TCP"
port = 8765
}
selector = {
app = "falco"
role = "security"
}
}
}
The output of kubectl describe daemonset looks correct:
Name:           falco-daemonset
Selector:       app=falco,role=security
Node-Selector:  <none>
Labels:         app=falco
                role=security
Annotations:    deprecated.daemonset.template.generation: 4
Desired Number of Nodes Scheduled: 3
Current Number of Nodes Scheduled: 3
Number of Nodes Scheduled with Up-to-date Pods: 3
Number of Nodes Scheduled with Available Pods: 0
Number of Nodes Misscheduled: 0
Pods Status:  3 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:           app=falco
                    role=security
  Service Account:  falco-account
  Containers:
   falco:
    Image:      falcosecurity/falco:latest
    Port:       <none>
    Host Port:  <none>
    Args:
      /usr/bin/falco
      --cri
      /host/run/containerd/containerd.sock
      -K
      /var/run/secrets/kubernetes.io/serviceaccount/token
      -k
      https://$(KUBERNETES_SERVICE_HOST)
      -pk
    Environment:
      SYSDIG_BPF_PROBE:
    Mounts:
      /etc/falco from falco-config (rw)
      /host/boot from boot-fs (ro)
      /host/dev from dev-fs (rw)
      /host/etc from etc-fs (ro)
      /host/lib/modules from lib-modules (ro)
      /host/proc from proc-fs (ro)
      /host/run/containerd/containerd.sock from containerd-socket (rw)
      /host/usr from usr-fs (ro)
      /host/var/run/docker.sock from docker-socket (rw)
  Volumes:
   docker-socket:
    Type:          HostPath (bare host directory volume)
    Path:          /var/run/docker.socket
    HostPathType:
   containerd-socket:
    Type:          HostPath (bare host directory volume)
    Path:          /run/containerd/containerd.sock
    HostPathType:
   dev-fs:
    Type:          HostPath (bare host directory volume)
    Path:          /dev
    HostPathType:
   proc-fs:
    Type:          HostPath (bare host directory volume)
    Path:          /proc
    HostPathType:
   boot-fs:
    Type:          HostPath (bare host directory volume)
    Path:          /boot
    HostPathType:
   lib-modules:
    Type:          HostPath (bare host directory volume)
    Path:          /lib/modules
    HostPathType:
   usr-fs:
    Type:          HostPath (bare host directory volume)
    Path:          /usr
    HostPathType:
   etc-fs:
    Type:          HostPath (bare host directory volume)
    Path:          /etc
    HostPathType:
   falco-config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      falco-cfgmap
    Optional:  false
Events:
  Type    Reason            Age    From                  Message
  ----    ------            ----   ----                  -------
  Normal  SuccessfulCreate  45m    daemonset-controller  Created pod: falco-daemonset-vvfm4
  Normal  SuccessfulCreate  45m    daemonset-controller  Created pod: falco-daemonset-r6xx2
  Normal  SuccessfulCreate  45m    daemonset-controller  Created pod: falco-daemonset-rk64p
  Normal  SuccessfulDelete  34m    daemonset-controller  Deleted pod: falco-daemonset-vvfm4
  Normal  SuccessfulDelete  34m    daemonset-controller  Deleted pod: falco-daemonset-r6xx2
  Normal  SuccessfulDelete  34m    daemonset-controller  Deleted pod: falco-daemonset-rk64p
  Normal  SuccessfulCreate  34m    daemonset-controller  Created pod: falco-daemonset-ghpbt
  Normal  SuccessfulCreate  34m    daemonset-controller  Created pod: falco-daemonset-6n7fr
  Normal  SuccessfulCreate  34m    daemonset-controller  Created pod: falco-daemonset-dz42d
  Normal  SuccessfulDelete  19m    daemonset-controller  Deleted pod: falco-daemonset-dz42d
  Normal  SuccessfulDelete  19m    daemonset-controller  Deleted pod: falco-daemonset-ghpbt
  Normal  SuccessfulDelete  19m    daemonset-controller  Deleted pod: falco-daemonset-6n7fr
  Normal  SuccessfulCreate  19m    daemonset-controller  Created pod: falco-daemonset-7l9wr
  Normal  SuccessfulCreate  19m    daemonset-controller  Created pod: falco-daemonset-4p9xg
  Normal  SuccessfulCreate  19m    daemonset-controller  Created pod: falco-daemonset-hn24r
  Normal  SuccessfulDelete  8m33s  daemonset-controller  Deleted pod: falco-daemonset-hn24r
  Normal  SuccessfulDelete  8m33s  daemonset-controller  Deleted pod: falco-daemonset-4p9xg
  Normal  SuccessfulDelete  8m33s  daemonset-controller  Deleted pod: falco-daemonset-7l9wr
  Normal  SuccessfulCreate  8m23s  daemonset-controller  Created pod: falco-daemonset-t7xgr
  Normal  SuccessfulCreate  8m23s  daemonset-controller  Created pod: falco-daemonset-ptpvj
  Normal  SuccessfulCreate  8m23s  daemonset-controller  Created pod: falco-daemonset-dkctn
And my logs:
* Setting up /usr/src links from host
* Mounting debugfs
Found kernel config at /proc/config.gz
* COS detected (build 11647.163.0), downloading and setting up kernel headers
* Downloading https://storage.googleapis.com/cos-tools/11647.163.0/kernel-src.tar.gz
* Extracting kernel sources
* Configuring kernel
scripts/sign-file.c:25:30: fatal error: openssl/opensslv.h: No such file or directory
compilation terminated.
make[1]: *** [scripts/Makefile.host:102: scripts/sign-file] Error 1
make: *** [Makefile:572: scripts] Error 2
* Trying to compile BPF probe falco-probe-bpf (falco-probe-bpf-0.15.0-x86_64-4.14.94+-d7aaf2e0f41bfe30d6d84fe8e754c0d9.o)
In file included from /usr/src/falco-0.15.0/bpf/probe.c:23:
/usr/src/falco-0.15.0/bpf/fillers.h:2017:26: error: no member named 'loginuid' in 'struct task_struct'
loginuid = _READ(task->loginuid);
~~~~ ^
/usr/src/falco-0.15.0/bpf/plumbing_helpers.h:18:28: note: expanded from macro '_READ'
#define _READ(P) ({ typeof(P) _val; \
^
In file included from /usr/src/falco-0.15.0/bpf/probe.c:23:
/usr/src/falco-0.15.0/bpf/fillers.h:2017:26: error: no member named 'loginuid' in 'struct task_struct'
loginuid = _READ(task->loginuid);
~~~~ ^
/usr/src/falco-0.15.0/bpf/plumbing_helpers.h:20:44: note: expanded from macro '_READ'
bpf_probe_read(&_val, sizeof(_val), &P); \
^
In file included from /usr/src/falco-0.15.0/bpf/probe.c:23:
/usr/src/falco-0.15.0/bpf/fillers.h:2017:12: error: assigning to 'kuid_t' from incompatible type 'void'
loginuid = _READ(task->loginuid);
^ ~~~~~~~~~~~~~~~~~~~~~
3 errors generated.
make[2]: *** [/usr/src/falco-0.15.0/bpf/Makefile:33: /usr/src/falco-0.15.0/bpf/probe.o] Error 1
make[1]: *** [Makefile:1541: _module_/usr/src/falco-0.15.0/bpf] Error 2
make: *** [Makefile:18: all] Error 2
mv: cannot stat '/usr/src/falco-0.15.0/bpf/probe.o': No such file or directory
* Trying to download precompiled BPF probe from https://s3.amazonaws.com/download.draios.com/stable/sysdig-probe-binaries/falco-probe-bpf-0.15.0-x86_64-4.14.94%2B-d7aaf2e0f41bfe30d6d84fe8e754c0d9.o
curl: (22) The requested URL returned error: 404 Not Found
* Failure to find a BPF probe
@mfdii is it okay to add this information to a closed ticket? I can open a new one if necessary; since both issues look similar, I wanted to keep the information together.
@caquino can you file a separate issue for this? We've also noticed this internally, and from what we can tell it's something specific to the COS kernel used by Google: https://chromium.googlesource.com/chromiumos/third_party/kernel/+/096925a44076ba5c52faa84d255a847130ff341e%5E%21/#F2.
Trying to run sysdig/falco:latest as a k8s daemonset and getting:

The S3 download is giving me: