iovisor / bcc

BCC - Tools for BPF-based Linux IO analysis, networking, monitoring, and more
Apache License 2.0

Can't resolve event data properly #2972

Open · kloud1989 opened this issue 4 years ago

kloud1989 commented 4 years ago

I'm writing a tool to record every TCP event. However, I can only resolve IPv4 events properly; IPv6 events can't be resolved correctly. Here is a sample result (image attached).

Here is my tool implementation. Is anything wrong with it? I can record IPv6 events correctly if I record them on the tcp_sendmsg/tcp_recvmsg entry probes without the BPF_HASH tables applied. However, to record the end time of each event, I have to use the return kprobes as well, so the tables are needed.

I'm running this tool on Chromium OS, kernel v5.4.

tcptrans.c:

#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>
#include <net/inet_sock.h>

enum TCP_EVENT_TYPE {
    SEND = 0,
    RECV = 1
};

enum IP_TYPE {
    IPV4 = 0,
    IPV6 = 1
};

struct ipv4_event_t {
    u32 pid;
    u8 type;
    u32 saddr;
    u32 daddr;
    u16 sport;
    u16 dport;
    u64 size;
    u64 start;
    u64 end;
};
BPF_PERF_OUTPUT(ipv4_events);

struct ipv6_event_t {
    u32 pid;
    u8 type;
    unsigned __int128 saddr;
    unsigned __int128 daddr;
    u16 sport;
    u16 dport;
    u64 size;
    u64 start;
    u64 end;
};
BPF_PERF_OUTPUT(ipv6_events);

struct tcp_event_t {
    u8 ip_type;
};

// Update event details on entry, restore on return.
BPF_HASH(tcp_event_tmp, u64, struct tcp_event_t);
BPF_HASH(ipv4_event_tmp, u64, struct ipv4_event_t);
BPF_HASH(ipv6_event_tmp, u64, struct ipv6_event_t);

static int read_ipv4_event(struct ipv4_event_t *tuple, struct sock *skp) {
  u32 saddr = skp->__sk_common.skc_rcv_saddr;
  u32 daddr = skp->__sk_common.skc_daddr;
  struct inet_sock *sockp = (struct inet_sock *)skp;
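  // inet_sport and skc_dport are __be16, i.e. network byte order;
  // convert with ntohs() in user space if host order is wanted.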
  u16 sport = sockp->inet_sport;
  u16 dport = skp->__sk_common.skc_dport;

  tuple->saddr = saddr;
  tuple->daddr = daddr;
  tuple->sport = sport;
  tuple->dport = dport;

  return 0;
}

static int read_ipv6_event(struct ipv6_event_t *tuple, struct sock *skp) {
  unsigned __int128 saddr = 0, daddr = 0;
  struct inet_sock *sockp = (struct inet_sock *)skp;
  u16 sport = sockp->inet_sport;
  u16 dport = skp->__sk_common.skc_dport;

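  // Copy the full 16-byte IPv6 addresses; the bytes stay in network order.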
  bpf_probe_read(&saddr, sizeof(saddr),
                 skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
  bpf_probe_read(&daddr, sizeof(daddr),
                 skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32);

  tuple->saddr = saddr;
  tuple->daddr = daddr;
  tuple->sport = sport;
  tuple->dport = dport;

  return 0;
}

static void record_event(struct sock *sk, enum TCP_EVENT_TYPE event_type, size_t size) {
    u64 id = bpf_get_current_pid_tgid();
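    // The upper 32 bits of id hold the tgid (user-space PID);
    // truncating to u32 keeps the lower 32 bits, i.e. the thread id.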
    u32 pid = id;
    FILTER

    struct tcp_event_t tcp_event = {};
    u64 ts_ns = bpf_ktime_get_ns();

    u16 family = sk->__sk_common.skc_family;
    if (family == AF_INET) {
        tcp_event.ip_type = IPV4;
        tcp_event_tmp.update(&id, &tcp_event);

        struct ipv4_event_t ipv4_event = {};
        read_ipv4_event(&ipv4_event, sk);
        ipv4_event.pid = pid;
        ipv4_event.type = event_type;
        ipv4_event.size = size;
        ipv4_event.start = ts_ns;
        ipv4_event.end = 0;
        ipv4_event_tmp.update(&id, &ipv4_event);
    } else if (family == AF_INET6) {
        tcp_event.ip_type = IPV6;
        tcp_event_tmp.update(&id, &tcp_event);

        struct ipv6_event_t ipv6_event = {};
        read_ipv6_event(&ipv6_event, sk);
        ipv6_event.pid = pid;
        ipv6_event.type = event_type;
        ipv6_event.size = size;
        ipv6_event.start = ts_ns;
        ipv6_event.end = 0;
        ipv6_event_tmp.update(&id, &ipv6_event);
    }
    // else drop
}

static void output_event(struct pt_regs *ctx, enum TCP_EVENT_TYPE event_type) {
    u64 id = bpf_get_current_pid_tgid();
    u32 pid = id;
    FILTER

    struct tcp_event_t *tcp_event = tcp_event_tmp.lookup(&id);
    if (tcp_event == 0) {
        return;
    }

    u64 ts_ns = bpf_ktime_get_ns();
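    // PT_REGS_RC is the kretprobe return value: bytes transferred,
    // or a negative errno if tcp_sendmsg/tcp_recvmsg failed.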
    int ret = PT_REGS_RC(ctx);
    switch (tcp_event->ip_type) {
        case IPV4: {
            struct ipv4_event_t *ipv4_event = ipv4_event_tmp.lookup(&id);
            if (ipv4_event == 0) {
                return;
            }
            struct ipv4_event_t ipv4_data = {};
            ipv4_data.pid = pid;
            ipv4_data.type = event_type;
            ipv4_data.saddr = ipv4_event->saddr;
            ipv4_data.daddr = ipv4_event->daddr;
            ipv4_data.sport = ipv4_event->sport;
            ipv4_data.dport = ipv4_event->dport;
            ipv4_data.size = ret;
            ipv4_data.start = ipv4_event->start;
            ipv4_data.end = ts_ns;
            ipv4_events.perf_submit(ctx, &ipv4_data, sizeof(ipv4_data));
            ipv4_event_tmp.delete(&id);
            break;
        }
        case IPV6: {
            struct ipv6_event_t *ipv6_event = ipv6_event_tmp.lookup(&id);
            if (ipv6_event == 0) {
                return;
            }
            struct ipv6_event_t ipv6_data = {};
            ipv6_data.pid = pid;
            ipv6_data.type = event_type;
            ipv6_data.saddr = ipv6_event->saddr;
            ipv6_data.daddr = ipv6_event->daddr;
            ipv6_data.sport = ipv6_event->sport;
            ipv6_data.dport = ipv6_event->dport;
            ipv6_data.size = ret;
            ipv6_data.start = ipv6_event->start;
            ipv6_data.end = ts_ns;
            ipv6_events.perf_submit(ctx, &ipv6_data, sizeof(ipv6_data));
            ipv6_event_tmp.delete(&id);
            break;
        }
    }
    tcp_event_tmp.delete(&id);
}

int trace_tcp_sendmsg_entry(struct pt_regs *ctx, struct sock *sk,
    struct msghdr *msg, size_t size) {
    record_event(sk, SEND, size);
    return 0;
}

int trace_tcp_sendmsg_return(struct pt_regs *ctx) {
    output_event(ctx, SEND);
    return 0;
}

int trace_tcp_recvmsg_entry(struct pt_regs *ctx, struct sock *sk, struct msghdr *msg, 
    size_t len, int nonblock, int flags, int *addr_len) {
    record_event(sk, RECV, len);
    return 0;
}

int trace_tcp_recvmsg_return(struct pt_regs *ctx) {
    output_event(ctx, RECV);
    return 0;
}

tcptrans.py:

#!/usr/bin/python

from __future__ import print_function
from bcc import BPF
import argparse
import os
from socket import inet_ntop, AF_INET, AF_INET6
from struct import pack
from time import sleep, strftime
from subprocess import call
from collections import namedtuple, defaultdict
import pandas as pd
import signal

"""
parser = argparse.ArgumentParser(
    description="Trace TCP send/recv events by host",
    formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-p", "--pid",
                    help="trace this PID only")
parser.add_argument("-o", "--output",
                    help="output file name")
parser.add_argument("--ebpf", action="store_true",
                    help=argparse.SUPPRESS)
args = parser.parse_args()
debug = 0

dir_path = os.path.dirname(os.path.realpath(__file__))
bcc_code_path = dir_path + "/tcptrans.c"
if not os.path.isfile(bcc_code_path):
    print("Missing BCC probe code...")
    exit()

with open(bcc_code_path, 'r') as f:
    bpf_text = f.read()

# code substitutions
if args.pid:
    bpf_text = bpf_text.replace('FILTER',
                                'if (pid != %s) { return; }' % args.pid)
else:
    bpf_text = bpf_text.replace('FILTER', '')

output = "tcptrans"
if args.output:
    output = args.output

if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()

# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="tcp_sendmsg", fn_name="trace_tcp_sendmsg_entry")
b.attach_kretprobe(event="tcp_sendmsg", fn_name="trace_tcp_sendmsg_return")
b.attach_kprobe(event="tcp_recvmsg", fn_name="trace_tcp_recvmsg_entry")
b.attach_kretprobe(event="tcp_recvmsg", fn_name="trace_tcp_recvmsg_return")

print("Tracing TCP SENV/RECV calls. Ctrl-C to end.")

tcp_events = []
event_types = {
    0: "SEND",
    1: "RECV"
}

def add_ipv4_event(cpu, data, size):
    event = b["ipv4_events"].event(data)
    v4_event = {
        "pid": event.pid,
        "cpu": cpu,
        "type": event_types[event.type],
        "saddr": inet_ntop(AF_INET, pack("I", event.saddr)),
        "daddr": inet_ntop(AF_INET, pack("I", event.daddr)),
        "sport": event.sport,
        "dport": event.dport,
        "size": event.size,
        "start": event.start,
        "end": event.end
    }
    tcp_events.append(v4_event)

def add_ipv6_event(cpu, data, size):
    event = b["ipv6_events"].event(data)
    v6_event = {
        "pid": event.pid,
        "cpu": cpu,
        "type": event_types[event.type],
        "saddr": inet_ntop(AF_INET6, event.saddr),
        "daddr": inet_ntop(AF_INET6, event.daddr),
        "sport": event.sport,
        "dport": event.dport,
        "size": event.size,
        "start": event.start,
        "end": event.end
    }
    tcp_events.append(v6_event)

b["ipv4_events"].open_perf_buffer(add_ipv4_event)
b["ipv6_events"].open_perf_buffer(add_ipv6_event)

def collect_data():
    b.perf_buffer_poll()
    df_entry = pd.DataFrame(tcp_events,
                            columns=["pid", "cpu", "type", "saddr", "daddr",
                                     "sport", "dport", "size", "start", "end"])
    df_entry.to_csv(output + ".csv", index=False)

def signal_handler(sig, frame):
    collect_data()
    exit()

signal.signal(signal.SIGINT, signal_handler)
while True:
    try:
        sleep(1)
    except KeyboardInterrupt:
        collect_data()
        exit()
willfindlay commented 4 years ago

Your code works fine for me. What exactly is the problem you're having?

On an unrelated note, putting your perf_buffer_poll call at the end of your program will cause it to drop events if you run it for long enough. You want to poll the perf buffer in your while loop so you can consume events.
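Something like this should do it (a rough sketch against your script, reusing your collect_data and signal handler; perf_buffer_poll's timeout argument is in milliseconds):

signal.signal(signal.SIGINT, signal_handler)
while True:
    try:
        # consume events as they arrive instead of only once at exit
        b.perf_buffer_poll(timeout=1000)
    except KeyboardInterrupt:
        collect_data()
        exit()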

kloud1989 commented 4 years ago

Your code works fine for me. What exactly is the problem you're having?

The problem is that, as shown in the sample result, the last two events in the table are IPv6 events, but their saddr, daddr, and the following fields are not resolved properly. Since the code works fine for you, I have to believe the problem is related to the kernel or OS version. What platform are you running on?
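In the meantime, to narrow it down on my side, I'll dump the raw bytes bcc hands back for one IPv6 event, roughly like this (a debugging sketch inside add_ipv6_event; it assumes bcc exposes the unsigned __int128 fields as an array of two 64-bit words and that this is a little-endian machine):

from binascii import hexlify

def add_ipv6_event(cpu, data, size):
    event = b["ipv6_events"].event(data)
    # repack the two u64 halves to recover the original 16 address bytes,
    # then print them next to the decoded form for comparison
    raw_saddr = pack("<QQ", event.saddr[0], event.saddr[1])
    print("raw saddr:", hexlify(raw_saddr), "->", inet_ntop(AF_INET6, raw_saddr))
    # ... rest of the handler unchanged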

On an unrelated note, putting your perf_buffer_poll call at the end of your program will cause it to drop events if you run it for long enough. You want to poll the perf buffer in your while loop so you can consume events.

Ahh, you're right! I had tried this before on a very old kernel, and it didn't work well when I stopped the tool with Ctrl+C. Now that I've tried it on kernel v5.4, it works fine!

willfindlay commented 4 years ago

I'm on Arch Linux 5.7.2, latest bcc from master. FWIW, have you had a look at https://github.com/iovisor/bcc/blob/master/tools/tcptracer.py, which seems to be quite similar to what you are trying to do here?

kloud1989 commented 4 years ago

I referenced tcptracer.py when writing my tool, but modifications were required to trace every send/recv event. I still can't tell what I'm doing wrong.