snorez / ebpf-fuzzer

fuzz the linux kernel bpf verifier

[ebpf_fuzzer]: total: 0, valid: 0(-nan%), crash: 0(reason: 0) AND load_prog() err #2

Open OrangeGzY opened 2 years ago

OrangeGzY commented 2 years ago

Thanks for your great idea and project!

I ran the project with the correct kernel config (including CONFIG_BPF_SYSCALL=y and CONFIG_BPF_JIT=y), but I still got this output:

root@ubuntu:~/ebpf-fuzzer# ./ebpf_fuzzer /root/ebpf-fuzzer/config 0
qemu_fuzzlib_env_setup ...done
[ebpf_fuzzer]: total: 0, valid: 0(-nan%), crash: 0(reason: 0)
[ebpf_fuzzer]: total: 0, valid: 0(-nan%), crash: 0(reason: 0)
[ebpf_fuzzer]: total: 0, valid: 0(-nan%), crash: 0(reason: 0)
[ebpf_fuzzer]: total: 0, valid: 0(-nan%), crash: 0(reason: 0)
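
(The -nan% by itself is expected when nothing has run yet: it is 0 valid samples divided by 0 total samples. A minimal sketch of that formatting, with hypothetical variable names, assuming the status line divides valid by total:)

#include <stdio.h>

int main(void)
{
    unsigned long total = 0, valid = 0;
    /* 0.0 / 0.0 is NaN; glibc's printf typically renders it as "-nan" */
    printf("[ebpf_fuzzer]: total: %lu, valid: %lu(%f%%)\n",
           total, valid, 100.0 * valid / total);
    return 0;
}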

After that, I tried to run:

./ebpf_fuzzer /root/ebpf-fuzzer/config 1

This produced a test file at /tmp/test_sample.c.

I compiled this file on the host, and the compilation succeeded.

Finally I ran the binary, but got:

root@ubuntu:~/ebpf-fuzzer# ./ebpf_fuzzer ./config 1
root@ubuntu:~/ebpf-fuzzer# gcc /tmp/test_sample.c -o ./test_sample

root@ubuntu:~/ebpf-fuzzer# ./test_sample 
update_storage_map done.
repro failed

Then I checked test_sample.c, enabled some of the fprintf() error messages, and re-compiled it, which finally printed:

root@ubuntu:~/ebpf-fuzzer# ./test_sample 
update_storage_map done.
load_prog() err
repro failed

It seems that the struct bpf_insn __insns[] failed to load, which left the fuzzer in an abnormal state?
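
One way to confirm: BPF_PROG_LOAD fills the log buffer with the verifier's rejection reason even when it fails, and the sample below already passes bpf_log_buf with log_level = 1, but it only prints the log after a successful load. A minimal sketch of a variant (hypothetical name load_prog_verbose, reusing the sample's attribute setup) that dumps the log on the error path:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

#define LOG_BUF_SIZE 65536
static char bpf_log_buf[LOG_BUF_SIZE];

static int load_prog_verbose(struct bpf_insn *insns, size_t insn_count)
{
    union bpf_attr prog = {};
    prog.license = (uint64_t)"GPL";
    prog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
    prog.insn_cnt = insn_count;
    prog.insns = (uint64_t)insns;
    prog.log_buf = (uint64_t)bpf_log_buf;
    prog.log_size = LOG_BUF_SIZE;
    prog.log_level = 1;

    int prog_fd = syscall(SYS_bpf, BPF_PROG_LOAD, &prog, sizeof(prog));
    if (prog_fd < 0) {
        /* the verifier's rejection reason lands here even on failure */
        fprintf(stderr, "verifier log: %s\n", bpf_log_buf);
    }
    return prog_fd;
}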

The test_sample.c is:


#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <fcntl.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <signal.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/bpf.h>
#include <linux/bpf_common.h>
#include <sys/prctl.h>

enum qemu_fuzzlib_inst_res {
    QEMU_FUZZLIB_INST_INVALID = -1,
    QEMU_FUZZLIB_INST_NOT_TESTED = 0,
    QEMU_FUZZLIB_INST_VALID,
    QEMU_FUZZLIB_INST_BOOM,
};

typedef __s8    s8;
typedef __s16   s16;
typedef __s32   s32;
typedef __s64   s64;
typedef __u8    u8;
typedef __u16   u16;
typedef __u32   u32;
typedef __u64   u64;

struct xmsg {
    unsigned long       special_value;
    unsigned long       insn_cnt;
    struct bpf_insn     insns[BPF_MAXINSNS];
};

#ifndef BPF_JMP32
#define BPF_JMP32   0x06
#endif

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1    BPF_REG_1
#define BPF_REG_ARG2    BPF_REG_2
#define BPF_REG_ARG3    BPF_REG_3
#define BPF_REG_ARG4    BPF_REG_4
#define BPF_REG_ARG5    BPF_REG_5
#define BPF_REG_CTX BPF_REG_6
#define BPF_REG_FP  BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A   BPF_REG_0
#define BPF_REG_X   BPF_REG_7
#define BPF_REG_TMP BPF_REG_2   /* scratch reg */
#define BPF_REG_D   BPF_REG_8   /* data, callee-saved */
#define BPF_REG_H   BPF_REG_9   /* hlen, callee-saved */

/* Kernel hidden auxiliary/helper register. */
#define BPF_REG_AX      MAX_BPF_REG
#define MAX_BPF_EXT_REG     (MAX_BPF_REG + 1)
#define MAX_BPF_JIT_REG     MAX_BPF_EXT_REG

/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL   0xf0

/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS   0xe0

/* As per nm, we expose JITed images as text (code) section for
 * kallsyms. That way, tools like perf can find it to match
 * addresses.
 */
#define BPF_SYM_ELF_TYPE    't'

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK   512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)             \
    ((struct bpf_insn) {                    \
        .code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,    \
        .dst_reg = DST,                 \
        .src_reg = SRC,                 \
        .off   = 0,                 \
        .imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)             \
    ((struct bpf_insn) {                    \
        .code  = BPF_ALU | BPF_OP(OP) | BPF_X,      \
        .dst_reg = DST,                 \
        .src_reg = SRC,                 \
        .off   = 0,                 \
        .imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)             \
    ((struct bpf_insn) {                    \
        .code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,    \
        .dst_reg = DST,                 \
        .src_reg = 0,                   \
        .off   = 0,                 \
        .imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)             \
    ((struct bpf_insn) {                    \
        .code  = BPF_ALU | BPF_OP(OP) | BPF_K,      \
        .dst_reg = DST,                 \
        .src_reg = 0,                   \
        .off   = 0,                 \
        .imm   = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)              \
    ((struct bpf_insn) {                    \
        .code  = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
        .dst_reg = DST,                 \
        .src_reg = 0,                   \
        .off   = 0,                 \
        .imm   = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)                 \
    ((struct bpf_insn) {                    \
        .code  = BPF_ALU64 | BPF_MOV | BPF_X,       \
        .dst_reg = DST,                 \
        .src_reg = SRC,                 \
        .off   = 0,                 \
        .imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)                 \
    ((struct bpf_insn) {                    \
        .code  = BPF_ALU | BPF_MOV | BPF_X,     \
        .dst_reg = DST,                 \
        .src_reg = SRC,                 \
        .off   = 0,                 \
        .imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)                 \
    ((struct bpf_insn) {                    \
        .code  = BPF_ALU64 | BPF_MOV | BPF_K,       \
        .dst_reg = DST,                 \
        .src_reg = 0,                   \
        .off   = 0,                 \
        .imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)                 \
    ((struct bpf_insn) {                    \
        .code  = BPF_ALU | BPF_MOV | BPF_K,     \
        .dst_reg = DST,                 \
        .src_reg = 0,                   \
        .off   = 0,                 \
        .imm   = IMM })

/* Special form of mov32, used for doing explicit zero extension on dst. */
#define BPF_ZEXT_REG(DST)                   \
    ((struct bpf_insn) {                    \
        .code  = BPF_ALU | BPF_MOV | BPF_X,     \
        .dst_reg = DST,                 \
        .src_reg = DST,                 \
        .off   = 0,                 \
        .imm   = 1 })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)                  \
    BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)             \
    ((struct bpf_insn) {                    \
        .code  = BPF_LD | BPF_DW | BPF_IMM,     \
        .dst_reg = DST,                 \
        .src_reg = SRC,                 \
        .off   = 0,                 \
        .imm   = (__u32) (IMM) }),          \
    ((struct bpf_insn) {                    \
        .code  = 0, /* zero is reserved opcode */   \
        .dst_reg = 0,                   \
        .src_reg = 0,                   \
        .off   = 0,                 \
        .imm   = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)              \
    BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)          \
    ((struct bpf_insn) {                    \
        .code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),   \
        .dst_reg = DST,                 \
        .src_reg = SRC,                 \
        .off   = 0,                 \
        .imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)          \
    ((struct bpf_insn) {                    \
        .code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
        .dst_reg = DST,                 \
        .src_reg = SRC,                 \
        .off   = 0,                 \
        .imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)                   \
    ((struct bpf_insn) {                    \
        .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
        .dst_reg = 0,                   \
        .src_reg = 0,                   \
        .off   = 0,                 \
        .imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)              \
    ((struct bpf_insn) {                    \
        .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
        .dst_reg = 0,                   \
        .src_reg = SRC,                 \
        .off   = 0,                 \
        .imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)            \
    ((struct bpf_insn) {                    \
        .code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,    \
        .dst_reg = DST,                 \
        .src_reg = SRC,                 \
        .off   = OFF,                   \
        .imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)            \
    ((struct bpf_insn) {                    \
        .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,    \
        .dst_reg = DST,                 \
        .src_reg = SRC,                 \
        .off   = OFF,                   \
        .imm   = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF)           \
    ((struct bpf_insn) {                    \
        .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,   \
        .dst_reg = DST,                 \
        .src_reg = SRC,                 \
        .off   = OFF,                   \
        .imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)             \
    ((struct bpf_insn) {                    \
        .code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
        .dst_reg = DST,                 \
        .src_reg = 0,                   \
        .off   = OFF,                   \
        .imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)              \
    ((struct bpf_insn) {                    \
        .code  = BPF_JMP | BPF_OP(OP) | BPF_X,      \
        .dst_reg = DST,                 \
        .src_reg = SRC,                 \
        .off   = OFF,                   \
        .imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)              \
    ((struct bpf_insn) {                    \
        .code  = BPF_JMP | BPF_OP(OP) | BPF_K,      \
        .dst_reg = DST,                 \
        .src_reg = 0,                   \
        .off   = OFF,                   \
        .imm   = IMM })

/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_REG(OP, DST, SRC, OFF)            \
    ((struct bpf_insn) {                    \
        .code  = BPF_JMP32 | BPF_OP(OP) | BPF_X,    \
        .dst_reg = DST,                 \
        .src_reg = SRC,                 \
        .off   = OFF,                   \
        .imm   = 0 })

/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_IMM(OP, DST, IMM, OFF)            \
    ((struct bpf_insn) {                    \
        .code  = BPF_JMP32 | BPF_OP(OP) | BPF_K,    \
        .dst_reg = DST,                 \
        .src_reg = 0,                   \
        .off   = OFF,                   \
        .imm   = IMM })

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF)                      \
    ((struct bpf_insn) {                    \
        .code  = BPF_JMP | BPF_JA,          \
        .dst_reg = 0,                   \
        .src_reg = 0,                   \
        .off   = OFF,                   \
        .imm   = 0 })

/* Relative call */

#define BPF_CALL_REL(TGT)                   \
    ((struct bpf_insn) {                    \
        .code  = BPF_JMP | BPF_CALL,            \
        .dst_reg = 0,                   \
        .src_reg = BPF_PSEUDO_CALL,         \
        .off   = 0,                 \
        .imm   = TGT })

#define __bpf_call_base 0
#define BPF_EMIT_CALL(FUNC)                 \
    ((struct bpf_insn) {                    \
        .code  = BPF_JMP | BPF_CALL,            \
        .dst_reg = 0,                   \
        .src_reg = 0,                   \
        .off   = 0,                 \
        .imm   = ((FUNC) - __bpf_call_base) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)          \
    ((struct bpf_insn) {                    \
        .code  = CODE,                  \
        .dst_reg = DST,                 \
        .src_reg = SRC,                 \
        .off   = OFF,                   \
        .imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()                     \
    ((struct bpf_insn) {                    \
        .code  = BPF_JMP | BPF_EXIT,            \
        .dst_reg = 0,                   \
        .src_reg = 0,                   \
        .off   = 0,                 \
        .imm   = 0 })

#define LISTENER_PORT       (1337)
#define LISTENER_BACKLOG    (0x30)
#define STORAGE_MAP_SIZE    (8192)
#define FUZZ_MAP_SIZE       (8192)

#define ARRAY_CNT(arr)  (sizeof(arr) / sizeof(arr[0]))

#define CORRUPT_FD_CONST    10
#define STORAGE_FD_CONST    11
#define CORRUPT_REG     BPF_REG_9
#define STORAGE_REG     BPF_REG_8
#define SPECIAL_REG     BPF_REG_7
#define INVALID_P_REG       BPF_REG_6
#define LEAKED_V_REG        BPF_REG_5
#define UMAX_REG        BPF_REG_4
#define EXTRA0_REG      BPF_REG_3
#define EXTRA1_REG      BPF_REG_2
#define EXTRA2_REG      BPF_REG_1
#define MAGIC_VAL1      0x4142434445464748
#define MAGIC_VAL2      0x494a4b4c4d4e4f40

static int bpf(unsigned int cmd, union bpf_attr *attr, size_t size)
{
    return syscall(SYS_bpf, cmd, attr, size);
}

static int update_storage_map(int fd, unsigned long special_val)
{
    uint64_t key = 0;
    unsigned long buf[STORAGE_MAP_SIZE / sizeof(long)];
    buf[0] = special_val;
    for (int i = 1; i < (STORAGE_MAP_SIZE / sizeof(long)); i++) {
        buf[i] = MAGIC_VAL2;
    }
    union bpf_attr attr = {
        .map_fd = fd,
        .key = (uint64_t)&key,
        .value = (uint64_t)&buf,
    };

    return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

static int update_corrupt_map(int fd)
{
    uint64_t key = 0;
    unsigned long buf[STORAGE_MAP_SIZE / sizeof(long)];
    for (int i = 0; i < (STORAGE_MAP_SIZE / sizeof(long)); i++) {
        buf[i] = MAGIC_VAL1;
    }
    union bpf_attr attr = {
        .map_fd = fd,
        .key = (uint64_t)&key,
        .value = (uint64_t)&buf,
    };

    return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

static int init_maps(int *corrupt_map_fd, int *storage_map_fd)
{
    union bpf_attr corrupt_map = {
        .map_type = BPF_MAP_TYPE_ARRAY,
        .key_size = 4,
        .value_size = STORAGE_MAP_SIZE,
        .max_entries = 1,
    };
    strcpy(corrupt_map.map_name, "corrupt_map");
    *corrupt_map_fd = (int)bpf(BPF_MAP_CREATE, &corrupt_map,
                   sizeof(corrupt_map));
    if (*corrupt_map_fd < 0)
        return -1;

    if (update_corrupt_map(*corrupt_map_fd) < 0)
        return -1;

    union bpf_attr storage_map = {
        .map_type = BPF_MAP_TYPE_ARRAY,
        .key_size = 4,
        .value_size = STORAGE_MAP_SIZE,
        .max_entries = 1,
    };
    strcpy(corrupt_map.map_name, "storage_map");
    *storage_map_fd = (int)bpf(BPF_MAP_CREATE, &storage_map,
                   sizeof(storage_map));
    if (*storage_map_fd < 0)
        return -1;

    if (update_storage_map(*storage_map_fd, 0) < 0)
        return -1;

    return 0;
}

static int read_map(int fd, void *buf, size_t size)
{
    assert(size <= (STORAGE_MAP_SIZE));

    unsigned long lk[STORAGE_MAP_SIZE / sizeof(long)];
    memset(lk, 0, sizeof(lk));
    uint64_t key = 0;
    union bpf_attr lookup_map = {
        .map_fd = fd,
        .key = (uint64_t)&key,
        .value = (uint64_t)&lk,
    };

    int err = bpf(BPF_MAP_LOOKUP_ELEM, &lookup_map, sizeof(lookup_map));
    if (err < 0) {
        return -1;
    }

    memcpy(buf, lk, size);

    return 0;
}

static int setup_listener_sock(int port, int backlog)
{
    int sock_fd = socket(AF_INET,
                SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC,
                0);
    if (sock_fd < 0) {
        return sock_fd;
    }

    struct sockaddr_in servaddr;
    servaddr.sin_family = AF_INET;
    servaddr.sin_port = htons(port);
    servaddr.sin_addr.s_addr = htonl(INADDR_ANY);

    int err = bind(sock_fd, (struct sockaddr *)&servaddr, sizeof(servaddr));
    if (err < 0) {
        close(sock_fd);
        return err;
    }

    err = listen(sock_fd, backlog);
    if (err < 0) {
        close(sock_fd);
        return err;
    }

    return sock_fd;
}

static int setup_send_sock(void)
{
    return socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
}

#define LOG_BUF_SIZE    65536
static char bpf_log_buf[LOG_BUF_SIZE];

static int load_prog(struct bpf_insn *insns, size_t insn_count)
{
    union bpf_attr prog = {};
    prog.license = (uint64_t)"GPL";
    strcpy(prog.prog_name, "ebpf_fuzzer");
    prog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
    prog.insn_cnt = insn_count;
    prog.insns = (uint64_t)insns;
    prog.log_buf = (uint64_t)bpf_log_buf;
    prog.log_size = LOG_BUF_SIZE;
    prog.log_level = 1;

    int prog_fd = bpf(BPF_PROG_LOAD, &prog, sizeof(prog));
    if (prog_fd < 0) {
        return -1;
    }

    return prog_fd;
}

static int exec_prog(int prog_fd, int *_err)
{
    int listener_sock = setup_listener_sock(LISTENER_PORT, LISTENER_BACKLOG);
    int send_sock = setup_send_sock();

    if ((listener_sock < 0) || (send_sock < 0)) {
        return -1;
    }

    if (setsockopt(listener_sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,
            sizeof(prog_fd)) < 0) {
        return -1;
    }

    struct sockaddr_in servaddr;
    servaddr.sin_family = AF_INET;
    servaddr.sin_port = htons(LISTENER_PORT);
    servaddr.sin_addr.s_addr = htonl(INADDR_ANY);

    int err;
    err = connect(send_sock, (struct sockaddr *)&servaddr, sizeof(servaddr));
    if (err < 0) {
        *_err = errno;
    }

    close(listener_sock);
    close(send_sock);
    return (err < 0) ? 1 : 0;
}

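/* update_storage_map() fills slots 1.. of the map value with MAGIC_VAL2,
 * whose bytes all lie in 0x40..0x4f; a byte outside that range in the
 * second qword of buf1 means the loaded prog wrote past its bounds */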
static int detect_oob(char *buf0, char *buf1, size_t size)
{
    char *b = &buf1[8];
    unsigned long *_b = (unsigned long *)buf1;
    for (int i = 0; i < 8; i++) {
        if ((b[i] > 0x4f) || (b[i] < 0x40)) {
            fprintf(stderr, "[1]: %lx\n", _b[1]);
            return 1;
        }
    }

    fprintf(stderr, "[2]: %lx\n", _b[2]);
    return 0;
}

static int repro_xmsg(int corrupt_map_fd, int storage_map_fd, struct xmsg *msg)
{
    int err = 0;
    char buf0[STORAGE_MAP_SIZE];
    char buf1[STORAGE_MAP_SIZE];

    err = update_storage_map(storage_map_fd, msg->special_value);
    if (err < 0) {
        fprintf(stderr, "update_storage_map err\n");
        return -1;
    }
    fprintf(stderr, "update_storage_map done.\n");

    err = read_map(storage_map_fd, buf0, STORAGE_MAP_SIZE);
    if (err < 0) {
        fprintf(stderr, "read_map err\n");
        return -1;
    }

    /* load and execute prog */
    int prog_fd = load_prog(msg->insns, msg->insn_cnt);
    if (prog_fd < 0) {
        fprintf(stderr, "load_prog() err\n");
        return -1;
    }
    fprintf(stderr, "%ld, %s.\n", strlen(bpf_log_buf), bpf_log_buf);

    int connect_err;
    err = exec_prog(prog_fd, &connect_err);
    if (err != 1) {
        /* prog not execute successfully */
        return 0;
    }
    fprintf(stderr, "exec_prog done.\n");

    /* read the map again, check the content */
    err = read_map(storage_map_fd, buf1, STORAGE_MAP_SIZE);
    if (err < 0) {
        fprintf(stderr, "read_map err\n");
        return -1;
    }

    if (detect_oob(buf0, buf1, STORAGE_MAP_SIZE)) {
        return 1;
    }

    return 0;
}

int main(int argc, char *argv[])
{
    struct xmsg msg;
    int corrupt_map_fd, storage_map_fd;
    int err;

    err = init_maps(&corrupt_map_fd, &storage_map_fd);
    if (err < 0) {
        fprintf(stderr, "init_maps err\n");
        return QEMU_FUZZLIB_INST_NOT_TESTED;
    }
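    /* pin the map fds to fixed numbers (10 = 0xa, 11 = 0xb) so the
     * generated insns can reference them via BPF_LD_MAP_FD below */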
    dup2(corrupt_map_fd, CORRUPT_FD_CONST);
    dup2(storage_map_fd, STORAGE_FD_CONST);
    close(corrupt_map_fd);
    close(storage_map_fd);
    corrupt_map_fd = CORRUPT_FD_CONST;
    storage_map_fd = STORAGE_FD_CONST;
    memset(&msg, 0, sizeof(msg));

    struct bpf_insn __insns[] = {
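/* r9 (CORRUPT_REG) = bpf_map_lookup_elem(fd 0xa = corrupt_map, key 0); exit on NULL */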
BPF_MOV64_IMM(BPF_REG_0, 0x0),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 0xfffffffc),
BPF_LD_MAP_FD(BPF_REG_1, 0xa),
BPF_EMIT_CALL(0x1),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, 0x0),
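/* r8 (STORAGE_REG) = bpf_map_lookup_elem(fd 0xb = storage_map, key 0); exit on NULL */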
BPF_MOV64_IMM(BPF_REG_0, 0x0),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 0xfffffffc),
BPF_LD_MAP_FD(BPF_REG_1, 0xb),
BPF_EMIT_CALL(0x1),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, 0x0),
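/* r7 = first qword of the storage map value (the special_value); what
 * follows is the randomly generated ALU/JMP body the verifier must track */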
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
BPF_LD_IMM64(BPF_REG_3, 0xd9f080a750714ba2),
BPF_ALU64_REG(BPF_DIV, BPF_REG_3, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_0, 0x0),
BPF_MOV64_IMM(BPF_REG_0, 0x0),
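/* BPF_JA does not take register operands, and BPF_JMP32 | BPF_JA was not
 * a valid encoding on kernels of this era, so the verifier likely
 * rejects the program at insns like this one */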
BPF_JMP32_REG(BPF_JA, BPF_REG_3, BPF_REG_7, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_7),
BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_7),
BPF_MOV32_REG(BPF_REG_3, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_0, 0x0),
BPF_MOV64_IMM(BPF_REG_0, 0x0),
BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_3, 1),
BPF_EXIT_INSN(),
BPF_ALU32_REG(BPF_RSH, BPF_REG_7, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_0, 0x0),
BPF_ALU32_IMM(BPF_MOD, BPF_REG_7, 0x74ea35c1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0x6effe2c3, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 0xfb1a558c),
BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0x18d2ddfb),
BPF_JMP_REG(BPF_JA, BPF_REG_3, BPF_REG_7, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_7, 0xb9699376),
BPF_JMP_REG(BPF_JSGE, BPF_REG_3, BPF_REG_7, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_6),
BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_9, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_8, BPF_REG_5, 8),
BPF_MOV64_IMM(BPF_REG_0, 0x1),
BPF_EXIT_INSN(),
    };

    msg.special_value = 0x3695615b1b9746ab;
    msg.insn_cnt = ARRAY_CNT(__insns);
    memcpy(msg.insns, __insns, msg.insn_cnt * sizeof(struct bpf_insn));

    err = repro_xmsg(corrupt_map_fd, storage_map_fd, &msg);
    if (err == 1) {
        fprintf(stderr, "repro done\n");
        return QEMU_FUZZLIB_INST_BOOM;
    } else if (err == 0) {
        fprintf(stderr, "repro failed\n");
        return QEMU_FUZZLIB_INST_VALID;
    } else if (err == -1) {
        fprintf(stderr, "repro failed\n");
        return QEMU_FUZZLIB_INST_INVALID;
    }
}
OrangeGzY commented 2 years ago

I then modified the struct bpf_insn __insns[]. After that, it loaded successfully and executed:

root@ubuntu:~/ebpf-fuzzer# ./test_sample 
update_storage_map done.
97, processed 6 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0
.
exec_prog done.
[2]: 494a4b4c4d4e4f40
repro failed

So I'm wondering whether the struct bpf_insn __insns[] generation can be further optimized to avoid this kind of situation :)

snorez commented 2 years ago

So I'm wondering whether the struct bpf_insn __insns[] generation can be further optimized to avoid this kind of situation :)

Yes, it can be optimized.

For the first question, you can check your config file and the guest log in the instance_* folder.