kubewarden / kwctl

Go-to CLI tool for Kubewarden users
https://kubewarden.io
Apache License 2.0

Implement benchmark subcommand #54

Closed · flavio closed this 2 years ago

flavio commented 3 years ago

It would be nice to allow policy authors to benchmark their policies. This would be useful to spot how the source language influences the final evaluation time of the policies.

Using regular benchmarking tools won't work, because they would start kwctl multiple times. On each invocation kwctl has to load and optimize the Wasm module, an initial performance hit that goes away once the policy is "hot" in memory (as happens with policy-server).
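The cost is easy to see with wasmtime (the runtime policy-evaluator builds on) outside of kwctl entirely. A rough sketch, with the caveat that policy.wasm here is a placeholder, and that real Kubewarden policies import host functions, so the empty import list below would only work for a self-contained module:

use std::time::Instant;
use wasmtime::{Engine, Instance, Module, Store};

fn main() -> anyhow::Result<()> {
    let engine = Engine::default();

    // First load: wasmtime compiles and optimizes the module. This is the
    // cold-start cost that a naive "run kwctl N times" benchmark would pay
    // on every single iteration.
    let start = Instant::now();
    let module = Module::from_file(&engine, "policy.wasm")?;
    println!("compile + optimize: {:?}", start.elapsed());

    // Once compiled, instantiating the module is cheap. This is the "hot"
    // state policy-server keeps policies in, and the state a benchmark
    // subcommand should be measuring.
    let mut store = Store::new(&engine, ());
    for i in 0..5 {
        let start = Instant::now();
        let _instance = Instance::new(&mut store, &module, &[])?;
        println!("instantiation {}: {:?}", i, start.elapsed());
    }
    Ok(())
}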

Acceptance criteria

flavio commented 3 years ago

@srenatus provided some rough code we might want to reuse :bow:

use anyhow::Result;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use policy_evaluator::policy_evaluator::PolicyEvaluator;
use std::fs::File;
use std::io::BufReader;
use std::path::Path;
use std::process;
use tracing::Level;
use tracing_subscriber::FmtSubscriber;

// A request fixture plus the validation outcome we expect for it.
struct Req {
    file: String,
    allow: bool,
}

// Benchmark each request in `reqs` against both Wasm builds of the policy
// stored in `dir`.
fn bench_group(c: &mut Criterion, dir: String, reqs: Vec<Req>) {
    let mut group = c.benchmark_group(dir.clone());
    for m in ["policy.wasm", "kw_policy.wasm"].iter() {
        for r in reqs.iter() {
            group.bench_function(format!("{}/{}/{}", *m, r.file, r.allow), |b| {
                let request = read_request_file(&format!("{}/{}", dir, r.file)).unwrap();
                let mut policy_evaluator = black_box(
                    PolicyEvaluator::new(&Path::new(&format!("{}/{}", dir, m)), None).unwrap(),
                );
                // Run once before benchmarking to make sure the policy
                // behaves as expected.
                let vr = policy_evaluator.validate(request.clone());
                if r.allow != vr.allowed {
                    fatal_error(format!("unexpected result from {}: {:?}", m, vr))
                }
                b.iter(|| policy_evaluator.validate(request.clone()));
                // Run once more to check we still report the correct result
                // after the benchmark.
                let vr_after = policy_evaluator.validate(request.clone());
                if r.allow != vr_after.allowed {
                    fatal_error(format!("unexpected result from {} (after benchmark): {:?}", m, vr_after))
                }
            });
        }
    }
    group.finish();
}

// Set up tracing so policy-evaluator output is visible while benchmarking.
fn validate_benchmark(c: &mut Criterion) {
    let subscriber = FmtSubscriber::builder()
        .with_max_level(Level::TRACE)
        .finish();
    tracing::subscriber::set_global_default(subscriber)
        .expect("setting default subscriber failed");
    bench_group(c, "pod-privileged-policy".to_string(), vec![
        Req{file: "request_pass.json".to_string(), allow: true},
        Req{file: "request_deny.json".to_string(), allow: false},
    ]);
    bench_group(c, "psp-allow-privilege-escalation".to_string(), vec![
        Req{file: "request_pass.json".to_string(), allow: true},
        Req{file: "request_container_deny.json".to_string(), allow: false},
        Req{file: "request_init_container_deny.json".to_string(), allow: false},
    ]);
}

// Read an admission request fixture from a JSON file.
fn read_request_file(path: &str) -> Result<serde_json::Value> {
    let file = File::open(path)?;
    let reader = BufReader::new(file);
    let v = serde_json::from_reader(reader)?;
    Ok(v)
}

// Print the error and abort the whole benchmark run.
fn fatal_error(msg: String) {
    eprintln!("{}", msg);
    process::exit(1);
}

criterion_group!(benches, validate_benchmark);
criterion_main!(benches);
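
For reference, criterion benchmarks like this would live under benches/ and run via cargo bench, and they need a [[bench]] target with harness = false in Cargo.toml. The fixture layout assumed above is one directory per policy, holding the two Wasm builds alongside the request_*.json files.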