Closed danielSanchezQ closed 3 years ago
Implements #44
I made a basic benchmark to check what the penalty would be:
use criterion::async_executor::AsyncExecutor;
use criterion::{criterion_group, criterion_main, Criterion};
use once_cell::sync::OnceCell;
/// Fires a GET request at google.es with the eagerly-built client;
/// panics if the request fails (acceptable in a benchmark body).
async fn client_plain(client: &reqwest::Client) {
    let request = client.get("https://www.google.es");
    let response = request.send().await;
    response.expect("Couldn't get google response");
}
/// Same request as `client_plain`, but the `reqwest::Client` is
/// lazily initialised inside the `OnceCell` on first access, so we
/// can measure the cost of the extra indirection.
async fn client_once_cell(client: &OnceCell<reqwest::Client>) {
    let client = client.get_or_init(reqwest::Client::new);
    let response = client.get("https://www.google.es").send().await;
    response.expect("Couldn't get google response");
}
/// Benchmarks a plain `reqwest::Client` against one created lazily
/// through a `OnceCell`, to quantify the cost of the indirection.
pub fn client_plain_benchmark(c: &mut Criterion) {
    // One client built eagerly, one deferred until first use.
    let client = reqwest::Client::new();
    let once_client = OnceCell::new();
    // Tokio runtime that drives the async benchmark bodies.
    let runtime = tokio::runtime::Runtime::new().unwrap();
    let mut group = c.benchmark_group("Clients");
    group.bench_function("plain client 10", |b| {
        b.to_async(&runtime).iter(|| client_plain(&client))
    });
    group.bench_function("once client 10", |b| {
        b.to_async(&runtime).iter(|| client_once_cell(&once_client))
    });
    // BUG FIX: the group was never finished. `BenchmarkGroup::finish`
    // consumes the group and generates the group's summary reports;
    // without it Criterion skips the group-level summary.
    group.finish();
}
// Register the benchmark group and generate the harness `main` entry point.
criterion_group!(benches, client_plain_benchmark);
criterion_main!(benches);
Results show negligible performance differences:
Benchmarking Clients/plain client 10: Collecting 100 samples in estimated 23.022 s (100 iterations)
Benchmarking Clients/plain client 10: Analyzing
Clients/plain client 10 time: [213.32 ms 216.64 ms 220.36 ms]
Found 10 outliers among 100 measurements (10.00%)
6 (6.00%) high mild
4 (4.00%) high severe
Benchmarking Clients/once client 10
Benchmarking Clients/once client 10: Warming up for 3.0000 s
Benchmarking Clients/once client 10: Collecting 100 samples in estimated 21.577 s (100 iterations)
Benchmarking Clients/once client 10: Analyzing
Clients/once client 10 time: [206.35 ms 209.60 ms 213.54 ms]
Found 7 outliers among 100 measurements (7.00%)
5 (5.00%) high mild
2 (2.00%) high severe
Safe to merge ❤️
This PR adds the ability to override the
reqwest::Client
used by the proxy.