
Commit 840af5b

dev: repository benchmark uses criterion
1 parent 72d0678 commit 840af5b

29 files changed: +369 -478 lines


Cargo.lock

+1
Generated file; diff not rendered.

Cargo.toml

+1 -1

@@ -79,7 +79,7 @@ anyhow = "1.0.79"
 hex-literal = "0.4.1"

 [dev-dependencies]
-criterion = { version = "0.5.1", features = ["async_tokio"] }
+criterion = { version = "0", features = ["async_tokio"] }
 local-ip-address = "0"
 mockall = "0"
 once_cell = "1.18.0"

contrib/bencode/src/mutable/encode.rs

-2

@@ -1,5 +1,3 @@
-use std::iter::Extend;
-
 use crate::access::bencode::{BRefAccess, RefKind};
 use crate::access::dict::BDictAccess;
 use crate::access::list::BListAccess;

contrib/bencode/src/reference/bencode_ref.rs

-1

@@ -125,7 +125,6 @@ impl<'a> BRefAccessExt<'a> for BencodeRef<'a> {

 #[cfg(test)]
 mod tests {
-    use std::default::Default;

     use crate::access::bencode::BRefAccess;
     use crate::reference::bencode_ref::BencodeRef;

contrib/bencode/src/reference/decode.rs

-1

@@ -177,7 +177,6 @@ fn peek_byte(bytes: &[u8], pos: usize) -> BencodeParseResult<u8> {

 #[cfg(test)]
 mod tests {
-    use std::default::Default;

     use crate::access::bencode::BRefAccess;
     use crate::reference::bencode_ref::BencodeRef;

contrib/bencode/src/reference/decode_opt.rs

-2

@@ -1,5 +1,3 @@
-use std::default::Default;
-
 const DEFAULT_MAX_RECURSION: usize = 50;
 const DEFAULT_CHECK_KEY_SORT: bool = false;
 const DEFAULT_ENFORCE_FULL_DECODE: bool = true;

packages/configuration/src/lib.rs

+1 -1

@@ -482,7 +482,7 @@ pub struct Configuration {
     /// peers from the torrent peer list.
     pub inactive_peer_cleanup_interval: u64,
     /// If enabled, the tracker will remove torrents that have no peers.
-    /// THe clean up torrent job runs every `inactive_peer_cleanup_interval`
+    /// The clean up torrent job runs every `inactive_peer_cleanup_interval`
     /// seconds and it removes inactive peers. Eventually, the peer list of a
     /// torrent could be empty and the torrent will be removed if this option is
     /// enabled.

packages/primitives/src/lib.rs

+1 -1

@@ -54,7 +54,7 @@ pub enum DatabaseDriver {
     // TODO: Move to the database crate once that gets its own crate.
     /// The Sqlite3 database driver.
     Sqlite3,
-    /// The MySQL database driver.
+    /// The `MySQL` database driver.
     MySQL,
 }

packages/primitives/src/peer.rs

-1

@@ -24,7 +24,6 @@
 use std::net::{IpAddr, SocketAddr};
 use std::sync::Arc;

-use serde;
 use serde::Serialize;

 use crate::announce_event::AnnounceEvent;

packages/torrent-repository/Cargo.toml

+8 -1

@@ -23,4 +23,11 @@ torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../pr
 torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" }
 serde = { version = "1", features = ["derive"] }
 tdyne-peer-id = "1"
-tdyne-peer-id-registry = "0"
+tdyne-peer-id-registry = "0"
+
+[dev-dependencies]
+criterion = { version = "0", features = ["async_tokio"] }
+
+[[bench]]
+harness = false
+name = "repository_benchmark"

packages/torrent-repository/benches/helpers/args.rs

-15
This file was deleted.
packages/torrent-repository/benches/helpers/asyn.rs

+147 -178

@@ -1,178 +1,147 @@
-use std::time::Duration;
+use std::time::{Duration, Instant};

-use clap::Parser;
 use futures::stream::FuturesUnordered;
 use torrust_tracker_primitives::info_hash::InfoHash;
 use torrust_tracker_torrent_repository::repository::UpdateTorrentAsync;

-use super::args::Args;
-use super::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER};
+use super::utils::{generate_unique_info_hashes, DEFAULT_PEER};

-pub async fn add_one_torrent<V>(samples: usize) -> (Duration, Duration)
+pub async fn add_one_torrent<V>(samples: u64) -> Duration
 where
     V: UpdateTorrentAsync + Default,
 {
-    let mut results: Vec<Duration> = Vec::with_capacity(samples);
+    let start = Instant::now();

     for _ in 0..samples {
         let torrent_repository = V::default();

         let info_hash = InfoHash([0; 20]);

-        let start_time = std::time::Instant::now();
-
         torrent_repository
             .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER)
             .await;
-
-        let result = start_time.elapsed();
-
-        results.push(result);
     }

-    get_average_and_adjusted_average_from_results(results)
+    start.elapsed()
 }

 // Add one torrent ten thousand times in parallel (depending on the set worker threads)
-pub async fn update_one_torrent_in_parallel<V>(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration)
+pub async fn update_one_torrent_in_parallel<V>(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option<u64>) -> Duration
 where
     V: UpdateTorrentAsync + Default + Clone + Send + Sync + 'static,
 {
-    let args = Args::parse();
-    let mut results: Vec<Duration> = Vec::with_capacity(samples);
-
-    for _ in 0..samples {
-        let torrent_repository = V::default();
-        let info_hash: &'static InfoHash = &InfoHash([0; 20]);
-        let handles = FuturesUnordered::new();
-
-        // Add the torrent/peer to the torrent repository
-        torrent_repository
-            .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER)
-            .await;
+    let torrent_repository = V::default();
+    let info_hash: &'static InfoHash = &InfoHash([0; 20]);
+    let handles = FuturesUnordered::new();

-        let start_time = std::time::Instant::now();
+    // Add the torrent/peer to the torrent repository
+    torrent_repository
+        .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER)
+        .await;

-        for _ in 0..10_000 {
-            let torrent_repository_clone = torrent_repository.clone();
+    let start = Instant::now();

-            let handle = runtime.spawn(async move {
-                torrent_repository_clone
-                    .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER)
-                    .await;
-
-                if let Some(sleep_time) = args.sleep {
-                    let start_time = std::time::Instant::now();
-
-                    while start_time.elapsed().as_nanos() < u128::from(sleep_time) {}
-                }
-            });
+    for _ in 0..samples {
+        let torrent_repository_clone = torrent_repository.clone();

-            handles.push(handle);
-        }
+        let handle = runtime.spawn(async move {
+            torrent_repository_clone
+                .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER)
+                .await;

-        // Await all tasks
-        futures::future::join_all(handles).await;
+            if let Some(sleep_time) = sleep {
+                let start_time = std::time::Instant::now();

-        let result = start_time.elapsed();
+                while start_time.elapsed().as_nanos() < u128::from(sleep_time) {}
+            }
+        });

-        results.push(result);
+        handles.push(handle);
     }

-    get_average_and_adjusted_average_from_results(results)
+    // Await all tasks
+    futures::future::join_all(handles).await;
+
+    start.elapsed()
 }

 // Add ten thousand torrents in parallel (depending on the set worker threads)
-pub async fn add_multiple_torrents_in_parallel<V>(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration)
+pub async fn add_multiple_torrents_in_parallel<V>(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option<u64>) -> Duration
 where
     V: UpdateTorrentAsync + Default + Clone + Send + Sync + 'static,
 {
-    let args = Args::parse();
-    let mut results: Vec<Duration> = Vec::with_capacity(samples);
-
-    for _ in 0..samples {
-        let torrent_repository = V::default();
-        let info_hashes = generate_unique_info_hashes(10_000);
-        let handles = FuturesUnordered::new();
+    let torrent_repository = V::default();
+    let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize"));
+    let handles = FuturesUnordered::new();

-        let start_time = std::time::Instant::now();
+    let start = Instant::now();

-        for info_hash in info_hashes {
-            let torrent_repository_clone = torrent_repository.clone();
+    for info_hash in info_hashes {
+        let torrent_repository_clone = torrent_repository.clone();

-            let handle = runtime.spawn(async move {
-                torrent_repository_clone
-                    .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER)
-                    .await;
-
-                if let Some(sleep_time) = args.sleep {
-                    let start_time = std::time::Instant::now();
-
-                    while start_time.elapsed().as_nanos() < u128::from(sleep_time) {}
-                }
-            });
-
-            handles.push(handle);
-        }
+        let handle = runtime.spawn(async move {
+            torrent_repository_clone
+                .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER)
+                .await;

-        // Await all tasks
-        futures::future::join_all(handles).await;
+            if let Some(sleep_time) = sleep {
+                let start_time = std::time::Instant::now();

-        let result = start_time.elapsed();
+                while start_time.elapsed().as_nanos() < u128::from(sleep_time) {}
+            }
+        });

-        results.push(result);
+        handles.push(handle);
     }

-    get_average_and_adjusted_average_from_results(results)
+    // Await all tasks
+    futures::future::join_all(handles).await;
+
+    start.elapsed()
 }

 // Async update ten thousand torrents in parallel (depending on the set worker threads)
-pub async fn update_multiple_torrents_in_parallel<V>(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration)
+pub async fn update_multiple_torrents_in_parallel<V>(
+    runtime: &tokio::runtime::Runtime,
+    samples: u64,
+    sleep: Option<u64>,
+) -> Duration
 where
     V: UpdateTorrentAsync + Default + Clone + Send + Sync + 'static,
 {
-    let args = Args::parse();
-    let mut results: Vec<Duration> = Vec::with_capacity(samples);
+    let torrent_repository = V::default();
+    let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize"));
+    let handles = FuturesUnordered::new();

-    for _ in 0..samples {
-        let torrent_repository = V::default();
-        let info_hashes = generate_unique_info_hashes(10_000);
-        let handles = FuturesUnordered::new();
-
-        // Add the torrents/peers to the torrent repository
-        for info_hash in &info_hashes {
-            torrent_repository
-                .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER)
-                .await;
-        }
-
-        let start_time = std::time::Instant::now();
-
-        for info_hash in info_hashes {
-            let torrent_repository_clone = torrent_repository.clone();
-
-            let handle = runtime.spawn(async move {
-                torrent_repository_clone
-                    .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER)
-                    .await;
+    // Add the torrents/peers to the torrent repository
+    for info_hash in &info_hashes {
+        torrent_repository
+            .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER)
+            .await;
+    }

-                if let Some(sleep_time) = args.sleep {
-                    let start_time = std::time::Instant::now();
+    let start = Instant::now();

-                    while start_time.elapsed().as_nanos() < u128::from(sleep_time) {}
-                }
-            });
+    for info_hash in info_hashes {
+        let torrent_repository_clone = torrent_repository.clone();

-            handles.push(handle);
-        }
+        let handle = runtime.spawn(async move {
+            torrent_repository_clone
+                .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER)
+                .await;

-        // Await all tasks
-        futures::future::join_all(handles).await;
+            if let Some(sleep_time) = sleep {
+                let start_time = std::time::Instant::now();

-        let result = start_time.elapsed();
+                while start_time.elapsed().as_nanos() < u128::from(sleep_time) {}
+            }
+        });

-        results.push(result);
+        handles.push(handle);
     }

-    get_average_and_adjusted_average_from_results(results)
+    // Await all tasks
+    futures::future::join_all(handles).await;
+
+    start.elapsed()
 }
packages/torrent-repository/benches/helpers/mod.rs

-1

@@ -1,4 +1,3 @@
-pub mod args;
 pub mod asyn;
 pub mod sync;
 pub mod utils;
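
The reworked helpers now take the sample count as a `u64`, return a single `Duration`, and receive the busy-wait `sleep` value as an explicit parameter instead of parsing it from the command line with clap. That is the shape Criterion's `iter_custom` provides and expects back, so a benchmark can hand the iteration count straight to a helper and report the wall-clock time it returns. A sketch of that wiring over a Tokio runtime; the async routine below is a stand-in for a call such as `add_one_torrent::<SomeRepository>(iters)` from the helpers module, and `SomeRepository` is an assumed placeholder, not a name taken from this commit:

use std::time::{Duration, Instant};

use criterion::{criterion_group, criterion_main, Criterion};

// Stand-in for one of the reworked helpers: it receives the iteration count
// chosen by Criterion and returns the measured wall-clock time.
async fn add_one_torrent(iters: u64) -> Duration {
    let start = Instant::now();
    for _ in 0..iters {
        tokio::task::yield_now().await; // placeholder workload
    }
    start.elapsed()
}

fn repository_benchmark(c: &mut Criterion) {
    let rt = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .expect("it should build a Tokio runtime");

    c.bench_function("add_one_torrent", |b| {
        // `iter_custom` passes the iteration count to the routine and trusts the
        // Duration it returns, so the helper controls the timed region itself.
        b.to_async(&rt).iter_custom(add_one_torrent);
    });
}

criterion_group!(benches, repository_benchmark);
criterion_main!(benches);

Returning the elapsed time from inside the helper keeps setup such as generating info hashes outside the timed region, which is the usual reason to reach for `iter_custom` instead of plain `iter`.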
