Skip to content

Commit d81b693

Browse files
committed
dev: finish repo tests
1 parent 1b36562 commit d81b693

File tree

9 files changed

+216
-17
lines changed

9 files changed

+216
-17
lines changed

packages/primitives/src/info_hash.rs

+7
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,13 @@ impl std::convert::From<&DefaultHasher> for InfoHash {
8989
}
9090
}
9191

92+
impl std::convert::From<&i32> for InfoHash {
93+
fn from(n: &i32) -> InfoHash {
94+
let n = n.to_le_bytes();
95+
InfoHash([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, n[0], n[1], n[2], n[3]])
96+
}
97+
}
98+
9299
impl std::convert::From<[u8; 20]> for InfoHash {
93100
fn from(val: [u8; 20]) -> Self {
94101
InfoHash(val)

packages/primitives/src/pagination.rs

+2-6
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
1+
use derive_more::Constructor;
12
use serde::Deserialize;
23

34
/// A struct to keep information about the page when results are being paginated
4-
#[derive(Deserialize, Copy, Clone, Debug, PartialEq)]
5+
#[derive(Deserialize, Copy, Clone, Debug, PartialEq, Constructor)]
56
pub struct Pagination {
67
/// The page number, starting at 0
78
pub offset: u32,
@@ -10,11 +11,6 @@ pub struct Pagination {
1011
}
1112

1213
impl Pagination {
13-
#[must_use]
14-
pub fn new(offset: u32, limit: u32) -> Self {
15-
Self { offset, limit }
16-
}
17-
1814
#[must_use]
1915
pub fn new_with_options(offset_option: Option<u32>, limit_option: Option<u32>) -> Self {
2016
let offset = match offset_option {

packages/torrent-repository/src/entry/mod.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ pub trait EntrySync {
6565
#[allow(clippy::module_name_repetitions)]
6666
pub trait EntryAsync {
6767
fn get_stats(&self) -> impl std::future::Future<Output = SwarmMetadata> + Send;
68-
fn is_good(&self, policy: &TrackerPolicy) -> impl std::future::Future<Output = bool> + Send;
68+
fn check_good(self, policy: &TrackerPolicy) -> impl std::future::Future<Output = bool> + Send;
6969
fn peers_is_empty(&self) -> impl std::future::Future<Output = bool> + Send;
7070
fn get_peers_len(&self) -> impl std::future::Future<Output = usize> + Send;
7171
fn get_peers(&self, limit: Option<usize>) -> impl std::future::Future<Output = Vec<Arc<peer::Peer>>> + Send;

packages/torrent-repository/src/entry/mutex_tokio.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ impl EntryAsync for EntryMutexTokio {
1313
self.lock().await.get_stats()
1414
}
1515

16-
async fn is_good(&self, policy: &TrackerPolicy) -> bool {
16+
async fn check_good(self, policy: &TrackerPolicy) -> bool {
1717
self.lock().await.is_good(policy)
1818
}
1919

packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs

+21-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
use std::collections::BTreeMap;
2+
use std::iter::zip;
23
use std::pin::Pin;
34
use std::sync::Arc;
45

@@ -124,8 +125,27 @@ where
124125
}
125126

126127
async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) {
128+
let handles: Vec<Pin<Box<dyn Future<Output = Option<InfoHash>> + Send>>>;
129+
130+
{
131+
let db = self.get_torrents();
132+
133+
handles = zip(db.keys().copied(), db.values().cloned())
134+
.map(|(infohash, torrent)| {
135+
torrent
136+
.check_good(policy)
137+
.map(move |good| if good { None } else { Some(infohash) })
138+
.boxed()
139+
})
140+
.collect::<Vec<_>>();
141+
}
142+
143+
let not_good = join_all(handles).await;
144+
127145
let mut db = self.get_torrents_mut();
128146

129-
db.retain(|_, e| e.blocking_lock().is_good(policy));
147+
for remove in not_good.into_iter().flatten() {
148+
drop(db.remove(&remove));
149+
}
130150
}
131151
}

packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs

+11-1
Original file line numberDiff line numberDiff line change
@@ -119,6 +119,16 @@ where
119119
async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) {
120120
let mut db = self.get_torrents_mut().await;
121121

122-
db.retain(|_, e| e.blocking_lock().is_good(policy));
122+
let mut not_good = Vec::<InfoHash>::default();
123+
124+
for (&infohash, torrent) in db.iter() {
125+
if !torrent.clone().check_good(policy).await {
126+
not_good.push(infohash);
127+
}
128+
}
129+
130+
for remove in not_good {
131+
drop(db.remove(&remove));
132+
}
123133
}
124134
}

packages/torrent-repository/tests/common/repo.rs

-1
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@ pub(crate) enum Repo {
2020
TokioMutexTokio(TorrentsRwLockTokioMutexTokio),
2121
}
2222

23-
#[allow(dead_code)]
2423
impl Repo {
2524
pub(crate) async fn get(&self, key: &InfoHash) -> Option<EntrySingle> {
2625
match self {

packages/torrent-repository/tests/common/torrent.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ impl Torrent {
2727
match self {
2828
Torrent::Single(entry) => entry.is_good(policy),
2929
Torrent::MutexStd(entry) => entry.is_good(policy),
30-
Torrent::MutexTokio(entry) => entry.clone().is_good(policy).await,
30+
Torrent::MutexTokio(entry) => entry.clone().check_good(policy).await,
3131
}
3232
}
3333

packages/torrent-repository/tests/repository/mod.rs

+172-5
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,11 @@
1+
use std::collections::{BTreeMap, HashSet};
12
use std::hash::{DefaultHasher, Hash, Hasher};
23

34
use rstest::{fixture, rstest};
5+
use torrust_tracker_configuration::TrackerPolicy;
46
use torrust_tracker_primitives::announce_event::AnnounceEvent;
57
use torrust_tracker_primitives::info_hash::InfoHash;
8+
use torrust_tracker_primitives::pagination::Pagination;
69
use torrust_tracker_primitives::{NumberOfBytes, PersistentTorrents};
710
use torrust_tracker_torrent_repository::entry::Entry as _;
811
use torrust_tracker_torrent_repository::repository::{RwLockStd, RwLockTokio};
@@ -104,6 +107,39 @@ fn three() -> Entries {
104107
]
105108
}
106109

110+
#[fixture]
111+
fn many_out_of_order() -> Entries {
112+
let mut entries: HashSet<(InfoHash, EntrySingle)> = HashSet::default();
113+
114+
for i in 0..408 {
115+
let mut entry = EntrySingle::default();
116+
entry.insert_or_update_peer(&a_started_peer(i));
117+
118+
entries.insert((InfoHash::from(&i), entry));
119+
}
120+
121+
// We keep the random iteration order of the hash set for the vector.
122+
entries.iter().map(|(i, e)| (*i, e.clone())).collect()
123+
}
124+
125+
#[fixture]
126+
fn many_hashed_in_order() -> Entries {
127+
let mut entries: BTreeMap<InfoHash, EntrySingle> = BTreeMap::default();
128+
129+
for i in 0..408 {
130+
let mut entry = EntrySingle::default();
131+
entry.insert_or_update_peer(&a_started_peer(i));
132+
133+
let hash: &mut DefaultHasher = &mut DefaultHasher::default();
134+
hash.write_i32(i);
135+
136+
entries.insert(InfoHash::from(&hash.clone()), entry);
137+
}
138+
139+
// We return the entries in order from the B-tree map.
140+
entries.iter().map(|(i, e)| (*i, e.clone())).collect()
141+
}
142+
107143
#[fixture]
108144
fn persistent_empty() -> PersistentTorrents {
109145
PersistentTorrents::default()
@@ -141,13 +177,50 @@ async fn make(repo: &Repo, entries: &Entries) {
141177
}
142178
}
143179

180+
#[fixture]
181+
fn paginated_limit_zero() -> Pagination {
182+
Pagination::new(0, 0)
183+
}
184+
185+
#[fixture]
186+
fn paginated_limit_one() -> Pagination {
187+
Pagination::new(0, 1)
188+
}
189+
190+
#[fixture]
191+
fn paginated_limit_one_offset_one() -> Pagination {
192+
Pagination::new(1, 1)
193+
}
194+
195+
#[fixture]
196+
fn policy_none() -> TrackerPolicy {
197+
TrackerPolicy::new(false, 0, false)
198+
}
199+
200+
#[fixture]
201+
fn policy_persist() -> TrackerPolicy {
202+
TrackerPolicy::new(false, 0, true)
203+
}
204+
205+
#[fixture]
206+
fn policy_remove() -> TrackerPolicy {
207+
TrackerPolicy::new(true, 0, false)
208+
}
209+
210+
#[fixture]
211+
fn policy_remove_persist() -> TrackerPolicy {
212+
TrackerPolicy::new(true, 0, true)
213+
}
214+
144215
#[rstest]
145216
#[case::empty(empty())]
146217
#[case::default(default())]
147218
#[case::started(started())]
148219
#[case::completed(completed())]
149220
#[case::downloaded(downloaded())]
150221
#[case::three(three())]
222+
#[case::out_of_order(many_out_of_order())]
223+
#[case::in_order(many_hashed_in_order())]
151224
#[tokio::test]
152225
async fn it_should_get_a_torrent_entry(
153226
#[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
@@ -169,17 +242,77 @@ async fn it_should_get_a_torrent_entry(
169242
#[case::completed(completed())]
170243
#[case::downloaded(downloaded())]
171244
#[case::three(three())]
245+
#[case::out_of_order(many_out_of_order())]
246+
#[case::in_order(many_hashed_in_order())]
172247
#[tokio::test]
173-
async fn it_should_get_entries(
248+
async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order(
174249
#[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
175250
#[case] entries: Entries,
251+
many_out_of_order: Entries,
176252
) {
177253
make(&repo, &entries).await;
178254

179-
if entries.first().is_some() {
180-
assert!(entries.contains(repo.get_paginated(None).await.first().expect("it should have at least one")));
181-
} else {
182-
assert!(repo.get_paginated(None).await.is_empty());
255+
let entries_a = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::<Vec<_>>();
256+
257+
make(&repo, &many_out_of_order).await;
258+
259+
let entries_b = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::<Vec<_>>();
260+
261+
let is_equal = entries_b.iter().take(entries_a.len()).copied().collect::<Vec<_>>() == entries_a;
262+
263+
let is_sorted = entries_b.windows(2).all(|w| w[0] <= w[1]);
264+
265+
assert!(
266+
is_equal || is_sorted,
267+
"The order is neither stable (is_equal: {is_equal}) nor sorted (is_sorted: {is_sorted})."
268+
);
269+
}
270+
271+
#[rstest]
272+
#[case::empty(empty())]
273+
#[case::default(default())]
274+
#[case::started(started())]
275+
#[case::completed(completed())]
276+
#[case::downloaded(downloaded())]
277+
#[case::three(three())]
278+
#[case::out_of_order(many_out_of_order())]
279+
#[case::in_order(many_hashed_in_order())]
280+
#[tokio::test]
281+
async fn it_should_get_paginated(
282+
#[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
283+
#[case] entries: Entries,
284+
#[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination,
285+
) {
286+
make(&repo, &entries).await;
287+
288+
let mut info_hashes = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::<Vec<_>>();
289+
info_hashes.sort();
290+
291+
match paginated {
292+
// it should return empty if limit is zero.
293+
Pagination { limit: 0, .. } => assert_eq!(repo.get_paginated(Some(&paginated)).await, vec![]),
294+
295+
// it should return a single entry if the limit is one.
296+
Pagination { limit: 1, offset: 0 } => {
297+
if info_hashes.is_empty() {
298+
assert_eq!(repo.get_paginated(Some(&paginated)).await.len(), 0);
299+
} else {
300+
let page = repo.get_paginated(Some(&paginated)).await;
301+
assert_eq!(page.len(), 1);
302+
assert_eq!(page.first().map(|(i, _)| i), info_hashes.first());
303+
}
304+
}
305+
306+
// it should return only the second entry if both the limit and the offset are one.
307+
Pagination { limit: 1, offset: 1 } => {
308+
if info_hashes.len() > 1 {
309+
let page = repo.get_paginated(Some(&paginated)).await;
310+
assert_eq!(page.len(), 1);
311+
assert_eq!(page[0].0, info_hashes[1]);
312+
}
313+
}
314+
// the other cases are not yet tested.
315+
_ => {}
183316
}
184317
}
185318

@@ -190,6 +323,8 @@ async fn it_should_get_entries(
190323
#[case::completed(completed())]
191324
#[case::downloaded(downloaded())]
192325
#[case::three(three())]
326+
#[case::out_of_order(many_out_of_order())]
327+
#[case::in_order(many_hashed_in_order())]
193328
#[tokio::test]
194329
async fn it_should_get_metrics(
195330
#[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
@@ -220,6 +355,8 @@ async fn it_should_get_metrics(
220355
#[case::completed(completed())]
221356
#[case::downloaded(downloaded())]
222357
#[case::three(three())]
358+
#[case::out_of_order(many_out_of_order())]
359+
#[case::in_order(many_hashed_in_order())]
223360
#[tokio::test]
224361
async fn it_should_import_persistent_torrents(
225362
#[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
@@ -247,6 +384,8 @@ async fn it_should_import_persistent_torrents(
247384
#[case::completed(completed())]
248385
#[case::downloaded(downloaded())]
249386
#[case::three(three())]
387+
#[case::out_of_order(many_out_of_order())]
388+
#[case::in_order(many_hashed_in_order())]
250389
#[tokio::test]
251390
async fn it_should_remove_an_entry(
252391
#[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
@@ -272,6 +411,8 @@ async fn it_should_remove_an_entry(
272411
#[case::completed(completed())]
273412
#[case::downloaded(downloaded())]
274413
#[case::three(three())]
414+
#[case::out_of_order(many_out_of_order())]
415+
#[case::in_order(many_hashed_in_order())]
275416
#[tokio::test]
276417
async fn it_should_remove_inactive_peers(
277418
#[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
@@ -335,3 +476,29 @@ async fn it_should_remove_inactive_peers(
335476
assert!(!entry.get_peers(None).contains(&peer.into()));
336477
}
337478
}
479+
480+
#[rstest]
481+
#[case::empty(empty())]
482+
#[case::default(default())]
483+
#[case::started(started())]
484+
#[case::completed(completed())]
485+
#[case::downloaded(downloaded())]
486+
#[case::three(three())]
487+
#[case::out_of_order(many_out_of_order())]
488+
#[case::in_order(many_hashed_in_order())]
489+
#[tokio::test]
490+
async fn it_should_remove_peerless_torrents(
491+
#[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
492+
#[case] entries: Entries,
493+
#[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy,
494+
) {
495+
make(&repo, &entries).await;
496+
497+
repo.remove_peerless_torrents(&policy).await;
498+
499+
let torrents = repo.get_paginated(None).await;
500+
501+
for (_, entry) in torrents {
502+
assert!(entry.is_good(&policy));
503+
}
504+
}

0 commit comments

Comments
 (0)