+use std::collections::{BTreeMap, HashSet};
 use std::hash::{DefaultHasher, Hash, Hasher};
 
 use rstest::{fixture, rstest};
+use torrust_tracker_configuration::TrackerPolicy;
 use torrust_tracker_primitives::announce_event::AnnounceEvent;
 use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::pagination::Pagination;
 use torrust_tracker_primitives::{NumberOfBytes, PersistentTorrents};
 use torrust_tracker_torrent_repository::entry::Entry as _;
 use torrust_tracker_torrent_repository::repository::{RwLockStd, RwLockTokio};
@@ -104,6 +107,39 @@ fn three() -> Entries {
     ]
 }
 
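+// Builds 408 entries, each with one started peer and a sequential info hash; collecting them
+// from a `HashSet` leaves the resulting vector in an arbitrary (unsorted) order.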
+#[fixture]
+fn many_out_of_order() -> Entries {
+    let mut entries: HashSet<(InfoHash, EntrySingle)> = HashSet::default();
+
+    for i in 0..408 {
+        let mut entry = EntrySingle::default();
+        entry.insert_or_update_peer(&a_started_peer(i));
+
+        entries.insert((InfoHash::from(&i), entry));
+    }
+
+    // We keep the random order of the hash set for the vector.
+    entries.iter().map(|(i, e)| (*i, e.clone())).collect()
+}
+
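+// Builds 408 entries whose info hashes are derived from a `DefaultHasher`; the `BTreeMap`
+// returns them sorted by info hash.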
+#[fixture]
+fn many_hashed_in_order() -> Entries {
+    let mut entries: BTreeMap<InfoHash, EntrySingle> = BTreeMap::default();
+
+    for i in 0..408 {
+        let mut entry = EntrySingle::default();
+        entry.insert_or_update_peer(&a_started_peer(i));
+
+        let hash: &mut DefaultHasher = &mut DefaultHasher::default();
+        hash.write_i32(i);
+
+        entries.insert(InfoHash::from(&hash.clone()), entry);
+    }
+
+    // We return the entries in order from the B-tree map.
+    entries.iter().map(|(i, e)| (*i, e.clone())).collect()
+}
+
 #[fixture]
 fn persistent_empty() -> PersistentTorrents {
     PersistentTorrents::default()
@@ -141,13 +177,50 @@ async fn make(repo: &Repo, entries: &Entries) {
     }
 }
 
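+// Pagination fixtures: the arguments to `Pagination::new` are the offset and the limit, in that
+// order (cf. `paginated_limit_one_offset_one`).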
+#[fixture]
+fn paginated_limit_zero() -> Pagination {
+    Pagination::new(0, 0)
+}
+
+#[fixture]
+fn paginated_limit_one() -> Pagination {
+    Pagination::new(0, 1)
+}
+
+#[fixture]
+fn paginated_limit_one_offset_one() -> Pagination {
+    Pagination::new(1, 1)
+}
+
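+// Policy fixtures: the first flag enables removing peerless torrents and the last one enables
+// persistence (cf. `policy_remove` and `policy_persist`).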
+#[fixture]
+fn policy_none() -> TrackerPolicy {
+    TrackerPolicy::new(false, 0, false)
+}
+
+#[fixture]
+fn policy_persist() -> TrackerPolicy {
+    TrackerPolicy::new(false, 0, true)
+}
+
+#[fixture]
+fn policy_remove() -> TrackerPolicy {
+    TrackerPolicy::new(true, 0, false)
+}
+
+#[fixture]
+fn policy_remove_persist() -> TrackerPolicy {
+    TrackerPolicy::new(true, 0, true)
+}
+
 #[rstest]
 #[case::empty(empty())]
 #[case::default(default())]
 #[case::started(started())]
 #[case::completed(completed())]
 #[case::downloaded(downloaded())]
 #[case::three(three())]
+#[case::out_of_order(many_out_of_order())]
+#[case::in_order(many_hashed_in_order())]
 #[tokio::test]
 async fn it_should_get_a_torrent_entry(
     #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
@@ -169,17 +242,77 @@ async fn it_should_get_a_torrent_entry(
 #[case::completed(completed())]
 #[case::downloaded(downloaded())]
 #[case::three(three())]
+#[case::out_of_order(many_out_of_order())]
+#[case::in_order(many_hashed_in_order())]
 #[tokio::test]
-async fn it_should_get_entries(
+async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order(
     #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
     #[case] entries: Entries,
+    many_out_of_order: Entries,
 ) {
     make(&repo, &entries).await;
 
-    if entries.first().is_some() {
-        assert!(entries.contains(repo.get_paginated(None).await.first().expect("it should have at least one")));
-    } else {
-        assert!(repo.get_paginated(None).await.is_empty());
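+    // Take a first listing for the case entries, then add many more entries and list again.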
+    let entries_a = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::<Vec<_>>();
+
+    make(&repo, &many_out_of_order).await;
+
+    let entries_b = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::<Vec<_>>();
+
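+    // The order is acceptable if the first listing is a prefix of the second (stable order)
+    // or if the second listing is sorted by info hash.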
+    let is_equal = entries_b.iter().take(entries_a.len()).copied().collect::<Vec<_>>() == entries_a;
+
+    let is_sorted = entries_b.windows(2).all(|w| w[0] <= w[1]);
+
+    assert!(
+        is_equal || is_sorted,
+        "the order is neither stable (is_equal: {is_equal}) nor sorted (is_sorted: {is_sorted})"
+    );
+}
+
+#[rstest]
+#[case::empty(empty())]
+#[case::default(default())]
+#[case::started(started())]
+#[case::completed(completed())]
+#[case::downloaded(downloaded())]
+#[case::three(three())]
+#[case::out_of_order(many_out_of_order())]
+#[case::in_order(many_hashed_in_order())]
+#[tokio::test]
+async fn it_should_get_paginated(
+    #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
+    #[case] entries: Entries,
+    #[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination,
+) {
+    make(&repo, &entries).await;
+
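+    // Collect every info hash in sorted order so we know which entries the first pages should contain.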
+    let mut info_hashes = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::<Vec<_>>();
+    info_hashes.sort();
+
+    match paginated {
+        // It should return an empty vector if the limit is zero.
+        Pagination { limit: 0, .. } => assert_eq!(repo.get_paginated(Some(&paginated)).await, vec![]),
+
+        // It should return a single entry if the limit is one.
+        Pagination { limit: 1, offset: 0 } => {
+            if info_hashes.is_empty() {
+                assert_eq!(repo.get_paginated(Some(&paginated)).await.len(), 0);
+            } else {
+                let page = repo.get_paginated(Some(&paginated)).await;
+                assert_eq!(page.len(), 1);
+                assert_eq!(page.first().map(|(i, _)| i), info_hashes.first());
+            }
+        }
+
+        // It should return only the second entry if both the limit and the offset are one.
+        Pagination { limit: 1, offset: 1 } => {
+            if info_hashes.len() > 1 {
+                let page = repo.get_paginated(Some(&paginated)).await;
+                assert_eq!(page.len(), 1);
+                assert_eq!(page[0].0, info_hashes[1]);
+            }
+        }
+        // The other cases are not yet tested.
+        _ => {}
     }
 }
 
@@ -190,6 +323,8 @@ async fn it_should_get_entries(
 #[case::completed(completed())]
 #[case::downloaded(downloaded())]
 #[case::three(three())]
+#[case::out_of_order(many_out_of_order())]
+#[case::in_order(many_hashed_in_order())]
 #[tokio::test]
 async fn it_should_get_metrics(
     #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
@@ -220,6 +355,8 @@ async fn it_should_get_metrics(
 #[case::completed(completed())]
 #[case::downloaded(downloaded())]
 #[case::three(three())]
+#[case::out_of_order(many_out_of_order())]
+#[case::in_order(many_hashed_in_order())]
 #[tokio::test]
 async fn it_should_import_persistent_torrents(
     #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
@@ -247,6 +384,8 @@ async fn it_should_import_persistent_torrents(
 #[case::completed(completed())]
 #[case::downloaded(downloaded())]
 #[case::three(three())]
+#[case::out_of_order(many_out_of_order())]
+#[case::in_order(many_hashed_in_order())]
 #[tokio::test]
 async fn it_should_remove_an_entry(
     #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
@@ -272,6 +411,8 @@ async fn it_should_remove_an_entry(
 #[case::completed(completed())]
 #[case::downloaded(downloaded())]
 #[case::three(three())]
+#[case::out_of_order(many_out_of_order())]
+#[case::in_order(many_hashed_in_order())]
 #[tokio::test]
 async fn it_should_remove_inactive_peers(
     #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
@@ -335,3 +476,29 @@ async fn it_should_remove_inactive_peers(
         assert!(!entry.get_peers(None).contains(&peer.into()));
     }
 }
+
+#[rstest]
+#[case::empty(empty())]
+#[case::default(default())]
+#[case::started(started())]
+#[case::completed(completed())]
+#[case::downloaded(downloaded())]
+#[case::three(three())]
+#[case::out_of_order(many_out_of_order())]
+#[case::in_order(many_hashed_in_order())]
+#[tokio::test]
+async fn it_should_remove_peerless_torrents(
+    #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
+    #[case] entries: Entries,
+    #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy,
+) {
+    make(&repo, &entries).await;
+
+    repo.remove_peerless_torrents(&policy).await;
+
+    let torrents = repo.get_paginated(None).await;
+
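+    // Every entry that survives the cleanup must still be considered good under the policy.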
+    for (_, entry) in torrents {
+        assert!(entry.is_good(&policy));
+    }
+}