From 5c0047aadfb08c05f9ba603fb139b29b69924954 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 25 Mar 2024 11:21:03 +0800 Subject: [PATCH 1/9] dev: refactor torrent repository extracted async and sync implementations --- cSpell.json | 1 + .../src/benches/asyn.rs | 41 ++- .../src/benches/sync.rs | 42 +-- .../torrent-repository-benchmarks/src/main.rs | 50 +-- src/core/mod.rs | 8 +- src/core/services/torrent.rs | 1 + src/core/torrent/mod.rs | 7 +- src/core/torrent/repository.rs | 301 ------------------ src/core/torrent/repository_asyn.rs | 188 +++++++++++ src/core/torrent/repository_sync.rs | 122 +++++++ tests/servers/health_check_api/environment.rs | 1 + tests/servers/http/responses/scrape.rs | 4 + 12 files changed, 402 insertions(+), 364 deletions(-) delete mode 100644 src/core/torrent/repository.rs create mode 100644 src/core/torrent/repository_asyn.rs create mode 100644 src/core/torrent/repository_sync.rs diff --git a/cSpell.json b/cSpell.json index 16dff714e..6d8b68c92 100644 --- a/cSpell.json +++ b/cSpell.json @@ -5,6 +5,7 @@ "alekitto", "appuser", "Arvid", + "asyn", "autoclean", "AUTOINCREMENT", "automock", diff --git a/packages/torrent-repository-benchmarks/src/benches/asyn.rs b/packages/torrent-repository-benchmarks/src/benches/asyn.rs index 33f9e85fa..9482d821c 100644 --- a/packages/torrent-repository-benchmarks/src/benches/asyn.rs +++ b/packages/torrent-repository-benchmarks/src/benches/asyn.rs @@ -3,17 +3,20 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::TRepositoryAsync; +use torrust_tracker::core::torrent::repository_asyn::{RepositoryAsync, RepositoryTokioRwLock}; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; -pub async fn async_add_one_torrent(samples: usize) -> (Duration, Duration) { +pub async fn 
async_add_one_torrent(samples: usize) -> (Duration, Duration) +where + RepositoryTokioRwLock: RepositoryAsync, +{ let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); let info_hash = InfoHash([0; 20]); @@ -32,15 +35,16 @@ pub async fn async_add_one_torrent( } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn async_update_one_torrent_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { +pub async fn async_update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryTokioRwLock: RepositoryAsync, +{ let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); let info_hash: &'static InfoHash = &InfoHash([0; 20]); let handles = FuturesUnordered::new(); @@ -81,15 +85,16 @@ pub async fn async_update_one_torrent_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { +pub async fn async_add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryTokioRwLock: RepositoryAsync, +{ let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); @@ -125,15 +130,19 @@ pub async fn async_add_multiple_torrents_in_parallel( +pub async fn async_update_multiple_torrents_in_parallel( runtime: &tokio::runtime::Runtime, 
samples: usize, -) -> (Duration, Duration) { +) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryTokioRwLock: RepositoryAsync, +{ let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); diff --git a/packages/torrent-repository-benchmarks/src/benches/sync.rs b/packages/torrent-repository-benchmarks/src/benches/sync.rs index dac7ab810..c37fa9f4a 100644 --- a/packages/torrent-repository-benchmarks/src/benches/sync.rs +++ b/packages/torrent-repository-benchmarks/src/benches/sync.rs @@ -3,7 +3,7 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::Repository; +use torrust_tracker::core::torrent::repository_sync::{RepositoryStdRwLock, RepositorySync}; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; @@ -11,11 +11,14 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste // Simply add one torrent #[must_use] -pub fn add_one_torrent(samples: usize) -> (Duration, Duration) { +pub fn add_one_torrent(samples: usize) -> (Duration, Duration) +where + RepositoryStdRwLock: RepositorySync, +{ let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); let info_hash = InfoHash([0; 20]); @@ -32,15 +35,16 @@ pub fn add_one_torrent(samples: usize) -> } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { +pub async fn update_one_torrent_in_parallel(runtime: 
&tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryStdRwLock: RepositorySync, +{ let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); let info_hash: &'static InfoHash = &InfoHash([0; 20]); let handles = FuturesUnordered::new(); @@ -77,15 +81,16 @@ pub async fn update_one_torrent_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { +pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryStdRwLock: RepositorySync, +{ let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); @@ -119,15 +124,16 @@ pub async fn add_multiple_torrents_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { +pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryStdRwLock: RepositorySync, +{ let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); diff --git a/packages/torrent-repository-benchmarks/src/main.rs b/packages/torrent-repository-benchmarks/src/main.rs index 0d9db73ac..eab8e3803 100644 --- a/packages/torrent-repository-benchmarks/src/main.rs +++ 
b/packages/torrent-repository-benchmarks/src/main.rs @@ -7,7 +7,7 @@ use torrust_torrent_repository_benchmarks::benches::asyn::{ use torrust_torrent_repository_benchmarks::benches::sync::{ add_multiple_torrents_in_parallel, add_one_torrent, update_multiple_torrents_in_parallel, update_one_torrent_in_parallel, }; -use torrust_tracker::core::torrent::repository::{AsyncSync, RepositoryAsync, RepositoryAsyncSingle, Sync, SyncSingle}; +use torrust_tracker::core::torrent::{Entry, EntryMutexStd, EntryMutexTokio}; #[allow(clippy::too_many_lines)] #[allow(clippy::print_literal)] @@ -25,67 +25,67 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(async_add_one_torrent::(1_000_000)) + rt.block_on(async_add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) ); if let Some(true) = args.compare { println!(); println!("std::sync::RwLock>"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - add_one_torrent::(1_000_000) - ); + println!("{}: Avg/AdjAvg: {:?}", "add_one_torrent", add_one_torrent::(1_000_000)); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) + 
rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); println!("std::sync::RwLock>>>"); - println!("{}: Avg/AdjAvg: {:?}", "add_one_torrent", add_one_torrent::(1_000_000)); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_one_torrent", + add_one_torrent::(1_000_000) + ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -94,22 +94,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(async_add_one_torrent::(1_000_000)) + rt.block_on(async_add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -118,22 +118,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - 
rt.block_on(async_add_one_torrent::(1_000_000)) + rt.block_on(async_add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) ); } } diff --git a/src/core/mod.rs b/src/core/mod.rs index dac298462..c392ead75 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -455,7 +455,8 @@ use torrust_tracker_primitives::TrackerMode; use self::auth::Key; use self::error::Error; use self::peer::Peer; -use self::torrent::repository::{RepositoryAsyncSingle, TRepositoryAsync}; +use self::torrent::repository_asyn::{RepositoryAsync, RepositoryTokioRwLock}; +use self::torrent::Entry; use crate::core::databases::Database; use crate::core::torrent::{SwarmMetadata, SwarmStats}; use crate::shared::bit_torrent::info_hash::InfoHash; @@ -481,7 +482,7 @@ pub struct Tracker { policy: TrackerPolicy, keys: tokio::sync::RwLock>, whitelist: tokio::sync::RwLock>, - pub torrents: Arc, + pub torrents: Arc>, stats_event_sender: Option>, stats_repository: statistics::Repo, external_ip: Option, @@ -579,7 +580,7 @@ impl Tracker { mode, keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), - torrents: Arc::new(RepositoryAsyncSingle::new()), + torrents: Arc::new(RepositoryTokioRwLock::::default()), stats_event_sender, stats_repository, database, @@ -1754,6 +1755,7 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use 
crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; + use crate::core::torrent::repository_asyn::RepositoryAsync; #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index fc24e7c4c..eca6cbf3b 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -9,6 +9,7 @@ use std::sync::Arc; use serde::Deserialize; use crate::core::peer::Peer; +use crate::core::torrent::repository_asyn::RepositoryAsync; use crate::core::Tracker; use crate::shared::bit_torrent::info_hash::InfoHash; diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index c4a1b0df9..b5ebb1054 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -28,8 +28,10 @@ //! Peer that don not have a full copy of the torrent data are called "leechers". //! //! > **NOTICE**: that both [`SwarmMetadata`] and [`SwarmStats`] contain the same information. [`SwarmMetadata`] is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). -pub mod repository; +pub mod repository_asyn; +pub mod repository_sync; +use std::sync::Arc; use std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; @@ -53,6 +55,9 @@ pub struct Entry { pub completed: u32, } +pub type EntryMutexTokio = Arc>; +pub type EntryMutexStd = Arc>; + /// Swarm statistics for one torrent. /// Swarm metadata dictionary in the scrape response. 
/// diff --git a/src/core/torrent/repository.rs b/src/core/torrent/repository.rs deleted file mode 100644 index d4f8ee5e3..000000000 --- a/src/core/torrent/repository.rs +++ /dev/null @@ -1,301 +0,0 @@ -use std::sync::Arc; - -use crate::core::peer; -use crate::core::torrent::{Entry, SwarmStats}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -pub trait Repository { - fn new() -> Self; - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool); -} - -pub trait TRepositoryAsync { - fn new() -> Self; - fn update_torrent_with_peer_and_get_stats( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - ) -> impl std::future::Future + Send; -} - -/// Structure that holds all torrents. Using `std::sync` locks. -pub struct Sync { - torrents: std::sync::RwLock>>>, -} - -impl Sync { - /// Returns the get torrents of this [`Sync`]. - /// - /// # Panics - /// - /// Panics if unable to read the torrent. - pub fn get_torrents( - &self, - ) -> std::sync::RwLockReadGuard<'_, std::collections::BTreeMap>>> { - self.torrents.read().expect("unable to get torrent list") - } - - /// Returns the mutable get torrents of this [`Sync`]. - /// - /// # Panics - /// - /// Panics if unable to write to the torrents list. 
- pub fn get_torrents_mut( - &self, - ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap>>> { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Repository for Sync { - fn new() -> Self { - Self { - torrents: std::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut(); - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().unwrap(); - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -/// Structure that holds all torrents. Using `std::sync` locks. -pub struct SyncSingle { - torrents: std::sync::RwLock>, -} - -impl SyncSingle { - /// Returns the get torrents of this [`SyncSingle`]. - /// - /// # Panics - /// - /// Panics if unable to get torrent list. - pub fn get_torrents(&self) -> std::sync::RwLockReadGuard<'_, std::collections::BTreeMap> { - self.torrents.read().expect("unable to get torrent list") - } - - /// Returns the get torrents of this [`SyncSingle`]. - /// - /// # Panics - /// - /// Panics if unable to get writable torrent list. 
- pub fn get_torrents_mut(&self) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Repository for SyncSingle { - fn new() -> Self { - Self { - torrents: std::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let mut torrents = self.torrents.write().unwrap(); - - let torrent_entry = match torrents.entry(*info_hash) { - std::collections::btree_map::Entry::Vacant(vacant) => vacant.insert(Entry::new()), - std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), - }; - - let stats_updated = torrent_entry.insert_or_update_peer(peer); - let stats = torrent_entry.get_stats(); - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -/// Structure that holds all torrents. Using `tokio::sync` locks. 
-#[allow(clippy::module_name_repetitions)] -pub struct RepositoryAsync { - torrents: tokio::sync::RwLock>>>, -} - -impl TRepositoryAsync for RepositoryAsync { - fn new() -> Self { - Self { - torrents: tokio::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut().await; - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(tokio::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().await; - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsync { - pub async fn get_torrents( - &self, - ) -> tokio::sync::RwLockReadGuard<'_, std::collections::BTreeMap>>> { - self.torrents.read().await - } - - pub async fn get_torrents_mut( - &self, - ) -> tokio::sync::RwLockWriteGuard<'_, std::collections::BTreeMap>>> { - self.torrents.write().await - } -} - -/// Structure that holds all torrents. Using a `tokio::sync` lock for the torrents map an`std::sync`nc lock for the inner torrent entry. 
-pub struct AsyncSync { - torrents: tokio::sync::RwLock>>>, -} - -impl TRepositoryAsync for AsyncSync { - fn new() -> Self { - Self { - torrents: tokio::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut().await; - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().unwrap(); - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl AsyncSync { - pub async fn get_torrents( - &self, - ) -> tokio::sync::RwLockReadGuard<'_, std::collections::BTreeMap>>> { - self.torrents.read().await - } - - pub async fn get_torrents_mut( - &self, - ) -> tokio::sync::RwLockWriteGuard<'_, std::collections::BTreeMap>>> { - self.torrents.write().await - } -} - -#[allow(clippy::module_name_repetitions)] -pub struct RepositoryAsyncSingle { - torrents: tokio::sync::RwLock>, -} - -impl TRepositoryAsync for RepositoryAsyncSingle { - fn new() -> Self { - Self { - torrents: tokio::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let (stats, stats_updated) = { - let mut torrents_lock = self.torrents.write().await; - let torrent_entry = 
torrents_lock.entry(*info_hash).or_insert(Entry::new()); - let stats_updated = torrent_entry.insert_or_update_peer(peer); - let stats = torrent_entry.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsyncSingle { - pub async fn get_torrents(&self) -> tokio::sync::RwLockReadGuard<'_, std::collections::BTreeMap> { - self.torrents.read().await - } - - pub async fn get_torrents_mut(&self) -> tokio::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { - self.torrents.write().await - } -} diff --git a/src/core/torrent/repository_asyn.rs b/src/core/torrent/repository_asyn.rs new file mode 100644 index 000000000..ac3724c3b --- /dev/null +++ b/src/core/torrent/repository_asyn.rs @@ -0,0 +1,188 @@ +use std::sync::Arc; + +use super::{EntryMutexStd, EntryMutexTokio}; +use crate::core::peer; +use crate::core::torrent::{Entry, SwarmStats}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +pub trait RepositoryAsync: Default { + fn update_torrent_with_peer_and_get_stats( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + ) -> impl std::future::Future + Send; + + fn get_torrents<'a>( + &'a self, + ) -> impl std::future::Future>> + Send + where + std::collections::BTreeMap: 'a; + + fn get_torrents_mut<'a>( + &'a self, + ) -> impl std::future::Future>> + Send + where + std::collections::BTreeMap: 'a; +} + +pub struct RepositoryTokioRwLock { + torrents: tokio::sync::RwLock>, +} + +impl RepositoryAsync for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); + + let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut().await; + let entry = 
torrents_lock + .entry(*info_hash) + .or_insert(Arc::new(tokio::sync::Mutex::new(Entry::new()))); + entry.clone() + }; + + let (stats, stats_updated) = { + let mut torrent_entry_lock = torrent_entry.lock().await; + let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); + let stats = torrent_entry_lock.get_stats(); + + (stats, stats_updated) + }; + + ( + SwarmStats { + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, + }, + stats_updated, + ) + } + + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl Default for RepositoryTokioRwLock { + fn default() -> Self { + Self { + torrents: tokio::sync::RwLock::default(), + } + } +} + +impl RepositoryAsync for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); + + let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut().await; + let entry = torrents_lock + .entry(*info_hash) + .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); + entry.clone() + }; + + let (stats, stats_updated) = { + let mut torrent_entry_lock = torrent_entry.lock().unwrap(); + let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); + let stats = torrent_entry_lock.get_stats(); + + (stats, stats_updated) + }; + + ( + SwarmStats { + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, + }, + stats_updated, + ) + } + + async fn get_torrents<'a>(&'a self) -> 
tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl Default for RepositoryTokioRwLock { + fn default() -> Self { + Self { + torrents: tokio::sync::RwLock::default(), + } + } +} + +impl RepositoryAsync for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let (stats, stats_updated) = { + let mut torrents_lock = self.torrents.write().await; + let torrent_entry = torrents_lock.entry(*info_hash).or_insert(Entry::new()); + let stats_updated = torrent_entry.insert_or_update_peer(peer); + let stats = torrent_entry.get_stats(); + + (stats, stats_updated) + }; + + ( + SwarmStats { + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, + }, + stats_updated, + ) + } + + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>(&'a self) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl Default for RepositoryTokioRwLock { + fn default() -> Self { + Self { + torrents: tokio::sync::RwLock::default(), + } + } +} diff --git a/src/core/torrent/repository_sync.rs b/src/core/torrent/repository_sync.rs new file mode 100644 index 000000000..76fc36fa2 --- /dev/null +++ b/src/core/torrent/repository_sync.rs @@ -0,0 +1,122 @@ +use std::sync::{Arc, RwLock}; + +use super::EntryMutexStd; +use crate::core::peer; +use crate::core::torrent::{Entry, SwarmStats}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +pub trait 
RepositorySync: Default { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool); + + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a; + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a; +} + +pub struct RepositoryStdRwLock { + torrents: std::sync::RwLock>, +} + +impl RepositorySync for RepositoryStdRwLock { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); + + let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut(); + let entry = torrents_lock + .entry(*info_hash) + .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); + entry.clone() + }; + + let (stats, stats_updated) = { + let mut torrent_entry_lock = torrent_entry.lock().unwrap(); + let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); + let stats = torrent_entry_lock.get_stats(); + + (stats, stats_updated) + }; + + ( + SwarmStats { + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, + }, + stats_updated, + ) + } + + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl Default for RepositoryStdRwLock { + fn default() -> Self { + Self { + torrents: RwLock::default(), + } + } +} + +impl RepositorySync for 
RepositoryStdRwLock { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let mut torrents = self.torrents.write().unwrap(); + + let torrent_entry = match torrents.entry(*info_hash) { + std::collections::btree_map::Entry::Vacant(vacant) => vacant.insert(Entry::new()), + std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), + }; + + let stats_updated = torrent_entry.insert_or_update_peer(peer); + let stats = torrent_entry.get_stats(); + + ( + SwarmStats { + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, + }, + stats_updated, + ) + } + + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl Default for RepositoryStdRwLock { + fn default() -> Self { + Self { + torrents: RwLock::default(), + } + } +} diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs index 37344858d..0856985d5 100644 --- a/tests/servers/health_check_api/environment.rs +++ b/tests/servers/health_check_api/environment.rs @@ -12,6 +12,7 @@ use torrust_tracker_configuration::HealthCheckApi; #[derive(Debug)] pub enum Error { + #[allow(dead_code)] Error(String), } diff --git a/tests/servers/http/responses/scrape.rs b/tests/servers/http/responses/scrape.rs index eadecb603..fc741cbf4 100644 --- a/tests/servers/http/responses/scrape.rs +++ b/tests/servers/http/responses/scrape.rs @@ -73,9 +73,13 @@ impl ResponseBuilder { #[derive(Debug)] pub enum BencodeParseError { + #[allow(dead_code)] InvalidValueExpectedDict { value: Value }, + #[allow(dead_code)] InvalidValueExpectedInt { 
value: Value }, + #[allow(dead_code)] InvalidFileField { value: Value }, + #[allow(dead_code)] MissingFileField { field_name: String }, } From 48ce42624dea8321d93375a7f57b37aeab3280ed Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 10 Feb 2024 07:31:20 +0800 Subject: [PATCH 2/9] dev: bench torrent/repository add sync_asyn variant --- .../src/benches/asyn.rs | 20 +- .../src/benches/mod.rs | 1 + .../src/benches/sync.rs | 9 +- .../src/benches/sync_asyn.rs | 185 ++++++++++++++++++ .../torrent-repository-benchmarks/src/main.rs | 76 ++++--- src/core/mod.rs | 2 +- src/core/torrent/mod.rs | 13 ++ src/core/torrent/repository_asyn.rs | 21 +- src/core/torrent/repository_sync.rs | 69 ++++++- 9 files changed, 335 insertions(+), 61 deletions(-) create mode 100644 packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs diff --git a/packages/torrent-repository-benchmarks/src/benches/asyn.rs b/packages/torrent-repository-benchmarks/src/benches/asyn.rs index 9482d821c..d36de9695 100644 --- a/packages/torrent-repository-benchmarks/src/benches/asyn.rs +++ b/packages/torrent-repository-benchmarks/src/benches/asyn.rs @@ -4,14 +4,15 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; use torrust_tracker::core::torrent::repository_asyn::{RepositoryAsync, RepositoryTokioRwLock}; +use torrust_tracker::core::torrent::UpdateTorrentAsync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; -pub async fn async_add_one_torrent(samples: usize) -> (Duration, Duration) +pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryTokioRwLock: RepositoryAsync, + RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, { let mut results: Vec = Vec::with_capacity(samples); @@ -35,10 +36,10 @@ where } // Add one torrent ten thousand times in parallel (depending on the set 
worker threads) -pub async fn async_update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync, + RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -85,10 +86,10 @@ where } // Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn async_add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync, + RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -130,13 +131,10 @@ where } // Async update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn async_update_multiple_torrents_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) +pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync, + RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); diff --git a/packages/torrent-repository-benchmarks/src/benches/mod.rs b/packages/torrent-repository-benchmarks/src/benches/mod.rs index 1026aa4bf..7450f4bcc 100644 --- a/packages/torrent-repository-benchmarks/src/benches/mod.rs +++ b/packages/torrent-repository-benchmarks/src/benches/mod.rs @@ -1,3 +1,4 @@ pub mod asyn; pub mod sync; +pub 
mod sync_asyn; pub mod utils; diff --git a/packages/torrent-repository-benchmarks/src/benches/sync.rs b/packages/torrent-repository-benchmarks/src/benches/sync.rs index c37fa9f4a..3dee93421 100644 --- a/packages/torrent-repository-benchmarks/src/benches/sync.rs +++ b/packages/torrent-repository-benchmarks/src/benches/sync.rs @@ -4,6 +4,7 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; use torrust_tracker::core::torrent::repository_sync::{RepositoryStdRwLock, RepositorySync}; +use torrust_tracker::core::torrent::UpdateTorrentSync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; @@ -13,7 +14,7 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste #[must_use] pub fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryStdRwLock: RepositorySync, + RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, { let mut results: Vec = Vec::with_capacity(samples); @@ -38,7 +39,7 @@ where pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync, + RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -84,7 +85,7 @@ where pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync, + RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -127,7 +128,7 @@ where pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync, + RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, { let args = 
Args::parse(); let mut results: Vec = Vec::with_capacity(samples); diff --git a/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs b/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs new file mode 100644 index 000000000..11ce6ed0c --- /dev/null +++ b/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs @@ -0,0 +1,185 @@ +use std::sync::Arc; +use std::time::Duration; + +use clap::Parser; +use futures::stream::FuturesUnordered; +use torrust_tracker::core::torrent::repository_sync::{RepositoryStdRwLock, RepositorySync}; +use torrust_tracker::core::torrent::UpdateTorrentAsync; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + +use crate::args::Args; +use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; + +// Simply add one torrent +#[must_use] +pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) +where + RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, +{ + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + + let info_hash = InfoHash([0; 20]); + + let start_time = std::time::Instant::now(); + + torrent_repository + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} + +// Add one torrent ten thousand times in parallel (depending on the set worker threads) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, +{ + let args = Args::parse(); + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + let info_hash: &'static 
InfoHash = &InfoHash([0; 20]); + let handles = FuturesUnordered::new(); + + // Add the torrent/peer to the torrent repository + torrent_repository + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; + + let start_time = std::time::Instant::now(); + + for _ in 0..10_000 { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; + + if let Some(sleep_time) = args.sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} + +// Add ten thousand torrents in parallel (depending on the set worker threads) +pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, +{ + let args = Args::parse(); + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + let info_hashes = generate_unique_info_hashes(10_000); + let handles = FuturesUnordered::new(); + + let start_time = std::time::Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; + + if let Some(sleep_time) = args.sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all 
tasks + futures::future::join_all(handles).await; + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} + +// Update ten thousand torrents in parallel (depending on the set worker threads) +pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, +{ + let args = Args::parse(); + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + let info_hashes = generate_unique_info_hashes(10_000); + let handles = FuturesUnordered::new(); + + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; + } + + let start_time = std::time::Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; + + if let Some(sleep_time) = args.sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} diff --git a/packages/torrent-repository-benchmarks/src/main.rs b/packages/torrent-repository-benchmarks/src/main.rs index eab8e3803..4a293b832 100644 --- a/packages/torrent-repository-benchmarks/src/main.rs +++ b/packages/torrent-repository-benchmarks/src/main.rs @@ -1,12 +1,6 @@ use clap::Parser; use 
torrust_torrent_repository_benchmarks::args::Args; -use torrust_torrent_repository_benchmarks::benches::asyn::{ - async_add_multiple_torrents_in_parallel, async_add_one_torrent, async_update_multiple_torrents_in_parallel, - async_update_one_torrent_in_parallel, -}; -use torrust_torrent_repository_benchmarks::benches::sync::{ - add_multiple_torrents_in_parallel, add_one_torrent, update_multiple_torrents_in_parallel, update_one_torrent_in_parallel, -}; +use torrust_torrent_repository_benchmarks::benches::{asyn, sync, sync_asyn}; use torrust_tracker::core::torrent::{Entry, EntryMutexStd, EntryMutexTokio}; #[allow(clippy::too_many_lines)] @@ -25,43 +19,47 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(async_add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); if let Some(true) = args.compare { println!(); println!("std::sync::RwLock>"); - println!("{}: Avg/AdjAvg: {:?}", "add_one_torrent", add_one_torrent::(1_000_000)); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_one_torrent", + sync::add_one_torrent::(1_000_000) + ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - 
rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -70,22 +68,46 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - add_one_torrent::(1_000_000) + sync::add_one_torrent::(1_000_000) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "update_one_torrent_in_parallel", + rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_multiple_torrents_in_parallel", + rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "update_multiple_torrents_in_parallel", + rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) + ); + + println!(); + + println!("std::sync::RwLock>>>"); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_one_torrent", + rt.block_on(sync_asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -94,22 +116,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(async_add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", 
"update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -118,22 +140,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(async_add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); } } diff --git a/src/core/mod.rs b/src/core/mod.rs index c392ead75..56b30f955 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -456,7 +456,7 @@ use self::auth::Key; use self::error::Error; use self::peer::Peer; use self::torrent::repository_asyn::{RepositoryAsync, RepositoryTokioRwLock}; -use self::torrent::Entry; +use self::torrent::{Entry, UpdateTorrentAsync}; use crate::core::databases::Database; use crate::core::torrent::{SwarmMetadata, SwarmStats}; use crate::shared::bit_torrent::info_hash::InfoHash; diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 
b5ebb1054..49c1f61f8 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -39,8 +39,21 @@ use derive_more::Constructor; use serde::{Deserialize, Serialize}; use super::peer::{self, Peer}; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::clock::{Current, TimeNow}; +pub trait UpdateTorrentSync { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool); +} + +pub trait UpdateTorrentAsync { + fn update_torrent_with_peer_and_get_stats( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + ) -> impl std::future::Future + Send; +} + /// A data structure containing all the information about a torrent in the tracker. /// /// This is the tracker entry for a given torrent and contains the swarm data, diff --git a/src/core/torrent/repository_asyn.rs b/src/core/torrent/repository_asyn.rs index ac3724c3b..ad10f85b4 100644 --- a/src/core/torrent/repository_asyn.rs +++ b/src/core/torrent/repository_asyn.rs @@ -1,17 +1,11 @@ use std::sync::Arc; -use super::{EntryMutexStd, EntryMutexTokio}; +use super::{EntryMutexStd, EntryMutexTokio, UpdateTorrentAsync}; use crate::core::peer; use crate::core::torrent::{Entry, SwarmStats}; use crate::shared::bit_torrent::info_hash::InfoHash; pub trait RepositoryAsync: Default { - fn update_torrent_with_peer_and_get_stats( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - ) -> impl std::future::Future + Send; - fn get_torrents<'a>( &'a self, ) -> impl std::future::Future>> + Send @@ -28,8 +22,7 @@ pub trait RepositoryAsync: Default { pub struct RepositoryTokioRwLock { torrents: tokio::sync::RwLock>, } - -impl RepositoryAsync for RepositoryTokioRwLock { +impl UpdateTorrentAsync for RepositoryTokioRwLock { async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); @@ -60,7 +53,9 @@ impl RepositoryAsync 
for RepositoryTokioRwLock stats_updated, ) } +} +impl RepositoryAsync for RepositoryTokioRwLock { async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> where std::collections::BTreeMap: 'a, @@ -86,7 +81,7 @@ impl Default for RepositoryTokioRwLock { } } -impl RepositoryAsync for RepositoryTokioRwLock { +impl UpdateTorrentAsync for RepositoryTokioRwLock { async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); @@ -117,7 +112,9 @@ impl RepositoryAsync for RepositoryTokioRwLock { stats_updated, ) } +} +impl RepositoryAsync for RepositoryTokioRwLock { async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> where std::collections::BTreeMap: 'a, @@ -143,7 +140,7 @@ impl Default for RepositoryTokioRwLock { } } -impl RepositoryAsync for RepositoryTokioRwLock { +impl UpdateTorrentAsync for RepositoryTokioRwLock { async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { let (stats, stats_updated) = { let mut torrents_lock = self.torrents.write().await; @@ -163,7 +160,9 @@ impl RepositoryAsync for RepositoryTokioRwLock { stats_updated, ) } +} +impl RepositoryAsync for RepositoryTokioRwLock { async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> where std::collections::BTreeMap: 'a, diff --git a/src/core/torrent/repository_sync.rs b/src/core/torrent/repository_sync.rs index 76fc36fa2..3b01eb8be 100644 --- a/src/core/torrent/repository_sync.rs +++ b/src/core/torrent/repository_sync.rs @@ -1,13 +1,11 @@ use std::sync::{Arc, RwLock}; -use super::EntryMutexStd; +use super::{EntryMutexStd, EntryMutexTokio, UpdateTorrentAsync, UpdateTorrentSync}; use crate::core::peer; use crate::core::torrent::{Entry, SwarmStats}; use 
crate::shared::bit_torrent::info_hash::InfoHash; pub trait RepositorySync: Default { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool); - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> where std::collections::BTreeMap: 'a; @@ -21,7 +19,62 @@ pub struct RepositoryStdRwLock { torrents: std::sync::RwLock>, } -impl RepositorySync for RepositoryStdRwLock { +impl UpdateTorrentAsync for RepositoryStdRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); + + let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut(); + let entry = torrents_lock + .entry(*info_hash) + .or_insert(Arc::new(tokio::sync::Mutex::new(Entry::new()))); + entry.clone() + }; + + let (stats, stats_updated) = { + let mut torrent_entry_lock = torrent_entry.lock().await; + let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); + let stats = torrent_entry_lock.get_stats(); + + (stats, stats_updated) + }; + + ( + SwarmStats { + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, + }, + stats_updated, + ) + } +} +impl RepositorySync for RepositoryStdRwLock { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl Default for RepositoryStdRwLock { + fn default() -> Self { + Self { + torrents: RwLock::default(), + } + } +} +impl 
UpdateTorrentSync for RepositoryStdRwLock { fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); @@ -52,7 +105,8 @@ impl RepositorySync for RepositoryStdRwLock { stats_updated, ) } - +} +impl RepositorySync for RepositoryStdRwLock { fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> where std::collections::BTreeMap: 'a, @@ -76,7 +130,7 @@ impl Default for RepositoryStdRwLock { } } -impl RepositorySync for RepositoryStdRwLock { +impl UpdateTorrentSync for RepositoryStdRwLock { fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { let mut torrents = self.torrents.write().unwrap(); @@ -97,7 +151,8 @@ impl RepositorySync for RepositoryStdRwLock { stats_updated, ) } - +} +impl RepositorySync for RepositoryStdRwLock { fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> where std::collections::BTreeMap: 'a, From 1025125572b99504b0b882d7b54e7179d4ef25e9 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 10 Feb 2024 16:21:37 +0800 Subject: [PATCH 3/9] dev: create torrent repo trait and extract entry --- Cargo.lock | 4 +- cSpell.json | 1 + .../src/benches/asyn.rs | 19 +- .../src/benches/sync.rs | 12 +- .../src/benches/sync_asyn.rs | 12 +- .../torrent-repository-benchmarks/src/main.rs | 34 +- src/core/databases/mod.rs | 4 +- src/core/mod.rs | 154 ++----- src/core/peer.rs | 53 +++ src/core/services/torrent.rs | 55 ++- src/core/torrent/entry.rs | 241 +++++++++++ src/core/torrent/mod.rs | 219 ++-------- src/core/torrent/repository/mod.rs | 30 ++ src/core/torrent/repository/std_sync.rs | 365 +++++++++++++++++ src/core/torrent/repository/tokio_sync.rs | 378 ++++++++++++++++++ src/core/torrent/repository_asyn.rs | 187 --------- src/core/torrent/repository_sync.rs | 177 -------- 
.../apis/v1/context/torrent/handlers.rs | 2 +- src/servers/http/v1/responses/announce.rs | 11 +- src/servers/http/v1/services/announce.rs | 4 +- src/servers/udp/handlers.rs | 7 +- 21 files changed, 1222 insertions(+), 747 deletions(-) create mode 100644 src/core/torrent/entry.rs create mode 100644 src/core/torrent/repository/mod.rs create mode 100644 src/core/torrent/repository/std_sync.rs create mode 100644 src/core/torrent/repository/tokio_sync.rs delete mode 100644 src/core/torrent/repository_asyn.rs delete mode 100644 src/core/torrent/repository_sync.rs diff --git a/Cargo.lock b/Cargo.lock index 5722032b8..26fb919af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3086,9 +3086,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" diff --git a/cSpell.json b/cSpell.json index 6d8b68c92..da11cd29a 100644 --- a/cSpell.json +++ b/cSpell.json @@ -36,6 +36,7 @@ "Containerfile", "curr", "Cyberneering", + "dashmap", "datagram", "datetime", "debuginfo", diff --git a/packages/torrent-repository-benchmarks/src/benches/asyn.rs b/packages/torrent-repository-benchmarks/src/benches/asyn.rs index d36de9695..737a99f3c 100644 --- a/packages/torrent-repository-benchmarks/src/benches/asyn.rs +++ b/packages/torrent-repository-benchmarks/src/benches/asyn.rs @@ -3,8 +3,8 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository_asyn::{RepositoryAsync, RepositoryTokioRwLock}; -use torrust_tracker::core::torrent::UpdateTorrentAsync; +use torrust_tracker::core::torrent::repository::tokio_sync::RepositoryTokioRwLock; +use torrust_tracker::core::torrent::repository::UpdateTorrentAsync; use 
torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; @@ -12,7 +12,8 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, + T: Default, + RepositoryTokioRwLock: UpdateTorrentAsync + Default, { let mut results: Vec = Vec::with_capacity(samples); @@ -38,8 +39,8 @@ where // Add one torrent ten thousand times in parallel (depending on the set worker threads) pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, + T: Default + Send + Sync + 'static, + RepositoryTokioRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -88,8 +89,8 @@ where // Add ten thousand torrents in parallel (depending on the set worker threads) pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, + T: Default + Send + Sync + 'static, + RepositoryTokioRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -133,8 +134,8 @@ where // Async update ten thousand torrents in parallel (depending on the set worker threads) pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, + T: Default + Send + Sync + 'static, + RepositoryTokioRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); diff --git 
a/packages/torrent-repository-benchmarks/src/benches/sync.rs b/packages/torrent-repository-benchmarks/src/benches/sync.rs index 3dee93421..ea694a38c 100644 --- a/packages/torrent-repository-benchmarks/src/benches/sync.rs +++ b/packages/torrent-repository-benchmarks/src/benches/sync.rs @@ -3,8 +3,8 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository_sync::{RepositoryStdRwLock, RepositorySync}; -use torrust_tracker::core::torrent::UpdateTorrentSync; +use torrust_tracker::core::torrent::repository::std_sync::RepositoryStdRwLock; +use torrust_tracker::core::torrent::repository::UpdateTorrentSync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; @@ -14,7 +14,7 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste #[must_use] pub fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, + RepositoryStdRwLock: UpdateTorrentSync + Default, { let mut results: Vec = Vec::with_capacity(samples); @@ -39,7 +39,7 @@ where pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, + RepositoryStdRwLock: UpdateTorrentSync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -85,7 +85,7 @@ where pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, + RepositoryStdRwLock: UpdateTorrentSync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -128,7 +128,7 @@ where pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) 
where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, + RepositoryStdRwLock: UpdateTorrentSync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); diff --git a/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs b/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs index 11ce6ed0c..8efed9856 100644 --- a/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs +++ b/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs @@ -3,8 +3,8 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository_sync::{RepositoryStdRwLock, RepositorySync}; -use torrust_tracker::core::torrent::UpdateTorrentAsync; +use torrust_tracker::core::torrent::repository::std_sync::RepositoryStdRwLock; +use torrust_tracker::core::torrent::repository::UpdateTorrentAsync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; @@ -14,7 +14,7 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste #[must_use] pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, + RepositoryStdRwLock: UpdateTorrentAsync + Default, { let mut results: Vec = Vec::with_capacity(samples); @@ -41,7 +41,7 @@ where pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, + RepositoryStdRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -91,7 +91,7 @@ where pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, + 
RepositoryStdRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -136,7 +136,7 @@ where pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, + RepositoryStdRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); diff --git a/packages/torrent-repository-benchmarks/src/main.rs b/packages/torrent-repository-benchmarks/src/main.rs index 4a293b832..d7291afe2 100644 --- a/packages/torrent-repository-benchmarks/src/main.rs +++ b/packages/torrent-repository-benchmarks/src/main.rs @@ -1,7 +1,7 @@ use clap::Parser; use torrust_torrent_repository_benchmarks::args::Args; use torrust_torrent_repository_benchmarks::benches::{asyn, sync, sync_asyn}; -use torrust_tracker::core::torrent::{Entry, EntryMutexStd, EntryMutexTokio}; +use torrust_tracker::core::torrent::entry::{Entry, MutexStd, MutexTokio}; #[allow(clippy::too_many_lines)] #[allow(clippy::print_literal)] @@ -68,22 +68,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - sync::add_one_torrent::(1_000_000) + sync::add_one_torrent::(1_000_000) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -92,22 +92,22 @@ fn main() { println!( "{}: 
Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(sync_asyn::add_one_torrent::(1_000_000)) + rt.block_on(sync_asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync_asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync_asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync_asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -116,22 +116,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -140,22 +140,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) + 
rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); } } diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index b80b11987..b3dcdd48e 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -56,6 +56,8 @@ use self::error::Error; use crate::core::auth::{self, Key}; use crate::shared::bit_torrent::info_hash::InfoHash; +pub type PersistentTorrents = Vec<(InfoHash, u32)>; + struct Builder where T: Database, @@ -125,7 +127,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - async fn load_persistent_torrents(&self) -> Result, Error>; + async fn load_persistent_torrents(&self) -> Result; /// It saves the torrent metrics data into the database. /// diff --git a/src/core/mod.rs b/src/core/mod.rs index 56b30f955..b070f90db 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -102,11 +102,11 @@ //! //! pub struct AnnounceData { //! pub peers: Vec, -//! pub swarm_stats: SwarmStats, +//! pub swarm_stats: SwarmMetadata, //! pub policy: AnnouncePolicy, // the tracker announce policy. //! } //! -//! pub struct SwarmStats { +//! pub struct SwarmMetadata { //! pub completed: u32, // The number of peers that have ever completed downloading //! pub seeders: u32, // The number of active peers that have completed downloading (seeders) //! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) @@ -232,16 +232,11 @@ //! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) //! } //! -//! 
pub struct SwarmStats { -//! pub completed: u32, // The number of peers that have ever completed downloading -//! pub seeders: u32, // The number of active peers that have completed downloading (seeders) -//! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) -//! } //! ``` //! //! > **NOTICE**: that `complete` or `completed` peers are the peers that have completed downloading, but only the active ones are considered "seeders". //! -//! `SwarmStats` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmStats` +//! `SwarmMetadata` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmMetadata` //! is used for the rest of cases. //! //! Refer to [`torrent`] module for more details about these data structures. @@ -439,14 +434,13 @@ pub mod services; pub mod statistics; pub mod torrent; -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; use std::time::Duration; use derive_more::Constructor; -use futures::future::join_all; use log::debug; use tokio::sync::mpsc::error::SendError; use torrust_tracker_configuration::{AnnouncePolicy, Configuration}; @@ -455,10 +449,11 @@ use torrust_tracker_primitives::TrackerMode; use self::auth::Key; use self::error::Error; use self::peer::Peer; -use self::torrent::repository_asyn::{RepositoryAsync, RepositoryTokioRwLock}; -use self::torrent::{Entry, UpdateTorrentAsync}; +use self::torrent::entry::{Entry, ReadInfo, ReadPeers}; +use self::torrent::repository::tokio_sync::RepositoryTokioRwLock; +use self::torrent::repository::{Repository, UpdateTorrentAsync}; use crate::core::databases::Database; -use crate::core::torrent::{SwarmMetadata, SwarmStats}; +use crate::core::torrent::SwarmMetadata; use crate::shared::bit_torrent::info_hash::InfoHash; /// The 
maximum number of returned peers for a torrent. @@ -515,9 +510,9 @@ pub struct TrackerPolicy { pub struct AnnounceData { /// The list of peers that are downloading the same torrent. /// It excludes the peer that made the request. - pub peers: Vec, + pub peers: Vec>, /// Swarm statistics - pub stats: SwarmStats, + pub stats: SwarmMetadata, pub policy: AnnouncePolicy, } @@ -685,10 +680,8 @@ impl Tracker { /// It returns the data for a `scrape` response. async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { - let torrents = self.torrents.get_torrents().await; - - match torrents.get(info_hash) { - Some(torrent_entry) => torrent_entry.get_swarm_metadata(), + match self.torrents.get(info_hash).await { + Some(torrent_entry) => torrent_entry.get_stats(), None => SwarmMetadata::default(), } } @@ -704,47 +697,25 @@ impl Tracker { pub async fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; - let mut torrents = self.torrents.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(&info_hash) { - continue; - } - - let torrent_entry = torrent::Entry { - peers: BTreeMap::default(), - completed, - }; - - torrents.insert(info_hash, torrent_entry); - } + self.torrents.import_persistent(&persistent_torrents).await; Ok(()) } - async fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec { - let read_lock = self.torrents.get_torrents().await; - - match read_lock.get(info_hash) { + async fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec> { + match self.torrents.get(info_hash).await { None => vec![], - Some(entry) => entry - .get_peers_for_peer(peer, TORRENT_PEERS_LIMIT) - .into_iter() - .copied() - .collect(), + Some(entry) => entry.get_peers_for_peer(peer, Some(TORRENT_PEERS_LIMIT)), } } /// # Context: Tracker /// 
/// Get all torrent peers for a given torrent - pub async fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec { - let read_lock = self.torrents.get_torrents().await; - - match read_lock.get(info_hash) { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { + match self.torrents.get(info_hash).await { None => vec![], - Some(entry) => entry.get_peers(TORRENT_PEERS_LIMIT).into_iter().copied().collect(), + Some(entry) => entry.get_peers(Some(TORRENT_PEERS_LIMIT)), } } @@ -753,11 +724,15 @@ impl Tracker { /// needed for a `announce` request response. /// /// # Context: Tracker - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwarmStats { + pub async fn update_torrent_with_peer_and_get_stats( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + ) -> torrent::SwarmMetadata { // code-review: consider splitting the function in two (command and query segregation). // `update_torrent_with_peer` and `get_stats` - let (stats, stats_updated) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + let (stats_updated, stats) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer).await; if self.policy.persistent_torrent_completed_stat && stats_updated { let completed = stats.downloaded; @@ -777,71 +752,18 @@ impl Tracker { /// # Panics /// Panics if unable to get the torrent metrics. 
pub async fn get_torrents_metrics(&self) -> TorrentsMetrics { - let arc_torrents_metrics = Arc::new(tokio::sync::Mutex::new(TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 0, - torrents: 0, - })); - - let db = self.torrents.get_torrents().await.clone(); - - let futures = db - .values() - .map(|torrent_entry| { - let torrent_entry = torrent_entry.clone(); - let torrents_metrics = arc_torrents_metrics.clone(); - - async move { - tokio::spawn(async move { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - torrents_metrics.lock().await.seeders += u64::from(seeders); - torrents_metrics.lock().await.completed += u64::from(completed); - torrents_metrics.lock().await.leechers += u64::from(leechers); - torrents_metrics.lock().await.torrents += 1; - }) - .await - .expect("Error torrent_metrics spawn"); - } - }) - .collect::>(); - - join_all(futures).await; - - let torrents_metrics = Arc::try_unwrap(arc_torrents_metrics).expect("Could not unwrap arc_torrents_metrics"); - - torrents_metrics.into_inner() + self.torrents.get_metrics().await } /// Remove inactive peers and (optionally) peerless torrents /// /// # Context: Tracker pub async fn cleanup_torrents(&self) { - let mut torrents_lock = self.torrents.get_torrents_mut().await; - // If we don't need to remove torrents we will use the faster iter if self.policy.remove_peerless_torrents { - let mut cleaned_torrents_map: BTreeMap = BTreeMap::new(); - - for (info_hash, torrent_entry) in &mut *torrents_lock { - torrent_entry.remove_inactive_peers(self.policy.max_peer_timeout); - - if torrent_entry.peers.is_empty() { - continue; - } - - if self.policy.persistent_torrent_completed_stat && torrent_entry.completed == 0 { - continue; - } - - cleaned_torrents_map.insert(*info_hash, torrent_entry.clone()); - } - - *torrents_lock = cleaned_torrents_map; + self.torrents.remove_peerless_torrents(&self.policy).await; } else { - for torrent_entry in (*torrents_lock).values_mut() { - 
torrent_entry.remove_inactive_peers(self.policy.max_peer_timeout); - } + self.torrents.remove_inactive_peers(self.policy.max_peer_timeout).await; } } @@ -1093,6 +1015,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; + use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; @@ -1233,7 +1156,7 @@ mod tests { let peers = tracker.get_torrent_peers(&info_hash).await; - assert_eq!(peers, vec![peer]); + assert_eq!(peers, vec![Arc::new(peer)]); } #[tokio::test] @@ -1275,6 +1198,8 @@ mod tests { mod handling_an_announce_request { + use std::sync::Arc; + use crate::core::tests::the_tracker::{ peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2, }; @@ -1400,7 +1325,7 @@ mod tests { let mut peer = sample_peer_2(); let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; - assert_eq!(announce_data.peers, vec![previously_announced_peer]); + assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]); } mod it_should_update_the_swarm_stats_for_the_torrent { @@ -1755,7 +1680,7 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; - use crate::core::torrent::repository_asyn::RepositoryAsync; + use crate::core::torrent::repository::Repository; #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { @@ -1774,14 +1699,15 @@ mod tests { assert_eq!(swarm_stats.downloaded, 1); // Remove the newly updated torrent from memory - tracker.torrents.get_torrents_mut().await.remove(&info_hash); + tracker.torrents.remove(&info_hash).await; tracker.load_torrents_from_database().await.unwrap(); - let torrents = tracker.torrents.get_torrents().await; - assert!(torrents.contains_key(&info_hash)); - - let torrent_entry = 
torrents.get(&info_hash).unwrap(); + let torrent_entry = tracker + .torrents + .get(&info_hash) + .await + .expect("it should be able to get entry"); // It persists the number of completed peers. assert_eq!(torrent_entry.completed, 1); diff --git a/src/core/peer.rs b/src/core/peer.rs index 16aa1fe56..eb2b7b759 100644 --- a/src/core/peer.rs +++ b/src/core/peer.rs @@ -22,6 +22,7 @@ //! ``` use std::net::{IpAddr, SocketAddr}; use std::panic::Location; +use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde::Serialize; @@ -85,6 +86,58 @@ pub struct Peer { pub event: AnnounceEvent, } +pub trait ReadInfo { + fn is_seeder(&self) -> bool; + fn get_event(&self) -> AnnounceEvent; + fn get_id(&self) -> Id; + fn get_updated(&self) -> DurationSinceUnixEpoch; + fn get_address(&self) -> SocketAddr; +} + +impl ReadInfo for Peer { + fn is_seeder(&self) -> bool { + self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + } + + fn get_event(&self) -> AnnounceEvent { + self.event + } + + fn get_id(&self) -> Id { + self.peer_id + } + + fn get_updated(&self) -> DurationSinceUnixEpoch { + self.updated + } + + fn get_address(&self) -> SocketAddr { + self.peer_addr + } +} + +impl ReadInfo for Arc { + fn is_seeder(&self) -> bool { + self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + } + + fn get_event(&self) -> AnnounceEvent { + self.event + } + + fn get_id(&self) -> Id { + self.peer_id + } + + fn get_updated(&self) -> DurationSinceUnixEpoch { + self.updated + } + + fn get_address(&self) -> SocketAddr { + self.peer_addr + } +} + impl Peer { #[must_use] pub fn is_seeder(&self) -> bool { diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index eca6cbf3b..b265066f0 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -9,7 +9,8 @@ use std::sync::Arc; use serde::Deserialize; use crate::core::peer::Peer; -use crate::core::torrent::repository_asyn::RepositoryAsync; +use crate::core::torrent::entry::{self, 
ReadInfo}; +use crate::core::torrent::repository::Repository; use crate::core::Tracker; use crate::shared::bit_torrent::info_hash::InfoHash; @@ -94,41 +95,37 @@ impl Default for Pagination { /// It returns all the information the tracker has about one torrent in a [Info] struct. pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { - let db = tracker.torrents.get_torrents().await; - - let torrent_entry_option = db.get(info_hash); + let torrent_entry_option = tracker.torrents.get(info_hash).await; let torrent_entry = torrent_entry_option?; - let (seeders, completed, leechers) = torrent_entry.get_stats(); + let stats = entry::ReadInfo::get_stats(&torrent_entry); - let peers = torrent_entry.get_all_peers(); + let peers = entry::ReadPeers::get_peers(&torrent_entry, None); let peers = Some(peers.iter().map(|peer| (**peer)).collect()); Some(Info { info_hash: *info_hash, - seeders: u64::from(seeders), - completed: u64::from(completed), - leechers: u64::from(leechers), + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), peers, }) } /// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. 
-pub async fn get_torrents_page(tracker: Arc, pagination: &Pagination) -> Vec { - let db = tracker.torrents.get_torrents().await; - +pub async fn get_torrents_page(tracker: Arc, pagination: Option<&Pagination>) -> Vec { let mut basic_infos: Vec = vec![]; - for (info_hash, torrent_entry) in db.iter().skip(pagination.offset as usize).take(pagination.limit as usize) { - let (seeders, completed, leechers) = torrent_entry.get_stats(); + for (info_hash, torrent_entry) in tracker.torrents.get_paginated(pagination).await { + let stats = entry::ReadInfo::get_stats(&torrent_entry); basic_infos.push(BasicInfo { - info_hash: *info_hash, - seeders: u64::from(seeders), - completed: u64::from(completed), - leechers: u64::from(leechers), + info_hash, + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), }); } @@ -137,19 +134,15 @@ pub async fn get_torrents_page(tracker: Arc, pagination: &Pagination) - /// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. 
pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Vec { - let db = tracker.torrents.get_torrents().await; - let mut basic_infos: Vec = vec![]; for info_hash in info_hashes { - if let Some(entry) = db.get(info_hash) { - let (seeders, completed, leechers) = entry.get_stats(); - + if let Some(stats) = tracker.torrents.get(info_hash).await.map(|t| t.get_stats()) { basic_infos.push(BasicInfo { info_hash: *info_hash, - seeders: u64::from(seeders), - completed: u64::from(completed), - leechers: u64::from(leechers), + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), }); } } @@ -254,7 +247,7 @@ mod tests { async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let tracker = Arc::new(tracker_factory(&tracker_configuration())); - let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; assert_eq!(torrents, vec![]); } @@ -270,7 +263,7 @@ mod tests { .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) .await; - let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; assert_eq!( torrents, @@ -302,7 +295,7 @@ mod tests { let offset = 0; let limit = 1; - let torrents = get_torrents_page(tracker.clone(), &Pagination::new(offset, limit)).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); } @@ -326,7 +319,7 @@ mod tests { let offset = 1; let limit = 4000; - let torrents = get_torrents_page(tracker.clone(), &Pagination::new(offset, limit)).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); assert_eq!( @@ -356,7 +349,7 @@ mod tests { 
.update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) .await; - let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; assert_eq!( torrents, diff --git a/src/core/torrent/entry.rs b/src/core/torrent/entry.rs new file mode 100644 index 000000000..619cce9b3 --- /dev/null +++ b/src/core/torrent/entry.rs @@ -0,0 +1,241 @@ +use std::fmt::Debug; +use std::sync::Arc; +use std::time::Duration; + +use aquatic_udp_protocol::AnnounceEvent; +use serde::{Deserialize, Serialize}; + +use super::SwarmMetadata; +use crate::core::peer::{self, ReadInfo as _}; +use crate::core::TrackerPolicy; +use crate::shared::clock::{Current, TimeNow}; + +/// A data structure containing all the information about a torrent in the tracker. +/// +/// This is the tracker entry for a given torrent and contains the swarm data, +/// that's the list of all the peers trying to download the same torrent. +/// The tracker keeps one entry like this for every torrent. +#[derive(Serialize, Deserialize, Clone, Debug, Default)] +pub struct Entry { + /// The swarm: a network of peers that are all trying to download the torrent associated to this entry + #[serde(skip)] + pub peers: std::collections::BTreeMap>, + /// The number of peers that have ever completed downloading the torrent associated to this entry + pub completed: u32, +} + +pub type MutexStd = Arc>; +pub type MutexTokio = Arc>; + +pub trait ReadInfo { + /// It returns the swarm metadata (statistics) as a struct: + /// + /// `(seeders, completed, leechers)` + fn get_stats(&self) -> SwarmMetadata; + + /// Returns True if Still a Valid Entry according to the Tracker Policy + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; +} + +pub trait ReadPeers { + /// Get all swarm peers, optionally limiting the result. 
+ fn get_peers(&self, limit: Option) -> Vec>; + + /// It returns the list of peers for a given peer client, optionally limiting the + /// result. + /// + /// It filters out the input peer, typically because we want to return this + /// list of peers to that client peer. + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; +} + +pub trait ReadAsync { + /// Get all swarm peers, optionally limiting the result. + fn get_peers(&self, limit: Option) -> impl std::future::Future>> + Send; + + /// It returns the list of peers for a given peer client, optionally limiting the + /// result. + /// + /// It filters out the input peer, typically because we want to return this + /// list of peers to that client peer. + fn get_peers_for_peer( + &self, + client: &peer::Peer, + limit: Option, + ) -> impl std::future::Future>> + Send; +} + +pub trait Update { + /// It updates a peer and returns true if the number of complete downloads have increased. + /// + /// The number of peers that have complete downloading is synchronously updated when peers are updated. + /// That's the total torrent downloads counter. + fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool; + + // It preforms a combined operation of `insert_or_update_peer` and `get_stats`. 
+ fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata); + + /// It removes peer from the swarm that have not been updated for more than `max_peer_timeout` seconds + fn remove_inactive_peers(&mut self, max_peer_timeout: u32); +} + +pub trait UpdateSync { + fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool; + fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata); + fn remove_inactive_peers(&self, max_peer_timeout: u32); +} + +pub trait UpdateAsync { + fn insert_or_update_peer(&self, peer: &peer::Peer) -> impl std::future::Future + Send; + + fn insert_or_update_peer_and_get_stats( + &self, + peer: &peer::Peer, + ) -> impl std::future::Future + std::marker::Send; + + fn remove_inactive_peers(&self, max_peer_timeout: u32) -> impl std::future::Future + Send; +} + +impl ReadInfo for Entry { + #[allow(clippy::cast_possible_truncation)] + fn get_stats(&self) -> SwarmMetadata { + let complete: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; + let incomplete: u32 = self.peers.len() as u32 - complete; + + SwarmMetadata { + downloaded: self.completed, + complete, + incomplete, + } + } + + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { + if policy.persistent_torrent_completed_stat && self.completed > 0 { + return true; + } + + if policy.remove_peerless_torrents && self.peers.is_empty() { + return false; + } + + true + } +} + +impl ReadPeers for Entry { + fn get_peers(&self, limit: Option) -> Vec> { + match limit { + Some(limit) => self.peers.values().take(limit).cloned().collect(), + None => self.peers.values().cloned().collect(), + } + } + + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + match limit { + Some(limit) => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer.get_address() != client.get_address()) + // Limit the number of peers on the result + .take(limit) + .cloned() + 
.collect(), + None => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer.get_address() != client.get_address()) + .cloned() + .collect(), + } + } +} + +impl ReadPeers for MutexStd { + fn get_peers(&self, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers(limit) + } + + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers_for_peer(client, limit) + } +} + +impl ReadAsync for MutexTokio { + async fn get_peers(&self, limit: Option) -> Vec> { + self.lock().await.get_peers(limit) + } + + async fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + self.lock().await.get_peers_for_peer(client, limit) + } +} + +impl Update for Entry { + fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { + let mut did_torrent_stats_change: bool = false; + + match peer.get_event() { + AnnounceEvent::Stopped => { + drop(self.peers.remove(&peer.get_id())); + } + AnnounceEvent::Completed => { + let peer_old = self.peers.insert(peer.get_id(), Arc::new(*peer)); + // Don't count if peer was not previously known and not already completed. 
+ if peer_old.is_some_and(|p| p.event != AnnounceEvent::Completed) { + self.completed += 1; + did_torrent_stats_change = true; + } + } + _ => { + drop(self.peers.insert(peer.get_id(), Arc::new(*peer))); + } + } + + did_torrent_stats_change + } + + fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let changed = self.insert_or_update_peer(peer); + let stats = self.get_stats(); + (changed, stats) + } + + fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { + let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); + self.peers.retain(|_, peer| peer.get_updated() > current_cutoff); + } +} + +impl UpdateSync for MutexStd { + fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { + self.lock().expect("it should lock the entry").insert_or_update_peer(peer) + } + + fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.lock() + .expect("it should lock the entry") + .insert_or_update_peer_and_get_stats(peer) + } + + fn remove_inactive_peers(&self, max_peer_timeout: u32) { + self.lock() + .expect("it should lock the entry") + .remove_inactive_peers(max_peer_timeout); + } +} + +impl UpdateAsync for MutexTokio { + async fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { + self.lock().await.insert_or_update_peer(peer) + } + + async fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.lock().await.insert_or_update_peer_and_get_stats(peer) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + self.lock().await.remove_inactive_peers(max_peer_timeout); + } +} diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 49c1f61f8..608765cf8 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -27,49 +27,11 @@ //! 
- The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. //! Peer that don not have a full copy of the torrent data are called "leechers". //! -//! > **NOTICE**: that both [`SwarmMetadata`] and [`SwarmStats`] contain the same information. [`SwarmMetadata`] is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). -pub mod repository_asyn; -pub mod repository_sync; +//! > **NOTICE**: that both [`SwarmMetadata`] and [`SwarmMetadata`] contain the same information. [`SwarmMetadata`] is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). +pub mod entry; +pub mod repository; -use std::sync::Arc; -use std::time::Duration; - -use aquatic_udp_protocol::AnnounceEvent; use derive_more::Constructor; -use serde::{Deserialize, Serialize}; - -use super::peer::{self, Peer}; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::shared::clock::{Current, TimeNow}; - -pub trait UpdateTorrentSync { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool); -} - -pub trait UpdateTorrentAsync { - fn update_torrent_with_peer_and_get_stats( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - ) -> impl std::future::Future + Send; -} - -/// A data structure containing all the information about a torrent in the tracker. -/// -/// This is the tracker entry for a given torrent and contains the swarm data, -/// that's the list of all the peers trying to download the same torrent. -/// The tracker keeps one entry like this for every torrent. 
-#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct Entry { - /// The swarm: a network of peers that are all trying to download the torrent associated to this entry - #[serde(skip)] - pub peers: std::collections::BTreeMap, - /// The number of peers that have ever completed downloading the torrent associated to this entry - pub completed: u32, -} - -pub type EntryMutexTokio = Arc>; -pub type EntryMutexStd = Arc>; /// Swarm statistics for one torrent. /// Swarm metadata dictionary in the scrape response. @@ -92,122 +54,6 @@ impl SwarmMetadata { } } -/// [`SwarmStats`] has the same form as [`SwarmMetadata`] -pub type SwarmStats = SwarmMetadata; - -impl Entry { - #[must_use] - pub fn new() -> Entry { - Entry { - peers: std::collections::BTreeMap::new(), - completed: 0, - } - } - - /// It updates a peer and returns true if the number of complete downloads have increased. - /// - /// The number of peers that have complete downloading is synchronously updated when peers are updated. - /// That's the total torrent downloads counter. - pub fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { - let mut did_torrent_stats_change: bool = false; - - match peer.event { - AnnounceEvent::Stopped => { - let _: Option = self.peers.remove(&peer.peer_id); - } - AnnounceEvent::Completed => { - let peer_old = self.peers.insert(peer.peer_id, *peer); - // Don't count if peer was not previously known and not already completed. - if peer_old.is_some_and(|p| p.event != AnnounceEvent::Completed) { - self.completed += 1; - did_torrent_stats_change = true; - } - } - _ => { - let _: Option = self.peers.insert(peer.peer_id, *peer); - } - } - - did_torrent_stats_change - } - - /// Get all swarm peers. - #[must_use] - pub fn get_all_peers(&self) -> Vec<&peer::Peer> { - self.peers.values().collect() - } - - /// Get swarm peers, limiting the result. 
- #[must_use] - pub fn get_peers(&self, limit: usize) -> Vec<&peer::Peer> { - self.peers.values().take(limit).collect() - } - - /// It returns the list of peers for a given peer client. - /// - /// It filters out the input peer, typically because we want to return this - /// list of peers to that client peer. - #[must_use] - pub fn get_all_peers_for_peer(&self, client: &Peer) -> Vec<&peer::Peer> { - self.peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer.peer_addr != client.peer_addr) - .collect() - } - - /// It returns the list of peers for a given peer client, limiting the - /// result. - /// - /// It filters out the input peer, typically because we want to return this - /// list of peers to that client peer. - #[must_use] - pub fn get_peers_for_peer(&self, client: &Peer, limit: usize) -> Vec<&peer::Peer> { - self.peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer.peer_addr != client.peer_addr) - // Limit the number of peers on the result - .take(limit) - .collect() - } - - /// It returns the swarm metadata (statistics) as a tuple: - /// - /// `(seeders, completed, leechers)` - #[allow(clippy::cast_possible_truncation)] - #[must_use] - pub fn get_stats(&self) -> (u32, u32, u32) { - let seeders: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; - let leechers: u32 = self.peers.len() as u32 - seeders; - (seeders, self.completed, leechers) - } - - /// It returns the swarm metadata (statistics) as an struct - #[must_use] - pub fn get_swarm_metadata(&self) -> SwarmMetadata { - // code-review: consider using always this function instead of `get_stats`. 
- let (seeders, completed, leechers) = self.get_stats(); - SwarmMetadata { - complete: seeders, - downloaded: completed, - incomplete: leechers, - } - } - - /// It removes peer from the swarm that have not been updated for more than `max_peer_timeout` seconds - pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); - self.peers.retain(|_, peer| peer.updated > current_cutoff); - } -} - -impl Default for Entry { - fn default() -> Self { - Self::new() - } -} - #[cfg(test)] mod tests { @@ -215,11 +61,12 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::ops::Sub; + use std::sync::Arc; use std::time::Duration; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::core::torrent::Entry; + use crate::core::torrent::entry::{self, ReadInfo, ReadPeers, Update}; use crate::core::{peer, TORRENT_PEERS_LIMIT}; use crate::shared::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; @@ -291,59 +138,59 @@ mod tests { #[test] fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = Entry::new(); + let torrent_entry = entry::Entry::default(); - assert_eq!(torrent_entry.get_all_peers().len(), 0); + assert_eq!(torrent_entry.get_peers(None).len(), 0); } #[test] fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - assert_eq!(*torrent_entry.get_all_peers()[0], torrent_peer); - assert_eq!(torrent_entry.get_all_peers().len(), 1); + assert_eq!(*torrent_entry.get_peers(None)[0], torrent_peer); + assert_eq!(torrent_entry.get_peers(None).len(), 1); } #[test] fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = 
Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - assert_eq!(torrent_entry.get_all_peers(), vec![&torrent_peer]); + assert_eq!(torrent_entry.get_peers(None), vec![Arc::new(torrent_peer)]); } #[test] fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer torrent_peer.event = AnnounceEvent::Completed; // Update the peer torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - assert_eq!(torrent_entry.get_all_peers()[0].event, AnnounceEvent::Completed); + assert_eq!(torrent_entry.get_peers(None)[0].event, AnnounceEvent::Completed); } #[test] fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer torrent_peer.event = AnnounceEvent::Stopped; // Update the peer torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - assert_eq!(torrent_entry.get_all_peers().len(), 0); + assert_eq!(torrent_entry.get_peers(None).len(), 0); } #[test] fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -357,7 +204,7 @@ mod tests { #[test] fn 
torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( ) { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Add a peer that did not exist before in the entry @@ -369,20 +216,20 @@ mod tests { #[test] fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer // Get peers excluding the one we have just added - let peers = torrent_entry.get_all_peers_for_peer(&torrent_peer); + let peers = torrent_entry.get_peers_for_peer(&torrent_peer, None); assert_eq!(peers.len(), 0); } #[test] fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); @@ -399,7 +246,7 @@ mod tests { torrent_entry.insert_or_update_peer(&torrent_peer_2); // Get peers for peer 1 - let peers = torrent_entry.get_all_peers_for_peer(&torrent_peer_1); + let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1, None); // The peer 2 using the same IP but different port should be included assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); @@ -416,7 +263,7 @@ mod tests { #[test] fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); // We add one more peer than the scrape limit for 
 peer_number in 1..=74 + 1 { @@ -426,35 +273,35 @@ torrent_entry.insert_or_update_peer(&torrent_peer); } - let peers = torrent_entry.get_peers(TORRENT_PEERS_LIMIT); + let peers = torrent_entry.get_peers(Some(TORRENT_PEERS_LIMIT)); assert_eq!(peers.len(), 74); } #[test] fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_seeder = a_torrent_seeder(); torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder - assert_eq!(torrent_entry.get_stats().0, 1); + assert_eq!(torrent_entry.get_stats().complete, 1); } #[test] fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_leecher = a_torrent_leecher(); torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher - assert_eq!(torrent_entry.get_stats().2, 1); + assert_eq!(torrent_entry.get_stats().incomplete, 1); } #[test] fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( ) { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -462,28 +309,28 @@ torrent_peer.event = AnnounceEvent::Completed; torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer - let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().1; + let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().downloaded; assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); } #[test] fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = Entry::new(); + let mut 
torrent_entry = entry::Entry::default(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Announce "Completed" torrent download event. // It's the first event announced from this peer. torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); // Add the peer - let number_of_peers_with_completed_torrent = torrent_entry.get_stats().1; + let number_of_peers_with_completed_torrent = torrent_entry.get_stats().downloaded; assert_eq!(number_of_peers_with_completed_torrent, 0); } #[test] fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let timeout = 120u32; diff --git a/src/core/torrent/repository/mod.rs b/src/core/torrent/repository/mod.rs new file mode 100644 index 000000000..3af33aebe --- /dev/null +++ b/src/core/torrent/repository/mod.rs @@ -0,0 +1,30 @@ +use super::SwarmMetadata; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +pub mod std_sync; +pub mod tokio_sync; + +pub trait Repository: Default { + fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; + fn get_metrics(&self) -> impl std::future::Future + Send; + fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> impl std::future::Future + Send; + fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; + fn remove_inactive_peers(&self, max_peer_timeout: u32) -> impl std::future::Future + Send; + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; +} + +pub trait UpdateTorrentSync { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: 
&peer::Peer) -> (bool, SwarmMetadata); +} + +pub trait UpdateTorrentAsync { + fn update_torrent_with_peer_and_get_stats( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + ) -> impl std::future::Future + Send; +} diff --git a/src/core/torrent/repository/std_sync.rs b/src/core/torrent/repository/std_sync.rs new file mode 100644 index 000000000..ba38db6ed --- /dev/null +++ b/src/core/torrent/repository/std_sync.rs @@ -0,0 +1,365 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::executor::block_on; +use futures::future::join_all; + +use super::{Repository, UpdateTorrentAsync, UpdateTorrentSync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{Entry, ReadInfo, Update, UpdateAsync, UpdateSync}; +use crate::core::torrent::{entry, SwarmMetadata}; +use crate::core::{peer, TorrentsMetrics}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +#[derive(Default)] +pub struct RepositoryStdRwLock { + torrents: std::sync::RwLock>, +} + +impl RepositoryStdRwLock { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl UpdateTorrentAsync for RepositoryStdRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); + + let torrent_entry = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut(); + let entry = 
torrents_lock.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + torrent_entry.insert_or_update_peer_and_get_stats(peer).await + } +} +impl Repository for RepositoryStdRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents(); + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.lock().await.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + block_on(join_all(futures)); + + *metrics.blocking_lock_owned() + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexTokio::new( + Entry { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let db = self.get_torrents(); + + let futures = db.values().map(|e| { + let 
entry = e.clone(); + tokio::spawn(async move { entry.lock().await.remove_inactive_peers(max_peer_timeout) }) + }); + + block_on(join_all(futures)); + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + } +} + +impl RepositoryStdRwLock { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl UpdateTorrentSync for RepositoryStdRwLock { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); + + let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut(); + let entry = torrents_lock + .entry(*info_hash) + .or_insert(Arc::new(std::sync::Mutex::new(Entry::default()))); + entry.clone() + }; + + torrent_entry.insert_or_update_peer_and_get_stats(peer) + } +} +impl Repository for RepositoryStdRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents(); + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.lock().expect("it should lock the entry").get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + 
metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + block_on(join_all(futures)); + + *metrics.blocking_lock_owned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexStd)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexStd::new( + Entry { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let db = self.get_torrents(); + + let futures = db.values().map(|e| { + let entry = e.clone(); + tokio::spawn(async move { + entry + .lock() + .expect("it should get lock for entry") + .remove_inactive_peers(max_peer_timeout); + }) + }); + + block_on(join_all(futures)); + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + } +} + +impl RepositoryStdRwLock { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("it should get the read lock") + } + + 
fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("it should get the write lock") + } +} + +impl UpdateTorrentSync for RepositoryStdRwLock { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let mut torrents = self.torrents.write().unwrap(); + + let torrent_entry = match torrents.entry(*info_hash) { + std::collections::btree_map::Entry::Vacant(vacant) => vacant.insert(Entry::default()), + std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), + }; + + torrent_entry.insert_or_update_peer_and_get_stats(peer) + } +} +impl Repository for RepositoryStdRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents(); + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + block_on(join_all(futures)); + + *metrics.blocking_lock_owned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, Entry)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, 
completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = Entry { + peers: BTreeMap::default(), + completed: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let mut db = self.get_torrents_mut(); + + drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/tokio_sync.rs b/src/core/torrent/repository/tokio_sync.rs new file mode 100644 index 000000000..83edf1188 --- /dev/null +++ b/src/core/torrent/repository/tokio_sync.rs @@ -0,0 +1,378 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentAsync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{Entry, ReadInfo, Update, UpdateAsync, UpdateSync}; +use crate::core::torrent::{entry, SwarmMetadata}; +use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +#[derive(Default)] +pub struct RepositoryTokioRwLock { + torrents: tokio::sync::RwLock>, +} + +impl RepositoryTokioRwLock { + async fn get_torrents<'a>( + &'a self, + ) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync 
for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_torrent; + { + let db = self.torrents.read().await; + maybe_torrent = db.get(info_hash).cloned(); + } + + let torrent = if let Some(torrent) = maybe_torrent { + torrent + } else { + let entry = entry::MutexTokio::default(); + let mut db = self.torrents.write().await; + db.insert(*info_hash, entry.clone()); + entry + }; + + torrent.insert_or_update_peer_and_get_stats(peer).await + } +} + +impl Repository for RepositoryTokioRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents().await; + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.lock().await.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + join_all(futures).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let 
entry = entry::MutexTokio::new( + Entry { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let db = self.get_torrents().await; + + let futures = db.values().map(|e| { + let entry = e.clone(); + tokio::spawn(async move { entry.lock().await.remove_inactive_peers(max_peer_timeout) }) + }); + + join_all(futures).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + } +} + +impl RepositoryTokioRwLock { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_torrent; + { + let db = self.torrents.read().await; + maybe_torrent = db.get(info_hash).cloned(); + } + + let torrent = if let Some(torrent) = maybe_torrent { + torrent + } else { + let entry = entry::MutexStd::default(); + let mut db = self.torrents.write().await; + db.insert(*info_hash, entry.clone()); + entry + }; + + torrent.insert_or_update_peer_and_get_stats(peer) + } +} + +impl Repository for RepositoryTokioRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, 
entry::MutexStd)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents().await; + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.lock().expect("it should lock the entry").get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + join_all(futures).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexStd::new( + Entry { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let db = self.get_torrents().await; + + let futures = db.values().map(|e| { + let entry = e.clone(); + tokio::spawn(async move { + entry + .lock() + .expect("it should get lock for entry") + .remove_inactive_peers(max_peer_timeout); + }) + }); + + join_all(futures).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + 
db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + } +} + +impl RepositoryTokioRwLock { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>(&'a self) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let mut db = self.torrents.write().await; + + let torrent = db.entry(*info_hash).or_insert(Entry::default()); + + torrent.insert_or_update_peer_and_get_stats(peer) + } +} + +impl Repository for RepositoryTokioRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, Entry)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents().await; + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + join_all(futures).await; + + *metrics.lock_owned().await + } + + async fn 
import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = Entry { + peers: BTreeMap::default(), + completed: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let mut db = self.get_torrents_mut().await; + + drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository_asyn.rs b/src/core/torrent/repository_asyn.rs deleted file mode 100644 index ad10f85b4..000000000 --- a/src/core/torrent/repository_asyn.rs +++ /dev/null @@ -1,187 +0,0 @@ -use std::sync::Arc; - -use super::{EntryMutexStd, EntryMutexTokio, UpdateTorrentAsync}; -use crate::core::peer; -use crate::core::torrent::{Entry, SwarmStats}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -pub trait RepositoryAsync: Default { - fn get_torrents<'a>( - &'a self, - ) -> impl std::future::Future>> + Send - where - std::collections::BTreeMap: 'a; - - fn get_torrents_mut<'a>( - &'a self, - ) -> impl std::future::Future>> + Send - where - std::collections::BTreeMap: 'a; -} - -pub struct RepositoryTokioRwLock { - torrents: tokio::sync::RwLock>, -} -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); - - let torrent_entry: Arc> = if let 
Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut().await; - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(tokio::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().await; - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsync for RepositoryTokioRwLock { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl Default for RepositoryTokioRwLock { - fn default() -> Self { - Self { - torrents: tokio::sync::RwLock::default(), - } - } -} - -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut().await; - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().unwrap(); - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = 
torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsync for RepositoryTokioRwLock { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl Default for RepositoryTokioRwLock { - fn default() -> Self { - Self { - torrents: tokio::sync::RwLock::default(), - } - } -} - -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let (stats, stats_updated) = { - let mut torrents_lock = self.torrents.write().await; - let torrent_entry = torrents_lock.entry(*info_hash).or_insert(Entry::new()); - let stats_updated = torrent_entry.insert_or_update_peer(peer); - let stats = torrent_entry.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsync for RepositoryTokioRwLock { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>(&'a self) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl Default for RepositoryTokioRwLock { - fn default() -> Self { - Self { - torrents: tokio::sync::RwLock::default(), - } - } -} diff --git a/src/core/torrent/repository_sync.rs b/src/core/torrent/repository_sync.rs 
deleted file mode 100644 index 3b01eb8be..000000000 --- a/src/core/torrent/repository_sync.rs +++ /dev/null @@ -1,177 +0,0 @@ -use std::sync::{Arc, RwLock}; - -use super::{EntryMutexStd, EntryMutexTokio, UpdateTorrentAsync, UpdateTorrentSync}; -use crate::core::peer; -use crate::core::torrent::{Entry, SwarmStats}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -pub trait RepositorySync: Default { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a; - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a; -} - -pub struct RepositoryStdRwLock { - torrents: std::sync::RwLock>, -} - -impl UpdateTorrentAsync for RepositoryStdRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut(); - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(tokio::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().await; - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} -impl RepositorySync for RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> 
std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Default for RepositoryStdRwLock { - fn default() -> Self { - Self { - torrents: RwLock::default(), - } - } -} -impl UpdateTorrentSync for RepositoryStdRwLock { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut(); - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().unwrap(); - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} -impl RepositorySync for RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Default for RepositoryStdRwLock { - fn default() -> Self { - Self { - torrents: RwLock::default(), - } - } -} - -impl UpdateTorrentSync for RepositoryStdRwLock { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let 
mut torrents = self.torrents.write().unwrap(); - - let torrent_entry = match torrents.entry(*info_hash) { - std::collections::btree_map::Entry::Vacant(vacant) => vacant.insert(Entry::new()), - std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), - }; - - let stats_updated = torrent_entry.insert_or_update_peer(peer); - let stats = torrent_entry.get_stats(); - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} -impl RepositorySync for RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Default for RepositoryStdRwLock { - fn default() -> Self { - Self { - torrents: RwLock::default(), - } - } -} diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index dcb92dec3..999580da7 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -82,7 +82,7 @@ pub async fn get_torrents_handler(State(tracker): State>, paginatio torrent_list_response( &get_torrents_page( tracker.clone(), - &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), + Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), ) .await, ) diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index b1b474ea9..619632ae4 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -79,7 +79,7 @@ impl From for Normal { incomplete: data.stats.incomplete.into(), interval: 
data.policy.interval.into(), min_interval: data.policy.interval_min.into(), - peers: data.peers.into_iter().collect(), + peers: data.peers.iter().map(AsRef::as_ref).copied().collect(), } } } @@ -116,7 +116,7 @@ pub struct Compact { impl From for Compact { fn from(data: AnnounceData) -> Self { - let compact_peers: Vec = data.peers.into_iter().collect(); + let compact_peers: Vec = data.peers.iter().map(AsRef::as_ref).copied().collect(); let (peers, peers6): (Vec>, Vec>) = compact_peers.into_iter().collect(); @@ -313,12 +313,13 @@ impl FromIterator> for CompactPeersEncoded { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; use torrust_tracker_configuration::AnnouncePolicy; use crate::core::peer::fixture::PeerBuilder; use crate::core::peer::Id; - use crate::core::torrent::SwarmStats; + use crate::core::torrent::SwarmMetadata; use crate::core::AnnounceData; use crate::servers::http::v1::responses::announce::{Announce, Compact, Normal, Response}; @@ -350,8 +351,8 @@ mod tests { )) .build(); - let peers = vec![peer_ipv4, peer_ipv6]; - let stats = SwarmStats::new(333, 333, 444); + let peers = vec![Arc::new(peer_ipv4), Arc::new(peer_ipv6)]; + let stats = SwarmMetadata::new(333, 333, 444); AnnounceData::new(peers, stats, policy) } diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index b791defd7..b53697eed 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -98,7 +98,7 @@ mod tests { use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::core::peer::Peer; - use crate::core::torrent::SwarmStats; + use crate::core::torrent::SwarmMetadata; use crate::core::{statistics, AnnounceData, Tracker}; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; @@ -113,7 +113,7 @@ mod tests { let expected_announce_data = 
AnnounceData { peers: vec![], - stats: SwarmStats { + stats: SwarmMetadata { downloaded: 0, complete: 1, incomplete: 0, diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 91a371a7b..f42e11424 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -642,7 +642,7 @@ mod tests { .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip), client_port)) .into(); - assert_eq!(peers[0], expected_peer); + assert_eq!(peers[0], Arc::new(expected_peer)); } #[tokio::test] @@ -770,6 +770,7 @@ mod tests { mod from_a_loopback_ip { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; @@ -809,7 +810,7 @@ mod tests { .with_peer_addr(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) .into(); - assert_eq!(peers[0], expected_peer); + assert_eq!(peers[0], Arc::new(expected_peer)); } } } @@ -863,7 +864,7 @@ mod tests { .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - assert_eq!(peers[0], expected_peer); + assert_eq!(peers[0], Arc::new(expected_peer)); } #[tokio::test] From 4b2d6fefc2840b93cb23c9fa7a3fdd34a4ee0f9b Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 11 Feb 2024 13:30:39 +0800 Subject: [PATCH 4/9] dev: extract repo implementations and benchmarks --- .../src/benches/asyn.rs | 30 +- .../src/benches/mod.rs | 1 - .../src/benches/sync.rs | 29 +- .../src/benches/sync_asyn.rs | 185 --------- .../torrent-repository-benchmarks/src/main.rs | 89 +++-- src/core/mod.rs | 17 +- src/core/services/torrent.rs | 8 +- src/core/torrent/entry.rs | 74 +++- src/core/torrent/mod.rs | 41 +- src/core/torrent/repository/mod.rs | 20 +- src/core/torrent/repository/rw_lock_std.rs | 122 ++++++ .../repository/rw_lock_std_mutex_std.rs | 143 +++++++ .../repository/rw_lock_std_mutex_tokio.rs | 141 +++++++ src/core/torrent/repository/rw_lock_tokio.rs | 124 ++++++ .../repository/rw_lock_tokio_mutex_std.rs | 146 
+++++++ .../repository/rw_lock_tokio_mutex_tokio.rs | 144 +++++++ src/core/torrent/repository/std_sync.rs | 365 ----------------- src/core/torrent/repository/tokio_sync.rs | 378 ------------------ 18 files changed, 1015 insertions(+), 1042 deletions(-) delete mode 100644 packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs create mode 100644 src/core/torrent/repository/rw_lock_std.rs create mode 100644 src/core/torrent/repository/rw_lock_std_mutex_std.rs create mode 100644 src/core/torrent/repository/rw_lock_std_mutex_tokio.rs create mode 100644 src/core/torrent/repository/rw_lock_tokio.rs create mode 100644 src/core/torrent/repository/rw_lock_tokio_mutex_std.rs create mode 100644 src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs delete mode 100644 src/core/torrent/repository/std_sync.rs delete mode 100644 src/core/torrent/repository/tokio_sync.rs diff --git a/packages/torrent-repository-benchmarks/src/benches/asyn.rs b/packages/torrent-repository-benchmarks/src/benches/asyn.rs index 737a99f3c..dffd31682 100644 --- a/packages/torrent-repository-benchmarks/src/benches/asyn.rs +++ b/packages/torrent-repository-benchmarks/src/benches/asyn.rs @@ -1,24 +1,21 @@ -use std::sync::Arc; use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::tokio_sync::RepositoryTokioRwLock; use torrust_tracker::core::torrent::repository::UpdateTorrentAsync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; -pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) +pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) where - T: Default, - RepositoryTokioRwLock: UpdateTorrentAsync + Default, + V: UpdateTorrentAsync + Default, { let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let 
torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); + let torrent_repository = V::default(); let info_hash = InfoHash([0; 20]); @@ -37,16 +34,15 @@ where } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Default + Send + Sync + 'static, - RepositoryTokioRwLock: UpdateTorrentAsync + Default, + V: UpdateTorrentAsync + Default + Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); + let torrent_repository = V::default(); let info_hash: &'static InfoHash = &InfoHash([0; 20]); let handles = FuturesUnordered::new(); @@ -87,16 +83,15 @@ where } // Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Default + Send + Sync + 'static, - RepositoryTokioRwLock: UpdateTorrentAsync + Default, + V: UpdateTorrentAsync + Default + Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); + let torrent_repository = V::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); @@ -132,16 +127,15 @@ where } // Async update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn update_multiple_torrents_in_parallel(runtime: 
&tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Default + Send + Sync + 'static, - RepositoryTokioRwLock: UpdateTorrentAsync + Default, + V: UpdateTorrentAsync + Default + Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); + let torrent_repository = V::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); diff --git a/packages/torrent-repository-benchmarks/src/benches/mod.rs b/packages/torrent-repository-benchmarks/src/benches/mod.rs index 7450f4bcc..1026aa4bf 100644 --- a/packages/torrent-repository-benchmarks/src/benches/mod.rs +++ b/packages/torrent-repository-benchmarks/src/benches/mod.rs @@ -1,4 +1,3 @@ pub mod asyn; pub mod sync; -pub mod sync_asyn; pub mod utils; diff --git a/packages/torrent-repository-benchmarks/src/benches/sync.rs b/packages/torrent-repository-benchmarks/src/benches/sync.rs index ea694a38c..04385bc55 100644 --- a/packages/torrent-repository-benchmarks/src/benches/sync.rs +++ b/packages/torrent-repository-benchmarks/src/benches/sync.rs @@ -1,9 +1,7 @@ -use std::sync::Arc; use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::std_sync::RepositoryStdRwLock; use torrust_tracker::core::torrent::repository::UpdateTorrentSync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; @@ -12,14 +10,14 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste // Simply add one torrent #[must_use] -pub fn add_one_torrent(samples: usize) -> (Duration, Duration) +pub fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryStdRwLock: UpdateTorrentSync + Default, + V: 
UpdateTorrentSync + Default, { let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + let torrent_repository = V::default(); let info_hash = InfoHash([0; 20]); @@ -36,16 +34,15 @@ where } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryStdRwLock: UpdateTorrentSync + Default, + V: UpdateTorrentSync + Default + Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + let torrent_repository = V::default(); let info_hash: &'static InfoHash = &InfoHash([0; 20]); let handles = FuturesUnordered::new(); @@ -82,16 +79,15 @@ where } // Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryStdRwLock: UpdateTorrentSync + Default, + V: UpdateTorrentSync + Default + Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + let torrent_repository = V::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); @@ -125,16 +121,15 @@ where } // Update ten thousand torrents in parallel (depending on the set worker threads) -pub 
async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryStdRwLock: UpdateTorrentSync + Default, + V: UpdateTorrentSync + Default + Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + let torrent_repository = V::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); diff --git a/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs b/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs deleted file mode 100644 index 8efed9856..000000000 --- a/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs +++ /dev/null @@ -1,185 +0,0 @@ -use std::sync::Arc; -use std::time::Duration; - -use clap::Parser; -use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::std_sync::RepositoryStdRwLock; -use torrust_tracker::core::torrent::repository::UpdateTorrentAsync; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - -use crate::args::Args; -use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; - -// Simply add one torrent -#[must_use] -pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) -where - RepositoryStdRwLock: UpdateTorrentAsync + Default, -{ - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); - - let info_hash = InfoHash([0; 20]); - - let start_time = std::time::Instant::now(); - - torrent_repository - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; - - let 
result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} - -// Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) -where - T: Send + Sync + 'static, - RepositoryStdRwLock: UpdateTorrentAsync + Default, -{ - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); - let info_hash: &'static InfoHash = &InfoHash([0; 20]); - let handles = FuturesUnordered::new(); - - // Add the torrent/peer to the torrent repository - torrent_repository - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; - - let start_time = std::time::Instant::now(); - - for _ in 0..10_000 { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - let result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} - -// Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) -where - T: Send + Sync + 'static, - RepositoryStdRwLock: UpdateTorrentAsync + Default, -{ - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = 
Arc::new(RepositoryStdRwLock::::default()); - let info_hashes = generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); - - let start_time = std::time::Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - let result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} - -// Update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) -where - T: Send + Sync + 'static, - RepositoryStdRwLock: UpdateTorrentAsync + Default, -{ - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); - let info_hashes = generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); - - // Add the torrents/peers to the torrent repository - for info_hash in &info_hashes { - torrent_repository - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; - } - - let start_time = std::time::Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while 
start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - let result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} diff --git a/packages/torrent-repository-benchmarks/src/main.rs b/packages/torrent-repository-benchmarks/src/main.rs index d7291afe2..b935cea43 100644 --- a/packages/torrent-repository-benchmarks/src/main.rs +++ b/packages/torrent-repository-benchmarks/src/main.rs @@ -1,7 +1,12 @@ +use std::sync::Arc; + use clap::Parser; use torrust_torrent_repository_benchmarks::args::Args; -use torrust_torrent_repository_benchmarks::benches::{asyn, sync, sync_asyn}; -use torrust_tracker::core::torrent::entry::{Entry, MutexStd, MutexTokio}; +use torrust_torrent_repository_benchmarks::benches::{asyn, sync}; +use torrust_tracker::core::torrent::{ + TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, + TorrentsRwLockTokioMutexTokio, +}; #[allow(clippy::too_many_lines)] #[allow(clippy::print_literal)] @@ -15,147 +20,167 @@ fn main() { .build() .unwrap(); - println!("tokio::sync::RwLock>"); + println!("TorrentsRwLockTokio"); println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::>(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::>(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::>(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + 
rt.block_on(asyn::update_multiple_torrents_in_parallel::>( + &rt, 10 + )) ); if let Some(true) = args.compare { println!(); - println!("std::sync::RwLock>"); + println!("TorrentsRwLockStd"); println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - sync::add_one_torrent::(1_000_000) + sync::add_one_torrent::>(1_000_000) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(sync::update_one_torrent_in_parallel::>(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::add_multiple_torrents_in_parallel::>(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::update_multiple_torrents_in_parallel::>(&rt, 10)) ); println!(); - println!("std::sync::RwLock>>>"); + println!("TorrentsRwLockStdMutexStd"); println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - sync::add_one_torrent::(1_000_000) + sync::add_one_torrent::>(1_000_000) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(sync::update_one_torrent_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::add_multiple_torrents_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::update_multiple_torrents_in_parallel::>( + &rt, 10 + )) ); println!(); - println!("std::sync::RwLock>>>"); + println!("TorrentsRwLockStdMutexTokio"); println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - 
rt.block_on(sync_asyn::add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::>(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync_asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync_asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync_asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::>(&rt, 10)) ); println!(); - println!("tokio::sync::RwLock>>>"); + println!("TorrentsRwLockTokioMutexStd"); println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::>(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::>(&rt, 10)) ); println!(); - println!("tokio::sync::RwLock>>>"); + println!("TorrentsRwLockTokioMutexTokio"); println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::>(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - 
rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::>(&rt, 10)) ); } } diff --git a/src/core/mod.rs b/src/core/mod.rs index b070f90db..15d7b9c39 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -449,9 +449,9 @@ use torrust_tracker_primitives::TrackerMode; use self::auth::Key; use self::error::Error; use self::peer::Peer; -use self::torrent::entry::{Entry, ReadInfo, ReadPeers}; -use self::torrent::repository::tokio_sync::RepositoryTokioRwLock; -use self::torrent::repository::{Repository, UpdateTorrentAsync}; +use self::torrent::entry::{ReadInfo, ReadPeers}; +use self::torrent::repository::{Repository, UpdateTorrentSync}; +use self::torrent::Torrents; use crate::core::databases::Database; use crate::core::torrent::SwarmMetadata; use crate::shared::bit_torrent::info_hash::InfoHash; @@ -477,7 +477,7 @@ pub struct Tracker { policy: TrackerPolicy, keys: tokio::sync::RwLock>, whitelist: tokio::sync::RwLock>, - pub torrents: Arc>, + pub torrents: Arc, stats_event_sender: Option>, stats_repository: statistics::Repo, external_ip: Option, @@ -575,7 +575,7 @@ impl Tracker { mode, keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), - torrents: Arc::new(RepositoryTokioRwLock::::default()), + torrents: Arc::default(), stats_event_sender, stats_repository, database, @@ -732,7 +732,7 @@ impl Tracker { // code-review: consider splitting the function in two (command and query segregation). 
// `update_torrent_with_peer` and `get_stats` - let (stats_updated, stats) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + let (stats_updated, stats) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer); if self.policy.persistent_torrent_completed_stat && stats_updated { let completed = stats.downloaded; @@ -1680,6 +1680,7 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; + use crate::core::torrent::entry::ReadInfo; use crate::core::torrent::repository::Repository; #[tokio::test] @@ -1710,10 +1711,10 @@ mod tests { .expect("it should be able to get entry"); // It persists the number of completed peers. - assert_eq!(torrent_entry.completed, 1); + assert_eq!(torrent_entry.get_stats().downloaded, 1); // It does not persist the peers - assert!(torrent_entry.peers.is_empty()); + assert!(torrent_entry.peers_is_empty()); } } } diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index b265066f0..78dab12c4 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -9,7 +9,7 @@ use std::sync::Arc; use serde::Deserialize; use crate::core::peer::Peer; -use crate::core::torrent::entry::{self, ReadInfo}; +use crate::core::torrent::entry::{ReadInfo, ReadPeers}; use crate::core::torrent::repository::Repository; use crate::core::Tracker; use crate::shared::bit_torrent::info_hash::InfoHash; @@ -99,9 +99,9 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op let torrent_entry = torrent_entry_option?; - let stats = entry::ReadInfo::get_stats(&torrent_entry); + let stats = torrent_entry.get_stats(); - let peers = entry::ReadPeers::get_peers(&torrent_entry, None); + let peers = torrent_entry.get_peers(None); let peers = Some(peers.iter().map(|peer| (**peer)).collect()); @@ -119,7 +119,7 @@ pub async fn get_torrents_page(tracker: Arc, pagination: 
Option<&Pagina let mut basic_infos: Vec = vec![]; for (info_hash, torrent_entry) in tracker.torrents.get_paginated(pagination).await { - let stats = entry::ReadInfo::get_stats(&torrent_entry); + let stats = torrent_entry.get_stats(); basic_infos.push(BasicInfo { info_hash, diff --git a/src/core/torrent/entry.rs b/src/core/torrent/entry.rs index 619cce9b3..815abd4fb 100644 --- a/src/core/torrent/entry.rs +++ b/src/core/torrent/entry.rs @@ -19,11 +19,11 @@ use crate::shared::clock::{Current, TimeNow}; pub struct Entry { /// The swarm: a network of peers that are all trying to download the torrent associated to this entry #[serde(skip)] - pub peers: std::collections::BTreeMap>, + pub(crate) peers: std::collections::BTreeMap>, /// The number of peers that have ever completed downloading the torrent associated to this entry - pub completed: u32, + pub(crate) completed: u32, } - +pub type Single = Entry; pub type MutexStd = Arc>; pub type MutexTokio = Arc>; @@ -35,6 +35,23 @@ pub trait ReadInfo { /// Returns True if Still a Valid Entry according to the Tracker Policy fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; + + /// Returns True if the Peers is Empty + fn peers_is_empty(&self) -> bool; +} + +/// Same as [`ReadInfo`], but async. +pub trait ReadInfoAsync { + /// It returns the swarm metadata (statistics) as a struct: + /// + /// `(seeders, completed, leechers)` + fn get_stats(&self) -> impl std::future::Future + Send; + + /// Returns True if Still a Valid Entry according to the Tracker Policy + fn is_not_zombie(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; + + /// Returns True if the Peers is Empty + fn peers_is_empty(&self) -> impl std::future::Future + Send; } pub trait ReadPeers { @@ -49,15 +66,10 @@ pub trait ReadPeers { fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; } -pub trait ReadAsync { - /// Get all swarm peers, optionally limiting the result. +/// Same as [`ReadPeers`], but async. 
+pub trait ReadPeersAsync { fn get_peers(&self, limit: Option) -> impl std::future::Future>> + Send; - /// It returns the list of peers for a given peer client, optionally limiting the - /// result. - /// - /// It filters out the input peer, typically because we want to return this - /// list of peers to that client peer. fn get_peers_for_peer( &self, client: &peer::Peer, @@ -79,12 +91,14 @@ pub trait Update { fn remove_inactive_peers(&mut self, max_peer_timeout: u32); } +/// Same as [`Update`], except not `mut`. pub trait UpdateSync { fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool; fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata); fn remove_inactive_peers(&self, max_peer_timeout: u32); } +/// Same as [`Update`], except not `mut` and async. pub trait UpdateAsync { fn insert_or_update_peer(&self, peer: &peer::Peer) -> impl std::future::Future + Send; @@ -96,7 +110,7 @@ pub trait UpdateAsync { fn remove_inactive_peers(&self, max_peer_timeout: u32) -> impl std::future::Future + Send; } -impl ReadInfo for Entry { +impl ReadInfo for Single { #[allow(clippy::cast_possible_truncation)] fn get_stats(&self) -> SwarmMetadata { let complete: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; @@ -120,9 +134,41 @@ impl ReadInfo for Entry { true } + + fn peers_is_empty(&self) -> bool { + self.peers.is_empty() + } +} + +impl ReadInfo for MutexStd { + fn get_stats(&self) -> SwarmMetadata { + self.lock().expect("it should get a lock").get_stats() + } + + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { + self.lock().expect("it should get a lock").is_not_zombie(policy) + } + + fn peers_is_empty(&self) -> bool { + self.lock().expect("it should get a lock").peers_is_empty() + } +} + +impl ReadInfoAsync for MutexTokio { + async fn get_stats(&self) -> SwarmMetadata { + self.lock().await.get_stats() + } + + async fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { + 
self.lock().await.is_not_zombie(policy) + } + + async fn peers_is_empty(&self) -> bool { + self.lock().await.peers_is_empty() + } } -impl ReadPeers for Entry { +impl ReadPeers for Single { fn get_peers(&self, limit: Option) -> Vec> { match limit { Some(limit) => self.peers.values().take(limit).cloned().collect(), @@ -162,7 +208,7 @@ impl ReadPeers for MutexStd { } } -impl ReadAsync for MutexTokio { +impl ReadPeersAsync for MutexTokio { async fn get_peers(&self, limit: Option) -> Vec> { self.lock().await.get_peers(limit) } @@ -172,7 +218,7 @@ impl ReadAsync for MutexTokio { } } -impl Update for Entry { +impl Update for Single { fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { let mut did_torrent_stats_change: bool = false; diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 608765cf8..bfe068337 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -11,8 +11,6 @@ //! That's the most valuable information the peer want to get from the tracker, because it allows them to //! start downloading torrent from those peers. //! -//! > **NOTICE**: that both swarm data (torrent entries) and swarm metadata (aggregate counters) are related to only one torrent. -//! //! The "swarm metadata" contains aggregate data derived from the torrent entries. There two types of data: //! //! - For **active peers**: metrics related to the current active peers in the swarm. @@ -33,6 +31,15 @@ pub mod repository; use derive_more::Constructor; +pub type Torrents = TorrentsRwLockStdMutexStd; // Currently Used + +pub type TorrentsRwLockStd = repository::RwLockStd; +pub type TorrentsRwLockStdMutexStd = repository::RwLockStd; +pub type TorrentsRwLockStdMutexTokio = repository::RwLockStd; +pub type TorrentsRwLockTokio = repository::RwLockTokio; +pub type TorrentsRwLockTokioMutexStd = repository::RwLockTokio; +pub type TorrentsRwLockTokioMutexTokio = repository::RwLockTokio; + /// Swarm statistics for one torrent. 
/// Swarm metadata dictionary in the scrape response. /// @@ -138,14 +145,14 @@ mod tests { #[test] fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = entry::Entry::default(); + let torrent_entry = entry::Single::default(); assert_eq!(torrent_entry.get_peers(None).len(), 0); } #[test] fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -156,7 +163,7 @@ mod tests { #[test] fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -166,7 +173,7 @@ mod tests { #[test] fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -178,7 +185,7 @@ mod tests { #[test] fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -190,7 +197,7 @@ mod tests { #[test] fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // 
Add the peer @@ -204,7 +211,7 @@ mod tests { #[test] fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( ) { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Add a peer that did not exist before in the entry @@ -216,7 +223,7 @@ mod tests { #[test] fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer @@ -229,7 +236,7 @@ mod tests { #[test] fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); @@ -263,7 +270,7 @@ mod tests { #[test] fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { @@ -280,7 +287,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let torrent_seeder = a_torrent_seeder(); torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder @@ -290,7 +297,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut 
torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let torrent_leecher = a_torrent_leecher(); torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher @@ -301,7 +308,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( ) { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -316,7 +323,7 @@ mod tests { #[test] fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Announce "Completed" torrent download event. 
@@ -330,7 +337,7 @@ mod tests { #[test] fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let timeout = 120u32; diff --git a/src/core/torrent/repository/mod.rs b/src/core/torrent/repository/mod.rs index 3af33aebe..1c4ce8ae9 100644 --- a/src/core/torrent/repository/mod.rs +++ b/src/core/torrent/repository/mod.rs @@ -4,10 +4,14 @@ use crate::core::services::torrent::Pagination; use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; use crate::shared::bit_torrent::info_hash::InfoHash; -pub mod std_sync; -pub mod tokio_sync; +pub mod rw_lock_std; +pub mod rw_lock_std_mutex_std; +pub mod rw_lock_std_mutex_tokio; +pub mod rw_lock_tokio; +pub mod rw_lock_tokio_mutex_std; +pub mod rw_lock_tokio_mutex_tokio; -pub trait Repository: Default { +pub trait Repository: Default + 'static { fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; fn get_metrics(&self) -> impl std::future::Future + Send; fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; @@ -28,3 +32,13 @@ pub trait UpdateTorrentAsync { peer: &peer::Peer, ) -> impl std::future::Future + Send; } + +#[derive(Default)] +pub struct RwLockTokio { + torrents: tokio::sync::RwLock>, +} + +#[derive(Default)] +pub struct RwLockStd { + torrents: std::sync::RwLock>, +} diff --git a/src/core/torrent/repository/rw_lock_std.rs b/src/core/torrent/repository/rw_lock_std.rs new file mode 100644 index 000000000..9b3915bcb --- /dev/null +++ b/src/core/torrent/repository/rw_lock_std.rs @@ -0,0 +1,122 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentSync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{self, ReadInfo, Update}; +use crate::core::torrent::{SwarmMetadata, 
TorrentsRwLockStd}; +use crate::core::{peer, TorrentsMetrics}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +impl TorrentsRwLockStd { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("it should get the read lock") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("it should get the write lock") + } +} + +impl UpdateTorrentSync for TorrentsRwLockStd { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let mut db = self.get_torrents_mut(); + + let entry = db.entry(*info_hash).or_insert(entry::Single::default()); + + entry.insert_or_update_peer_and_get_stats(peer) + } +} + +impl UpdateTorrentSync for Arc { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer) + } +} + +impl Repository for TorrentsRwLockStd { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let metrics: Arc> = Arc::default(); + + let mut handles = Vec::>::default(); + + for e in self.get_torrents().values() { + let entry = e.clone(); + let metrics = metrics.clone(); + handles.push(tokio::task::spawn(async move { + let stats = entry.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + })); + } + + join_all(handles).await; + + *metrics.lock_owned().await + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, 
entry::Single)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = entry::Single { + peers: BTreeMap::default(), + completed: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let mut db = self.get_torrents_mut(); + + drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/rw_lock_std_mutex_std.rs b/src/core/torrent/repository/rw_lock_std_mutex_std.rs new file mode 100644 index 000000000..5a9a38f77 --- /dev/null +++ b/src/core/torrent/repository/rw_lock_std_mutex_std.rs @@ -0,0 +1,143 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentSync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{ReadInfo, Update, UpdateSync}; +use crate::core::torrent::{entry, SwarmMetadata, TorrentsRwLockStdMutexStd}; +use crate::core::{peer, TorrentsMetrics}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +impl TorrentsRwLockStdMutexStd { + fn get_torrents<'a>(&'a 
self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl UpdateTorrentSync for TorrentsRwLockStdMutexStd { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer) + } +} + +impl UpdateTorrentSync for Arc { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer) + } +} + +impl Repository for TorrentsRwLockStdMutexStd { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let metrics: Arc> = Arc::default(); + + // todo:: replace with a ring buffer + let mut handles = Vec::>::default(); + + for e in self.get_torrents().values() { + let entry = e.clone(); + let metrics = metrics.clone(); + handles.push(tokio::task::spawn(async move { + let stats = entry.lock().expect("it should get the lock").get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + })); + } + + join_all(handles).await; + + *metrics.lock_owned().await + } + + async fn 
get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexStd)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexStd::new( + entry::Single { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + // todo:: replace with a ring buffer + let mut handles = Vec::>::default(); + + for e in self.get_torrents().values() { + let entry = e.clone(); + handles.push(tokio::task::spawn(async move { + entry + .lock() + .expect("it should get lock for entry") + .remove_inactive_peers(max_peer_timeout); + })); + } + + join_all(handles).await; + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/rw_lock_std_mutex_tokio.rs b/src/core/torrent/repository/rw_lock_std_mutex_tokio.rs new file mode 100644 index 000000000..1feb41e3e --- /dev/null +++ b/src/core/torrent/repository/rw_lock_std_mutex_tokio.rs @@ -0,0 +1,141 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentAsync}; +use 
crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{ReadInfo, Update, UpdateAsync}; +use crate::core::torrent::{entry, SwarmMetadata, TorrentsRwLockStdMutexTokio}; +use crate::core::{peer, TorrentsMetrics}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +impl TorrentsRwLockStdMutexTokio { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl UpdateTorrentAsync for TorrentsRwLockStdMutexTokio { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer).await + } +} + +impl UpdateTorrentAsync for Arc { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await + } +} + +impl Repository for TorrentsRwLockStdMutexTokio { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + 
.map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let metrics: Arc> = Arc::default(); + + // todo:: replace with a ring buffer + let mut handles = Vec::>::default(); + + for e in self.get_torrents().values() { + let entry = e.clone(); + let metrics = metrics.clone(); + handles.push(tokio::task::spawn(async move { + let stats = entry.lock().await.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + })); + } + + join_all(handles).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexTokio::new( + entry::Single { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + // todo:: replace with a ring buffer + + let mut handles = Vec::>::default(); + + for e in self.get_torrents().values() { + let entry = e.clone(); + handles.push(tokio::task::spawn(async move { + entry.lock().await.remove_inactive_peers(max_peer_timeout); + })); + } + + join_all(handles).await; + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/rw_lock_tokio.rs 
b/src/core/torrent/repository/rw_lock_tokio.rs new file mode 100644 index 000000000..3d633a837 --- /dev/null +++ b/src/core/torrent/repository/rw_lock_tokio.rs @@ -0,0 +1,124 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentAsync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{self, ReadInfo, Update}; +use crate::core::torrent::{SwarmMetadata, TorrentsRwLockTokio}; +use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +impl TorrentsRwLockTokio { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync for TorrentsRwLockTokio { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let mut db = self.get_torrents_mut().await; + + let entry = db.entry(*info_hash).or_insert(entry::Single::default()); + + entry.insert_or_update_peer_and_get_stats(peer) + } +} + +impl UpdateTorrentAsync for Arc { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await + } +} + +impl Repository for TorrentsRwLockTokio { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::Single)> { + let db = self.get_torrents().await; + + match pagination { + 
Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let metrics: Arc> = Arc::default(); + + let mut handles = Vec::>::default(); + + for e in self.get_torrents().await.values() { + let entry = e.clone(); + let metrics = metrics.clone(); + handles.push(tokio::task::spawn(async move { + let stats = entry.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + })); + } + + join_all(handles).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = entry::Single { + peers: BTreeMap::default(), + completed: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let mut db = self.get_torrents_mut().await; + + drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/rw_lock_tokio_mutex_std.rs b/src/core/torrent/repository/rw_lock_tokio_mutex_std.rs new file mode 100644 index 000000000..3888c40b0 --- /dev/null +++ 
b/src/core/torrent/repository/rw_lock_tokio_mutex_std.rs @@ -0,0 +1,146 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentAsync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{ReadInfo, Update, UpdateSync}; +use crate::core::torrent::{entry, SwarmMetadata, TorrentsRwLockTokioMutexStd}; +use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +impl TorrentsRwLockTokioMutexStd { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync for TorrentsRwLockTokioMutexStd { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut().await; + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer) + } +} + +impl UpdateTorrentAsync for Arc { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await + } +} + +impl Repository for TorrentsRwLockTokioMutexStd { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, 
entry::MutexStd)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let metrics: Arc> = Arc::default(); + + // todo:: replace with a ring buffer + + let mut handles = Vec::>::default(); + + for e in self.get_torrents().await.values() { + let entry = e.clone(); + let metrics = metrics.clone(); + handles.push(tokio::task::spawn(async move { + let stats = entry.lock().expect("it should get a lock").get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + })); + } + + join_all(handles).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexStd::new( + entry::Single { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + // todo:: replace with a ring buffer + let mut handles = Vec::>::default(); + + for e in self.get_torrents().await.values() { + let entry = e.clone(); + handles.push(tokio::task::spawn(async move { + entry + .lock() + .expect("it should get lock for entry") + .remove_inactive_peers(max_peer_timeout); + })); + } + + 
join_all(handles).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs b/src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs new file mode 100644 index 000000000..49e08d90c --- /dev/null +++ b/src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs @@ -0,0 +1,144 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentAsync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{self, ReadInfo, Update, UpdateAsync}; +use crate::core::torrent::{SwarmMetadata, TorrentsRwLockTokioMutexTokio}; +use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +impl TorrentsRwLockTokioMutexTokio { + async fn get_torrents<'a>( + &'a self, + ) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync for TorrentsRwLockTokioMutexTokio { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut().await; + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer).await + } +} + +impl UpdateTorrentAsync for Arc { + async 
fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await + } +} + +impl Repository for TorrentsRwLockTokioMutexTokio { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let metrics: Arc> = Arc::default(); + + // todo:: replace with a ring buffer + let mut handles = Vec::>::default(); + + for e in self.get_torrents().await.values() { + let entry = e.clone(); + let metrics = metrics.clone(); + handles.push(tokio::task::spawn(async move { + let stats = entry.lock().await.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + })); + } + + join_all(handles).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexTokio::new( + entry::Single { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + 
db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + // todo:: replace with a ring buffer + let mut handles = Vec::>::default(); + + for e in self.get_torrents().await.values() { + let entry = e.clone(); + handles.push(tokio::task::spawn(async move { + entry.lock().await.remove_inactive_peers(max_peer_timeout); + })); + } + + join_all(handles).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/std_sync.rs b/src/core/torrent/repository/std_sync.rs deleted file mode 100644 index ba38db6ed..000000000 --- a/src/core/torrent/repository/std_sync.rs +++ /dev/null @@ -1,365 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use futures::executor::block_on; -use futures::future::join_all; - -use super::{Repository, UpdateTorrentAsync, UpdateTorrentSync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::torrent::entry::{Entry, ReadInfo, Update, UpdateAsync, UpdateSync}; -use crate::core::torrent::{entry, SwarmMetadata}; -use crate::core::{peer, TorrentsMetrics}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -#[derive(Default)] -pub struct RepositoryStdRwLock { - torrents: std::sync::RwLock>, -} - -impl RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl UpdateTorrentAsync for RepositoryStdRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: 
&InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); - - let torrent_entry = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut(); - let entry = torrents_lock.entry(*info_hash).or_insert(Arc::default()); - entry.clone() - }; - - torrent_entry.insert_or_update_peer_and_get_stats(peer).await - } -} -impl Repository for RepositoryStdRwLock { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let db = self.get_torrents(); - let metrics: Arc> = Arc::default(); - - let futures = db.values().map(|e| { - let metrics = metrics.clone(); - let entry = e.clone(); - - tokio::spawn(async move { - let stats = entry.lock().await.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - }) - }); - - block_on(join_all(futures)); - - *metrics.blocking_lock_owned() - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut db = self.get_torrents_mut(); - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if db.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexTokio::new( - Entry { - peers: BTreeMap::default(), - completed: 
*completed, - } - .into(), - ); - - db.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let db = self.get_torrents(); - - let futures = db.values().map(|e| { - let entry = e.clone(); - tokio::spawn(async move { entry.lock().await.remove_inactive_peers(max_peer_timeout) }) - }); - - block_on(join_all(futures)); - } - - async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { - let mut db = self.get_torrents_mut(); - - db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); - } -} - -impl RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl UpdateTorrentSync for RepositoryStdRwLock { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut(); - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(std::sync::Mutex::new(Entry::default()))); - entry.clone() - }; - - torrent_entry.insert_or_update_peer_and_get_stats(peer) - } -} -impl Repository for RepositoryStdRwLock { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let db = 
self.get_torrents(); - let metrics: Arc> = Arc::default(); - - let futures = db.values().map(|e| { - let metrics = metrics.clone(); - let entry = e.clone(); - - tokio::spawn(async move { - let stats = entry.lock().expect("it should lock the entry").get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - }) - }); - - block_on(join_all(futures)); - - *metrics.blocking_lock_owned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexStd)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut(); - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexStd::new( - Entry { - peers: BTreeMap::default(), - completed: *completed, - } - .into(), - ); - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let db = self.get_torrents(); - - let futures = db.values().map(|e| { - let entry = e.clone(); - tokio::spawn(async move { - entry - .lock() - .expect("it should get lock for entry") - .remove_inactive_peers(max_peer_timeout); - }) - }); - - block_on(join_all(futures)); - } - - async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { - let mut db = self.get_torrents_mut(); - - 
db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); - } -} - -impl RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("it should get the read lock") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("it should get the write lock") - } -} - -impl UpdateTorrentSync for RepositoryStdRwLock { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let mut torrents = self.torrents.write().unwrap(); - - let torrent_entry = match torrents.entry(*info_hash) { - std::collections::btree_map::Entry::Vacant(vacant) => vacant.insert(Entry::default()), - std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), - }; - - torrent_entry.insert_or_update_peer_and_get_stats(peer) - } -} -impl Repository for RepositoryStdRwLock { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let db = self.get_torrents(); - let metrics: Arc> = Arc::default(); - - let futures = db.values().map(|e| { - let metrics = metrics.clone(); - let entry = e.clone(); - - tokio::spawn(async move { - let stats = entry.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - }) - }); - - block_on(join_all(futures)); - - *metrics.blocking_lock_owned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, Entry)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - 
.skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut(); - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = Entry { - peers: BTreeMap::default(), - completed: *completed, - }; - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let mut db = self.get_torrents_mut(); - - drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); - } - - async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { - let mut db = self.get_torrents_mut(); - - db.retain(|_, e| e.is_not_zombie(policy)); - } -} diff --git a/src/core/torrent/repository/tokio_sync.rs b/src/core/torrent/repository/tokio_sync.rs deleted file mode 100644 index 83edf1188..000000000 --- a/src/core/torrent/repository/tokio_sync.rs +++ /dev/null @@ -1,378 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use futures::future::join_all; - -use super::{Repository, UpdateTorrentAsync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::torrent::entry::{Entry, ReadInfo, Update, UpdateAsync, UpdateSync}; -use crate::core::torrent::{entry, SwarmMetadata}; -use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -#[derive(Default)] -pub struct RepositoryTokioRwLock { - torrents: tokio::sync::RwLock>, -} - -impl RepositoryTokioRwLock { - async fn get_torrents<'a>( - &'a self, - ) -> 
tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let maybe_torrent; - { - let db = self.torrents.read().await; - maybe_torrent = db.get(info_hash).cloned(); - } - - let torrent = if let Some(torrent) = maybe_torrent { - torrent - } else { - let entry = entry::MutexTokio::default(); - let mut db = self.torrents.write().await; - db.insert(*info_hash, entry.clone()); - entry - }; - - torrent.insert_or_update_peer_and_get_stats(peer).await - } -} - -impl Repository for RepositoryTokioRwLock { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { - let db = self.get_torrents().await; - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let db = self.get_torrents().await; - let metrics: Arc> = Arc::default(); - - let futures = db.values().map(|e| { - let metrics = metrics.clone(); - let entry = e.clone(); - - tokio::spawn(async move { - let stats = entry.lock().await.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 
1; - }) - }); - - join_all(futures).await; - - *metrics.lock_owned().await - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut db = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if db.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexTokio::new( - Entry { - peers: BTreeMap::default(), - completed: *completed, - } - .into(), - ); - - db.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let db = self.get_torrents().await; - - let futures = db.values().map(|e| { - let entry = e.clone(); - tokio::spawn(async move { entry.lock().await.remove_inactive_peers(max_peer_timeout) }) - }); - - join_all(futures).await; - } - - async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); - } -} - -impl RepositoryTokioRwLock { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let maybe_torrent; - { - let db = self.torrents.read().await; - maybe_torrent = db.get(info_hash).cloned(); - } - - let torrent = if let Some(torrent) = maybe_torrent { - torrent - } else { - let entry = entry::MutexStd::default(); - let mut db = self.torrents.write().await; 
- db.insert(*info_hash, entry.clone()); - entry - }; - - torrent.insert_or_update_peer_and_get_stats(peer) - } -} - -impl Repository for RepositoryTokioRwLock { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexStd)> { - let db = self.get_torrents().await; - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let db = self.get_torrents().await; - let metrics: Arc> = Arc::default(); - - let futures = db.values().map(|e| { - let metrics = metrics.clone(); - let entry = e.clone(); - - tokio::spawn(async move { - let stats = entry.lock().expect("it should lock the entry").get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - }) - }); - - join_all(futures).await; - - *metrics.lock_owned().await - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexStd::new( - Entry { - peers: BTreeMap::default(), - completed: *completed, - } - .into(), - ); - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let db = self.get_torrents().await; - - let 
futures = db.values().map(|e| { - let entry = e.clone(); - tokio::spawn(async move { - entry - .lock() - .expect("it should get lock for entry") - .remove_inactive_peers(max_peer_timeout); - }) - }); - - join_all(futures).await; - } - - async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); - } -} - -impl RepositoryTokioRwLock { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>(&'a self) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let mut db = self.torrents.write().await; - - let torrent = db.entry(*info_hash).or_insert(Entry::default()); - - torrent.insert_or_update_peer_and_get_stats(peer) - } -} - -impl Repository for RepositoryTokioRwLock { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, Entry)> { - let db = self.get_torrents().await; - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let db = self.get_torrents().await; - let metrics: Arc> = Arc::default(); - - let futures = db.values().map(|e| { - let metrics = metrics.clone(); - let entry = e.clone(); - - tokio::spawn(async move { - 
let stats = entry.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - }) - }); - - join_all(futures).await; - - *metrics.lock_owned().await - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = Entry { - peers: BTreeMap::default(), - completed: *completed, - }; - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let mut db = self.get_torrents_mut().await; - - drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); - } - - async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - db.retain(|_, e| e.is_not_zombie(policy)); - } -} From 9a43815d00ed13e6867d07b64881d4a5391e64aa Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 13 Feb 2024 15:23:57 +0800 Subject: [PATCH 5/9] dev: move torrent/repository to packages --- .vscode/settings.json | 1 + Cargo.lock | 31 +- Cargo.toml | 6 +- cSpell.json | 4 + packages/configuration/src/lib.rs | 7 + packages/primitives/Cargo.toml | 4 + packages/primitives/src/announce_event.rs | 43 +++ packages/primitives/src/info_hash.rs | 165 ++++++++++ packages/primitives/src/lib.rs | 37 +++ packages/primitives/src/pagination.rs | 50 +++ {src/core => packages/primitives/src}/peer.rs | 186 +++++------- packages/primitives/src/swarm_metadata.rs | 22 ++ packages/primitives/src/torrent_metrics.rs | 25 ++ 
.../torrent-repository-benchmarks/Cargo.toml | 22 -- .../torrent-repository-benchmarks/README.md | 1 - .../torrent-repository-benchmarks/src/lib.rs | 2 - packages/torrent-repository/Cargo.toml | 24 ++ packages/torrent-repository/README.md | 11 + .../benches/helpers}/args.rs | 0 .../benches/helpers}/asyn.rs | 34 ++- .../benches/helpers}/mod.rs | 1 + .../benches/helpers}/sync.rs | 34 ++- .../benches/helpers}/utils.rs | 8 +- .../benches/repository-benchmark.rs} | 65 ++-- packages/torrent-repository/src/entry/mod.rs | 98 ++++++ .../torrent-repository/src/entry/mutex_std.rs | 50 +++ .../src/entry/mutex_tokio.rs | 46 +++ .../torrent-repository/src/entry/single.rs | 105 +++++++ packages/torrent-repository/src/lib.rs | 15 + .../torrent-repository/src}/repository/mod.rs | 32 +- .../src/repository/rw_lock_std.rs | 112 +++++++ .../src/repository/rw_lock_std_mutex_std.rs | 123 ++++++++ .../src/repository/rw_lock_std_mutex_tokio.rs | 131 ++++++++ .../src/repository/rw_lock_tokio.rs | 113 +++++++ .../src/repository/rw_lock_tokio_mutex_std.rs | 124 ++++++++ .../repository/rw_lock_tokio_mutex_tokio.rs | 124 ++++++++ src/bootstrap/jobs/torrent_cleanup.rs | 2 +- src/console/clients/checker/checks/http.rs | 2 +- src/console/clients/checker/checks/udp.rs | 2 +- src/console/clients/http/app.rs | 2 +- src/console/clients/udp/app.rs | 2 +- src/console/clients/udp/checker.rs | 2 +- src/core/auth.rs | 5 +- src/core/databases/mod.rs | 11 +- src/core/databases/mysql.rs | 2 +- src/core/databases/sqlite.rs | 5 +- src/core/error.rs | 3 +- src/core/mod.rs | 169 ++++++----- src/core/peer_tests.rs | 43 +++ src/core/services/statistics/mod.rs | 9 +- src/core/services/torrent.rs | 77 +---- src/core/torrent/entry.rs | 287 ------------------ src/core/torrent/mod.rs | 88 ++---- src/core/torrent/repository/rw_lock_std.rs | 122 -------- .../repository/rw_lock_std_mutex_std.rs | 143 --------- .../repository/rw_lock_std_mutex_tokio.rs | 141 --------- src/core/torrent/repository/rw_lock_tokio.rs | 124 
-------- .../repository/rw_lock_tokio_mutex_std.rs | 146 --------- .../repository/rw_lock_tokio_mutex_tokio.rs | 144 --------- .../apis/v1/context/stats/resources.rs | 3 +- .../apis/v1/context/torrent/handlers.rs | 5 +- .../apis/v1/context/torrent/resources/peer.rs | 46 ++- .../v1/context/torrent/resources/torrent.rs | 16 +- .../apis/v1/context/whitelist/handlers.rs | 2 +- src/servers/http/mod.rs | 12 +- src/servers/http/percent_encoding.rs | 23 +- .../http/v1/extractors/announce_request.rs | 5 +- .../http/v1/extractors/scrape_request.rs | 3 +- src/servers/http/v1/handlers/announce.rs | 30 +- src/servers/http/v1/handlers/scrape.rs | 2 +- src/servers/http/v1/requests/announce.rs | 17 +- src/servers/http/v1/requests/scrape.rs | 7 +- src/servers/http/v1/responses/announce.rs | 41 +-- src/servers/http/v1/responses/scrape.rs | 9 +- src/servers/http/v1/services/announce.rs | 21 +- src/servers/http/v1/services/scrape.rs | 13 +- src/servers/udp/handlers.rs | 30 +- src/servers/udp/logging.rs | 2 +- src/servers/udp/mod.rs | 14 +- src/servers/udp/peer_builder.rs | 20 +- src/servers/udp/request.rs | 3 +- src/shared/bit_torrent/common.rs | 21 -- src/shared/bit_torrent/info_hash.rs | 169 ++--------- .../tracker/http/client/requests/announce.rs | 8 +- .../tracker/http/client/requests/scrape.rs | 3 +- .../tracker/http/client/responses/announce.rs | 7 +- src/shared/clock/mod.rs | 4 +- src/shared/clock/time_extent.rs | 12 +- src/shared/clock/utils.rs | 10 - tests/servers/api/environment.rs | 6 +- .../servers/api/v1/contract/context/stats.rs | 4 +- .../api/v1/contract/context/torrent.rs | 4 +- .../api/v1/contract/context/whitelist.rs | 2 +- tests/servers/http/environment.rs | 6 +- tests/servers/http/requests/announce.rs | 8 +- tests/servers/http/requests/scrape.rs | 2 +- tests/servers/http/responses/announce.rs | 6 +- tests/servers/http/v1/contract.rs | 36 +-- tests/servers/udp/environment.rs | 6 +- 99 files changed, 2072 insertions(+), 1953 deletions(-) create mode 100644 
packages/primitives/src/announce_event.rs create mode 100644 packages/primitives/src/info_hash.rs create mode 100644 packages/primitives/src/pagination.rs rename {src/core => packages/primitives/src}/peer.rs (83%) create mode 100644 packages/primitives/src/swarm_metadata.rs create mode 100644 packages/primitives/src/torrent_metrics.rs delete mode 100644 packages/torrent-repository-benchmarks/Cargo.toml delete mode 100644 packages/torrent-repository-benchmarks/README.md delete mode 100644 packages/torrent-repository-benchmarks/src/lib.rs create mode 100644 packages/torrent-repository/Cargo.toml create mode 100644 packages/torrent-repository/README.md rename packages/{torrent-repository-benchmarks/src => torrent-repository/benches/helpers}/args.rs (100%) rename packages/{torrent-repository-benchmarks/src/benches => torrent-repository/benches/helpers}/asyn.rs (81%) rename packages/{torrent-repository-benchmarks/src/benches => torrent-repository/benches/helpers}/mod.rs (75%) rename packages/{torrent-repository-benchmarks/src/benches => torrent-repository/benches/helpers}/sync.rs (81%) rename packages/{torrent-repository-benchmarks/src/benches => torrent-repository/benches/helpers}/utils.rs (89%) rename packages/{torrent-repository-benchmarks/src/main.rs => torrent-repository/benches/repository-benchmark.rs} (71%) create mode 100644 packages/torrent-repository/src/entry/mod.rs create mode 100644 packages/torrent-repository/src/entry/mutex_std.rs create mode 100644 packages/torrent-repository/src/entry/mutex_tokio.rs create mode 100644 packages/torrent-repository/src/entry/single.rs create mode 100644 packages/torrent-repository/src/lib.rs rename {src/core/torrent => packages/torrent-repository/src}/repository/mod.rs (57%) create mode 100644 packages/torrent-repository/src/repository/rw_lock_std.rs create mode 100644 packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs create mode 100644 packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs 
create mode 100644 packages/torrent-repository/src/repository/rw_lock_tokio.rs create mode 100644 packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs create mode 100644 packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs create mode 100644 src/core/peer_tests.rs delete mode 100644 src/core/torrent/entry.rs delete mode 100644 src/core/torrent/repository/rw_lock_std.rs delete mode 100644 src/core/torrent/repository/rw_lock_std_mutex_std.rs delete mode 100644 src/core/torrent/repository/rw_lock_std_mutex_tokio.rs delete mode 100644 src/core/torrent/repository/rw_lock_tokio.rs delete mode 100644 src/core/torrent/repository/rw_lock_tokio_mutex_std.rs delete mode 100644 src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs diff --git a/.vscode/settings.json b/.vscode/settings.json index 701e89ccf..caa48dd01 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -31,4 +31,5 @@ "evenBetterToml.formatter.trailingNewline": true, "evenBetterToml.formatter.reorderKeys": true, "evenBetterToml.formatter.reorderArrays": true, + } \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 26fb919af..8ec922448 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3449,17 +3449,6 @@ dependencies = [ "winnow 0.6.5", ] -[[package]] -name = "torrust-torrent-repository-benchmarks" -version = "3.0.0-alpha.12-develop" -dependencies = [ - "aquatic_udp_protocol", - "clap", - "futures", - "tokio", - "torrust-tracker", -] - [[package]] name = "torrust-tracker" version = "3.0.0-alpha.12-develop" @@ -3471,7 +3460,6 @@ dependencies = [ "axum-client-ip", "axum-extra", "axum-server", - "binascii", "chrono", "clap", "colored", @@ -3498,8 +3486,6 @@ dependencies = [ "serde_bytes", "serde_json", "serde_repr", - "tdyne-peer-id", - "tdyne-peer-id-registry", "thiserror", "tokio", "torrust-tracker-configuration", @@ -3507,6 +3493,7 @@ dependencies = [ "torrust-tracker-located-error", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + 
"torrust-tracker-torrent-repository", "tower-http", "trace", "tracing", @@ -3549,8 +3536,12 @@ dependencies = [ name = "torrust-tracker-primitives" version = "3.0.0-alpha.12-develop" dependencies = [ + "binascii", "derive_more", "serde", + "tdyne-peer-id", + "tdyne-peer-id-registry", + "thiserror", ] [[package]] @@ -3562,6 +3553,18 @@ dependencies = [ "torrust-tracker-primitives", ] +[[package]] +name = "torrust-tracker-torrent-repository" +version = "3.0.0-alpha.12-develop" +dependencies = [ + "clap", + "futures", + "serde", + "tokio", + "torrust-tracker-configuration", + "torrust-tracker-primitives", +] + [[package]] name = "tower" version = "0.4.13" diff --git a/Cargo.toml b/Cargo.toml index e6f196583..9610fffc2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,6 @@ axum = { version = "0", features = ["macros"] } axum-client-ip = "0" axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls"] } -binascii = "0" chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } colored = "2" @@ -62,14 +61,13 @@ serde_bencode = "0" serde_bytes = "0" serde_json = "1" serde_repr = "0" -tdyne-peer-id = "1" -tdyne-peer-id-registry = "0" thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "packages/configuration" } torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = "contrib/bencode" } torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "packages/located-error" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-alpha.12-develop", path = "packages/torrent-repository" } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", 
"request-id", "trace"] } trace = "0" tracing = "0" @@ -91,7 +89,7 @@ members = [ "packages/located-error", "packages/primitives", "packages/test-helpers", - "packages/torrent-repository-benchmarks", + "packages/torrent-repository" ] [profile.dev] diff --git a/cSpell.json b/cSpell.json index da11cd29a..6d5f71b85 100644 --- a/cSpell.json +++ b/cSpell.json @@ -50,6 +50,7 @@ "filesd", "flamegraph", "Freebox", + "Frostegård", "gecos", "Grcov", "hasher", @@ -68,6 +69,7 @@ "Intermodal", "intervali", "kcachegrind", + "Joakim", "keyout", "lcov", "leecher", @@ -96,6 +98,7 @@ "oneshot", "ostr", "Pando", + "peekable", "proot", "proto", "Quickstart", @@ -109,6 +112,7 @@ "reqwest", "rerequests", "ringbuf", + "ringsize", "rngs", "rosegment", "routable", diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 4068c046f..b3b146717 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -243,6 +243,13 @@ use thiserror::Error; use torrust_tracker_located_error::{DynError, Located, LocatedError}; use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; +#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] +pub struct TrackerPolicy { + pub remove_peerless_torrents: bool, + pub max_peer_timeout: u32, + pub persistent_torrent_completed_stat: bool, +} + /// Information required for loading config #[derive(Debug, Default, Clone)] pub struct Info { diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index efcce71a9..3b2406a69 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -16,4 +16,8 @@ version.workspace = true [dependencies] derive_more = "0" +thiserror = "1" +binascii = "0" serde = { version = "1", features = ["derive"] } +tdyne-peer-id = "1" +tdyne-peer-id-registry = "0" \ No newline at end of file diff --git a/packages/primitives/src/announce_event.rs b/packages/primitives/src/announce_event.rs new file mode 100644 index 000000000..16e47da99 
--- /dev/null +++ b/packages/primitives/src/announce_event.rs @@ -0,0 +1,43 @@ +//! Copyright (c) 2020-2023 Joakim Frostegård and The Torrust Developers +//! +//! Distributed under Apache 2.0 license + +use serde::{Deserialize, Serialize}; + +/// Announce events. Described on the +/// [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Serialize, Deserialize)] +pub enum AnnounceEvent { + /// The peer has started downloading the torrent. + Started, + /// The peer has ceased downloading the torrent. + Stopped, + /// The peer has completed downloading the torrent. + Completed, + /// This is one of the announcements done at regular intervals. + None, +} + +impl AnnounceEvent { + #[inline] + #[must_use] + pub fn from_i32(i: i32) -> Self { + match i { + 1 => Self::Completed, + 2 => Self::Started, + 3 => Self::Stopped, + _ => Self::None, + } + } + + #[inline] + #[must_use] + pub fn to_i32(&self) -> i32 { + match self { + AnnounceEvent::None => 0, + AnnounceEvent::Completed => 1, + AnnounceEvent::Started => 2, + AnnounceEvent::Stopped => 3, + } + } +} diff --git a/packages/primitives/src/info_hash.rs b/packages/primitives/src/info_hash.rs new file mode 100644 index 000000000..46ae6283e --- /dev/null +++ b/packages/primitives/src/info_hash.rs @@ -0,0 +1,165 @@ +use std::panic::Location; + +use thiserror::Error; + +/// `BitTorrent` Info Hash v1 +#[derive(PartialEq, Eq, Hash, Clone, Copy, Default, Debug)] +pub struct InfoHash(pub [u8; 20]); + +pub const INFO_HASH_BYTES_LEN: usize = 20; + +impl InfoHash { + /// Create a new `InfoHash` from a byte slice. + /// + /// # Panics + /// + /// Will panic if byte slice does not contains the exact amount of bytes need for the `InfoHash`. 
+ #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!(bytes.len(), INFO_HASH_BYTES_LEN); + let mut ret = Self([0u8; INFO_HASH_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } + + /// Returns the `InfoHash` internal byte array. + #[must_use] + pub fn bytes(&self) -> [u8; 20] { + self.0 + } + + /// Returns the `InfoHash` as a hex string. + #[must_use] + pub fn to_hex_string(&self) -> String { + self.to_string() + } +} + +impl Ord for InfoHash { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl std::cmp::PartialOrd for InfoHash { + fn partial_cmp(&self, other: &InfoHash) -> Option { + Some(self.cmp(other)) + } +} + +impl std::fmt::Display for InfoHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut chars = [0u8; 40]; + binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); + write!(f, "{}", std::str::from_utf8(&chars).unwrap()) + } +} + +impl std::str::FromStr for InfoHash { + type Err = binascii::ConvertError; + + fn from_str(s: &str) -> Result { + let mut i = Self([0u8; 20]); + if s.len() != 40 { + return Err(binascii::ConvertError::InvalidInputLength); + } + binascii::hex2bin(s.as_bytes(), &mut i.0)?; + Ok(i) + } +} + +impl std::convert::From<&[u8]> for InfoHash { + fn from(data: &[u8]) -> InfoHash { + assert_eq!(data.len(), 20); + let mut ret = InfoHash([0u8; 20]); + ret.0.clone_from_slice(data); + ret + } +} + +impl std::convert::From<[u8; 20]> for InfoHash { + fn from(val: [u8; 20]) -> Self { + InfoHash(val) + } +} + +/// Errors that can occur when converting from a `Vec` to an `InfoHash`. +#[derive(Error, Debug)] +pub enum ConversionError { + /// Not enough bytes for infohash. An infohash is 20 bytes. + #[error("not enough bytes for infohash: {message} {location}")] + NotEnoughBytes { + location: &'static Location<'static>, + message: String, + }, + /// Too many bytes for infohash. An infohash is 20 bytes. 
+ #[error("too many bytes for infohash: {message} {location}")] + TooManyBytes { + location: &'static Location<'static>, + message: String, + }, +} + +impl TryFrom> for InfoHash { + type Error = ConversionError; + + fn try_from(bytes: Vec) -> Result { + if bytes.len() < INFO_HASH_BYTES_LEN { + return Err(ConversionError::NotEnoughBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + if bytes.len() > INFO_HASH_BYTES_LEN { + return Err(ConversionError::TooManyBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + Ok(Self::from_bytes(&bytes)) + } +} + +impl serde::ser::Serialize for InfoHash { + fn serialize(&self, serializer: S) -> Result { + let mut buffer = [0u8; 40]; + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); + let str_out = std::str::from_utf8(bytes_out).unwrap(); + serializer.serialize_str(str_out) + } +} + +impl<'de> serde::de::Deserialize<'de> for InfoHash { + fn deserialize>(des: D) -> Result { + des.deserialize_str(InfoHashVisitor) + } +} + +struct InfoHashVisitor; + +impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { + type Value = InfoHash; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "a 40 character long hash") + } + + fn visit_str(self, v: &str) -> Result { + if v.len() != 40 { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"a 40 character long string", + )); + } + + let mut res = InfoHash([0u8; 20]); + + if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"a hexadecimal string", + )); + }; + Ok(res) + } +} diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index f6a14b9e8..664c0c82d 100644 --- a/packages/primitives/src/lib.rs +++ 
b/packages/primitives/src/lib.rs @@ -4,8 +4,43 @@ //! which is a `BitTorrent` tracker server. These structures are used not only //! by the tracker server crate, but also by other crates in the Torrust //! ecosystem. +use std::time::Duration; + +use info_hash::InfoHash; use serde::{Deserialize, Serialize}; +pub mod announce_event; +pub mod info_hash; +pub mod pagination; +pub mod peer; +pub mod swarm_metadata; +pub mod torrent_metrics; + +/// Duration since the Unix Epoch. +pub type DurationSinceUnixEpoch = Duration; + +/// Serializes a `DurationSinceUnixEpoch` as a Unix timestamp in milliseconds. +/// # Errors +/// +/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. +pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { + #[allow(clippy::cast_possible_truncation)] + ser.serialize_u64(unix_time_value.as_millis() as u64) +} + +/// IP version used by the peer to connect to the tracker: IPv4 or IPv6 +#[derive(PartialEq, Eq, Debug)] +pub enum IPVersion { + /// + IPv4, + /// + IPv6, +} + +/// Number of bytes downloaded, uploaded or pending to download (left) by the peer. +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Serialize, Deserialize)] +pub struct NumberOfBytes(pub i64); + /// The database management system used by the tracker. /// /// Refer to: @@ -23,6 +58,8 @@ pub enum DatabaseDriver { MySQL, } +pub type PersistentTorrents = Vec<(InfoHash, u32)>; + /// The mode the tracker will run in. 
/// /// Refer to [Torrust Tracker Configuration](https://docs.rs/torrust-tracker-configuration) diff --git a/packages/primitives/src/pagination.rs b/packages/primitives/src/pagination.rs new file mode 100644 index 000000000..ab7dcfe2b --- /dev/null +++ b/packages/primitives/src/pagination.rs @@ -0,0 +1,50 @@ +use serde::Deserialize; + +/// A struct to keep information about the page when results are being paginated +#[derive(Deserialize, Copy, Clone, Debug, PartialEq)] +pub struct Pagination { + /// The page number, starting at 0 + pub offset: u32, + /// Page size. The number of results per page + pub limit: u32, +} + +impl Pagination { + #[must_use] + pub fn new(offset: u32, limit: u32) -> Self { + Self { offset, limit } + } + + #[must_use] + pub fn new_with_options(offset_option: Option, limit_option: Option) -> Self { + let offset = match offset_option { + Some(offset) => offset, + None => Pagination::default_offset(), + }; + let limit = match limit_option { + Some(offset) => offset, + None => Pagination::default_limit(), + }; + + Self { offset, limit } + } + + #[must_use] + pub fn default_offset() -> u32 { + 0 + } + + #[must_use] + pub fn default_limit() -> u32 { + 4000 + } +} + +impl Default for Pagination { + fn default() -> Self { + Self { + offset: Self::default_offset(), + limit: Self::default_limit(), + } + } +} diff --git a/src/core/peer.rs b/packages/primitives/src/peer.rs similarity index 83% rename from src/core/peer.rs rename to packages/primitives/src/peer.rs index eb2b7b759..5fb9e525f 100644 --- a/src/core/peer.rs +++ b/packages/primitives/src/peer.rs @@ -3,12 +3,12 @@ //! A sample peer: //! //! ```rust,no_run -//! use torrust_tracker::core::peer; +//! use torrust_tracker_primitives::peer; //! use std::net::SocketAddr; //! use std::net::IpAddr; //! use std::net::Ipv4Addr; -//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; -//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +//! 
use torrust_tracker_primitives::DurationSinceUnixEpoch; +//! //! //! peer::Peer { //! peer_id: peer::Id(*b"-qB00000000000000000"), @@ -20,38 +20,26 @@ //! event: AnnounceEvent::Started, //! }; //! ``` + use std::net::{IpAddr, SocketAddr}; -use std::panic::Location; use std::sync::Arc; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde::Serialize; -use thiserror::Error; -use crate::shared::bit_torrent::common::{AnnounceEventDef, NumberOfBytesDef}; -use crate::shared::clock::utils::ser_unix_time_value; -use crate::shared::clock::DurationSinceUnixEpoch; - -/// IP version used by the peer to connect to the tracker: IPv4 or IPv6 -#[derive(PartialEq, Eq, Debug)] -pub enum IPVersion { - /// - IPv4, - /// - IPv6, -} +use crate::announce_event::AnnounceEvent; +use crate::{ser_unix_time_value, DurationSinceUnixEpoch, IPVersion, NumberOfBytes}; /// Peer struct used by the core `Tracker`. /// /// A sample peer: /// /// ```rust,no_run -/// use torrust_tracker::core::peer; +/// use torrust_tracker_primitives::peer; /// use std::net::SocketAddr; /// use std::net::IpAddr; /// use std::net::Ipv4Addr; -/// use torrust_tracker::shared::clock::DurationSinceUnixEpoch; -/// use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +/// use torrust_tracker_primitives::DurationSinceUnixEpoch; +/// /// /// peer::Peer { /// peer_id: peer::Id(*b"-qB00000000000000000"), @@ -73,16 +61,12 @@ pub struct Peer { #[serde(serialize_with = "ser_unix_time_value")] pub updated: DurationSinceUnixEpoch, /// The total amount of bytes uploaded by this peer so far - #[serde(with = "NumberOfBytesDef")] pub uploaded: NumberOfBytes, /// The total amount of bytes downloaded by this peer so far - #[serde(with = "NumberOfBytesDef")] pub downloaded: NumberOfBytes, /// The number of bytes this peer still has to download - #[serde(with = "NumberOfBytesDef")] pub left: NumberOfBytes, /// This is an optional key which maps to started, completed, or stopped (or empty, which is the same as not being 
present). - #[serde(with = "AnnounceEventDef")] pub event: AnnounceEvent, } @@ -162,22 +146,9 @@ impl Peer { } } -/// Peer ID. A 20-byte array. -/// -/// A string of length 20 which this downloader uses as its id. -/// Each downloader generates its own id at random at the start of a new download. -/// -/// A sample peer ID: -/// -/// ```rust,no_run -/// use torrust_tracker::core::peer; -/// -/// let peer_id = peer::Id(*b"-qB00000000000000000"); -/// ``` -#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] -pub struct Id(pub [u8; 20]); +use std::panic::Location; -const PEER_ID_BYTES_LEN: usize = 20; +use thiserror::Error; /// Error returned when trying to convert an invalid peer id from another type. /// @@ -196,30 +167,6 @@ pub enum IdConversionError { }, } -impl Id { - /// # Panics - /// - /// Will panic if byte slice does not contains the exact amount of bytes need for the `Id`. - #[must_use] - pub fn from_bytes(bytes: &[u8]) -> Self { - assert_eq!( - PEER_ID_BYTES_LEN, - bytes.len(), - "we are testing the equality of the constant: `PEER_ID_BYTES_LEN` ({}) and the supplied `bytes` length: {}", - PEER_ID_BYTES_LEN, - bytes.len(), - ); - let mut ret = Self([0u8; PEER_ID_BYTES_LEN]); - ret.0.clone_from_slice(bytes); - ret - } - - #[must_use] - pub fn to_bytes(&self) -> [u8; 20] { - self.0 - } -} - impl From<[u8; 20]> for Id { fn from(bytes: [u8; 20]) -> Self { Id(bytes) @@ -263,7 +210,47 @@ impl std::fmt::Display for Id { } } +/// Peer ID. A 20-byte array. +/// +/// A string of length 20 which this downloader uses as its id. +/// Each downloader generates its own id at random at the start of a new download. 
+/// +/// A sample peer ID: +/// +/// ```rust,no_run +/// use torrust_tracker_primitives::peer; +/// +/// let peer_id = peer::Id(*b"-qB00000000000000000"); +/// ``` +/// +#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] +pub struct Id(pub [u8; 20]); + +pub const PEER_ID_BYTES_LEN: usize = 20; + impl Id { + /// # Panics + /// + /// Will panic if byte slice does not contains the exact amount of bytes need for the `Id`. + #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!( + PEER_ID_BYTES_LEN, + bytes.len(), + "we are testing the equality of the constant: `PEER_ID_BYTES_LEN` ({}) and the supplied `bytes` length: {}", + PEER_ID_BYTES_LEN, + bytes.len(), + ); + let mut ret = Self([0u8; PEER_ID_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } + + #[must_use] + pub fn to_bytes(&self) -> [u8; 20] { + self.0 + } + #[must_use] /// Converts to hex string. /// @@ -329,12 +316,27 @@ impl Serialize for Id { } } +/// Marker Trait for Peer Vectors +pub trait Encoding: From + PartialEq {} + +impl FromIterator for Vec

{ + fn from_iter>(iter: T) -> Self { + let mut peers: Vec

= vec![]; + + for peer in iter { + peers.push(peer.into()); + } + + peers + } +} + pub mod fixture { use std::net::SocketAddr; - use aquatic_udp_protocol::NumberOfBytes; - use super::{Id, Peer}; + use crate::announce_event::AnnounceEvent; + use crate::{DurationSinceUnixEpoch, NumberOfBytes}; #[derive(PartialEq, Debug)] @@ -396,11 +398,11 @@ pub mod fixture { Self { peer_id: Id(*b"-qB00000000000000000"), peer_addr: std::net::SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: crate::shared::clock::DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), - event: aquatic_udp_protocol::AnnounceEvent::Started, + event: AnnounceEvent::Started, } } } @@ -409,7 +411,7 @@ pub mod fixture { #[cfg(test)] pub mod test { mod torrent_peer_id { - use crate::core::peer; + use crate::peer; #[test] fn should_be_instantiated_from_a_byte_slice() { @@ -518,50 +520,4 @@ pub mod test { assert_eq!(peer::Id(*b"-qB00000000000000000").to_bytes(), *b"-qB00000000000000000"); } } - - mod torrent_peer { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use serde_json::Value; - - use crate::core::peer::{self, Peer}; - use crate::shared::clock::{Current, Time}; - - #[test] - fn it_should_be_serializable() { - let torrent_peer = Peer { - peer_id: peer::Id(*b"-qB0000-000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: Current::now(), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - - let raw_json = serde_json::to_string(&torrent_peer).unwrap(); - - let expected_raw_json = r#" - { - "peer_id": { - "id": "0x2d7142303030302d303030303030303030303030", - "client": "qBittorrent" - }, - "peer_addr":"126.0.0.1:8080", - "updated":0, - 
"uploaded":0, - "downloaded":0, - "left":0, - "event":"Started" - } - "#; - - assert_eq!( - serde_json::from_str::(&raw_json).unwrap(), - serde_json::from_str::(expected_raw_json).unwrap() - ); - } - } } diff --git a/packages/primitives/src/swarm_metadata.rs b/packages/primitives/src/swarm_metadata.rs new file mode 100644 index 000000000..ca880b54d --- /dev/null +++ b/packages/primitives/src/swarm_metadata.rs @@ -0,0 +1,22 @@ +use derive_more::Constructor; + +/// Swarm statistics for one torrent. +/// Swarm metadata dictionary in the scrape response. +/// +/// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] +pub struct SwarmMetadata { + /// (i.e `completed`): The number of peers that have ever completed downloading + pub downloaded: u32, // + /// (i.e `seeders`): The number of active peers that have completed downloading (seeders) + pub complete: u32, //seeders + /// (i.e `leechers`): The number of active peers that have not completed downloading (leechers) + pub incomplete: u32, +} + +impl SwarmMetadata { + #[must_use] + pub fn zeroed() -> Self { + Self::default() + } +} diff --git a/packages/primitives/src/torrent_metrics.rs b/packages/primitives/src/torrent_metrics.rs new file mode 100644 index 000000000..c60507171 --- /dev/null +++ b/packages/primitives/src/torrent_metrics.rs @@ -0,0 +1,25 @@ +use std::ops::AddAssign; + +/// Structure that holds general `Tracker` torrents metrics. +/// +/// Metrics are aggregate values for all torrents. +#[derive(Copy, Clone, Debug, PartialEq, Default)] +pub struct TorrentsMetrics { + /// Total number of seeders for all torrents + pub seeders: u64, + /// Total number of peers that have ever completed downloading for all torrents. + pub completed: u64, + /// Total number of leechers for all torrents. + pub leechers: u64, + /// Total number of torrents. 
+ pub torrents: u64, +} + +impl AddAssign for TorrentsMetrics { + fn add_assign(&mut self, rhs: Self) { + self.seeders += rhs.seeders; + self.completed += rhs.completed; + self.leechers += rhs.leechers; + self.torrents += rhs.torrents; + } +} diff --git a/packages/torrent-repository-benchmarks/Cargo.toml b/packages/torrent-repository-benchmarks/Cargo.toml deleted file mode 100644 index e8b22f52f..000000000 --- a/packages/torrent-repository-benchmarks/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -description = "A set of benchmarks for the torrent repository" -keywords = ["benchmarking", "library", "repository", "torrent"] -name = "torrust-torrent-repository-benchmarks" -readme = "README.md" - -authors.workspace = true -documentation.workspace = true -edition.workspace = true -homepage.workspace = true -license.workspace = true -publish.workspace = true -repository.workspace = true -rust-version.workspace = true -version.workspace = true - -[dependencies] -aquatic_udp_protocol = "0.8.0" -clap = { version = "4.4.8", features = ["derive"] } -futures = "0.3.29" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker = { path = "../../" } diff --git a/packages/torrent-repository-benchmarks/README.md b/packages/torrent-repository-benchmarks/README.md deleted file mode 100644 index 14183ea69..000000000 --- a/packages/torrent-repository-benchmarks/README.md +++ /dev/null @@ -1 +0,0 @@ -# Benchmarks of the torrent repository diff --git a/packages/torrent-repository-benchmarks/src/lib.rs b/packages/torrent-repository-benchmarks/src/lib.rs deleted file mode 100644 index 58ebc2057..000000000 --- a/packages/torrent-repository-benchmarks/src/lib.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod args; -pub mod benches; diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml new file mode 100644 index 000000000..0df82a2c6 --- /dev/null +++ b/packages/torrent-repository/Cargo.toml @@ -0,0 
+1,24 @@ +[package] +description = "A library that provides a repository of torrents for the Torrust Tracker." +keywords = ["torrents", "repository", "library"] +name = "torrust-tracker-torrent-repository" +readme = "README.md" + +authors.workspace = true +categories.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +clap = { version = "4.4.8", features = ["derive"] } +futures = "0.3.29" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } +serde = { version = "1", features = ["derive"] } diff --git a/packages/torrent-repository/README.md b/packages/torrent-repository/README.md new file mode 100644 index 000000000..98d7d922b --- /dev/null +++ b/packages/torrent-repository/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Torrent Repository + +A library that provides a torrent repository for the [Torrust Tracker](https://github.com/torrust/torrust-tracker). + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-torrent-repository). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). 
diff --git a/packages/torrent-repository-benchmarks/src/args.rs b/packages/torrent-repository/benches/helpers/args.rs similarity index 100% rename from packages/torrent-repository-benchmarks/src/args.rs rename to packages/torrent-repository/benches/helpers/args.rs diff --git a/packages/torrent-repository-benchmarks/src/benches/asyn.rs b/packages/torrent-repository/benches/helpers/asyn.rs similarity index 81% rename from packages/torrent-repository-benchmarks/src/benches/asyn.rs rename to packages/torrent-repository/benches/helpers/asyn.rs index dffd31682..4fb37104f 100644 --- a/packages/torrent-repository-benchmarks/src/benches/asyn.rs +++ b/packages/torrent-repository/benches/helpers/asyn.rs @@ -1,16 +1,17 @@ +use std::sync::Arc; use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::UpdateTorrentAsync; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_torrent_repository::repository::RepositoryAsync; -use crate::args::Args; -use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; +use super::args::Args; +use super::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; -pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) +pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) where - V: UpdateTorrentAsync + Default, + V: RepositoryAsync + Default, { let mut results: Vec = Vec::with_capacity(samples); @@ -34,15 +35,16 @@ where } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - V: 
UpdateTorrentAsync + Default + Clone + Send + Sync + 'static, + V: RepositoryAsync + Default, + Arc: Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = V::default(); + let torrent_repository = Arc::::default(); let info_hash: &'static InfoHash = &InfoHash([0; 20]); let handles = FuturesUnordered::new(); @@ -83,15 +85,16 @@ where } // Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - V: UpdateTorrentAsync + Default + Clone + Send + Sync + 'static, + V: RepositoryAsync + Default, + Arc: Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = V::default(); + let torrent_repository = Arc::::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); @@ -127,15 +130,16 @@ where } // Async update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - V: UpdateTorrentAsync + Default + Clone + Send + Sync + 'static, + V: RepositoryAsync + Default, + Arc: Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = V::default(); + let torrent_repository = Arc::::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); diff --git 
a/packages/torrent-repository-benchmarks/src/benches/mod.rs b/packages/torrent-repository/benches/helpers/mod.rs similarity index 75% rename from packages/torrent-repository-benchmarks/src/benches/mod.rs rename to packages/torrent-repository/benches/helpers/mod.rs index 1026aa4bf..758c123bd 100644 --- a/packages/torrent-repository-benchmarks/src/benches/mod.rs +++ b/packages/torrent-repository/benches/helpers/mod.rs @@ -1,3 +1,4 @@ +pub mod args; pub mod asyn; pub mod sync; pub mod utils; diff --git a/packages/torrent-repository-benchmarks/src/benches/sync.rs b/packages/torrent-repository/benches/helpers/sync.rs similarity index 81% rename from packages/torrent-repository-benchmarks/src/benches/sync.rs rename to packages/torrent-repository/benches/helpers/sync.rs index 04385bc55..aa2f8188a 100644 --- a/packages/torrent-repository-benchmarks/src/benches/sync.rs +++ b/packages/torrent-repository/benches/helpers/sync.rs @@ -1,18 +1,19 @@ +use std::sync::Arc; use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::UpdateTorrentSync; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_torrent_repository::repository::Repository; -use crate::args::Args; -use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; +use super::args::Args; +use super::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; // Simply add one torrent #[must_use] -pub fn add_one_torrent(samples: usize) -> (Duration, Duration) +pub fn add_one_torrent(samples: usize) -> (Duration, Duration) where - V: UpdateTorrentSync + Default, + V: Repository + Default, { let mut results: Vec = Vec::with_capacity(samples); @@ -34,15 +35,16 @@ where } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn 
update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - V: UpdateTorrentSync + Default + Clone + Send + Sync + 'static, + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = V::default(); + let torrent_repository = Arc::::default(); let info_hash: &'static InfoHash = &InfoHash([0; 20]); let handles = FuturesUnordered::new(); @@ -79,15 +81,16 @@ where } // Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - V: UpdateTorrentSync + Default + Clone + Send + Sync + 'static, + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = V::default(); + let torrent_repository = Arc::::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); @@ -121,15 +124,16 @@ where } // Update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - V: UpdateTorrentSync + Default + Clone + Send + Sync + 'static, + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); 
for _ in 0..samples { - let torrent_repository = V::default(); + let torrent_repository = Arc::::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); diff --git a/packages/torrent-repository-benchmarks/src/benches/utils.rs b/packages/torrent-repository/benches/helpers/utils.rs similarity index 89% rename from packages/torrent-repository-benchmarks/src/benches/utils.rs rename to packages/torrent-repository/benches/helpers/utils.rs index ef1640038..aed9f40cf 100644 --- a/packages/torrent-repository-benchmarks/src/benches/utils.rs +++ b/packages/torrent-repository/benches/helpers/utils.rs @@ -2,10 +2,10 @@ use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::time::Duration; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use torrust_tracker::core::peer::{Id, Peer}; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -use torrust_tracker::shared::clock::DurationSinceUnixEpoch; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::{Id, Peer}; +use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; pub const DEFAULT_PEER: Peer = Peer { peer_id: Id([0; 20]), diff --git a/packages/torrent-repository-benchmarks/src/main.rs b/packages/torrent-repository/benches/repository-benchmark.rs similarity index 71% rename from packages/torrent-repository-benchmarks/src/main.rs rename to packages/torrent-repository/benches/repository-benchmark.rs index b935cea43..bff34b256 100644 --- a/packages/torrent-repository-benchmarks/src/main.rs +++ b/packages/torrent-repository/benches/repository-benchmark.rs @@ -1,13 +1,14 @@ -use std::sync::Arc; +mod helpers; use clap::Parser; -use torrust_torrent_repository_benchmarks::args::Args; -use torrust_torrent_repository_benchmarks::benches::{asyn, sync}; -use torrust_tracker::core::torrent::{ +use 
torrust_tracker_torrent_repository::{ TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, }; +use crate::helpers::args::Args; +use crate::helpers::{asyn, sync}; + #[allow(clippy::too_many_lines)] #[allow(clippy::print_literal)] fn main() { @@ -24,24 +25,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::>(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::>(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::>(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::>( - &rt, 10 - )) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); if let Some(true) = args.compare { @@ -51,22 +50,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - sync::add_one_torrent::>(1_000_000) + sync::add_one_torrent::(1_000_000) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::>(&rt, 10)) + rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::>(&rt, 10)) + rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync::update_multiple_torrents_in_parallel::>(&rt, 10)) + rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ 
-75,26 +74,24 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - sync::add_one_torrent::>(1_000_000) + sync::add_one_torrent::(1_000_000) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::>( - &rt, 10 - )) + rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::>( + rt.block_on(sync::add_multiple_torrents_in_parallel::( &rt, 10 )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync::update_multiple_torrents_in_parallel::>( + rt.block_on(sync::update_multiple_torrents_in_parallel::( &rt, 10 )) ); @@ -105,26 +102,28 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::>(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::>( + rt.block_on(asyn::update_one_torrent_in_parallel::( &rt, 10 )) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::>( + rt.block_on(asyn::add_multiple_torrents_in_parallel::( &rt, 10 )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::>(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::( + &rt, 10 + )) ); println!(); @@ -133,26 +132,28 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::>(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::>( + rt.block_on(asyn::update_one_torrent_in_parallel::( &rt, 10 )) ); println!( "{}: Avg/AdjAvg: 
{:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::>( + rt.block_on(asyn::add_multiple_torrents_in_parallel::( &rt, 10 )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::>(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::( + &rt, 10 + )) ); println!(); @@ -161,26 +162,26 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::>(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::>( + rt.block_on(asyn::update_one_torrent_in_parallel::( &rt, 10 )) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::>( + rt.block_on(asyn::add_multiple_torrents_in_parallel::( &rt, 10 )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::>(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); } } diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs new file mode 100644 index 000000000..04aa597df --- /dev/null +++ b/packages/torrent-repository/src/entry/mod.rs @@ -0,0 +1,98 @@ +use std::fmt::Debug; +use std::sync::Arc; + +use serde::{Deserialize, Serialize}; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +pub mod mutex_std; +pub mod mutex_tokio; +pub mod single; + +pub trait Entry { + /// It returns the swarm metadata (statistics) as a struct: + /// + /// `(seeders, completed, leechers)` + fn get_stats(&self) -> SwarmMetadata; + + /// Returns True if Still a Valid Entry according to the 
Tracker Policy + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; + + /// Returns `true` if the swarm has no peers + fn peers_is_empty(&self) -> bool; + + /// Returns the number of peers + fn get_peers_len(&self) -> usize; + + /// Get all swarm peers, optionally limiting the result. + fn get_peers(&self, limit: Option) -> Vec>; + + /// It returns the list of peers for a given peer client, optionally limiting the + /// result. + /// + /// It filters out the input peer, typically because we want to return this + /// list of peers to that client peer. + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; + + /// It updates a peer and returns true if the number of completed downloads has increased. + /// + /// The number of peers that have completed downloading is synchronously updated when peers are updated. + /// That's the total torrent downloads counter. + fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool; + + /// It performs a combined operation of `insert_or_update_peer` and `get_stats`.
+ fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata); + + /// It removes peer from the swarm that have not been updated for more than `current_cutoff` seconds + fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch); +} + +#[allow(clippy::module_name_repetitions)] +pub trait EntrySync { + fn get_stats(&self) -> SwarmMetadata; + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; + fn peers_is_empty(&self) -> bool; + fn get_peers_len(&self) -> usize; + fn get_peers(&self, limit: Option) -> Vec>; + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; + fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool; + fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata); + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); +} + +#[allow(clippy::module_name_repetitions)] +pub trait EntryAsync { + fn get_stats(self) -> impl std::future::Future + Send; + + #[allow(clippy::wrong_self_convention)] + fn is_not_zombie(self, policy: &TrackerPolicy) -> impl std::future::Future + Send; + fn peers_is_empty(self) -> impl std::future::Future + Send; + fn get_peers_len(self) -> impl std::future::Future + Send; + fn get_peers(self, limit: Option) -> impl std::future::Future>> + Send; + fn get_peers_for_peer( + self, + client: &peer::Peer, + limit: Option, + ) -> impl std::future::Future>> + Send; + fn insert_or_update_peer(self, peer: &peer::Peer) -> impl std::future::Future + Send; + fn insert_or_update_peer_and_get_stats( + self, + peer: &peer::Peer, + ) -> impl std::future::Future + std::marker::Send; + fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; +} + +/// A data structure containing all the information about a torrent in the tracker. 
+/// +/// This is the tracker entry for a given torrent and contains the swarm data, +/// that's the list of all the peers trying to download the same torrent. +/// The tracker keeps one entry like this for every torrent. +#[derive(Serialize, Deserialize, Clone, Debug, Default)] +pub struct Torrent { + /// The swarm: a network of peers that are all trying to download the torrent associated to this entry + #[serde(skip)] + pub(crate) peers: std::collections::BTreeMap>, + /// The number of peers that have ever completed downloading the torrent associated to this entry + pub(crate) completed: u32, +} diff --git a/packages/torrent-repository/src/entry/mutex_std.rs b/packages/torrent-repository/src/entry/mutex_std.rs new file mode 100644 index 000000000..df6228317 --- /dev/null +++ b/packages/torrent-repository/src/entry/mutex_std.rs @@ -0,0 +1,50 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntrySync}; +use crate::EntryMutexStd; + +impl EntrySync for EntryMutexStd { + fn get_stats(&self) -> SwarmMetadata { + self.lock().expect("it should get a lock").get_stats() + } + + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { + self.lock().expect("it should get a lock").is_not_zombie(policy) + } + + fn peers_is_empty(&self) -> bool { + self.lock().expect("it should get a lock").peers_is_empty() + } + + fn get_peers_len(&self) -> usize { + self.lock().expect("it should get a lock").get_peers_len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers(limit) + } + + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers_for_peer(client, limit) + } + + fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { + self.lock().expect("it should lock the 
entry").insert_or_update_peer(peer) + } + + fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.lock() + .expect("it should lock the entry") + .insert_or_update_peer_and_get_stats(peer) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.lock() + .expect("it should lock the entry") + .remove_inactive_peers(current_cutoff); + } +} diff --git a/packages/torrent-repository/src/entry/mutex_tokio.rs b/packages/torrent-repository/src/entry/mutex_tokio.rs new file mode 100644 index 000000000..c4d13fb43 --- /dev/null +++ b/packages/torrent-repository/src/entry/mutex_tokio.rs @@ -0,0 +1,46 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntryAsync}; +use crate::EntryMutexTokio; + +impl EntryAsync for EntryMutexTokio { + async fn get_stats(self) -> SwarmMetadata { + self.lock().await.get_stats() + } + + async fn is_not_zombie(self, policy: &TrackerPolicy) -> bool { + self.lock().await.is_not_zombie(policy) + } + + async fn peers_is_empty(self) -> bool { + self.lock().await.peers_is_empty() + } + + async fn get_peers_len(self) -> usize { + self.lock().await.get_peers_len() + } + + async fn get_peers(self, limit: Option) -> Vec> { + self.lock().await.get_peers(limit) + } + + async fn get_peers_for_peer(self, client: &peer::Peer, limit: Option) -> Vec> { + self.lock().await.get_peers_for_peer(client, limit) + } + + async fn insert_or_update_peer(self, peer: &peer::Peer) -> bool { + self.lock().await.insert_or_update_peer(peer) + } + + async fn insert_or_update_peer_and_get_stats(self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.lock().await.insert_or_update_peer_and_get_stats(peer) + } + + async fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) { + 
self.lock().await.remove_inactive_peers(current_cutoff); + } +} diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs new file mode 100644 index 000000000..7a5cf6240 --- /dev/null +++ b/packages/torrent-repository/src/entry/single.rs @@ -0,0 +1,105 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::peer::{self}; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::Entry; +use crate::EntrySingle; + +impl Entry for EntrySingle { + #[allow(clippy::cast_possible_truncation)] + fn get_stats(&self) -> SwarmMetadata { + let complete: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; + let incomplete: u32 = self.peers.len() as u32 - complete; + + SwarmMetadata { + downloaded: self.completed, + complete, + incomplete, + } + } + + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { + if policy.persistent_torrent_completed_stat && self.completed > 0 { + return true; + } + + if policy.remove_peerless_torrents && self.peers.is_empty() { + return false; + } + + true + } + + fn peers_is_empty(&self) -> bool { + self.peers.is_empty() + } + + fn get_peers_len(&self) -> usize { + self.peers.len() + } + fn get_peers(&self, limit: Option) -> Vec> { + match limit { + Some(limit) => self.peers.values().take(limit).cloned().collect(), + None => self.peers.values().cloned().collect(), + } + } + + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + match limit { + Some(limit) => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != peer::ReadInfo::get_address(client)) + // Limit the number of peers on the result + .take(limit) + .cloned() + .collect(), + None => self + .peers + .values() + // Take 
peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != peer::ReadInfo::get_address(client)) + .cloned() + .collect(), + } + } + + fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { + let mut did_torrent_stats_change: bool = false; + + match peer::ReadInfo::get_event(peer) { + AnnounceEvent::Stopped => { + drop(self.peers.remove(&peer::ReadInfo::get_id(peer))); + } + AnnounceEvent::Completed => { + let peer_old = self.peers.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer)); + // Don't count if peer was not previously known and not already completed. + if peer_old.is_some_and(|p| p.event != AnnounceEvent::Completed) { + self.completed += 1; + did_torrent_stats_change = true; + } + } + _ => { + drop(self.peers.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer))); + } + } + + did_torrent_stats_change + } + + fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let changed = self.insert_or_update_peer(peer); + let stats = self.get_stats(); + (changed, stats) + } + + fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + self.peers + .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); + } +} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs new file mode 100644 index 000000000..903e1405e --- /dev/null +++ b/packages/torrent-repository/src/lib.rs @@ -0,0 +1,15 @@ +use std::sync::Arc; + +pub mod entry; +pub mod repository; + +pub type EntrySingle = entry::Torrent; +pub type EntryMutexStd = Arc>; +pub type EntryMutexTokio = Arc>; + +pub type TorrentsRwLockStd = repository::RwLockStd; +pub type TorrentsRwLockStdMutexStd = repository::RwLockStd; +pub type TorrentsRwLockStdMutexTokio = repository::RwLockStd; +pub type TorrentsRwLockTokio = repository::RwLockTokio; +pub type TorrentsRwLockTokioMutexStd = repository::RwLockTokio; +pub type TorrentsRwLockTokioMutexTokio = 
repository::RwLockTokio; diff --git a/src/core/torrent/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs similarity index 57% rename from src/core/torrent/repository/mod.rs rename to packages/torrent-repository/src/repository/mod.rs index 1c4ce8ae9..b46771163 100644 --- a/src/core/torrent/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -1,8 +1,9 @@ -use super::SwarmMetadata; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; -use crate::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; pub mod rw_lock_std; pub mod rw_lock_std_mutex_std; @@ -12,20 +13,25 @@ pub mod rw_lock_tokio_mutex_std; pub mod rw_lock_tokio_mutex_tokio; pub trait Repository: Default + 'static { + fn get(&self, key: &InfoHash) -> Option; + fn get_metrics(&self) -> TorrentsMetrics; + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>; + fn import_persistent(&self, persistent_torrents: &PersistentTorrents); + fn remove(&self, key: &InfoHash) -> Option; + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); + fn remove_peerless_torrents(&self, policy: &TrackerPolicy); + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata); +} + +#[allow(clippy::module_name_repetitions)] +pub trait RepositoryAsync: Default + 'static { fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; fn get_metrics(&self) -> impl std::future::Future + Send; fn get_paginated(&self, pagination: 
Option<&Pagination>) -> impl std::future::Future> + Send; fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> impl std::future::Future + Send; fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; - fn remove_inactive_peers(&self, max_peer_timeout: u32) -> impl std::future::Future + Send; + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; -} - -pub trait UpdateTorrentSync { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata); -} - -pub trait UpdateTorrentAsync { fn update_torrent_with_peer_and_get_stats( &self, info_hash: &InfoHash, diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs new file mode 100644 index 000000000..bacef623d --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -0,0 +1,112 @@ +use std::collections::BTreeMap; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::Entry; +use crate::{EntrySingle, TorrentsRwLockStd}; + +impl TorrentsRwLockStd { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("it should get the read lock") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("it 
should get the write lock") + } +} + +impl Repository for TorrentsRwLockStd +where + EntrySingle: Entry, +{ + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let mut db = self.get_torrents_mut(); + + let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); + + entry.insert_or_update_peer_and_get_stats(peer) + } + + fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().values() { + let stats = entry.get_stats(); + metrics.seeders += u64::from(stats.complete); + metrics.completed += u64::from(stats.downloaded); + metrics.leechers += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntrySingle { + peers: BTreeMap::default(), + completed: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let mut db = self.get_torrents_mut(); + let entries = db.values_mut(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + 
} + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.is_not_zombie(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs new file mode 100644 index 000000000..9fca82ba8 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -0,0 +1,123 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockStdMutexStd}; + +impl TorrentsRwLockStdMutexStd { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl Repository for TorrentsRwLockStdMutexStd +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + 
entry.insert_or_update_peer_and_get_stats(peer) + } + + fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().values() { + let stats = entry.lock().expect("it should get a lock").get_stats(); + metrics.seeders += u64::from(stats.complete); + metrics.completed += u64::from(stats.downloaded); + metrics.leechers += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents(); + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + } +} diff --git 
a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs new file mode 100644 index 000000000..b9fb54469 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -0,0 +1,131 @@ +use std::collections::BTreeMap; +use std::pin::Pin; +use std::sync::Arc; + +use futures::future::join_all; +use futures::{Future, FutureExt}; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::{Entry, EntryAsync}; +use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockStdMutexTokio}; + +impl TorrentsRwLockStdMutexTokio { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl RepositoryAsync for TorrentsRwLockStdMutexTokio +where + EntryMutexTokio: EntryAsync, + EntrySingle: Entry, +{ + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer).await + } + 
async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexTokio)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + let entries: Vec<_> = self.get_torrents().values().cloned().collect(); + + for entry in entries { + let stats = entry.lock().await.get_stats(); + metrics.seeders += u64::from(stats.complete); + metrics.completed += u64::from(stats.downloaded); + metrics.leechers += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexTokio::new( + EntrySingle { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let handles: Vec + Send>>>; + { + let db = self.get_torrents(); + handles = db + .values() + .cloned() + .map(|e| e.remove_inactive_peers(current_cutoff).boxed()) + .collect(); + } + join_all(handles).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + } +} diff --git 
a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs new file mode 100644 index 000000000..d0b7ec751 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -0,0 +1,113 @@ +use std::collections::BTreeMap; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::Entry; +use crate::{EntrySingle, TorrentsRwLockTokio}; + +impl TorrentsRwLockTokio { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokio +where + EntrySingle: Entry, +{ + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let mut db = self.get_torrents_mut().await; + + let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); + + entry.insert_or_update_peer_and_get_stats(peer) + } + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + 
.collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().await.values() { + let stats = entry.get_stats(); + metrics.seeders += u64::from(stats.complete); + metrics.completed += u64::from(stats.downloaded); + metrics.leechers += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntrySingle { + peers: BTreeMap::default(), + completed: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let mut db = self.get_torrents_mut().await; + let entries = db.values_mut(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.is_not_zombie(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs new file mode 100644 index 000000000..f800d2001 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -0,0 +1,124 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use 
torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockTokioMutexStd}; + +impl TorrentsRwLockTokioMutexStd { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokioMutexStd +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut().await; + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer) + } + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().await.values() { + let 
stats = entry.get_stats(); + metrics.seeders += u64::from(stats.complete); + metrics.completed += u64::from(stats.downloaded); + metrics.leechers += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents().await; + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs new file mode 100644 index 000000000..7ce2cc74c --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -0,0 +1,124 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, 
PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::{Entry, EntryAsync}; +use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockTokioMutexTokio}; + +impl TorrentsRwLockTokioMutexTokio { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokioMutexTokio +where + EntryMutexTokio: EntryAsync, + EntrySingle: Entry, +{ + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut().await; + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer).await + } + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexTokio)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().await.values().cloned() { + let stats = entry.get_stats().await; + metrics.seeders += u64::from(stats.complete); + metrics.completed += u64::from(stats.downloaded); + metrics.leechers += 
u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexTokio::new( + EntrySingle { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents().await; + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff).await; + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + } +} diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 6647e0249..300813430 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -44,7 +44,7 @@ pub fn start_job(config: &Configuration, tracker: &Arc) -> JoinHa if let Some(tracker) = weak_tracker.upgrade() { let start_time = Utc::now().time(); info!("Cleaning up torrents.."); - tracker.cleanup_torrents().await; + tracker.cleanup_torrents(); info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()); } else { break; diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs index df1e9bc9a..501696df4 100644 --- a/src/console/clients/checker/checks/http.rs +++ b/src/console/clients/checker/checks/http.rs @@ -3,12 +3,12 @@ use std::str::FromStr; use colored::Colorize; use log::debug; use reqwest::Url as 
ServiceUrl; +use torrust_tracker_primitives::info_hash::InfoHash; use url::Url; use crate::console::clients::checker::console::Console; use crate::console::clients::checker::printer::Printer; use crate::console::clients::checker::service::{CheckError, CheckResult}; -use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; use crate::shared::bit_torrent::tracker::http::client::responses::scrape; diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs index 890375b75..47a2a1a00 100644 --- a/src/console/clients/checker/checks/udp.rs +++ b/src/console/clients/checker/checks/udp.rs @@ -4,12 +4,12 @@ use aquatic_udp_protocol::{Port, TransactionId}; use colored::Colorize; use hex_literal::hex; use log::debug; +use torrust_tracker_primitives::info_hash::InfoHash; use crate::console::clients::checker::console::Console; use crate::console::clients::checker::printer::Printer; use crate::console::clients::checker::service::{CheckError, CheckResult}; use crate::console::clients::udp::checker; -use crate::shared::bit_torrent::info_hash::InfoHash; const ASSIGNED_BY_OS: u16 = 0; const RANDOM_TRANSACTION_ID: i32 = -888_840_697; diff --git a/src/console/clients/http/app.rs b/src/console/clients/http/app.rs index 80db07231..511fb6628 100644 --- a/src/console/clients/http/app.rs +++ b/src/console/clients/http/app.rs @@ -18,8 +18,8 @@ use std::str::FromStr; use anyhow::Context; use clap::{Parser, Subcommand}; use reqwest::Url; +use torrust_tracker_primitives::info_hash::InfoHash; -use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; use 
crate::shared::bit_torrent::tracker::http::client::responses::scrape; diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index b9e31155d..540a25f30 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -64,11 +64,11 @@ use aquatic_udp_protocol::Response::{self, AnnounceIpv4, AnnounceIpv6, Scrape}; use aquatic_udp_protocol::{Port, TransactionId}; use clap::{Parser, Subcommand}; use log::{debug, LevelFilter}; +use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; use url::Url; use crate::console::clients::udp::checker; use crate::console::clients::udp::responses::{AnnounceResponseDto, ScrapeResponseDto}; -use crate::shared::bit_torrent::info_hash::InfoHash as TorrustInfoHash; const ASSIGNED_BY_OS: u16 = 0; const RANDOM_TRANSACTION_ID: i32 = -888_840_697; diff --git a/src/console/clients/udp/checker.rs b/src/console/clients/udp/checker.rs index b35139e49..12b8d764c 100644 --- a/src/console/clients/udp/checker.rs +++ b/src/console/clients/udp/checker.rs @@ -8,8 +8,8 @@ use aquatic_udp_protocol::{ }; use log::debug; use thiserror::Error; +use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; -use crate::shared::bit_torrent::info_hash::InfoHash as TorrustInfoHash; use crate::shared::bit_torrent::tracker::udp::client::{UdpClient, UdpTrackerClient}; #[derive(Error, Debug)] diff --git a/src/core/auth.rs b/src/core/auth.rs index 9fc9d6e7b..a7bb91aa4 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -13,7 +13,7 @@ //! //! ```rust,no_run //! use torrust_tracker::core::auth::Key; -//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; //! //! pub struct ExpiringKey { //! /// Random 32-char string. 
For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` @@ -48,9 +48,10 @@ use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use thiserror::Error; use torrust_tracker_located_error::{DynError, LocatedError}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; -use crate::shared::clock::{convert_from_timestamp_to_datetime_utc, Current, DurationSinceUnixEpoch, Time, TimeNow}; +use crate::shared::clock::{convert_from_timestamp_to_datetime_utc, Current, Time, TimeNow}; #[must_use] /// It generates a new random 32-char authentication [`ExpiringKey`] diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index b3dcdd48e..b708ef4dc 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -22,7 +22,7 @@ //! ---|---|--- //! `id` | 1 | Autoincrement id //! `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 -//! `completed` | 20 | The number of peers that have ever completed downloading the torrent associated to this entry. See [`Entry`](crate::core::torrent::Entry) for more information. +//! `completed` | 20 | The number of peers that have ever completed downloading the torrent associated to this entry. See [`Entry`](torrust_tracker_torrent_repository::entry::Entry) for more information. //! //! > **NOTICE**: The peer list for a torrent is not persisted. Since peer have to re-announce themselves on intervals, the data is be //! regenerated again after some minutes. 
@@ -51,12 +51,11 @@ pub mod sqlite; use std::marker::PhantomData; use async_trait::async_trait; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::PersistentTorrents; use self::error::Error; use crate::core::auth::{self, Key}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -pub type PersistentTorrents = Vec<(InfoHash, u32)>; struct Builder where @@ -118,9 +117,9 @@ pub trait Database: Sync + Send { /// /// It returns an array of tuples with the torrent /// [`InfoHash`] and the - /// [`completed`](crate::core::torrent::Entry::completed) counter + /// [`completed`](torrust_tracker_torrent_repository::entry::Entry::completed) counter /// which is the number of times the torrent has been downloaded. - /// See [`Entry::completed`](crate::core::torrent::Entry::completed). + /// See [`Entry::completed`](torrust_tracker_torrent_repository::entry::Entry::completed). /// /// # Context: Torrent Metrics /// diff --git a/src/core/databases/mysql.rs b/src/core/databases/mysql.rs index c46300829..e37cdd9bf 100644 --- a/src/core/databases/mysql.rs +++ b/src/core/databases/mysql.rs @@ -8,12 +8,12 @@ use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; +use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::DatabaseDriver; use super::{Database, Error}; use crate::core::auth::{self, Key}; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; -use crate::shared::bit_torrent::info_hash::InfoHash; const DRIVER: DatabaseDriver = DatabaseDriver::MySQL; diff --git a/src/core/databases/sqlite.rs b/src/core/databases/sqlite.rs index bf2d6b8b9..5a3ac144a 100644 --- a/src/core/databases/sqlite.rs +++ b/src/core/databases/sqlite.rs @@ -5,12 +5,11 @@ use std::str::FromStr; use async_trait::async_trait; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; -use torrust_tracker_primitives::DatabaseDriver; +use 
torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::{DatabaseDriver, DurationSinceUnixEpoch}; use super::{Database, Error}; use crate::core::auth::{self, Key}; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::shared::clock::DurationSinceUnixEpoch; const DRIVER: DatabaseDriver = DatabaseDriver::Sqlite3; diff --git a/src/core/error.rs b/src/core/error.rs index f1e622673..a826de349 100644 --- a/src/core/error.rs +++ b/src/core/error.rs @@ -9,6 +9,7 @@ use std::panic::Location; use torrust_tracker_located_error::LocatedError; +use torrust_tracker_primitives::info_hash::InfoHash; /// Authentication or authorization error returned by the core `Tracker` #[derive(thiserror::Error, Debug, Clone)] @@ -25,7 +26,7 @@ pub enum Error { // Authorization errors #[error("The torrent: {info_hash}, is not whitelisted, {location}")] TorrentNotWhitelisted { - info_hash: crate::shared::bit_torrent::info_hash::InfoHash, + info_hash: InfoHash, location: &'static Location<'static>, }, } diff --git a/src/core/mod.rs b/src/core/mod.rs index 15d7b9c39..f94c46543 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -52,13 +52,13 @@ //! The tracker responds to the peer with the list of other peers in the swarm so that //! the peer can contact them to start downloading pieces of the file from them. //! -//! Once you have instantiated the `Tracker` you can `announce` a new [`Peer`] with: +//! Once you have instantiated the `Tracker` you can `announce` a new [`peer::Peer`] with: //! //! ```rust,no_run -//! use torrust_tracker::core::peer; -//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; -//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +//! use torrust_tracker_primitives::peer; +//! use torrust_tracker_primitives::info_hash::InfoHash; +//! use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; +//! 
use torrust_tracker_primitives::announce_event::AnnounceEvent; //! use std::net::SocketAddr; //! use std::net::IpAddr; //! use std::net::Ipv4Addr; @@ -97,11 +97,11 @@ //! The returned struct is: //! //! ```rust,no_run -//! use torrust_tracker::core::peer::Peer; +//! use torrust_tracker_primitives::peer; //! use torrust_tracker_configuration::AnnouncePolicy; //! //! pub struct AnnounceData { -//! pub peers: Vec, +//! pub peers: Vec, //! pub swarm_stats: SwarmMetadata, //! pub policy: AnnouncePolicy, // the tracker announce policy. //! } @@ -136,7 +136,7 @@ //! The returned struct is: //! //! ```rust,no_run -//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +//! use torrust_tracker_primitives::info_hash::InfoHash; //! use std::collections::HashMap; //! //! pub struct ScrapeData { @@ -165,7 +165,7 @@ //! There are two data structures for infohashes: byte arrays and hex strings: //! //! ```rust,no_run -//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +//! use torrust_tracker_primitives::info_hash::InfoHash; //! use std::str::FromStr; //! //! let info_hash: InfoHash = [255u8; 20].into(); @@ -246,14 +246,14 @@ //! A `Peer` is the struct used by the `Tracker` to keep peers data: //! //! ```rust,no_run -//! use torrust_tracker::core::peer::Id; +//! use torrust_tracker_primitives::peer; //! use std::net::SocketAddr; -//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; //! use aquatic_udp_protocol::NumberOfBytes; //! use aquatic_udp_protocol::AnnounceEvent; //! //! pub struct Peer { -//! pub peer_id: Id, // The peer ID +//! pub peer_id: peer::Id, // The peer ID //! pub peer_addr: SocketAddr, // Peer socket address //! pub updated: DurationSinceUnixEpoch, // Last time (timestamp) when the peer was updated //! 
pub uploaded: NumberOfBytes, // Number of bytes the peer has uploaded so far @@ -429,11 +429,12 @@ pub mod auth; pub mod databases; pub mod error; -pub mod peer; pub mod services; pub mod statistics; pub mod torrent; +pub mod peer_tests; + use std::collections::HashMap; use std::net::IpAddr; use std::panic::Location; @@ -443,18 +444,19 @@ use std::time::Duration; use derive_more::Constructor; use log::debug; use tokio::sync::mpsc::error::SendError; -use torrust_tracker_configuration::{AnnouncePolicy, Configuration}; -use torrust_tracker_primitives::TrackerMode; +use torrust_tracker_configuration::{AnnouncePolicy, Configuration, TrackerPolicy}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, TrackerMode}; +use torrust_tracker_torrent_repository::entry::EntrySync; +use torrust_tracker_torrent_repository::repository::Repository; use self::auth::Key; use self::error::Error; -use self::peer::Peer; -use self::torrent::entry::{ReadInfo, ReadPeers}; -use self::torrent::repository::{Repository, UpdateTorrentSync}; use self::torrent::Torrents; use crate::core::databases::Database; -use crate::core::torrent::SwarmMetadata; -use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::shared::clock::{self, TimeNow}; /// The maximum number of returned peers for a torrent. pub const TORRENT_PEERS_LIMIT: usize = 74; @@ -484,33 +486,12 @@ pub struct Tracker { on_reverse_proxy: bool, } -/// Structure that holds general `Tracker` torrents metrics. -/// -/// Metrics are aggregate values for all torrents. -#[derive(Copy, Clone, Debug, PartialEq, Default)] -pub struct TorrentsMetrics { - /// Total number of seeders for all torrents - pub seeders: u64, - /// Total number of peers that have ever completed downloading for all torrents. - pub completed: u64, - /// Total number of leechers for all torrents. 
- pub leechers: u64, - /// Total number of torrents. - pub torrents: u64, -} - -#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] -pub struct TrackerPolicy { - pub remove_peerless_torrents: bool, - pub max_peer_timeout: u32, - pub persistent_torrent_completed_stat: bool, -} /// Structure that holds the data returned by the `announce` request. #[derive(Clone, Debug, PartialEq, Constructor, Default)] pub struct AnnounceData { /// The list of peers that are downloading the same torrent. /// It excludes the peer that made the request. - pub peers: Vec>, + pub peers: Vec>, /// Swarm statistics pub stats: SwarmMetadata, pub policy: AnnouncePolicy, @@ -627,7 +608,7 @@ impl Tracker { /// # Context: Tracker /// /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). - pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceData { + pub async fn announce(&self, info_hash: &InfoHash, peer: &mut peer::Peer, remote_client_ip: &IpAddr) -> AnnounceData { // code-review: maybe instead of mutating the peer we could just return // a tuple with the new peer and the announce data: (Peer, AnnounceData). // It could even be a different struct: `StoredPeer` or `PublicPeer`. @@ -650,7 +631,7 @@ impl Tracker { // we should update the torrent and get the stats before we get the peer list. 
let stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - let peers = self.get_torrent_peers_for_peer(info_hash, peer).await; + let peers = self.get_torrent_peers_for_peer(info_hash, peer); AnnounceData { peers, @@ -669,7 +650,7 @@ impl Tracker { for info_hash in info_hashes { let swarm_metadata = match self.authorize(info_hash).await { - Ok(()) => self.get_swarm_metadata(info_hash).await, + Ok(()) => self.get_swarm_metadata(info_hash), Err(_) => SwarmMetadata::zeroed(), }; scrape_data.add_file(info_hash, swarm_metadata); @@ -679,8 +660,8 @@ impl Tracker { } /// It returns the data for a `scrape` response. - async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { - match self.torrents.get(info_hash).await { + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { + match self.torrents.get(info_hash) { Some(torrent_entry) => torrent_entry.get_stats(), None => SwarmMetadata::default(), } @@ -697,13 +678,13 @@ impl Tracker { pub async fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; - self.torrents.import_persistent(&persistent_torrents).await; + self.torrents.import_persistent(&persistent_torrents); Ok(()) } - async fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec> { - match self.torrents.get(info_hash).await { + fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> Vec> { + match self.torrents.get(info_hash) { None => vec![], Some(entry) => entry.get_peers_for_peer(peer, Some(TORRENT_PEERS_LIMIT)), } @@ -712,8 +693,8 @@ impl Tracker { /// # Context: Tracker /// /// Get all torrent peers for a given torrent - pub async fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { - match self.torrents.get(info_hash).await { + pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { + match self.torrents.get(info_hash) { None => vec![], 
Some(entry) => entry.get_peers(Some(TORRENT_PEERS_LIMIT)), } @@ -724,11 +705,7 @@ impl Tracker { /// needed for a `announce` request response. /// /// # Context: Tracker - pub async fn update_torrent_with_peer_and_get_stats( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - ) -> torrent::SwarmMetadata { + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { // code-review: consider splitting the function in two (command and query segregation). // `update_torrent_with_peer` and `get_stats` @@ -751,19 +728,21 @@ impl Tracker { /// /// # Panics /// Panics if unable to get the torrent metrics. - pub async fn get_torrents_metrics(&self) -> TorrentsMetrics { - self.torrents.get_metrics().await + pub fn get_torrents_metrics(&self) -> TorrentsMetrics { + self.torrents.get_metrics() } /// Remove inactive peers and (optionally) peerless torrents /// /// # Context: Tracker - pub async fn cleanup_torrents(&self) { + pub fn cleanup_torrents(&self) { // If we don't need to remove torrents we will use the faster iter if self.policy.remove_peerless_torrents { - self.torrents.remove_peerless_torrents(&self.policy).await; + self.torrents.remove_peerless_torrents(&self.policy); } else { - self.torrents.remove_inactive_peers(self.policy.max_peer_timeout).await; + let current_cutoff = + clock::Current::sub(&Duration::from_secs(u64::from(self.policy.max_peer_timeout))).unwrap_or_default(); + self.torrents.remove_inactive_peers(current_cutoff); } } @@ -1017,14 +996,15 @@ mod tests { use std::str::FromStr; use std::sync::Arc; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; use crate::core::peer::{self, Peer}; use crate::core::services::tracker_factory; use 
crate::core::{TorrentsMetrics, Tracker}; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; + use crate::shared::bit_torrent::info_hash::fixture::gen_seeded_infohash; fn public_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_public()) @@ -1132,7 +1112,7 @@ mod tests { async fn should_collect_torrent_metrics() { let tracker = public_tracker(); - let torrents_metrics = tracker.get_torrents_metrics().await; + let torrents_metrics = tracker.get_torrents_metrics(); assert_eq!( torrents_metrics, @@ -1154,7 +1134,7 @@ mod tests { tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - let peers = tracker.get_torrent_peers(&info_hash).await; + let peers = tracker.get_torrent_peers(&info_hash); assert_eq!(peers, vec![Arc::new(peer)]); } @@ -1168,7 +1148,7 @@ mod tests { tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - let peers = tracker.get_torrent_peers_for_peer(&info_hash, &peer).await; + let peers = tracker.get_torrent_peers_for_peer(&info_hash, &peer); assert_eq!(peers, vec![]); } @@ -1181,7 +1161,7 @@ mod tests { .update_torrent_with_peer_and_get_stats(&sample_info_hash(), &leecher()) .await; - let torrent_metrics = tracker.get_torrents_metrics().await; + let torrent_metrics = tracker.get_torrents_metrics(); assert_eq!( torrent_metrics, @@ -1194,6 +1174,34 @@ mod tests { ); } + #[tokio::test] + async fn it_should_get_many_the_torrent_metrics() { + let tracker = public_tracker(); + + let start_time = std::time::Instant::now(); + for i in 0..1_000_000 { + tracker + .update_torrent_with_peer_and_get_stats(&gen_seeded_infohash(&i), &leecher()) + .await; + } + let result_a = start_time.elapsed(); + + let start_time = std::time::Instant::now(); + let torrent_metrics = tracker.get_torrents_metrics(); + let result_b = start_time.elapsed(); + + assert_eq!( + (torrent_metrics), + (TorrentsMetrics { + seeders: 0, + completed: 0, + leechers: 1_000_000, + 
torrents: 1_000_000, + }), + "{result_a:?} {result_b:?}" + ); + } + mod for_all_config_modes { mod handling_an_announce_request { @@ -1376,9 +1384,10 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; + use torrust_tracker_primitives::info_hash::InfoHash; + use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; use crate::core::{ScrapeData, SwarmMetadata}; - use crate::shared::bit_torrent::info_hash::InfoHash; #[tokio::test] async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent( @@ -1533,12 +1542,13 @@ mod tests { mod handling_an_scrape_request { + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use crate::core::tests::the_tracker::{ complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, }; - use crate::core::torrent::SwarmMetadata; use crate::core::ScrapeData; - use crate::shared::bit_torrent::info_hash::InfoHash; #[test] fn it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() { @@ -1677,11 +1687,12 @@ mod tests { } mod handling_torrent_persistence { - use aquatic_udp_protocol::AnnounceEvent; + + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_torrent_repository::entry::EntrySync; + use torrust_tracker_torrent_repository::repository::Repository; use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; - use crate::core::torrent::entry::ReadInfo; - use crate::core::torrent::repository::Repository; #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { @@ -1700,15 +1711,11 @@ mod tests { assert_eq!(swarm_stats.downloaded, 1); // Remove the newly updated torrent from memory - tracker.torrents.remove(&info_hash).await; + tracker.torrents.remove(&info_hash); tracker.load_torrents_from_database().await.unwrap(); - let 
torrent_entry = tracker - .torrents - .get(&info_hash) - .await - .expect("it should be able to get entry"); + let torrent_entry = tracker.torrents.get(&info_hash).expect("it should be able to get entry"); // It persists the number of completed peers. assert_eq!(torrent_entry.get_stats().downloaded, 1); diff --git a/src/core/peer_tests.rs b/src/core/peer_tests.rs new file mode 100644 index 000000000..9e5b4be01 --- /dev/null +++ b/src/core/peer_tests.rs @@ -0,0 +1,43 @@ +#![cfg(test)] + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::{peer, NumberOfBytes}; + +use crate::shared::clock::{self, Time}; + +#[test] +fn it_should_be_serializable() { + let torrent_peer = peer::Peer { + peer_id: peer::Id(*b"-qB0000-000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: clock::Current::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + + let raw_json = serde_json::to_string(&torrent_peer).unwrap(); + + let expected_raw_json = r#" + { + "peer_id": { + "id": "0x2d7142303030302d303030303030303030303030", + "client": "qBittorrent" + }, + "peer_addr":"126.0.0.1:8080", + "updated":0, + "uploaded":0, + "downloaded":0, + "left":0, + "event":"Started" + } + "#; + + assert_eq!( + serde_json::from_str::(&raw_json).unwrap(), + serde_json::from_str::(expected_raw_json).unwrap() + ); +} diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 3578c53aa..ee1c0c4fa 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -40,8 +40,10 @@ pub mod setup; use std::sync::Arc; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use crate::core::statistics::Metrics; -use crate::core::{TorrentsMetrics, Tracker}; +use crate::core::Tracker; /// All the metrics collected by the tracker. 
#[derive(Debug, PartialEq)] @@ -59,7 +61,7 @@ pub struct TrackerMetrics { /// It returns all the [`TrackerMetrics`] pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { - let torrents_metrics = tracker.get_torrents_metrics().await; + let torrents_metrics = tracker.get_torrents_metrics(); let stats = tracker.get_stats().await; TrackerMetrics { @@ -86,6 +88,7 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; use crate::core; @@ -105,7 +108,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: core::TorrentsMetrics::default(), + torrents_metrics: TorrentsMetrics::default(), protocol_metrics: core::statistics::Metrics::default(), } ); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 78dab12c4..ce44af3a8 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -6,13 +6,13 @@ //! - [`get_torrents`]: it returns data about some torrent in bulk excluding the peer list. use std::sync::Arc; -use serde::Deserialize; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::peer; +use torrust_tracker_torrent_repository::entry::EntrySync; +use torrust_tracker_torrent_repository::repository::Repository; -use crate::core::peer::Peer; -use crate::core::torrent::entry::{ReadInfo, ReadPeers}; -use crate::core::torrent::repository::Repository; use crate::core::Tracker; -use crate::shared::bit_torrent::info_hash::InfoHash; /// It contains all the information the tracker has about a torrent #[derive(Debug, PartialEq)] @@ -26,7 +26,7 @@ pub struct Info { /// The total number of leechers for this torrent. 
Peers that actively downloading this torrent pub leechers: u64, /// The swarm: the list of peers that are actively trying to download or serving this torrent - pub peers: Option>, + pub peers: Option>, } /// It contains only part of the information the tracker has about a torrent @@ -44,58 +44,9 @@ pub struct BasicInfo { pub leechers: u64, } -/// A struct to keep information about the page when results are being paginated -#[derive(Deserialize)] -pub struct Pagination { - /// The page number, starting at 0 - pub offset: u32, - /// Page size. The number of results per page - pub limit: u32, -} - -impl Pagination { - #[must_use] - pub fn new(offset: u32, limit: u32) -> Self { - Self { offset, limit } - } - - #[must_use] - pub fn new_with_options(offset_option: Option, limit_option: Option) -> Self { - let offset = match offset_option { - Some(offset) => offset, - None => Pagination::default_offset(), - }; - let limit = match limit_option { - Some(offset) => offset, - None => Pagination::default_limit(), - }; - - Self { offset, limit } - } - - #[must_use] - pub fn default_offset() -> u32 { - 0 - } - - #[must_use] - pub fn default_limit() -> u32 { - 4000 - } -} - -impl Default for Pagination { - fn default() -> Self { - Self { - offset: Self::default_offset(), - limit: Self::default_limit(), - } - } -} - /// It returns all the information the tracker has about one torrent in a [Info] struct. 
pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { - let torrent_entry_option = tracker.torrents.get(info_hash).await; + let torrent_entry_option = tracker.torrents.get(info_hash); let torrent_entry = torrent_entry_option?; @@ -118,7 +69,7 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op pub async fn get_torrents_page(tracker: Arc, pagination: Option<&Pagination>) -> Vec { let mut basic_infos: Vec = vec![]; - for (info_hash, torrent_entry) in tracker.torrents.get_paginated(pagination).await { + for (info_hash, torrent_entry) in tracker.torrents.get_paginated(pagination) { let stats = torrent_entry.get_stats(); basic_infos.push(BasicInfo { @@ -137,7 +88,7 @@ pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Ve let mut basic_infos: Vec = vec![]; for info_hash in info_hashes { - if let Some(stats) = tracker.torrents.get(info_hash).await.map(|t| t.get_stats()) { + if let Some(stats) = tracker.torrents.get(info_hash).map(|t| t.get_stats()) { basic_infos.push(BasicInfo { info_hash: *info_hash, seeders: u64::from(stats.complete), @@ -154,10 +105,8 @@ pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Ve mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - - use crate::core::peer; - use crate::shared::clock::DurationSinceUnixEpoch; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; fn sample_peer() -> peer::Peer { peer::Peer { @@ -177,12 +126,12 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::core::services::torrent::tests::sample_peer; use crate::core::services::torrent::{get_torrent_info, Info}; use crate::core::services::tracker_factory; - use 
crate::shared::bit_torrent::info_hash::InfoHash; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -232,12 +181,12 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::core::services::torrent::tests::sample_peer; use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination}; use crate::core::services::tracker_factory; - use crate::shared::bit_torrent::info_hash::InfoHash; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() diff --git a/src/core/torrent/entry.rs b/src/core/torrent/entry.rs deleted file mode 100644 index 815abd4fb..000000000 --- a/src/core/torrent/entry.rs +++ /dev/null @@ -1,287 +0,0 @@ -use std::fmt::Debug; -use std::sync::Arc; -use std::time::Duration; - -use aquatic_udp_protocol::AnnounceEvent; -use serde::{Deserialize, Serialize}; - -use super::SwarmMetadata; -use crate::core::peer::{self, ReadInfo as _}; -use crate::core::TrackerPolicy; -use crate::shared::clock::{Current, TimeNow}; - -/// A data structure containing all the information about a torrent in the tracker. -/// -/// This is the tracker entry for a given torrent and contains the swarm data, -/// that's the list of all the peers trying to download the same torrent. -/// The tracker keeps one entry like this for every torrent. 
-#[derive(Serialize, Deserialize, Clone, Debug, Default)] -pub struct Entry { - /// The swarm: a network of peers that are all trying to download the torrent associated to this entry - #[serde(skip)] - pub(crate) peers: std::collections::BTreeMap>, - /// The number of peers that have ever completed downloading the torrent associated to this entry - pub(crate) completed: u32, -} -pub type Single = Entry; -pub type MutexStd = Arc>; -pub type MutexTokio = Arc>; - -pub trait ReadInfo { - /// It returns the swarm metadata (statistics) as a struct: - /// - /// `(seeders, completed, leechers)` - fn get_stats(&self) -> SwarmMetadata; - - /// Returns True if Still a Valid Entry according to the Tracker Policy - fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; - - /// Returns True if the Peers is Empty - fn peers_is_empty(&self) -> bool; -} - -/// Same as [`ReadInfo`], but async. -pub trait ReadInfoAsync { - /// It returns the swarm metadata (statistics) as a struct: - /// - /// `(seeders, completed, leechers)` - fn get_stats(&self) -> impl std::future::Future + Send; - - /// Returns True if Still a Valid Entry according to the Tracker Policy - fn is_not_zombie(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; - - /// Returns True if the Peers is Empty - fn peers_is_empty(&self) -> impl std::future::Future + Send; -} - -pub trait ReadPeers { - /// Get all swarm peers, optionally limiting the result. - fn get_peers(&self, limit: Option) -> Vec>; - - /// It returns the list of peers for a given peer client, optionally limiting the - /// result. - /// - /// It filters out the input peer, typically because we want to return this - /// list of peers to that client peer. - fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; -} - -/// Same as [`ReadPeers`], but async. 
-pub trait ReadPeersAsync { - fn get_peers(&self, limit: Option) -> impl std::future::Future>> + Send; - - fn get_peers_for_peer( - &self, - client: &peer::Peer, - limit: Option, - ) -> impl std::future::Future>> + Send; -} - -pub trait Update { - /// It updates a peer and returns true if the number of complete downloads have increased. - /// - /// The number of peers that have complete downloading is synchronously updated when peers are updated. - /// That's the total torrent downloads counter. - fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool; - - // It preforms a combined operation of `insert_or_update_peer` and `get_stats`. - fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata); - - /// It removes peer from the swarm that have not been updated for more than `max_peer_timeout` seconds - fn remove_inactive_peers(&mut self, max_peer_timeout: u32); -} - -/// Same as [`Update`], except not `mut`. -pub trait UpdateSync { - fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool; - fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata); - fn remove_inactive_peers(&self, max_peer_timeout: u32); -} - -/// Same as [`Update`], except not `mut` and async. 
-pub trait UpdateAsync { - fn insert_or_update_peer(&self, peer: &peer::Peer) -> impl std::future::Future + Send; - - fn insert_or_update_peer_and_get_stats( - &self, - peer: &peer::Peer, - ) -> impl std::future::Future + std::marker::Send; - - fn remove_inactive_peers(&self, max_peer_timeout: u32) -> impl std::future::Future + Send; -} - -impl ReadInfo for Single { - #[allow(clippy::cast_possible_truncation)] - fn get_stats(&self) -> SwarmMetadata { - let complete: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; - let incomplete: u32 = self.peers.len() as u32 - complete; - - SwarmMetadata { - downloaded: self.completed, - complete, - incomplete, - } - } - - fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { - if policy.persistent_torrent_completed_stat && self.completed > 0 { - return true; - } - - if policy.remove_peerless_torrents && self.peers.is_empty() { - return false; - } - - true - } - - fn peers_is_empty(&self) -> bool { - self.peers.is_empty() - } -} - -impl ReadInfo for MutexStd { - fn get_stats(&self) -> SwarmMetadata { - self.lock().expect("it should get a lock").get_stats() - } - - fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { - self.lock().expect("it should get a lock").is_not_zombie(policy) - } - - fn peers_is_empty(&self) -> bool { - self.lock().expect("it should get a lock").peers_is_empty() - } -} - -impl ReadInfoAsync for MutexTokio { - async fn get_stats(&self) -> SwarmMetadata { - self.lock().await.get_stats() - } - - async fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { - self.lock().await.is_not_zombie(policy) - } - - async fn peers_is_empty(&self) -> bool { - self.lock().await.peers_is_empty() - } -} - -impl ReadPeers for Single { - fn get_peers(&self, limit: Option) -> Vec> { - match limit { - Some(limit) => self.peers.values().take(limit).cloned().collect(), - None => self.peers.values().cloned().collect(), - } - } - - fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) 
-> Vec> { - match limit { - Some(limit) => self - .peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer.get_address() != client.get_address()) - // Limit the number of peers on the result - .take(limit) - .cloned() - .collect(), - None => self - .peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer.get_address() != client.get_address()) - .cloned() - .collect(), - } - } -} - -impl ReadPeers for MutexStd { - fn get_peers(&self, limit: Option) -> Vec> { - self.lock().expect("it should get lock").get_peers(limit) - } - - fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { - self.lock().expect("it should get lock").get_peers_for_peer(client, limit) - } -} - -impl ReadPeersAsync for MutexTokio { - async fn get_peers(&self, limit: Option) -> Vec> { - self.lock().await.get_peers(limit) - } - - async fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { - self.lock().await.get_peers_for_peer(client, limit) - } -} - -impl Update for Single { - fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { - let mut did_torrent_stats_change: bool = false; - - match peer.get_event() { - AnnounceEvent::Stopped => { - drop(self.peers.remove(&peer.get_id())); - } - AnnounceEvent::Completed => { - let peer_old = self.peers.insert(peer.get_id(), Arc::new(*peer)); - // Don't count if peer was not previously known and not already completed. 
- if peer_old.is_some_and(|p| p.event != AnnounceEvent::Completed) { - self.completed += 1; - did_torrent_stats_change = true; - } - } - _ => { - drop(self.peers.insert(peer.get_id(), Arc::new(*peer))); - } - } - - did_torrent_stats_change - } - - fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let changed = self.insert_or_update_peer(peer); - let stats = self.get_stats(); - (changed, stats) - } - - fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); - self.peers.retain(|_, peer| peer.get_updated() > current_cutoff); - } -} - -impl UpdateSync for MutexStd { - fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { - self.lock().expect("it should lock the entry").insert_or_update_peer(peer) - } - - fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata) { - self.lock() - .expect("it should lock the entry") - .insert_or_update_peer_and_get_stats(peer) - } - - fn remove_inactive_peers(&self, max_peer_timeout: u32) { - self.lock() - .expect("it should lock the entry") - .remove_inactive_peers(max_peer_timeout); - } -} - -impl UpdateAsync for MutexTokio { - async fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { - self.lock().await.insert_or_update_peer(peer) - } - - async fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata) { - self.lock().await.insert_or_update_peer_and_get_stats(peer) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - self.lock().await.remove_inactive_peers(max_peer_timeout); - } -} diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index bfe068337..b5a2b4c07 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -2,8 +2,8 @@ //! //! There are to main data structures: //! -//! 
- A torrent [`Entry`]: it contains all the information stored by the tracker for one torrent. -//! - The [`SwarmMetadata`]: it contains aggregate information that can me derived from the torrent entries. +//! - A torrent [`Entry`](torrust_tracker_torrent_repository::entry::Entry): it contains all the information stored by the tracker for one torrent. +//! - The [`SwarmMetadata`](torrust_tracker_primitives::swarm_metadata::SwarmMetadata): it contains aggregate information that can me derived from the torrent entries. //! //! A "swarm" is a network of peers that are trying to download the same torrent. //! @@ -25,42 +25,11 @@ //! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. //! Peer that don not have a full copy of the torrent data are called "leechers". //! -//! > **NOTICE**: that both [`SwarmMetadata`] and [`SwarmMetadata`] contain the same information. [`SwarmMetadata`] is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). -pub mod entry; -pub mod repository; -use derive_more::Constructor; +use torrust_tracker_torrent_repository::TorrentsRwLockStdMutexStd; pub type Torrents = TorrentsRwLockStdMutexStd; // Currently Used -pub type TorrentsRwLockStd = repository::RwLockStd; -pub type TorrentsRwLockStdMutexStd = repository::RwLockStd; -pub type TorrentsRwLockStdMutexTokio = repository::RwLockStd; -pub type TorrentsRwLockTokio = repository::RwLockTokio; -pub type TorrentsRwLockTokioMutexStd = repository::RwLockTokio; -pub type TorrentsRwLockTokioMutexTokio = repository::RwLockTokio; - -/// Swarm statistics for one torrent. -/// Swarm metadata dictionary in the scrape response. 
-/// -/// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) -#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] -pub struct SwarmMetadata { - /// (i.e `completed`): The number of peers that have ever completed downloading - pub downloaded: u32, // - /// (i.e `seeders`): The number of active peers that have completed downloading (seeders) - pub complete: u32, //seeders - /// (i.e `leechers`): The number of active peers that have not completed downloading (leechers) - pub incomplete: u32, -} - -impl SwarmMetadata { - #[must_use] - pub fn zeroed() -> Self { - Self::default() - } -} - #[cfg(test)] mod tests { @@ -71,11 +40,13 @@ mod tests { use std::sync::Arc; use std::time::Duration; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; + use torrust_tracker_torrent_repository::entry::Entry; + use torrust_tracker_torrent_repository::EntrySingle; - use crate::core::torrent::entry::{self, ReadInfo, ReadPeers, Update}; - use crate::core::{peer, TORRENT_PEERS_LIMIT}; - use crate::shared::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; + use crate::core::TORRENT_PEERS_LIMIT; + use crate::shared::clock::{self, StoppedTime, Time, TimeNow}; struct TorrentPeerBuilder { peer: peer::Peer, @@ -86,7 +57,7 @@ mod tests { let default_peer = peer::Peer { peer_id: peer::Id([0u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: Current::now(), + updated: clock::Current::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), @@ -145,14 +116,14 @@ mod tests { #[test] fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = entry::Single::default(); + let torrent_entry = EntrySingle::default(); assert_eq!(torrent_entry.get_peers(None).len(), 
0); } #[test] fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -163,7 +134,7 @@ mod tests { #[test] fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -173,7 +144,7 @@ mod tests { #[test] fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -185,7 +156,7 @@ mod tests { #[test] fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -197,7 +168,7 @@ mod tests { #[test] fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -211,7 +182,7 @@ mod tests { #[test] fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( ) { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let 
torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Add a peer that did not exist before in the entry @@ -223,7 +194,7 @@ mod tests { #[test] fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer @@ -236,7 +207,7 @@ mod tests { #[test] fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); @@ -270,7 +241,7 @@ mod tests { #[test] fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { @@ -287,7 +258,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let torrent_seeder = a_torrent_seeder(); torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder @@ -297,7 +268,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let torrent_leecher = a_torrent_leecher(); torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher @@ -308,7 +279,7 @@ mod tests { #[test] fn 
torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( ) { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -323,7 +294,7 @@ mod tests { #[test] fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Announce "Completed" torrent download event. @@ -337,12 +308,12 @@ mod tests { #[test] fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let timeout = 120u32; - let now = Working::now(); - Stopped::local_set(&now); + let now = clock::Working::now(); + clock::Stopped::local_set(&now); let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); let inactive_peer = TorrentPeerBuilder::default() @@ -350,9 +321,10 @@ mod tests { .into(); torrent_entry.insert_or_update_peer(&inactive_peer); // Add the peer - torrent_entry.remove_inactive_peers(timeout); + let current_cutoff = clock::Current::sub(&Duration::from_secs(u64::from(timeout))).unwrap_or_default(); + torrent_entry.remove_inactive_peers(current_cutoff); - assert_eq!(torrent_entry.peers.len(), 0); + assert_eq!(torrent_entry.get_peers_len(), 0); } } } diff --git a/src/core/torrent/repository/rw_lock_std.rs b/src/core/torrent/repository/rw_lock_std.rs deleted file mode 100644 index 9b3915bcb..000000000 --- a/src/core/torrent/repository/rw_lock_std.rs +++ /dev/null @@ -1,122 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use 
futures::future::join_all; - -use super::{Repository, UpdateTorrentSync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::torrent::entry::{self, ReadInfo, Update}; -use crate::core::torrent::{SwarmMetadata, TorrentsRwLockStd}; -use crate::core::{peer, TorrentsMetrics}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -impl TorrentsRwLockStd { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("it should get the read lock") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("it should get the write lock") - } -} - -impl UpdateTorrentSync for TorrentsRwLockStd { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let mut db = self.get_torrents_mut(); - - let entry = db.entry(*info_hash).or_insert(entry::Single::default()); - - entry.insert_or_update_peer_and_get_stats(peer) - } -} - -impl UpdateTorrentSync for Arc { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer) - } -} - -impl Repository for TorrentsRwLockStd { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let metrics: Arc> = Arc::default(); - - let mut handles = Vec::>::default(); - - for e in self.get_torrents().values() { - let entry = e.clone(); - let metrics = metrics.clone(); - handles.push(tokio::task::spawn(async move { - let stats = entry.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += 
u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - })); - } - - join_all(handles).await; - - *metrics.lock_owned().await - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::Single)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut(); - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = entry::Single { - peers: BTreeMap::default(), - completed: *completed, - }; - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let mut db = self.get_torrents_mut(); - - drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); - } - - async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { - let mut db = self.get_torrents_mut(); - - db.retain(|_, e| e.is_not_zombie(policy)); - } -} diff --git a/src/core/torrent/repository/rw_lock_std_mutex_std.rs b/src/core/torrent/repository/rw_lock_std_mutex_std.rs deleted file mode 100644 index 5a9a38f77..000000000 --- a/src/core/torrent/repository/rw_lock_std_mutex_std.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use futures::future::join_all; - -use super::{Repository, UpdateTorrentSync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use 
crate::core::torrent::entry::{ReadInfo, Update, UpdateSync}; -use crate::core::torrent::{entry, SwarmMetadata, TorrentsRwLockStdMutexStd}; -use crate::core::{peer, TorrentsMetrics}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -impl TorrentsRwLockStdMutexStd { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl UpdateTorrentSync for TorrentsRwLockStdMutexStd { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let maybe_entry = self.get_torrents().get(info_hash).cloned(); - - let entry = if let Some(entry) = maybe_entry { - entry - } else { - let mut db = self.get_torrents_mut(); - let entry = db.entry(*info_hash).or_insert(Arc::default()); - entry.clone() - }; - - entry.insert_or_update_peer_and_get_stats(peer) - } -} - -impl UpdateTorrentSync for Arc { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer) - } -} - -impl Repository for TorrentsRwLockStdMutexStd { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let metrics: Arc> = Arc::default(); - - // todo:: replace with a ring buffer - let mut handles = Vec::>::default(); - - for e in self.get_torrents().values() { - let entry = e.clone(); - let metrics = metrics.clone(); - handles.push(tokio::task::spawn(async move { - let stats = entry.lock().expect("it should get the lock").get_stats(); - 
metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - })); - } - - join_all(handles).await; - - *metrics.lock_owned().await - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexStd)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut(); - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexStd::new( - entry::Single { - peers: BTreeMap::default(), - completed: *completed, - } - .into(), - ); - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - // todo:: replace with a ring buffer - let mut handles = Vec::>::default(); - - for e in self.get_torrents().values() { - let entry = e.clone(); - handles.push(tokio::task::spawn(async move { - entry - .lock() - .expect("it should get lock for entry") - .remove_inactive_peers(max_peer_timeout); - })); - } - - join_all(handles).await; - } - - async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { - let mut db = self.get_torrents_mut(); - - db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); - } -} diff --git a/src/core/torrent/repository/rw_lock_std_mutex_tokio.rs 
b/src/core/torrent/repository/rw_lock_std_mutex_tokio.rs deleted file mode 100644 index 1feb41e3e..000000000 --- a/src/core/torrent/repository/rw_lock_std_mutex_tokio.rs +++ /dev/null @@ -1,141 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use futures::future::join_all; - -use super::{Repository, UpdateTorrentAsync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::torrent::entry::{ReadInfo, Update, UpdateAsync}; -use crate::core::torrent::{entry, SwarmMetadata, TorrentsRwLockStdMutexTokio}; -use crate::core::{peer, TorrentsMetrics}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -impl TorrentsRwLockStdMutexTokio { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl UpdateTorrentAsync for TorrentsRwLockStdMutexTokio { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let maybe_entry = self.get_torrents().get(info_hash).cloned(); - - let entry = if let Some(entry) = maybe_entry { - entry - } else { - let mut db = self.get_torrents_mut(); - let entry = db.entry(*info_hash).or_insert(Arc::default()); - entry.clone() - }; - - entry.insert_or_update_peer_and_get_stats(peer).await - } -} - -impl UpdateTorrentAsync for Arc { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await - } -} - -impl Repository for TorrentsRwLockStdMutexTokio { - async fn get(&self, key: &InfoHash) -> 
Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let metrics: Arc> = Arc::default(); - - // todo:: replace with a ring buffer - let mut handles = Vec::>::default(); - - for e in self.get_torrents().values() { - let entry = e.clone(); - let metrics = metrics.clone(); - handles.push(tokio::task::spawn(async move { - let stats = entry.lock().await.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - })); - } - - join_all(handles).await; - - *metrics.lock_owned().await - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut db = self.get_torrents_mut(); - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if db.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexTokio::new( - entry::Single { - peers: BTreeMap::default(), - completed: *completed, - } - .into(), - ); - - db.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - // todo:: replace with a ring buffer - - let mut handles = Vec::>::default(); - - for e in self.get_torrents().values() { - let entry = e.clone(); - handles.push(tokio::task::spawn(async move { - 
entry.lock().await.remove_inactive_peers(max_peer_timeout); - })); - } - - join_all(handles).await; - } - - async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { - let mut db = self.get_torrents_mut(); - - db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); - } -} diff --git a/src/core/torrent/repository/rw_lock_tokio.rs b/src/core/torrent/repository/rw_lock_tokio.rs deleted file mode 100644 index 3d633a837..000000000 --- a/src/core/torrent/repository/rw_lock_tokio.rs +++ /dev/null @@ -1,124 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use futures::future::join_all; - -use super::{Repository, UpdateTorrentAsync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::torrent::entry::{self, ReadInfo, Update}; -use crate::core::torrent::{SwarmMetadata, TorrentsRwLockTokio}; -use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -impl TorrentsRwLockTokio { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl UpdateTorrentAsync for TorrentsRwLockTokio { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let mut db = self.get_torrents_mut().await; - - let entry = db.entry(*info_hash).or_insert(entry::Single::default()); - - entry.insert_or_update_peer_and_get_stats(peer) - } -} - -impl UpdateTorrentAsync for Arc { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - 
self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await - } -} - -impl Repository for TorrentsRwLockTokio { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::Single)> { - let db = self.get_torrents().await; - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let metrics: Arc> = Arc::default(); - - let mut handles = Vec::>::default(); - - for e in self.get_torrents().await.values() { - let entry = e.clone(); - let metrics = metrics.clone(); - handles.push(tokio::task::spawn(async move { - let stats = entry.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - })); - } - - join_all(handles).await; - - *metrics.lock_owned().await - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = entry::Single { - peers: BTreeMap::default(), - completed: *completed, - }; - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let mut db = self.get_torrents_mut().await; - - drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); - } - 
- async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - db.retain(|_, e| e.is_not_zombie(policy)); - } -} diff --git a/src/core/torrent/repository/rw_lock_tokio_mutex_std.rs b/src/core/torrent/repository/rw_lock_tokio_mutex_std.rs deleted file mode 100644 index 3888c40b0..000000000 --- a/src/core/torrent/repository/rw_lock_tokio_mutex_std.rs +++ /dev/null @@ -1,146 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use futures::future::join_all; - -use super::{Repository, UpdateTorrentAsync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::torrent::entry::{ReadInfo, Update, UpdateSync}; -use crate::core::torrent::{entry, SwarmMetadata, TorrentsRwLockTokioMutexStd}; -use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -impl TorrentsRwLockTokioMutexStd { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl UpdateTorrentAsync for TorrentsRwLockTokioMutexStd { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); - - let entry = if let Some(entry) = maybe_entry { - entry - } else { - let mut db = self.get_torrents_mut().await; - let entry = db.entry(*info_hash).or_insert(Arc::default()); - entry.clone() - }; - - entry.insert_or_update_peer_and_get_stats(peer) - } -} - -impl UpdateTorrentAsync for Arc { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) 
-> (bool, SwarmMetadata) { - self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await - } -} - -impl Repository for TorrentsRwLockTokioMutexStd { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexStd)> { - let db = self.get_torrents().await; - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let metrics: Arc> = Arc::default(); - - // todo:: replace with a ring buffer - - let mut handles = Vec::>::default(); - - for e in self.get_torrents().await.values() { - let entry = e.clone(); - let metrics = metrics.clone(); - handles.push(tokio::task::spawn(async move { - let stats = entry.lock().expect("it should get a lock").get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - })); - } - - join_all(handles).await; - - *metrics.lock_owned().await - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexStd::new( - entry::Single { - peers: BTreeMap::default(), - completed: *completed, - } - .into(), - ); - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn 
remove_inactive_peers(&self, max_peer_timeout: u32) { - // todo:: replace with a ring buffer - let mut handles = Vec::>::default(); - - for e in self.get_torrents().await.values() { - let entry = e.clone(); - handles.push(tokio::task::spawn(async move { - entry - .lock() - .expect("it should get lock for entry") - .remove_inactive_peers(max_peer_timeout); - })); - } - - join_all(handles).await; - } - - async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); - } -} diff --git a/src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs b/src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs deleted file mode 100644 index 49e08d90c..000000000 --- a/src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs +++ /dev/null @@ -1,144 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use futures::future::join_all; - -use super::{Repository, UpdateTorrentAsync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::torrent::entry::{self, ReadInfo, Update, UpdateAsync}; -use crate::core::torrent::{SwarmMetadata, TorrentsRwLockTokioMutexTokio}; -use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -impl TorrentsRwLockTokioMutexTokio { - async fn get_torrents<'a>( - &'a self, - ) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl UpdateTorrentAsync for TorrentsRwLockTokioMutexTokio { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - 
let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); - - let entry = if let Some(entry) = maybe_entry { - entry - } else { - let mut db = self.get_torrents_mut().await; - let entry = db.entry(*info_hash).or_insert(Arc::default()); - entry.clone() - }; - - entry.insert_or_update_peer_and_get_stats(peer).await - } -} - -impl UpdateTorrentAsync for Arc { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await - } -} - -impl Repository for TorrentsRwLockTokioMutexTokio { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { - let db = self.get_torrents().await; - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let metrics: Arc> = Arc::default(); - - // todo:: replace with a ring buffer - let mut handles = Vec::>::default(); - - for e in self.get_torrents().await.values() { - let entry = e.clone(); - let metrics = metrics.clone(); - handles.push(tokio::task::spawn(async move { - let stats = entry.lock().await.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - })); - } - - join_all(handles).await; - - *metrics.lock_owned().await - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut db = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { 
- // Skip if torrent entry already exists - if db.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexTokio::new( - entry::Single { - peers: BTreeMap::default(), - completed: *completed, - } - .into(), - ); - - db.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - // todo:: replace with a ring buffer - let mut handles = Vec::>::default(); - - for e in self.get_torrents().await.values() { - let entry = e.clone(); - handles.push(tokio::task::spawn(async move { - entry.lock().await.remove_inactive_peers(max_peer_timeout); - })); - } - - join_all(handles).await; - } - - async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); - } -} diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index b241c469c..48ac660cf 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -71,10 +71,11 @@ impl From for Stats { #[cfg(test)] mod tests { + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use super::Stats; use crate::core::services::statistics::TrackerMetrics; use crate::core::statistics::Metrics; - use crate::core::TorrentsMetrics; #[test] fn stats_resource_should_be_converted_from_tracker_metrics() { diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index 999580da7..15f70c8b6 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -10,13 +10,14 @@ use axum_extra::extract::Query; use log::debug; use serde::{de, Deserialize, Deserializer}; use thiserror::Error; +use torrust_tracker_primitives::info_hash::InfoHash; +use 
torrust_tracker_primitives::pagination::Pagination; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; -use crate::core::services::torrent::{get_torrent_info, get_torrents, get_torrents_page, Pagination}; +use crate::core::services::torrent::{get_torrent_info, get_torrents, get_torrents_page}; use crate::core::Tracker; use crate::servers::apis::v1::responses::invalid_info_hash_param_response; use crate::servers::apis::InfoHashParam; -use crate::shared::bit_torrent::info_hash::InfoHash; /// It handles the request to get the torrent data. /// diff --git a/src/servers/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs index 752694393..e7a0802c1 100644 --- a/src/servers/apis/v1/context/torrent/resources/peer.rs +++ b/src/servers/apis/v1/context/torrent/resources/peer.rs @@ -1,7 +1,7 @@ //! `Peer` and Peer `Id` API resources. +use derive_more::From; use serde::{Deserialize, Serialize}; - -use crate::core; +use torrust_tracker_primitives::peer; /// `Peer` API resource. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -22,7 +22,7 @@ pub struct Peer { /// The peer's left bytes (pending to download). pub left: i64, /// The peer's event: `started`, `stopped`, `completed`. - /// See [`AnnounceEventDef`](crate::shared::bit_torrent::common::AnnounceEventDef). + /// See [`AnnounceEvent`](torrust_tracker_primitives::announce_event::AnnounceEvent). 
pub event: String, } @@ -35,8 +35,8 @@ pub struct Id { pub client: Option, } -impl From for Id { - fn from(peer_id: core::peer::Id) -> Self { +impl From for Id { + fn from(peer_id: peer::Id) -> Self { Id { id: peer_id.to_hex_string(), client: peer_id.get_client_name(), @@ -44,18 +44,32 @@ impl From for Id { } } -impl From for Peer { - #[allow(deprecated)] - fn from(peer: core::peer::Peer) -> Self { +impl From for Peer { + fn from(value: peer::Peer) -> Self { + #[allow(deprecated)] Peer { - peer_id: Id::from(peer.peer_id), - peer_addr: peer.peer_addr.to_string(), - updated: peer.updated.as_millis(), - updated_milliseconds_ago: peer.updated.as_millis(), - uploaded: peer.uploaded.0, - downloaded: peer.downloaded.0, - left: peer.left.0, - event: format!("{:?}", peer.event), + peer_id: Id::from(value.peer_id), + peer_addr: value.peer_addr.to_string(), + updated: value.updated.as_millis(), + updated_milliseconds_ago: value.updated.as_millis(), + uploaded: value.uploaded.0, + downloaded: value.downloaded.0, + left: value.left.0, + event: format!("{:?}", value.event), + } + } +} + +#[derive(From, PartialEq, Default)] +pub struct Vector(pub Vec); + +impl FromIterator for Vector { + fn from_iter>(iter: T) -> Self { + let mut peers = Vector::default(); + + for i in iter { + peers.0.push(i.into()); } + peers } } diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index fc43fbb7a..2f1ace5c9 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -6,7 +6,6 @@ //! the JSON response. use serde::{Deserialize, Serialize}; -use super::peer; use crate::core::services::torrent::{BasicInfo, Info}; /// `Torrent` API resource. 
@@ -68,14 +67,16 @@ pub fn to_resource(basic_info_vec: &[BasicInfo]) -> Vec { impl From for Torrent { fn from(info: Info) -> Self { + let peers: Option = info.peers.map(|peers| peers.into_iter().collect()); + + let peers: Option> = peers.map(|peers| peers.0); + Self { info_hash: info.info_hash.to_string(), seeders: info.seeders, completed: info.completed, leechers: info.leechers, - peers: info - .peers - .map(|peers| peers.iter().map(|peer| peer::Peer::from(*peer)).collect()), + peers, } } } @@ -96,15 +97,14 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; use super::Torrent; - use crate::core::peer; use crate::core::services::torrent::{BasicInfo, Info}; use crate::servers::apis::v1::context::torrent::resources::peer::Peer; use crate::servers::apis::v1::context::torrent::resources::torrent::ListItem; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; fn sample_peer() -> peer::Peer { peer::Peer { diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index fc32f667b..c88f8cc1d 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -5,6 +5,7 @@ use std::sync::Arc; use axum::extract::{Path, State}; use axum::response::Response; +use torrust_tracker_primitives::info_hash::InfoHash; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, @@ -12,7 +13,6 @@ use super::responses::{ use crate::core::Tracker; use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; use 
crate::servers::apis::InfoHashParam; -use crate::shared::bit_torrent::info_hash::InfoHash; /// It handles the request to add a torrent to the whitelist. /// diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index 08a59ef90..6e8b5a40e 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -206,15 +206,15 @@ //! //! ### Scrape //! -//! The `scrape` request allows a peer to get [swarm metadata](crate::core::torrent::SwarmMetadata) +//! The `scrape` request allows a peer to get [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) //! for multiple torrents at the same time. //! -//! The response contains the [swarm metadata](crate::core::torrent::SwarmMetadata) +//! The response contains the [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) //! for that torrent: //! -//! - [complete](crate::core::torrent::SwarmMetadata::complete) -//! - [downloaded](crate::core::torrent::SwarmMetadata::downloaded) -//! - [incomplete](crate::core::torrent::SwarmMetadata::incomplete) +//! - [complete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::complete) +//! - [downloaded](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::downloaded) +//! - [incomplete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::incomplete) //! //! **Query parameters** //! @@ -266,7 +266,7 @@ //! Where the `files` key contains a dictionary of dictionaries. The first //! dictionary key is the `info_hash` of the torrent (`iiiiiiiiiiiiiiiiiiii` in //! the example). The second level dictionary contains the -//! [swarm metadata](crate::core::torrent::SwarmMetadata) for that torrent. +//! [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) for that torrent. //! //! If you save the response as a file and you open it with a program that //! 
can handle binary data you would see: diff --git a/src/servers/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs index 472b1e724..90f4b9a43 100644 --- a/src/servers/http/percent_encoding.rs +++ b/src/servers/http/percent_encoding.rs @@ -15,8 +15,8 @@ //! - //! - //! - -use crate::core::peer::{self, IdConversionError}; -use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; +use torrust_tracker_primitives::peer; /// Percent decodes a percent encoded infohash. Internally an /// [`InfoHash`] is a 20-byte array. @@ -27,8 +27,8 @@ use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; /// ```rust /// use std::str::FromStr; /// use torrust_tracker::servers::http::percent_encoding::percent_decode_info_hash; -/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::core::peer; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::peer; /// /// let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; /// @@ -44,12 +44,12 @@ use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; /// /// Will return `Err` if the decoded bytes do not represent a valid /// [`InfoHash`]. -pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { +pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); InfoHash::try_from(bytes) } -/// Percent decodes a percent encoded peer id. Internally a peer [`Id`](crate::core::peer::Id) +/// Percent decodes a percent encoded peer id. Internally a peer [`Id`](peer::Id) /// is a 20-byte array. 
/// /// For example, given the peer id `*b"-qB00000000000000000"`, @@ -58,8 +58,8 @@ pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result Result Result { +pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result { let bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); peer::Id::try_from(bytes) } @@ -80,9 +80,10 @@ pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result) -> Result) -> Result R /// /// It ignores the peer address in the announce request params. #[must_use] -fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { - Peer { +fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> peer::Peer { + peer::Peer { peer_id: announce_request.peer_id, peer_addr: SocketAddr::new(*peer_ip, announce_request.port), updated: Current::now(), uploaded: NumberOfBytes(announce_request.uploaded.unwrap_or(0)), downloaded: NumberOfBytes(announce_request.downloaded.unwrap_or(0)), left: NumberOfBytes(announce_request.left.unwrap_or(0)), - event: map_to_aquatic_event(&announce_request.event), + event: map_to_torrust_event(&announce_request.event), } } -fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { +#[must_use] +pub fn map_to_aquatic_event(event: &Option) -> aquatic_udp_protocol::AnnounceEvent { match event { Some(event) => match &event { Event::Started => aquatic_udp_protocol::AnnounceEvent::Started, @@ -153,17 +154,30 @@ fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { } } +#[must_use] +pub fn map_to_torrust_event(event: &Option) -> AnnounceEvent { + match event { + Some(event) => match &event { + Event::Started => AnnounceEvent::Started, + Event::Stopped => AnnounceEvent::Stopped, + Event::Completed => AnnounceEvent::Completed, + }, + None => AnnounceEvent::None, + } +} + #[cfg(test)] mod tests { + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; 
- use crate::core::{peer, Tracker}; + use crate::core::Tracker; use crate::servers::http::v1::requests::announce::Announce; use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; - use crate::shared::bit_torrent::info_hash::InfoHash; fn private_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_private()) diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 49b1aebc7..d6b39cc53 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -111,6 +111,7 @@ mod tests { use std::net::IpAddr; use std::str::FromStr; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; @@ -118,7 +119,6 @@ mod tests { use crate::servers::http::v1::requests::scrape::Scrape; use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; - use crate::shared::bit_torrent::info_hash::InfoHash; fn private_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_private()) diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index 08dd9da29..39a6c1846 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -7,12 +7,12 @@ use std::str::FromStr; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; +use torrust_tracker_primitives::peer; -use crate::core::peer::{self, IdConversionError}; use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::servers::http::v1::query::{ParseQueryError, Query}; use crate::servers::http::v1::responses; -use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; /// The number of bytes `downloaded`, 
`uploaded` or `left`. It's used in the /// `Announce` request for parameters that represent a number of bytes. @@ -33,8 +33,8 @@ const COMPACT: &str = "compact"; /// /// ```rust /// use torrust_tracker::servers::http::v1::requests::announce::{Announce, Compact, Event}; -/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::core::peer; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::peer; /// /// let request = Announce { /// // Mandatory params @@ -119,14 +119,14 @@ pub enum ParseAnnounceQueryError { InvalidInfoHashParam { param_name: String, param_value: String, - source: LocatedError<'static, ConversionError>, + source: LocatedError<'static, info_hash::ConversionError>, }, /// The `peer_id` is invalid. #[error("invalid param value {param_value} for {param_name} in {source}")] InvalidPeerIdParam { param_name: String, param_value: String, - source: LocatedError<'static, IdConversionError>, + source: LocatedError<'static, peer::IdConversionError>, }, } @@ -355,12 +355,13 @@ mod tests { mod announce_request { - use crate::core::peer; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::announce::{ Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, }; - use crate::shared::bit_torrent::info_hash::InfoHash; #[test] fn should_be_instantiated_from_the_url_query_with_only_the_mandatory_params() { diff --git a/src/servers/http/v1/requests/scrape.rs b/src/servers/http/v1/requests/scrape.rs index 7c52b9fc4..19f6e35a6 100644 --- a/src/servers/http/v1/requests/scrape.rs +++ b/src/servers/http/v1/requests/scrape.rs @@ -5,11 +5,11 @@ use std::panic::Location; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; use 
crate::servers::http::percent_encoding::percent_decode_info_hash; use crate::servers::http::v1::query::Query; use crate::servers::http::v1::responses; -use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; pub type NumberOfBytes = i64; @@ -34,7 +34,7 @@ pub enum ParseScrapeQueryError { InvalidInfoHashParam { param_name: String, param_value: String, - source: LocatedError<'static, ConversionError>, + source: LocatedError<'static, info_hash::ConversionError>, }, } @@ -86,9 +86,10 @@ mod tests { mod scrape_request { + use torrust_tracker_primitives::info_hash::InfoHash; + use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; - use crate::shared::bit_torrent::info_hash::InfoHash; #[test] fn should_be_instantiated_from_the_url_query_with_only_one_infohash() { diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index 619632ae4..134da919e 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -7,10 +7,10 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use axum::http::StatusCode; use derive_more::{AsRef, Constructor, From}; use torrust_tracker_contrib_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; +use torrust_tracker_primitives::peer; use super::Response; -use crate::core::peer::Peer; -use crate::core::{self, AnnounceData}; +use crate::core::AnnounceData; use crate::servers::http::v1::responses; /// An [`Announce`] response, that can be anything that is convertible from [`AnnounceData`]. @@ -150,21 +150,6 @@ impl Into> for Compact { } } -/// Marker Trait for Peer Vectors -pub trait PeerEncoding: From + PartialEq {} - -impl FromIterator for Vec

{ - fn from_iter>(iter: T) -> Self { - let mut peers: Vec

= vec![]; - - for peer in iter { - peers.push(peer.into()); - } - - peers - } -} - /// A [`NormalPeer`], for the [`Normal`] form. /// /// ```rust @@ -188,10 +173,10 @@ pub struct NormalPeer { pub port: u16, } -impl PeerEncoding for NormalPeer {} +impl peer::Encoding for NormalPeer {} -impl From for NormalPeer { - fn from(peer: core::peer::Peer) -> Self { +impl From for NormalPeer { + fn from(peer: peer::Peer) -> Self { NormalPeer { peer_id: peer.peer_id.to_bytes(), ip: peer.peer_addr.ip(), @@ -240,10 +225,10 @@ pub enum CompactPeer { V6(CompactPeerData), } -impl PeerEncoding for CompactPeer {} +impl peer::Encoding for CompactPeer {} -impl From for CompactPeer { - fn from(peer: core::peer::Peer) -> Self { +impl From for CompactPeer { + fn from(peer: peer::Peer) -> Self { match (peer.peer_addr.ip(), peer.peer_addr.port()) { (IpAddr::V4(ip), port) => Self::V4(CompactPeerData { ip, port }), (IpAddr::V6(ip), port) => Self::V6(CompactPeerData { ip, port }), @@ -316,10 +301,10 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::AnnouncePolicy; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::core::peer::fixture::PeerBuilder; - use crate::core::peer::Id; - use crate::core::torrent::SwarmMetadata; use crate::core::AnnounceData; use crate::servers::http::v1::responses::announce::{Announce, Compact, Normal, Response}; @@ -339,12 +324,12 @@ mod tests { let policy = AnnouncePolicy::new(111, 222); let peer_ipv4 = PeerBuilder::default() - .with_peer_id(&Id(*b"-qB00000000000000001")) + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 0x7070)) .build(); let peer_ipv6 = PeerBuilder::default() - .with_peer_id(&Id(*b"-qB00000000000000002")) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new( 
IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), 0x7070, diff --git a/src/servers/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs index e16827824..11f361028 100644 --- a/src/servers/http/v1/responses/scrape.rs +++ b/src/servers/http/v1/responses/scrape.rs @@ -13,8 +13,8 @@ use crate::core::ScrapeData; /// /// ```rust /// use torrust_tracker::servers::http::v1::responses::scrape::Bencoded; -/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::core::torrent::SwarmMetadata; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; /// use torrust_tracker::core::ScrapeData; /// /// let info_hash = InfoHash([0x69; 20]); @@ -92,10 +92,11 @@ impl IntoResponse for Bencoded { mod tests { mod scrape_response { - use crate::core::torrent::SwarmMetadata; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use crate::core::ScrapeData; use crate::servers::http::v1::responses::scrape::Bencoded; - use crate::shared::bit_torrent::info_hash::InfoHash; fn sample_scrape_data() -> ScrapeData { let info_hash = InfoHash([0x69; 20]); diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index b53697eed..b37081045 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -11,9 +11,10 @@ use std::net::IpAddr; use std::sync::Arc; -use crate::core::peer::Peer; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; + use crate::core::{statistics, AnnounceData, Tracker}; -use crate::shared::bit_torrent::info_hash::InfoHash; /// The HTTP tracker `announce` service. 
/// @@ -25,7 +26,7 @@ use crate::shared::bit_torrent::info_hash::InfoHash; /// > **NOTICE**: as the HTTP tracker does not requires a connection request /// like the UDP tracker, the number of TCP connections is incremented for /// each `announce` request. -pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) -> AnnounceData { +pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut peer::Peer) -> AnnounceData { let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip @@ -47,13 +48,13 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; - use crate::core::{peer, Tracker}; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; + use crate::core::Tracker; fn public_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_public()) @@ -94,11 +95,11 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::core::peer::Peer; - use crate::core::torrent::SwarmMetadata; use crate::core::{statistics, AnnounceData, Tracker}; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; @@ -150,7 +151,7 @@ mod tests { Tracker::new(&configuration, Some(stats_event_sender), 
statistics::Repo::new()).unwrap() } - fn peer_with_the_ipv4_loopback_ip() -> Peer { + fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { let loopback_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); let mut peer = sample_peer(); peer.peer_addr = SocketAddr::new(loopback_ip, 8080); diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 82ca15dc8..18b57f479 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -11,8 +11,9 @@ use std::net::IpAddr; use std::sync::Arc; +use torrust_tracker_primitives::info_hash::InfoHash; + use crate::core::{statistics, ScrapeData, Tracker}; -use crate::shared::bit_torrent::info_hash::InfoHash; /// The HTTP tracker `scrape` service. /// @@ -60,13 +61,13 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; - use crate::core::{peer, Tracker}; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; + use crate::core::Tracker; fn public_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_public()) @@ -99,9 +100,9 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; - use crate::core::torrent::SwarmMetadata; use crate::core::{statistics, ScrapeData, Tracker}; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index f42e11424..8f6e6d8b4 100644 --- a/src/servers/udp/handlers.rs +++ 
b/src/servers/udp/handlers.rs @@ -12,6 +12,7 @@ use aquatic_udp_protocol::{ use log::debug; use tokio::net::UdpSocket; use torrust_tracker_located_error::DynError; +use torrust_tracker_primitives::info_hash::InfoHash; use uuid::Uuid; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; @@ -22,7 +23,6 @@ use crate::servers::udp::logging::{log_bad_request, log_error_response, log_requ use crate::servers::udp::peer_builder; use crate::servers::udp::request::AnnounceWrapper; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; -use crate::shared::bit_torrent::info_hash::InfoHash; /// It handles the incoming UDP packets. /// @@ -318,12 +318,13 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::{peer, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; - use crate::core::{peer, Tracker}; + use crate::core::Tracker; use crate::shared::clock::{Current, Time}; fn tracker_configuration() -> Configuration { @@ -605,8 +606,9 @@ mod tests { Response, ResponsePeer, }; use mockall::predicate::eq; + use torrust_tracker_primitives::peer; - use crate::core::{self, peer, statistics}; + use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -635,7 +637,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); let expected_peer = TorrentPeerBuilder::default() .with_peer_id(peer::Id(peer_id.0)) @@ -696,7 
+698,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } @@ -773,8 +775,8 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use torrust_tracker_primitives::peer; - use crate::core::peer; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -801,7 +803,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); @@ -826,8 +828,9 @@ mod tests { Response, ResponsePeer, }; use mockall::predicate::eq; + use torrust_tracker_primitives::peer; - use crate::core::{self, peer, statistics}; + use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -857,7 +860,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); let expected_peer = TorrentPeerBuilder::default() .with_peer_id(peer::Id(peer_id.0)) @@ -921,7 +924,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); // When 
using IPv6 the tracker converts the remote client ip into a IPv4 address assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); @@ -1038,7 +1041,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); @@ -1063,9 +1066,10 @@ mod tests { InfoHash, NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; + use torrust_tracker_primitives::peer; use super::TorrentPeerBuilder; - use crate::core::{self, peer}; + use crate::core::{self}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; diff --git a/src/servers/udp/logging.rs b/src/servers/udp/logging.rs index a32afc6a3..9bbb48f6a 100644 --- a/src/servers/udp/logging.rs +++ b/src/servers/udp/logging.rs @@ -4,9 +4,9 @@ use std::net::SocketAddr; use std::time::Duration; use aquatic_udp_protocol::{Request, Response, TransactionId}; +use torrust_tracker_primitives::info_hash::InfoHash; use super::handlers::RequestId; -use crate::shared::bit_torrent::info_hash::InfoHash; pub fn log_request(request: &Request, request_id: &RequestId, server_socket_addr: &SocketAddr) { let action = map_action_name(request); diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 8ef562086..fa4e8e926 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -62,7 +62,7 @@ //! ``` //! //! For the `Announce` request there is a wrapper struct [`AnnounceWrapper`](crate::servers::udp::request::AnnounceWrapper). -//! It was added to add an extra field with the internal [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) struct. +//! 
It was added to add an extra field with the internal [`InfoHash`](torrust_tracker_primitives::info_hash::InfoHash) struct. //! //! ### Connect //! @@ -345,7 +345,7 @@ //! packet. //! //! We are using a wrapper struct for the aquatic [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest) -//! struct, because we have our internal [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) +//! struct, because we have our internal [`InfoHash`](torrust_tracker_primitives::info_hash::InfoHash) //! struct. //! //! ```text @@ -467,15 +467,15 @@ //! //! ### Scrape //! -//! The `scrape` request allows a peer to get [swarm metadata](crate::core::torrent::SwarmMetadata) +//! The `scrape` request allows a peer to get [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) //! for multiple torrents at the same time. //! -//! The response contains the [swarm metadata](crate::core::torrent::SwarmMetadata) +//! The response contains the [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) //! for that torrent: //! -//! - [complete](crate::core::torrent::SwarmMetadata::complete) -//! - [downloaded](crate::core::torrent::SwarmMetadata::downloaded) -//! - [incomplete](crate::core::torrent::SwarmMetadata::incomplete) +//! - [complete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::complete) +//! - [downloaded](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::downloaded) +//! - [incomplete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::incomplete) //! //! > **NOTICE**: up to about 74 torrents can be scraped at once. A full scrape //! can't be done with this protocol. This is a limitation of the UDP protocol. diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index 5168e2578..8c8fa10a5 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -1,11 +1,13 @@ //! Logic to extract the peer info from the announce request. 
use std::net::{IpAddr, SocketAddr}; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::{peer, NumberOfBytes}; + use super::request::AnnounceWrapper; -use crate::core::peer::{Id, Peer}; use crate::shared::clock::{Current, Time}; -/// Extracts the [`Peer`] info from the +/// Extracts the [`peer::Peer`] info from the /// announce request. /// /// # Arguments @@ -14,14 +16,14 @@ use crate::shared::clock::{Current, Time}; /// * `peer_ip` - The real IP address of the peer, not the one in the announce /// request. #[must_use] -pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> Peer { - Peer { - peer_id: Id(announce_wrapper.announce_request.peer_id.0), +pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> peer::Peer { + peer::Peer { + peer_id: peer::Id(announce_wrapper.announce_request.peer_id.0), peer_addr: SocketAddr::new(*peer_ip, announce_wrapper.announce_request.port.0), updated: Current::now(), - uploaded: announce_wrapper.announce_request.bytes_uploaded, - downloaded: announce_wrapper.announce_request.bytes_downloaded, - left: announce_wrapper.announce_request.bytes_left, - event: announce_wrapper.announce_request.event, + uploaded: NumberOfBytes(announce_wrapper.announce_request.bytes_uploaded.0), + downloaded: NumberOfBytes(announce_wrapper.announce_request.bytes_downloaded.0), + left: NumberOfBytes(announce_wrapper.announce_request.bytes_left.0), + event: AnnounceEvent::from_i32(announce_wrapper.announce_request.event.to_i32()), } } diff --git a/src/servers/udp/request.rs b/src/servers/udp/request.rs index f655fd36a..e172e03b1 100644 --- a/src/servers/udp/request.rs +++ b/src/servers/udp/request.rs @@ -6,8 +6,7 @@ //! Some of the type in this module are wrappers around the types in the //! `aquatic_udp_protocol` crate. 
use aquatic_udp_protocol::AnnounceRequest; - -use crate::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; /// Wrapper around [`AnnounceRequest`]. pub struct AnnounceWrapper { diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs index 9bf9dfd3c..9625b88e7 100644 --- a/src/shared/bit_torrent/common.rs +++ b/src/shared/bit_torrent/common.rs @@ -1,7 +1,6 @@ //! `BitTorrent` protocol primitive types //! //! [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde::{Deserialize, Serialize}; /// The maximum number of torrents that can be returned in an `scrape` response. @@ -33,23 +32,3 @@ enum Actions { Scrape = 2, Error = 3, } - -/// Announce events. Described on the -/// [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -#[derive(Serialize, Deserialize)] -#[serde(remote = "AnnounceEvent")] -pub enum AnnounceEventDef { - /// The peer has started downloading the torrent. - Started, - /// The peer has ceased downloading the torrent. - Stopped, - /// The peer has completed downloading the torrent. - Completed, - /// This is one of the announcements done at regular intervals. - None, -} - -/// Number of bytes downloaded, uploaded or pending to download (left) by the peer. -#[derive(Serialize, Deserialize)] -#[serde(remote = "NumberOfBytes")] -pub struct NumberOfBytesDef(pub i64); diff --git a/src/shared/bit_torrent/info_hash.rs b/src/shared/bit_torrent/info_hash.rs index 20c3cb38b..506c37758 100644 --- a/src/shared/bit_torrent/info_hash.rs +++ b/src/shared/bit_torrent/info_hash.rs @@ -129,169 +129,38 @@ //! You can hash that byte string with //! //! 
The result is a 20-char string: `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB` -use std::panic::Location; -use thiserror::Error; +use torrust_tracker_primitives::info_hash::InfoHash; -/// `BitTorrent` Info Hash v1 -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct InfoHash(pub [u8; 20]); +pub mod fixture { + use std::hash::{DefaultHasher, Hash, Hasher}; -const INFO_HASH_BYTES_LEN: usize = 20; + use super::InfoHash; -impl InfoHash { - /// Create a new `InfoHash` from a byte slice. + /// Generate as semi-stable pseudo-random infohash /// - /// # Panics + /// Note: If the [`DefaultHasher`] implementation changes + /// so will the resulting info-hashes. /// - /// Will panic if byte slice does not contains the exact amount of bytes need for the `InfoHash`. - #[must_use] - pub fn from_bytes(bytes: &[u8]) -> Self { - assert_eq!(bytes.len(), INFO_HASH_BYTES_LEN); - let mut ret = Self([0u8; INFO_HASH_BYTES_LEN]); - ret.0.clone_from_slice(bytes); - ret - } - - /// Returns the `InfoHash` internal byte array. - #[must_use] - pub fn bytes(&self) -> [u8; 20] { - self.0 - } - - /// Returns the `InfoHash` as a hex string. + /// The results should not be relied upon between versions. 
#[must_use] - pub fn to_hex_string(&self) -> String { - self.to_string() - } -} - -impl std::fmt::Display for InfoHash { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut chars = [0u8; 40]; - binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); - write!(f, "{}", std::str::from_utf8(&chars).unwrap()) - } -} - -impl std::str::FromStr for InfoHash { - type Err = binascii::ConvertError; - - fn from_str(s: &str) -> Result { - let mut i = Self([0u8; 20]); - if s.len() != 40 { - return Err(binascii::ConvertError::InvalidInputLength); - } - binascii::hex2bin(s.as_bytes(), &mut i.0)?; - Ok(i) - } -} - -impl Ord for InfoHash { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.0.cmp(&other.0) - } -} - -impl std::cmp::PartialOrd for InfoHash { - fn partial_cmp(&self, other: &InfoHash) -> Option { - Some(self.cmp(other)) - } -} + pub fn gen_seeded_infohash(seed: &u64) -> InfoHash { + let mut buf_a: [[u8; 8]; 4] = Default::default(); + let mut buf_b = InfoHash::default(); -impl std::convert::From<&[u8]> for InfoHash { - fn from(data: &[u8]) -> InfoHash { - assert_eq!(data.len(), 20); - let mut ret = InfoHash([0u8; 20]); - ret.0.clone_from_slice(data); - ret - } -} - -impl std::convert::From<[u8; 20]> for InfoHash { - fn from(val: [u8; 20]) -> Self { - InfoHash(val) - } -} - -/// Errors that can occur when converting from a `Vec` to an `InfoHash`. -#[derive(Error, Debug)] -pub enum ConversionError { - /// Not enough bytes for infohash. An infohash is 20 bytes. - #[error("not enough bytes for infohash: {message} {location}")] - NotEnoughBytes { - location: &'static Location<'static>, - message: String, - }, - /// Too many bytes for infohash. An infohash is 20 bytes. 
- #[error("too many bytes for infohash: {message} {location}")] - TooManyBytes { - location: &'static Location<'static>, - message: String, - }, -} - -impl TryFrom> for InfoHash { - type Error = ConversionError; + let mut hasher = DefaultHasher::new(); + seed.hash(&mut hasher); - fn try_from(bytes: Vec) -> Result { - if bytes.len() < INFO_HASH_BYTES_LEN { - return Err(ConversionError::NotEnoughBytes { - location: Location::caller(), - message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, - }); - } - if bytes.len() > INFO_HASH_BYTES_LEN { - return Err(ConversionError::TooManyBytes { - location: Location::caller(), - message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, - }); + for u in &mut buf_a { + seed.hash(&mut hasher); + *u = hasher.finish().to_le_bytes(); } - Ok(Self::from_bytes(&bytes)) - } -} -impl serde::ser::Serialize for InfoHash { - fn serialize(&self, serializer: S) -> Result { - let mut buffer = [0u8; 40]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - let str_out = std::str::from_utf8(bytes_out).unwrap(); - serializer.serialize_str(str_out) - } -} - -impl<'de> serde::de::Deserialize<'de> for InfoHash { - fn deserialize>(des: D) -> Result { - des.deserialize_str(InfoHashVisitor) - } -} - -struct InfoHashVisitor; - -impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { - type Value = InfoHash; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "a 40 character long hash") - } - - fn visit_str(self, v: &str) -> Result { - if v.len() != 40 { - return Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Str(v), - &"a 40 character long string", - )); + for (a, b) in buf_a.iter().flat_map(|a| a.iter()).zip(buf_b.0.iter_mut()) { + *b = *a; } - let mut res = InfoHash([0u8; 20]); - - if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { - return Err(serde::de::Error::invalid_value( - 
serde::de::Unexpected::Str(v), - &"a hexadecimal string", - )); - }; - Ok(res) + buf_b } } diff --git a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs index 6cae79888..b872e76e9 100644 --- a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs +++ b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs @@ -3,9 +3,9 @@ use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; use serde_repr::Serialize_repr; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; -use crate::core::peer::Id; -use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20}; pub struct Query { @@ -99,7 +99,7 @@ impl QueryBuilder { peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), downloaded: 0, uploaded: 0, - peer_id: Id(*b"-qB00000000000000001").0, + peer_id: peer::Id(*b"-qB00000000000000001").0, port: 17548, left: 0, event: Some(Event::Completed), @@ -117,7 +117,7 @@ impl QueryBuilder { } #[must_use] - pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + pub fn with_peer_id(mut self, peer_id: &peer::Id) -> Self { self.announce_query.peer_id = peer_id.0; self } diff --git a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs index 4fa49eed6..4d12fc2d2 100644 --- a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs +++ b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs @@ -2,7 +2,8 @@ use std::error::Error; use std::fmt::{self}; use std::str::FromStr; -use crate::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; + use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20}; pub struct Query { diff --git a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs 
b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs index e75cc6671..15ec446cb 100644 --- a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs +++ b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs @@ -1,8 +1,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use serde::{Deserialize, Serialize}; - -use crate::core::peer::Peer; +use torrust_tracker_primitives::peer; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Announce { @@ -23,8 +22,8 @@ pub struct DictionaryPeer { pub port: u16, } -impl From for DictionaryPeer { - fn from(peer: Peer) -> Self { +impl From for DictionaryPeer { + fn from(peer: peer::Peer) -> Self { DictionaryPeer { peer_id: peer.peer_id.to_bytes().to_vec(), ip: peer.peer_addr.ip().to_string(), diff --git a/src/shared/clock/mod.rs b/src/shared/clock/mod.rs index 6d9d4112a..a73878466 100644 --- a/src/shared/clock/mod.rs +++ b/src/shared/clock/mod.rs @@ -31,9 +31,7 @@ use std::str::FromStr; use std::time::Duration; use chrono::{DateTime, Utc}; - -/// Duration since the Unix Epoch. -pub type DurationSinceUnixEpoch = Duration; +use torrust_tracker_primitives::DurationSinceUnixEpoch; /// Clock types. 
#[derive(Debug)] diff --git a/src/shared/clock/time_extent.rs b/src/shared/clock/time_extent.rs index a5a359e52..168224eda 100644 --- a/src/shared/clock/time_extent.rs +++ b/src/shared/clock/time_extent.rs @@ -542,9 +542,11 @@ mod test { mod make_time_extent { mod fn_now { + use torrust_tracker_primitives::DurationSinceUnixEpoch; + use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make, TimeExtent}; - use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; + use crate::shared::clock::{Current, StoppedTime}; #[test] fn it_should_give_a_time_extent() { @@ -582,9 +584,11 @@ mod test { mod fn_now_after { use std::time::Duration; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make}; - use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; + use crate::shared::clock::{Current, StoppedTime}; #[test] fn it_should_give_a_time_extent() { @@ -621,8 +625,10 @@ mod test { mod fn_now_before { use std::time::Duration; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make, TimeExtent}; - use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; + use crate::shared::clock::{Current, StoppedTime}; #[test] fn it_should_give_a_time_extent() { diff --git a/src/shared/clock/utils.rs b/src/shared/clock/utils.rs index 94d88d288..8b1378917 100644 --- a/src/shared/clock/utils.rs +++ b/src/shared/clock/utils.rs @@ -1,11 +1 @@ -//! It contains helper functions related to time. -use super::DurationSinceUnixEpoch; -/// Serializes a `DurationSinceUnixEpoch` as a Unix timestamp in milliseconds. -/// # Errors -/// -/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. 
-pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { - #[allow(clippy::cast_possible_truncation)] - ser.serialize_u64(unix_time_value.as_millis() as u64) -} diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 186b7ea3b..8d91f3ae8 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -4,12 +4,12 @@ use std::sync::Arc; use futures::executor::block_on; use torrust_tracker::bootstrap::app::initialize_with_configuration; use torrust_tracker::bootstrap::jobs::make_rust_tls; -use torrust_tracker::core::peer::Peer; use torrust_tracker::core::Tracker; use torrust_tracker::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker::servers::registar::Registar; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_configuration::{Configuration, HttpApi}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; use super::connection_info::ConnectionInfo; @@ -22,7 +22,7 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; } } diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 54263f8b8..af6587673 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -1,8 +1,8 @@ use std::str::FromStr; -use torrust_tracker::core::peer::fixture::PeerBuilder; use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::fixture::PeerBuilder; use 
torrust_tracker_test_helpers::configuration; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index ee701ecc4..d54935f80 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -1,9 +1,9 @@ use std::str::FromStr; -use torrust_tracker::core::peer::fixture::PeerBuilder; use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer; use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use crate::common::http::{Query, QueryParam}; diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index 358a4a19e..29064ec9e 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -1,6 +1,6 @@ use std::str::FromStr; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 326f4e534..5638713aa 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use futures::executor::block_on; use torrust_tracker::bootstrap::app::initialize_with_configuration; use torrust_tracker::bootstrap::jobs::make_rust_tls; -use torrust_tracker::core::peer::Peer; use torrust_tracker::core::Tracker; use 
torrust_tracker::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker::servers::registar::Registar; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_configuration::{Configuration, HttpTracker}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; pub struct Environment { pub config: Arc, @@ -19,7 +19,7 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; } } diff --git a/tests/servers/http/requests/announce.rs b/tests/servers/http/requests/announce.rs index 2cc615d0f..061990621 100644 --- a/tests/servers/http/requests/announce.rs +++ b/tests/servers/http/requests/announce.rs @@ -3,8 +3,8 @@ use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; use serde_repr::Serialize_repr; -use torrust_tracker::core::peer::Id; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; use crate::servers::http::{percent_encode_byte_array, ByteArray20}; @@ -93,7 +93,7 @@ impl QueryBuilder { peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), downloaded: 0, uploaded: 0, - peer_id: Id(*b"-qB00000000000000001").0, + peer_id: peer::Id(*b"-qB00000000000000001").0, port: 17548, left: 0, event: Some(Event::Completed), @@ -109,7 +109,7 @@ impl QueryBuilder { self } - pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + pub fn with_peer_id(mut self, peer_id: &peer::Id) -> Self { self.announce_query.peer_id = peer_id.0; self } diff --git a/tests/servers/http/requests/scrape.rs b/tests/servers/http/requests/scrape.rs index 264c72c33..f66605855 100644 --- a/tests/servers/http/requests/scrape.rs +++ 
b/tests/servers/http/requests/scrape.rs @@ -1,7 +1,7 @@ use std::fmt; use std::str::FromStr; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; use crate::servers::http::{percent_encode_byte_array, ByteArray20}; diff --git a/tests/servers/http/responses/announce.rs b/tests/servers/http/responses/announce.rs index 968c327eb..2b49b4405 100644 --- a/tests/servers/http/responses/announce.rs +++ b/tests/servers/http/responses/announce.rs @@ -1,7 +1,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use serde::{Deserialize, Serialize}; -use torrust_tracker::core::peer::Peer; +use torrust_tracker_primitives::peer; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Announce { @@ -22,8 +22,8 @@ pub struct DictionaryPeer { pub port: u16, } -impl From for DictionaryPeer { - fn from(peer: Peer) -> Self { +impl From for DictionaryPeer { + fn from(peer: peer::Peer) -> Self { DictionaryPeer { peer_id: peer.peer_id.to_bytes().to_vec(), ip: peer.peer_addr.ip().to_string(), diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index be285dcd7..a7962db0f 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -89,9 +89,9 @@ mod for_all_config_modes { use local_ip_address::local_ip; use reqwest::{Response, StatusCode}; use tokio::net::TcpListener; - use torrust_tracker::core::peer; - use torrust_tracker::core::peer::fixture::PeerBuilder; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::invalid_info_hashes; @@ -750,7 +750,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.get_torrent_peers(&info_hash).await; + let peers = 
env.tracker.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); @@ -786,7 +786,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.get_torrent_peers(&info_hash).await; + let peers = env.tracker.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); @@ -826,7 +826,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.get_torrent_peers(&info_hash).await; + let peers = env.tracker.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); @@ -864,7 +864,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.get_torrent_peers(&info_hash).await; + let peers = env.tracker.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); @@ -887,9 +887,9 @@ mod for_all_config_modes { use std::str::FromStr; use tokio::net::TcpListener; - use torrust_tracker::core::peer; - use torrust_tracker::core::peer::fixture::PeerBuilder; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::invalid_info_hashes; @@ -1113,7 +1113,7 @@ mod configured_as_whitelisted { mod and_receiving_an_announce_request { use std::str::FromStr; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; @@ -1160,9 +1160,9 @@ 
mod configured_as_whitelisted { mod receiving_an_scrape_request { use std::str::FromStr; - use torrust_tracker::core::peer; - use torrust_tracker::core::peer::fixture::PeerBuilder; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use crate::servers::http::asserts::assert_scrape_response; @@ -1253,7 +1253,7 @@ mod configured_as_private { use std::time::Duration; use torrust_tracker::core::auth::Key; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; @@ -1329,9 +1329,9 @@ mod configured_as_private { use std::time::Duration; use torrust_tracker::core::auth::Key; - use torrust_tracker::core::peer; - use torrust_tracker::core::peer::fixture::PeerBuilder; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use crate::servers::http::asserts::{assert_authentication_error_response, assert_scrape_response}; diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index da7705016..12f4aeb9e 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -2,12 +2,12 @@ use std::net::SocketAddr; use std::sync::Arc; use torrust_tracker::bootstrap::app::initialize_with_configuration; -use torrust_tracker::core::peer::Peer; use torrust_tracker::core::Tracker; use torrust_tracker::servers::registar::Registar; use torrust_tracker::servers::udp::server::{Launcher, Running, Stopped, 
UdpServer}; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_configuration::{Configuration, UdpTracker}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; pub struct Environment { pub config: Arc, @@ -19,7 +19,7 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker #[allow(dead_code)] - pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; } } From 03883c00d606ba0e5d23849852b1aad7be3c1e03 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 3 Mar 2024 03:56:06 +0800 Subject: [PATCH 6/9] dev: repository benchmark uses criterion --- Cargo.lock | 4 +- packages/torrent-repository/Cargo.toml | 8 +- .../benches/helpers/args.rs | 15 -- .../benches/helpers/asyn.rs | 199 ++++++++---------- .../torrent-repository/benches/helpers/mod.rs | 1 - .../benches/helpers/sync.rs | 179 +++++++--------- .../benches/helpers/utils.rs | 33 --- .../benches/repository-benchmark.rs | 187 ---------------- .../benches/repository_benchmark.rs | 191 +++++++++++++++++ 9 files changed, 363 insertions(+), 454 deletions(-) delete mode 100644 packages/torrent-repository/benches/helpers/args.rs delete mode 100644 packages/torrent-repository/benches/repository-benchmark.rs create mode 100644 packages/torrent-repository/benches/repository_benchmark.rs diff --git a/Cargo.lock b/Cargo.lock index 8ec922448..b8437326c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -762,6 +762,7 @@ dependencies = [ "ciborium", "clap", "criterion-plot", + "futures", "is-terminal", "itertools 0.10.5", "num-traits", @@ -774,6 +775,7 @@ dependencies = [ "serde_derive", "serde_json", "tinytemplate", + "tokio", "walkdir", ] @@ -3557,7 +3559,7 @@ dependencies = [ name = "torrust-tracker-torrent-repository" version = "3.0.0-alpha.12-develop" dependencies = [ - 
"clap", + "criterion", "futures", "serde", "tokio", diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 0df82a2c6..b53b9a15e 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -16,9 +16,15 @@ rust-version.workspace = true version.workspace = true [dependencies] -clap = { version = "4.4.8", features = ["derive"] } futures = "0.3.29" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } serde = { version = "1", features = ["derive"] } + +[dev-dependencies] +criterion = { version = "0", features = ["async_tokio"] } + +[[bench]] +harness = false +name = "repository_benchmark" diff --git a/packages/torrent-repository/benches/helpers/args.rs b/packages/torrent-repository/benches/helpers/args.rs deleted file mode 100644 index 3a38c55a7..000000000 --- a/packages/torrent-repository/benches/helpers/args.rs +++ /dev/null @@ -1,15 +0,0 @@ -use clap::Parser; - -#[derive(Parser, Debug)] -#[command(author, version, about, long_about = None)] -pub struct Args { - /// Amount of benchmark worker threads - #[arg(short, long)] - pub threads: usize, - /// Amount of time in ns a thread will sleep to simulate a client response after handling a task - #[arg(short, long)] - pub sleep: Option, - /// Compare with old implementations of the torrent repository - #[arg(short, long)] - pub compare: Option, -} diff --git a/packages/torrent-repository/benches/helpers/asyn.rs b/packages/torrent-repository/benches/helpers/asyn.rs index 4fb37104f..80f70cdc2 100644 --- a/packages/torrent-repository/benches/helpers/asyn.rs +++ b/packages/torrent-repository/benches/helpers/asyn.rs @@ -1,182 +1,155 @@ use std::sync::Arc; -use std::time::Duration; +use std::time::{Duration, Instant}; 
-use clap::Parser; use futures::stream::FuturesUnordered; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_torrent_repository::repository::RepositoryAsync; -use super::args::Args; -use super::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; +use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; -pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) +pub async fn add_one_torrent(samples: u64) -> Duration where V: RepositoryAsync + Default, { - let mut results: Vec = Vec::with_capacity(samples); + let start = Instant::now(); for _ in 0..samples { let torrent_repository = V::default(); let info_hash = InfoHash([0; 20]); - let start_time = std::time::Instant::now(); - torrent_repository .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) .await; - - let result = start_time.elapsed(); - - results.push(result); } - get_average_and_adjusted_average_from_results(results) + start.elapsed() } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option) -> Duration where V: RepositoryAsync + Default, Arc: Clone + Send + Sync + 'static, { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::::default(); - let info_hash: &'static InfoHash = &InfoHash([0; 20]); - let handles = FuturesUnordered::new(); - - // Add the torrent/peer to the torrent repository - torrent_repository - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; + let torrent_repository = Arc::::default(); + let info_hash: &'static InfoHash = &InfoHash([0; 20]); + let handles = FuturesUnordered::new(); - let start_time = 
std::time::Instant::now(); + // Add the torrent/peer to the torrent repository + torrent_repository + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; - for _ in 0..10_000 { - let torrent_repository_clone = torrent_repository.clone(); + let start = Instant::now(); - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); + for _ in 0..samples { + let torrent_repository_clone = torrent_repository.clone(); - handles.push(handle); - } + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; - // Await all tasks - futures::future::join_all(handles).await; + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); - let result = start_time.elapsed(); + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); - results.push(result); + handles.push(handle); } - get_average_and_adjusted_average_from_results(results) + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() } // Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn add_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration where V: RepositoryAsync + Default, Arc: Clone + Send + Sync + 'static, { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::::default(); - let info_hashes = generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); - - let 
start_time = std::time::Instant::now(); + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize")); + let handles = FuturesUnordered::new(); - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); + let start = Instant::now(); - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; - // Await all tasks - futures::future::join_all(handles).await; + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); - let result = start_time.elapsed(); + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); - results.push(result); + handles.push(handle); } - get_average_and_adjusted_average_from_results(results) + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() } // Async update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration where V: RepositoryAsync + Default, Arc: Clone + Send + Sync + 'static, { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::::default(); - let info_hashes = 
generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); - - // Add the torrents/peers to the torrent repository - for info_hash in &info_hashes { - torrent_repository - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; - } + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize")); + let handles = FuturesUnordered::new(); - let start_time = std::time::Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; + } - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); + let start = Instant::now(); - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); - handles.push(handle); - } + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; - // Await all tasks - futures::future::join_all(handles).await; + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); - let result = start_time.elapsed(); + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); - results.push(result); + handles.push(handle); } - get_average_and_adjusted_average_from_results(results) + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() } diff --git a/packages/torrent-repository/benches/helpers/mod.rs b/packages/torrent-repository/benches/helpers/mod.rs index 
758c123bd..1026aa4bf 100644 --- a/packages/torrent-repository/benches/helpers/mod.rs +++ b/packages/torrent-repository/benches/helpers/mod.rs @@ -1,4 +1,3 @@ -pub mod args; pub mod asyn; pub mod sync; pub mod utils; diff --git a/packages/torrent-repository/benches/helpers/sync.rs b/packages/torrent-repository/benches/helpers/sync.rs index aa2f8188a..0523f4141 100644 --- a/packages/torrent-repository/benches/helpers/sync.rs +++ b/packages/torrent-repository/benches/helpers/sync.rs @@ -1,172 +1,145 @@ use std::sync::Arc; -use std::time::Duration; +use std::time::{Duration, Instant}; -use clap::Parser; use futures::stream::FuturesUnordered; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_torrent_repository::repository::Repository; -use super::args::Args; -use super::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; +use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; // Simply add one torrent #[must_use] -pub fn add_one_torrent(samples: usize) -> (Duration, Duration) +pub fn add_one_torrent(samples: u64) -> Duration where V: Repository + Default, { - let mut results: Vec = Vec::with_capacity(samples); + let start = Instant::now(); for _ in 0..samples { let torrent_repository = V::default(); let info_hash = InfoHash([0; 20]); - let start_time = std::time::Instant::now(); - torrent_repository.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); - - let result = start_time.elapsed(); - - results.push(result); } - get_average_and_adjusted_average_from_results(results) + start.elapsed() } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option) -> Duration where V: Repository + Default, Arc: Clone + Send + Sync + 
'static, { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::::default(); - let info_hash: &'static InfoHash = &InfoHash([0; 20]); - let handles = FuturesUnordered::new(); - - // Add the torrent/peer to the torrent repository - torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); + let torrent_repository = Arc::::default(); + let info_hash: &'static InfoHash = &InfoHash([0; 20]); + let handles = FuturesUnordered::new(); - let start_time = std::time::Instant::now(); + // Add the torrent/peer to the torrent repository + torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); - for _ in 0..10_000 { - let torrent_repository_clone = torrent_repository.clone(); + let start = Instant::now(); - let handle = runtime.spawn(async move { - torrent_repository_clone.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); + for _ in 0..samples { + let torrent_repository_clone = torrent_repository.clone(); - handles.push(handle); - } + let handle = runtime.spawn(async move { + torrent_repository_clone.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); - // Await all tasks - futures::future::join_all(handles).await; + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); - let result = start_time.elapsed(); + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); - results.push(result); + handles.push(handle); } - get_average_and_adjusted_average_from_results(results) + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() } // Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel(runtime: 
&tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn add_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration where V: Repository + Default, Arc: Clone + Send + Sync + 'static, { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::::default(); - let info_hashes = generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize")); + let handles = FuturesUnordered::new(); - let start_time = std::time::Instant::now(); + let start = Instant::now(); - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); - let handle = runtime.spawn(async move { - torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); + let handle = runtime.spawn(async move { + torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - let result = start_time.elapsed(); - - results.push(result); + handles.push(handle); } - get_average_and_adjusted_average_from_results(results) + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() } // Update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn 
update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration where V: Repository + Default, Arc: Clone + Send + Sync + 'static, { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::::default(); - let info_hashes = generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize")); + let handles = FuturesUnordered::new(); - // Add the torrents/peers to the torrent repository - for info_hash in &info_hashes { - torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); - } - - let start_time = std::time::Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); + } - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); + let start = Instant::now(); - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); - handles.push(handle); - } + let handle = runtime.spawn(async move { + torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); - // Await all tasks - futures::future::join_all(handles).await; + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); - 
let result = start_time.elapsed(); + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); - results.push(result); + handles.push(handle); } - get_average_and_adjusted_average_from_results(results) + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() } diff --git a/packages/torrent-repository/benches/helpers/utils.rs b/packages/torrent-repository/benches/helpers/utils.rs index aed9f40cf..170194806 100644 --- a/packages/torrent-repository/benches/helpers/utils.rs +++ b/packages/torrent-repository/benches/helpers/utils.rs @@ -1,6 +1,5 @@ use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::time::Duration; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::info_hash::InfoHash; @@ -39,35 +38,3 @@ pub fn generate_unique_info_hashes(size: usize) -> Vec { result.into_iter().collect() } - -#[must_use] -pub fn within_acceptable_range(test: &Duration, norm: &Duration) -> bool { - let test_secs = test.as_secs_f64(); - let norm_secs = norm.as_secs_f64(); - - // Calculate the upper and lower bounds for the 10% tolerance - let tolerance = norm_secs * 0.1; - - // Calculate the upper and lower limits - let upper_limit = norm_secs + tolerance; - let lower_limit = norm_secs - tolerance; - - test_secs < upper_limit && test_secs > lower_limit -} - -#[must_use] -pub fn get_average_and_adjusted_average_from_results(mut results: Vec) -> (Duration, Duration) { - #[allow(clippy::cast_possible_truncation)] - let average = results.iter().sum::() / results.len() as u32; - - results.retain(|result| within_acceptable_range(result, &average)); - - let mut adjusted_average = Duration::from_nanos(0); - - #[allow(clippy::cast_possible_truncation)] - if results.len() > 1 { - adjusted_average = results.iter().sum::() / results.len() as u32; - } - - (average, adjusted_average) -} diff --git a/packages/torrent-repository/benches/repository-benchmark.rs 
b/packages/torrent-repository/benches/repository-benchmark.rs deleted file mode 100644 index bff34b256..000000000 --- a/packages/torrent-repository/benches/repository-benchmark.rs +++ /dev/null @@ -1,187 +0,0 @@ -mod helpers; - -use clap::Parser; -use torrust_tracker_torrent_repository::{ - TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, - TorrentsRwLockTokioMutexTokio, -}; - -use crate::helpers::args::Args; -use crate::helpers::{asyn, sync}; - -#[allow(clippy::too_many_lines)] -#[allow(clippy::print_literal)] -fn main() { - let args = Args::parse(); - - // Add 1 to worker_threads since we need a thread that awaits the benchmark - let rt = tokio::runtime::Builder::new_multi_thread() - .worker_threads(args.threads + 1) - .enable_time() - .build() - .unwrap(); - - println!("TorrentsRwLockTokio"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) - ); - - if let Some(true) = args.compare { - println!(); - - println!("TorrentsRwLockStd"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - sync::add_one_torrent::(1_000_000) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", 
- rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) - ); - - println!(); - - println!("TorrentsRwLockStdMutexStd"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - sync::add_one_torrent::(1_000_000) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::( - &rt, 10 - )) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(sync::update_multiple_torrents_in_parallel::( - &rt, 10 - )) - ); - - println!(); - - println!("TorrentsRwLockStdMutexTokio"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::( - &rt, 10 - )) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::( - &rt, 10 - )) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::( - &rt, 10 - )) - ); - - println!(); - - println!("TorrentsRwLockTokioMutexStd"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::( - &rt, 10 - )) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::( - &rt, 10 - )) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::( - &rt, 10 - )) - ); - - println!(); - - 
println!("TorrentsRwLockTokioMutexTokio"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::( - &rt, 10 - )) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::( - &rt, 10 - )) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) - ); - } -} diff --git a/packages/torrent-repository/benches/repository_benchmark.rs b/packages/torrent-repository/benches/repository_benchmark.rs new file mode 100644 index 000000000..a3684c8e2 --- /dev/null +++ b/packages/torrent-repository/benches/repository_benchmark.rs @@ -0,0 +1,191 @@ +use std::time::Duration; + +mod helpers; + +use criterion::{criterion_group, criterion_main, Criterion}; +use torrust_tracker_torrent_repository::{ + TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, + TorrentsRwLockTokioMutexTokio, +}; + +use crate::helpers::{asyn, sync}; + +fn add_one_torrent(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("add_one_torrent"); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt).iter_custom(asyn::add_one_torrent::); + }); + + 
group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.finish(); +} + +fn add_multiple_torrents_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("add_multiple_torrents_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.finish(); +} + +fn update_one_torrent_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("update_one_torrent_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + 
group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.finish(); +} + +fn update_multiple_torrents_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("update_multiple_torrents_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + 
group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt).iter_custom(|iters| { + asyn::update_multiple_torrents_in_parallel::(&rt, iters, None) + }); + }); + + group.finish(); +} + +criterion_group!( + benches, + add_one_torrent, + add_multiple_torrents_in_parallel, + update_one_torrent_in_parallel, + update_multiple_torrents_in_parallel +); +criterion_main!(benches); From 3e0745b757f80ac0b5efce0e7c9459c8218cee73 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 17 Mar 2024 11:23:25 +0800 Subject: [PATCH 7/9] dev: extract clock to new package --- Cargo.lock | 54 ++- Cargo.toml | 1 + cSpell.json | 2 + packages/clock/Cargo.toml | 24 ++ packages/clock/README.md | 11 + packages/clock/src/clock/mod.rs | 72 ++++ packages/clock/src/clock/stopped/mod.rs | 210 ++++++++++ packages/clock/src/clock/working/mod.rs | 18 + packages/clock/src/conv/mod.rs | 82 ++++ packages/clock/src/lib.rs | 53 +++ .../clock/src/static_time/mod.rs | 0 .../clock/src/time_extent/mod.rs | 99 +++-- packages/clock/tests/clock/mod.rs | 16 + packages/clock/tests/integration.rs | 19 + packages/configuration/src/lib.rs | 3 + packages/torrent-repository/Cargo.toml | 5 +- packages/torrent-repository/src/entry/mod.rs | 6 +- .../torrent-repository/src/entry/single.rs | 299 +++++++++++++ packages/torrent-repository/src/lib.rs | 13 + src/bootstrap/app.rs | 2 +- src/core/auth.rs | 20 +- src/core/databases/mod.rs | 4 +- src/core/mod.rs | 15 +- src/core/peer_tests.rs | 8 +- src/core/torrent/mod.rs | 298 +------------ src/lib.rs | 24 ++ .../apis/v1/context/auth_key/resources.rs | 15 +- src/servers/http/mod.rs | 2 +- src/servers/http/v1/handlers/announce.rs | 5 +- 
src/servers/udp/connection_cookie.rs | 24 +- src/servers/udp/handlers.rs | 5 +- src/servers/udp/peer_builder.rs | 5 +- src/shared/clock/mod.rs | 393 ------------------ src/shared/clock/utils.rs | 1 - src/shared/mod.rs | 2 - tests/common/clock.rs | 16 + tests/common/mod.rs | 1 + tests/integration.rs | 13 + 38 files changed, 1050 insertions(+), 790 deletions(-) create mode 100644 packages/clock/Cargo.toml create mode 100644 packages/clock/README.md create mode 100644 packages/clock/src/clock/mod.rs create mode 100644 packages/clock/src/clock/stopped/mod.rs create mode 100644 packages/clock/src/clock/working/mod.rs create mode 100644 packages/clock/src/conv/mod.rs create mode 100644 packages/clock/src/lib.rs rename src/shared/clock/static_time.rs => packages/clock/src/static_time/mod.rs (100%) rename src/shared/clock/time_extent.rs => packages/clock/src/time_extent/mod.rs (85%) create mode 100644 packages/clock/tests/clock/mod.rs create mode 100644 packages/clock/tests/integration.rs delete mode 100644 src/shared/clock/mod.rs delete mode 100644 src/shared/clock/utils.rs create mode 100644 tests/common/clock.rs diff --git a/Cargo.lock b/Cargo.lock index b8437326c..e28278abb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1209,6 +1209,12 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + [[package]] name = "futures-util" version = "0.3.30" @@ -2560,6 +2566,12 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +[[package]] +name = "relative-path" +version = "1.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e898588f33fdd5b9420719948f9f2a32c922a246964576f71ba7f24f80610fbc" + [[package]] name = "rend" version = "0.4.2" @@ -2676,6 +2688,35 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "rstest" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97eeab2f3c0a199bc4be135c36c924b6590b88c377d416494288c14f2db30199" +dependencies = [ + "futures", + "futures-timer", + "rstest_macros", + "rustc_version", +] + +[[package]] +name = "rstest_macros" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" +dependencies = [ + "cfg-if", + "glob", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version", + "syn 2.0.53", + "unicode-ident", +] + [[package]] name = "rusqlite" version = "0.31.0" @@ -3490,6 +3531,7 @@ dependencies = [ "serde_repr", "thiserror", "tokio", + "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-contrib-bencode", "torrust-tracker-located-error", @@ -3503,6 +3545,15 @@ dependencies = [ "uuid", ] +[[package]] +name = "torrust-tracker-clock" +version = "3.0.0-alpha.12-develop" +dependencies = [ + "chrono", + "lazy_static", + "torrust-tracker-primitives", +] + [[package]] name = "torrust-tracker-configuration" version = "3.0.0-alpha.12-develop" @@ -3561,8 +3612,9 @@ version = "3.0.0-alpha.12-develop" dependencies = [ "criterion", "futures", - "serde", + "rstest", "tokio", + "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-primitives", ] diff --git a/Cargo.toml b/Cargo.toml index 9610fffc2..99b7a334a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -64,6 +64,7 @@ serde_repr = "0" thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "packages/configuration" } +torrust-tracker-clock = { version = 
"3.0.0-alpha.12-develop", path = "packages/clock" } torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = "contrib/bencode" } torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "packages/located-error" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } diff --git a/cSpell.json b/cSpell.json index 6d5f71b85..1e276dbc2 100644 --- a/cSpell.json +++ b/cSpell.json @@ -34,6 +34,7 @@ "completei", "connectionless", "Containerfile", + "conv", "curr", "Cyberneering", "dashmap", @@ -116,6 +117,7 @@ "rngs", "rosegment", "routable", + "rstest", "rusqlite", "RUSTDOCFLAGS", "RUSTFLAGS", diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml new file mode 100644 index 000000000..d7192b6e4 --- /dev/null +++ b/packages/clock/Cargo.toml @@ -0,0 +1,24 @@ +[package] +description = "A library to a clock for the torrust tracker." +keywords = ["library", "clock", "torrents"] +name = "torrust-tracker-clock" +readme = "README.md" + +authors.workspace = true +categories.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +lazy_static = "1" +chrono = { version = "0", default-features = false, features = ["clock"] } + +torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } + +[dev-dependencies] diff --git a/packages/clock/README.md b/packages/clock/README.md new file mode 100644 index 000000000..bfdd7808f --- /dev/null +++ b/packages/clock/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Clock + +A library to provide a working and mockable clock for the [Torrust Tracker](https://github.com/torrust/torrust-tracker). + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-torrent-clock). 
+ +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/clock/src/clock/mod.rs b/packages/clock/src/clock/mod.rs new file mode 100644 index 000000000..50afbc9db --- /dev/null +++ b/packages/clock/src/clock/mod.rs @@ -0,0 +1,72 @@ +use std::time::Duration; + +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use self::stopped::StoppedClock; +use self::working::WorkingClock; + +pub mod stopped; +pub mod working; + +/// A generic structure that represents a clock. +/// +/// It can be either the working clock (production) or the stopped clock +/// (testing). It implements the `Time` trait, which gives you the current time. +#[derive(Debug)] +pub struct Clock { + clock: std::marker::PhantomData, +} + +/// The working clock. It returns the current time. +pub type Working = Clock; +/// The stopped clock. It returns always the same fixed time. +pub type Stopped = Clock; + +/// Trait for types that can be used as a timestamp clock. +pub trait Time: Sized { + fn now() -> DurationSinceUnixEpoch; + + fn dbg_clock_type() -> String; + + #[must_use] + fn now_add(add_time: &Duration) -> Option { + Self::now().checked_add(*add_time) + } + #[must_use] + fn now_sub(sub_time: &Duration) -> Option { + Self::now().checked_sub(*sub_time) + } +} + +#[cfg(test)] +mod tests { + use std::any::TypeId; + use std::time::Duration; + + use crate::clock::{self, Stopped, Time, Working}; + use crate::CurrentClock; + + #[test] + fn it_should_be_the_stopped_clock_as_default_when_testing() { + // We are testing, so we should default to the fixed time. 
+ assert_eq!(TypeId::of::(), TypeId::of::()); + assert_eq!(Stopped::now(), CurrentClock::now()); + } + + #[test] + fn it_should_have_different_times() { + assert_ne!(TypeId::of::(), TypeId::of::()); + assert_ne!(Stopped::now(), Working::now()); + } + + #[test] + fn it_should_use_stopped_time_for_testing() { + assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); + + let time = CurrentClock::now(); + std::thread::sleep(Duration::from_millis(50)); + let time_2 = CurrentClock::now(); + + assert_eq!(time, time_2); + } +} diff --git a/packages/clock/src/clock/stopped/mod.rs b/packages/clock/src/clock/stopped/mod.rs new file mode 100644 index 000000000..57655ab75 --- /dev/null +++ b/packages/clock/src/clock/stopped/mod.rs @@ -0,0 +1,210 @@ +/// Trait for types that can be used as a timestamp clock stopped +/// at a given time. + +#[allow(clippy::module_name_repetitions)] +pub struct StoppedClock {} + +#[allow(clippy::module_name_repetitions)] +pub trait Stopped: clock::Time { + /// It sets the clock to a given time. + fn local_set(unix_time: &DurationSinceUnixEpoch); + + /// It sets the clock to the Unix Epoch. + fn local_set_to_unix_epoch() { + Self::local_set(&DurationSinceUnixEpoch::ZERO); + } + + /// It sets the clock to the time the application started. + fn local_set_to_app_start_time(); + + /// It sets the clock to the current system time. + fn local_set_to_system_time_now(); + + /// It adds a `Duration` to the clock. + /// + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would overflow the internal `Duration`. + fn local_add(duration: &Duration) -> Result<(), IntErrorKind>; + + /// It subtracts a `Duration` from the clock. + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would underflow the internal `Duration`. + fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>; + + /// It resets the clock to default fixed time that is application start time (or the unix epoch when testing). 
+ fn local_reset(); +} + +use std::num::IntErrorKind; +use std::time::Duration; + +use super::{DurationSinceUnixEpoch, Time}; +use crate::clock; + +impl Time for clock::Stopped { + fn now() -> DurationSinceUnixEpoch { + detail::FIXED_TIME.with(|time| { + return *time.borrow(); + }) + } + + fn dbg_clock_type() -> String { + "Stopped".to_owned() + } +} + +impl Stopped for clock::Stopped { + fn local_set(unix_time: &DurationSinceUnixEpoch) { + detail::FIXED_TIME.with(|time| { + *time.borrow_mut() = *unix_time; + }); + } + + fn local_set_to_app_start_time() { + Self::local_set(&detail::get_app_start_time()); + } + + fn local_set_to_system_time_now() { + Self::local_set(&detail::get_app_start_time()); + } + + fn local_add(duration: &Duration) -> Result<(), IntErrorKind> { + detail::FIXED_TIME.with(|time| { + let time_borrowed = *time.borrow(); + *time.borrow_mut() = match time_borrowed.checked_add(*duration) { + Some(time) => time, + None => { + return Err(IntErrorKind::PosOverflow); + } + }; + Ok(()) + }) + } + + fn local_sub(duration: &Duration) -> Result<(), IntErrorKind> { + detail::FIXED_TIME.with(|time| { + let time_borrowed = *time.borrow(); + *time.borrow_mut() = match time_borrowed.checked_sub(*duration) { + Some(time) => time, + None => { + return Err(IntErrorKind::NegOverflow); + } + }; + Ok(()) + }) + } + + fn local_reset() { + Self::local_set(&detail::get_default_fixed_time()); + } +} + +#[cfg(test)] +mod tests { + use std::thread; + use std::time::Duration; + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::clock::stopped::Stopped as _; + use crate::clock::{Stopped, Time, Working}; + + #[test] + fn it_should_default_to_zero_when_testing() { + assert_eq!(Stopped::now(), DurationSinceUnixEpoch::ZERO); + } + + #[test] + fn it_should_possible_to_set_the_time() { + // Check we start with ZERO. 
+ assert_eq!(Stopped::now(), Duration::ZERO); + + // Set to Current Time and Check + let timestamp = Working::now(); + Stopped::local_set(×tamp); + assert_eq!(Stopped::now(), timestamp); + + // Elapse the Current Time and Check + Stopped::local_add(×tamp).unwrap(); + assert_eq!(Stopped::now(), timestamp + timestamp); + + // Reset to ZERO and Check + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); + } + + #[test] + fn it_should_default_to_zero_on_thread_exit() { + assert_eq!(Stopped::now(), Duration::ZERO); + let after5 = Working::now_add(&Duration::from_secs(5)).unwrap(); + Stopped::local_set(&after5); + assert_eq!(Stopped::now(), after5); + + let t = thread::spawn(move || { + // each thread starts out with the initial value of ZERO + assert_eq!(Stopped::now(), Duration::ZERO); + + // and gets set to the current time. + let timestamp = Working::now(); + Stopped::local_set(×tamp); + assert_eq!(Stopped::now(), timestamp); + }); + + // wait for the thread to complete and bail out on panic + t.join().unwrap(); + + // we retain our original value of current time + 5sec despite the child thread + assert_eq!(Stopped::now(), after5); + + // Reset to ZERO and Check + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); + } +} + +mod detail { + use std::cell::RefCell; + use std::time::SystemTime; + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::static_time; + + thread_local!(pub static FIXED_TIME: RefCell = RefCell::new(get_default_fixed_time())); + + pub fn get_app_start_time() -> DurationSinceUnixEpoch { + (*static_time::TIME_AT_APP_START) + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + } + + #[cfg(not(test))] + pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { + get_app_start_time() + } + + #[cfg(test)] + pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::ZERO + } + + #[cfg(test)] + mod tests { + use std::time::Duration; + + use 
crate::clock::stopped::detail::{get_app_start_time, get_default_fixed_time}; + + #[test] + fn it_should_get_the_zero_start_time_when_testing() { + assert_eq!(get_default_fixed_time(), Duration::ZERO); + } + + #[test] + fn it_should_get_app_start_time() { + const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1_662_983_731, 22312); + assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); + } + } +} diff --git a/packages/clock/src/clock/working/mod.rs b/packages/clock/src/clock/working/mod.rs new file mode 100644 index 000000000..6d0b4dcf7 --- /dev/null +++ b/packages/clock/src/clock/working/mod.rs @@ -0,0 +1,18 @@ +use std::time::SystemTime; + +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::clock; + +#[allow(clippy::module_name_repetitions)] +pub struct WorkingClock; + +impl clock::Time for clock::Working { + fn now() -> DurationSinceUnixEpoch { + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() + } + + fn dbg_clock_type() -> String { + "Working".to_owned() + } +} diff --git a/packages/clock/src/conv/mod.rs b/packages/clock/src/conv/mod.rs new file mode 100644 index 000000000..f70950c38 --- /dev/null +++ b/packages/clock/src/conv/mod.rs @@ -0,0 +1,82 @@ +use std::str::FromStr; + +use chrono::{DateTime, Utc}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +/// It converts a string in ISO 8601 format to a timestamp. +/// For example, the string `1970-01-01T00:00:00.000Z` which is the Unix Epoch +/// will be converted to a timestamp of 0: `DurationSinceUnixEpoch::ZERO`. +/// +/// # Panics +/// +/// Will panic if the input time cannot be converted to `DateTime::`, internally using the `i64` type. +/// (this will naturally happen in 292.5 billion years) +#[must_use] +pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEpoch { + convert_from_datetime_utc_to_timestamp(&DateTime::::from_str(iso_8601).unwrap()) +} + +/// It converts a `DateTime::` to a timestamp. 
+/// For example, the `DateTime::` of the Unix Epoch will be converted to a +/// timestamp of 0: `DurationSinceUnixEpoch::ZERO`. +/// +/// # Panics +/// +/// Will panic if the input time overflows the `u64` type. +/// (this will naturally happen in 584.9 billion years) +#[must_use] +pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::from_secs(u64::try_from(datetime_utc.timestamp()).expect("Overflow of u64 seconds, very future!")) +} + +/// It converts a timestamp to a `DateTime::`. +/// For example, the timestamp of 0: `DurationSinceUnixEpoch::ZERO` will be +/// converted to the `DateTime::` of the Unix Epoch. +/// +/// # Panics +/// +/// Will panic if the `u64` seconds of the input time overflows the `i64` type. +/// (this will naturally happen in 292.5 billion years) +#[must_use] +pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime { + DateTime::from_timestamp( + i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), + duration.subsec_nanos(), + ) + .unwrap() +} + +#[cfg(test)] + +mod tests { + use chrono::DateTime; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::conv::{ + convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc, + }; + + #[test] + fn should_be_converted_to_datetime_utc() { + let timestamp = DurationSinceUnixEpoch::ZERO; + assert_eq!( + convert_from_timestamp_to_datetime_utc(timestamp), + DateTime::from_timestamp(0, 0).unwrap() + ); + } + + #[test] + fn should_be_converted_from_datetime_utc() { + let datetime = DateTime::from_timestamp(0, 0).unwrap(); + assert_eq!( + convert_from_datetime_utc_to_timestamp(&datetime), + DurationSinceUnixEpoch::ZERO + ); + } + + #[test] + fn should_be_converted_from_datetime_utc_in_iso_8601() { + let iso_8601 = "1970-01-01T00:00:00.000Z".to_string(); + 
assert_eq!(convert_from_iso_8601_to_timestamp(&iso_8601), DurationSinceUnixEpoch::ZERO); + } +} diff --git a/packages/clock/src/lib.rs b/packages/clock/src/lib.rs new file mode 100644 index 000000000..9fc67cb54 --- /dev/null +++ b/packages/clock/src/lib.rs @@ -0,0 +1,53 @@ +//! Time related functions and types. +//! +//! It's usually a good idea to control where the time comes from +//! in an application so that it can be mocked for testing and it can be +//! controlled in production so we get the intended behavior without +//! relying on the specific time zone for the underlying system. +//! +//! Clocks use the type `DurationSinceUnixEpoch` which is a +//! `std::time::Duration` since the Unix Epoch (timestamp). +//! +//! ```text +//! Local time: lun 2023-03-27 16:12:00 WEST +//! Universal time: lun 2023-03-27 15:12:00 UTC +//! Time zone: Atlantic/Canary (WEST, +0100) +//! Timestamp: 1679929914 +//! Duration: 1679929914.10167426 +//! ``` +//! +//! > **NOTICE**: internally the `Duration` stores its main unit as seconds in a `u64` and it will +//! overflow in 584.9 billion years. +//! +//! > **NOTICE**: the timestamp does not depend on the time zone. That gives you +//! the ability to use the clock regardless of the underlying system time zone +//! configuration. See [Unix time Wikipedia entry](https://en.wikipedia.org/wiki/Unix_time). + +pub mod clock; +pub mod conv; +pub mod static_time; +pub mod time_extent; + +#[macro_use] +extern crate lazy_static; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::WorkingTimeExtentMaker; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::StoppedTimeExtentMaker; diff --git a/src/shared/clock/static_time.rs b/packages/clock/src/static_time/mod.rs similarity index 100% rename from src/shared/clock/static_time.rs rename to packages/clock/src/static_time/mod.rs diff --git a/src/shared/clock/time_extent.rs b/packages/clock/src/time_extent/mod.rs similarity index 85% rename from src/shared/clock/time_extent.rs rename to packages/clock/src/time_extent/mod.rs index 168224eda..c51849f21 100644 --- a/src/shared/clock/time_extent.rs +++ b/packages/clock/src/time_extent/mod.rs @@ -65,7 +65,7 @@ use std::num::{IntErrorKind, TryFromIntError}; use std::time::Duration; -use super::{Stopped, TimeNow, Type, Working}; +use crate::clock::{self, Stopped, Working}; /// This trait defines the operations that can be performed on a `TimeExtent`. pub trait Extent: Sized + Default { @@ -199,10 +199,10 @@ impl Extent for TimeExtent { /// It gives you the time in time extents. pub trait Make: Sized where - Clock: TimeNow, + Clock: clock::Time, { /// It gives you the current time extent (with a certain increment) for - /// the current time. It gets the current timestamp front he `Clock`. + /// the current time. It gets the current timestamp from the `Clock`. /// /// For example: /// @@ -223,12 +223,12 @@ where }) } - /// Same as [`now`](crate::shared::clock::time_extent::Make::now), but it + /// Same as [`now`](crate::time_extent::Make::now), but it /// will add an extra duration to the current time before calculating the /// time extent. It gives you a time extent for a time in the future. 
#[must_use] fn now_after(increment: &Base, add_time: &Duration) -> Option> { - match Clock::add(add_time) { + match Clock::now_add(add_time) { None => None, Some(time) => time .as_nanos() @@ -240,12 +240,12 @@ where } } - /// Same as [`now`](crate::shared::clock::time_extent::Make::now), but it + /// Same as [`now`](crate::time_extent::Make::now), but it /// will subtract a duration to the current time before calculating the /// time extent. It gives you a time extent for a time in the past. #[must_use] fn now_before(increment: &Base, sub_time: &Duration) -> Option> { - match Clock::sub(sub_time) { + match Clock::now_sub(sub_time) { None => None, Some(time) => time .as_nanos() @@ -262,38 +262,30 @@ where /// /// It's a clock which measures time in `TimeExtents`. #[derive(Debug)] -pub struct Maker {} +pub struct Maker { + clock: std::marker::PhantomData, +} /// A `TimeExtent` maker which makes `TimeExtents` from the `Working` clock. -pub type WorkingTimeExtentMaker = Maker<{ Type::WorkingClock as usize }>; +pub type WorkingTimeExtentMaker = Maker; /// A `TimeExtent` maker which makes `TimeExtents` from the `Stopped` clock. -pub type StoppedTimeExtentMaker = Maker<{ Type::StoppedClock as usize }>; - -impl Make for WorkingTimeExtentMaker {} -impl Make for StoppedTimeExtentMaker {} +pub type StoppedTimeExtentMaker = Maker; -/// The default `TimeExtent` maker. It is `WorkingTimeExtentMaker` in production -/// and `StoppedTimeExtentMaker` in tests. -#[cfg(not(test))] -pub type DefaultTimeExtentMaker = WorkingTimeExtentMaker; - -/// The default `TimeExtent` maker. It is `WorkingTimeExtentMaker` in production -/// and `StoppedTimeExtentMaker` in tests. 
-#[cfg(test)] -pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker; +impl Make for WorkingTimeExtentMaker {} +impl Make for StoppedTimeExtentMaker {} #[cfg(test)] mod test { - use crate::shared::clock::time_extent::TimeExtent; + use crate::time_extent::TimeExtent; const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239_812_388_723); mod fn_checked_duration_from_nanos { use std::time::Duration; - use crate::shared::clock::time_extent::checked_duration_from_nanos; - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::checked_duration_from_nanos; + use crate::time_extent::test::TIME_EXTENT_VAL; const NANOS_PER_SEC: u32 = 1_000_000_000; @@ -334,7 +326,7 @@ mod test { mod time_extent { mod fn_default { - use crate::shared::clock::time_extent::{TimeExtent, ZERO}; + use crate::time_extent::{TimeExtent, ZERO}; #[test] fn it_should_default_initialize_to_zero() { @@ -343,8 +335,8 @@ mod test { } mod fn_from_sec { - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Multiplier, TimeExtent, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Multiplier, TimeExtent, ZERO}; #[test] fn it_should_make_empty_for_zero() { @@ -360,8 +352,8 @@ mod test { } mod fn_new { - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, Extent, Multiplier, TimeExtent, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Extent, Multiplier, TimeExtent, ZERO}; #[test] fn it_should_make_empty_for_zero() { @@ -383,8 +375,8 @@ mod test { mod fn_increase { use std::num::IntErrorKind; - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Extent, TimeExtent, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Extent, TimeExtent, ZERO}; #[test] fn it_should_not_increase_for_zero() { @@ -411,8 +403,8 @@ 
mod test { mod fn_decrease { use std::num::IntErrorKind; - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Extent, TimeExtent, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Extent, TimeExtent, ZERO}; #[test] fn it_should_not_decrease_for_zero() { @@ -437,8 +429,8 @@ mod test { } mod fn_total { - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; #[test] fn it_should_be_zero_for_zero() { @@ -485,8 +477,8 @@ mod test { } mod fn_total_next { - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; #[test] fn it_should_be_zero_for_zero() { @@ -544,9 +536,10 @@ mod test { mod fn_now { use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make, TimeExtent}; - use crate::shared::clock::{Current, StoppedTime}; + use crate::clock::stopped::Stopped as _; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Make, TimeExtent}; + use crate::{CurrentClock, DefaultTimeExtentMaker}; #[test] fn it_should_give_a_time_extent() { @@ -558,7 +551,7 @@ mod test { } ); - Current::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); + CurrentClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); assert_eq!( DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), @@ -573,7 +566,7 @@ mod test { #[test] fn 
it_should_fail_if_amount_exceeds_bounds() { - Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now(&Base::from_millis(1)).unwrap().unwrap_err(), u64::try_from(u128::MAX).unwrap_err() @@ -586,9 +579,10 @@ mod test { use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make}; - use crate::shared::clock::{Current, StoppedTime}; + use crate::clock::stopped::Stopped as _; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Make}; + use crate::{CurrentClock, DefaultTimeExtentMaker}; #[test] fn it_should_give_a_time_extent() { @@ -607,13 +601,13 @@ mod test { fn it_should_fail_for_zero() { assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::ZERO), None); - Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::MAX), None); } #[test] fn it_should_fail_if_amount_exceeds_bounds() { - Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_after(&Base::from_millis(1), &Duration::ZERO) .unwrap() @@ -627,12 +621,13 @@ mod test { use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make, TimeExtent}; - use crate::shared::clock::{Current, StoppedTime}; + use crate::clock::stopped::Stopped as _; + use crate::time_extent::{Base, Make, TimeExtent}; + use crate::{CurrentClock, DefaultTimeExtentMaker}; #[test] fn it_should_give_a_time_extent() { - Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_before( @@ -657,7 +652,7 @@ mod test 
{ #[test] fn it_should_fail_if_amount_exceeds_bounds() { - Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_before(&Base::from_millis(1), &Duration::ZERO) .unwrap() diff --git a/packages/clock/tests/clock/mod.rs b/packages/clock/tests/clock/mod.rs new file mode 100644 index 000000000..5d94bb83d --- /dev/null +++ b/packages/clock/tests/clock/mod.rs @@ -0,0 +1,16 @@ +use std::time::Duration; + +use torrust_tracker_clock::clock::Time; + +use crate::CurrentClock; + +#[test] +fn it_should_use_stopped_time_for_testing() { + assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); + + let time = CurrentClock::now(); + std::thread::sleep(Duration::from_millis(50)); + let time_2 = CurrentClock::now(); + + assert_eq!(time, time_2); +} diff --git a/packages/clock/tests/integration.rs b/packages/clock/tests/integration.rs new file mode 100644 index 000000000..fa500227a --- /dev/null +++ b/packages/clock/tests/integration.rs @@ -0,0 +1,19 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` + +//mod common; +mod clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = torrust_tracker_clock::clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = torrust_tracker_clock::clock::Stopped; diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index b3b146717..549c73a31 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -243,6 +243,9 @@ use thiserror::Error; use torrust_tracker_located_error::{DynError, Located, LocatedError}; use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; +/// The maximum number of returned peers for a torrent. 
+pub const TORRENT_PEERS_LIMIT: usize = 74; + +#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] +pub struct TrackerPolicy { pub remove_peerless_torrents: bool, diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index b53b9a15e..c36ae1440 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -1,5 +1,5 @@ [package] -description = "A library to provide error decorator with the location and the source of the original error." +description = "A library that provides a repository of torrent files and their peers." keywords = ["torrents", "repository", "library"] name = "torrust-tracker-torrent-repository" readme = "README.md" @@ -20,10 +20,11 @@ futures = "0.3.29" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } -serde = { version = "1", features = ["derive"] } +torrust-tracker-clock = { version = "3.0.0-alpha.12-develop", path = "../clock" } [dev-dependencies] criterion = { version = "0", features = ["async_tokio"] } +rstest = "0" [[bench]] harness = false diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index 04aa597df..11352a8fa 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -1,7 +1,7 @@ use std::fmt::Debug; use std::sync::Arc; -use serde::{Deserialize, Serialize}; +//use serde::{Deserialize, Serialize}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; @@ -88,10 +88,10 @@ pub trait EntryAsync { /// This is the tracker entry for a given torrent and contains the swarm data, /// that's the list of 
all the peers trying to download the same torrent. /// The tracker keeps one entry like this for every torrent. -#[derive(Serialize, Deserialize, Clone, Debug, Default)] +#[derive(Clone, Debug, Default)] pub struct Torrent { /// The swarm: a network of peers that are all trying to download the torrent associated to this entry - #[serde(skip)] + // #[serde(skip)] pub(crate) peers: std::collections::BTreeMap>, /// The number of peers that have ever completed downloading the torrent associated to this entry pub(crate) completed: u32, diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs index 7a5cf6240..85fdc6cf0 100644 --- a/packages/torrent-repository/src/entry/single.rs +++ b/packages/torrent-repository/src/entry/single.rs @@ -103,3 +103,302 @@ impl Entry for EntrySingle { .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); } } + +#[cfg(test)] +mod tests { + mod torrent_entry { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::ops::Sub; + use std::sync::Arc; + use std::time::Duration; + + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; + + use crate::entry::Entry; + use crate::{CurrentClock, EntrySingle}; + + struct TorrentPeerBuilder { + peer: peer::Peer, + } + + impl TorrentPeerBuilder { + pub fn default() -> TorrentPeerBuilder { + let default_peer = peer::Peer { + peer_id: peer::Id([0u8; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: CurrentClock::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + TorrentPeerBuilder { peer: default_peer } + } + + pub fn with_event_completed(mut self) -> 
Self { + self.peer.event = AnnounceEvent::Completed; + self + } + + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { + self.peer.peer_id = peer_id; + self + } + + pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + pub fn into(self) -> peer::Peer { + self.peer + } + } + + /// A torrent seeder is a peer with 0 bytes left to download which + /// has not announced it has stopped + fn a_torrent_seeder() -> peer::Peer { + TorrentPeerBuilder::default() + .with_number_of_bytes_left(0) + .with_event_completed() + .into() + } + + /// A torrent leecher is a peer that is not a seeder. + /// Leecher: left > 0 OR event = Stopped + fn a_torrent_leecher() -> peer::Peer { + TorrentPeerBuilder::default() + .with_number_of_bytes_left(1) + .with_event_completed() + .into() + } + + #[test] + fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { + let torrent_entry = EntrySingle::default(); + + assert_eq!(torrent_entry.get_peers(None).len(), 0); + } + + #[test] + fn a_new_peer_can_be_added_to_a_torrent_entry() { + let mut torrent_entry = EntrySingle::default(); + let torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer + + assert_eq!(*torrent_entry.get_peers(None)[0], torrent_peer); + assert_eq!(torrent_entry.get_peers(None).len(), 1); + } + + #[test] + fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { + let mut torrent_entry = EntrySingle::default(); + let torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer + + assert_eq!(torrent_entry.get_peers(None), vec![Arc::new(torrent_peer)]); 
+ } + + #[test] + fn a_peer_can_be_updated_in_a_torrent_entry() { + let mut torrent_entry = EntrySingle::default(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Completed; // Update the peer + torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry + + assert_eq!(torrent_entry.get_peers(None)[0].event, AnnounceEvent::Completed); + } + + #[test] + fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { + let mut torrent_entry = EntrySingle::default(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Stopped; // Update the peer + torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry + + assert_eq!(torrent_entry.get_peers(None).len(), 0); + } + + #[test] + fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { + let mut torrent_entry = EntrySingle::default(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Completed; // Update the peer + let stats_have_changed = torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry + + assert!(stats_have_changed); + } + + #[test] + fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( + ) { + let mut torrent_entry = EntrySingle::default(); + let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + + // Add a peer that did not exist before in the entry + let torrent_stats_have_not_changed = 
!torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); + + assert!(torrent_stats_have_not_changed); + } + + #[test] + fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() + { + let mut torrent_entry = EntrySingle::default(); + let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); + torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer + + // Get peers excluding the one we have just added + let peers = torrent_entry.get_peers_for_peer(&torrent_peer, None); + + assert_eq!(peers.len(), 0); + } + + #[test] + fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { + let mut torrent_entry = EntrySingle::default(); + + let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + + // Add peer 1 + let torrent_peer_1 = TorrentPeerBuilder::default() + .with_peer_address(SocketAddr::new(peer_ip, 8080)) + .into(); + torrent_entry.insert_or_update_peer(&torrent_peer_1); + + // Add peer 2 + let torrent_peer_2 = TorrentPeerBuilder::default() + .with_peer_address(SocketAddr::new(peer_ip, 8081)) + .into(); + torrent_entry.insert_or_update_peer(&torrent_peer_2); + + // Get peers for peer 1 + let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1, None); + + // The peer 2 using the same IP but different port should be included + assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); + assert_eq!(peers[0].peer_addr.port(), 8081); + } + + fn peer_id_from_i32(number: i32) -> peer::Id { + let peer_id = number.to_le_bytes(); + peer::Id([ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], + peer_id[2], peer_id[3], + ]) + } + + #[test] + fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { + let mut torrent_entry = 
EntrySingle::default(); + + // We add one more peer than the scrape limit + for peer_number in 1..=74 + 1 { + let torrent_peer = TorrentPeerBuilder::default() + .with_peer_id(peer_id_from_i32(peer_number)) + .into(); + torrent_entry.insert_or_update_peer(&torrent_peer); + } + + let peers = torrent_entry.get_peers(Some(TORRENT_PEERS_LIMIT)); + + assert_eq!(peers.len(), 74); + } + + #[test] + fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { + let mut torrent_entry = EntrySingle::default(); + let torrent_seeder = a_torrent_seeder(); + + torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder + + assert_eq!(torrent_entry.get_stats().complete, 1); + } + + #[test] + fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { + let mut torrent_entry = EntrySingle::default(); + let torrent_leecher = a_torrent_leecher(); + + torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher + + assert_eq!(torrent_entry.get_stats().incomplete, 1); + } + + #[test] + fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( + ) { + let mut torrent_entry = EntrySingle::default(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer + + // Announce "Completed" torrent download event. 
+ torrent_peer.event = AnnounceEvent::Completed; + torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer + + let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().complete; + + assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); + } + + #[test] + fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { + let mut torrent_entry = EntrySingle::default(); + let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + + // Announce "Completed" torrent download event. + // It's the first event announced from this peer. + torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); // Add the peer + + let number_of_peers_with_completed_torrent = torrent_entry.get_stats().downloaded; + + assert_eq!(number_of_peers_with_completed_torrent, 0); + } + + #[test] + fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { + let mut torrent_entry = EntrySingle::default(); + + let timeout = 120u32; + + let now = clock::Working::now(); + clock::Stopped::local_set(&now); + + let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); + let inactive_peer = TorrentPeerBuilder::default() + .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) + .into(); + torrent_entry.insert_or_update_peer(&inactive_peer); // Add the peer + + let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(timeout))).unwrap_or_default(); + torrent_entry.remove_inactive_peers(current_cutoff); + + assert_eq!(torrent_entry.get_peers_len(), 0); + } + } +} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 903e1405e..8bb1b6def 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,5 +1,7 @@ use std::sync::Arc; +use torrust_tracker_clock::clock; + pub mod 
entry; pub mod repository; @@ -13,3 +15,14 @@ pub type TorrentsRwLockStdMutexTokio = repository::RwLockStd; pub type TorrentsRwLockTokio = repository::RwLockTokio; pub type TorrentsRwLockTokioMutexStd = repository::RwLockTokio; pub type TorrentsRwLockTokioMutexTokio = repository::RwLockTokio; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 09b624566..396e63682 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -13,13 +13,13 @@ //! 4. Initialize the domain tracker. use std::sync::Arc; +use torrust_tracker_clock::static_time; use torrust_tracker_configuration::Configuration; use super::config::initialize_configuration; use crate::bootstrap; use crate::core::services::tracker_factory; use crate::core::Tracker; -use crate::shared::clock::static_time; use crate::shared::crypto::ephemeral_instance_keys; /// It loads the configuration from the environment and builds the main domain [`Tracker`] struct. 
diff --git a/src/core/auth.rs b/src/core/auth.rs index a7bb91aa4..b5326a373 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -47,11 +47,13 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use thiserror::Error; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_located_error::{DynError, LocatedError}; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; -use crate::shared::clock::{convert_from_timestamp_to_datetime_utc, Current, Time, TimeNow}; +use crate::CurrentClock; #[must_use] /// It generates a new random 32-char authentication [`ExpiringKey`] @@ -70,7 +72,7 @@ pub fn generate(lifetime: Duration) -> ExpiringKey { ExpiringKey { key: random_id.parse::().unwrap(), - valid_until: Current::add(&lifetime).unwrap(), + valid_until: CurrentClock::now_add(&lifetime).unwrap(), } } @@ -82,7 +84,7 @@ pub fn generate(lifetime: Duration) -> ExpiringKey { /// /// Will return `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { - let current_time: DurationSinceUnixEpoch = Current::now(); + let current_time: DurationSinceUnixEpoch = CurrentClock::now(); if auth_key.valid_until < current_time { Err(Error::KeyExpired { @@ -213,8 +215,10 @@ mod tests { use std::str::FromStr; use std::time::Duration; + use torrust_tracker_clock::clock; + use torrust_tracker_clock::clock::stopped::Stopped as _; + use crate::core::auth; - use crate::shared::clock::{Current, StoppedTime}; #[test] fn should_be_parsed_from_an_string() { @@ -228,7 +232,7 @@ mod tests { #[test] fn should_be_displayed() { // Set the time to the current time. 
- Current::local_set_to_unix_epoch(); + clock::Stopped::local_set_to_unix_epoch(); let expiring_key = auth::generate(Duration::from_secs(0)); @@ -248,18 +252,18 @@ mod tests { #[test] fn should_be_generate_and_verified() { // Set the time to the current time. - Current::local_set_to_system_time_now(); + clock::Stopped::local_set_to_system_time_now(); // Make key that is valid for 19 seconds. let expiring_key = auth::generate(Duration::from_secs(19)); // Mock the time has passed 10 sec. - Current::local_add(&Duration::from_secs(10)).unwrap(); + clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); assert!(auth::verify(&expiring_key).is_ok()); // Mock the time has passed another 10 sec. - Current::local_add(&Duration::from_secs(10)).unwrap(); + clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); assert!(auth::verify(&expiring_key).is_err()); } diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index b708ef4dc..20a45cf83 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -117,9 +117,9 @@ pub trait Database: Sync + Send { /// /// It returns an array of tuples with the torrent /// [`InfoHash`] and the - /// [`completed`](torrust_tracker_torrent_repository::entry::Entry::completed) counter + /// [`completed`](torrust_tracker_torrent_repository::entry::Torrent::completed) counter /// which is the number of times the torrent has been downloaded. - /// See [`Entry::completed`](torrust_tracker_torrent_repository::entry::Entry::completed). + /// See [`Entry::completed`](torrust_tracker_torrent_repository::entry::Torrent::completed). 
/// /// # Context: Torrent Metrics /// diff --git a/src/core/mod.rs b/src/core/mod.rs index f94c46543..21cd1b501 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -444,7 +444,8 @@ use std::time::Duration; use derive_more::Constructor; use log::debug; use tokio::sync::mpsc::error::SendError; -use torrust_tracker_configuration::{AnnouncePolicy, Configuration, TrackerPolicy}; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_configuration::{AnnouncePolicy, Configuration, TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; @@ -456,10 +457,7 @@ use self::auth::Key; use self::error::Error; use self::torrent::Torrents; use crate::core::databases::Database; -use crate::shared::clock::{self, TimeNow}; - -/// The maximum number of returned peers for a torrent. -pub const TORRENT_PEERS_LIMIT: usize = 74; +use crate::CurrentClock; /// The domain layer tracker service. 
/// @@ -741,7 +739,7 @@ impl Tracker { self.torrents.remove_peerless_torrents(&self.policy); } else { let current_cutoff = - clock::Current::sub(&Duration::from_secs(u64::from(self.policy.max_peer_timeout))).unwrap_or_default(); + CurrentClock::now_sub(&Duration::from_secs(u64::from(self.policy.max_peer_timeout))).unwrap_or_default(); self.torrents.remove_inactive_peers(current_cutoff); } } @@ -1592,8 +1590,11 @@ mod tests { use std::str::FromStr; use std::time::Duration; + use torrust_tracker_clock::clock::Time; + use crate::core::auth; use crate::core::tests::the_tracker::private_tracker; + use crate::CurrentClock; #[tokio::test] async fn it_should_generate_the_expiring_authentication_keys() { @@ -1601,7 +1602,7 @@ mod tests { let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - assert_eq!(key.valid_until, Duration::from_secs(100)); + assert_eq!(key.valid_until, CurrentClock::now_add(&Duration::from_secs(100)).unwrap()); } #[tokio::test] diff --git a/src/core/peer_tests.rs b/src/core/peer_tests.rs index 9e5b4be01..d30d73db3 100644 --- a/src/core/peer_tests.rs +++ b/src/core/peer_tests.rs @@ -2,17 +2,21 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use torrust_tracker_clock::clock::stopped::Stopped as _; +use torrust_tracker_clock::clock::{self, Time}; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; -use crate::shared::clock::{self, Time}; +use crate::CurrentClock; #[test] fn it_should_be_serializable() { + clock::Stopped::local_set_to_unix_epoch(); + let torrent_peer = peer::Peer { peer_id: peer::Id(*b"-qB0000-000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: clock::Current::now(), + updated: CurrentClock::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index b5a2b4c07..2b3f9cbf7 100644 --- 
a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -31,300 +31,4 @@ use torrust_tracker_torrent_repository::TorrentsRwLockStdMutexStd; pub type Torrents = TorrentsRwLockStdMutexStd; // Currently Used #[cfg(test)] -mod tests { - - mod torrent_entry { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::ops::Sub; - use std::sync::Arc; - use std::time::Duration; - - use torrust_tracker_primitives::announce_event::AnnounceEvent; - use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; - use torrust_tracker_torrent_repository::entry::Entry; - use torrust_tracker_torrent_repository::EntrySingle; - - use crate::core::TORRENT_PEERS_LIMIT; - use crate::shared::clock::{self, StoppedTime, Time, TimeNow}; - - struct TorrentPeerBuilder { - peer: peer::Peer, - } - - impl TorrentPeerBuilder { - pub fn default() -> TorrentPeerBuilder { - let default_peer = peer::Peer { - peer_id: peer::Id([0u8; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: clock::Current::now(), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - TorrentPeerBuilder { peer: default_peer } - } - - pub fn with_event_completed(mut self) -> Self { - self.peer.event = AnnounceEvent::Completed; - self - } - - pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { - self.peer.peer_id = peer_id; - self - } - - pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes(left); - self - } - - pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self - } - - pub fn into(self) -> peer::Peer { - self.peer - } - } - - /// A torrent seeder is a peer with 0 bytes left to download which - /// has not announced it has stopped - fn a_torrent_seeder() -> peer::Peer { 
- TorrentPeerBuilder::default() - .with_number_of_bytes_left(0) - .with_event_completed() - .into() - } - - /// A torrent leecher is a peer that is not a seeder. - /// Leecher: left > 0 OR event = Stopped - fn a_torrent_leecher() -> peer::Peer { - TorrentPeerBuilder::default() - .with_number_of_bytes_left(1) - .with_event_completed() - .into() - } - - #[test] - fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = EntrySingle::default(); - - assert_eq!(torrent_entry.get_peers(None).len(), 0); - } - - #[test] - fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - assert_eq!(*torrent_entry.get_peers(None)[0], torrent_peer); - assert_eq!(torrent_entry.get_peers(None).len(), 1); - } - - #[test] - fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - assert_eq!(torrent_entry.get_peers(None), vec![Arc::new(torrent_peer)]); - } - - #[test] - fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert_eq!(torrent_entry.get_peers(None)[0].event, AnnounceEvent::Completed); - } - - #[test] - fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - 
torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Stopped; // Update the peer - torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert_eq!(torrent_entry.get_peers(None).len(), 0); - } - - #[test] - fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - let stats_have_changed = torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert!(stats_have_changed); - } - - #[test] - fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( - ) { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); - - // Add a peer that did not exist before in the entry - let torrent_stats_have_not_changed = !torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); - - assert!(torrent_stats_have_not_changed); - } - - #[test] - fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() - { - let mut torrent_entry = EntrySingle::default(); - let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer - - // Get peers excluding the one we have just added - let peers = torrent_entry.get_peers_for_peer(&torrent_peer, None); - - assert_eq!(peers.len(), 0); - } - - #[test] - fn 
two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { - let mut torrent_entry = EntrySingle::default(); - - let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); - - // Add peer 1 - let torrent_peer_1 = TorrentPeerBuilder::default() - .with_peer_address(SocketAddr::new(peer_ip, 8080)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer_1); - - // Add peer 2 - let torrent_peer_2 = TorrentPeerBuilder::default() - .with_peer_address(SocketAddr::new(peer_ip, 8081)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer_2); - - // Get peers for peer 1 - let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1, None); - - // The peer 2 using the same IP but different port should be included - assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); - assert_eq!(peers[0].peer_addr.port(), 8081); - } - - fn peer_id_from_i32(number: i32) -> peer::Id { - let peer_id = number.to_le_bytes(); - peer::Id([ - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], - peer_id[2], peer_id[3], - ]) - } - - #[test] - fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = EntrySingle::default(); - - // We add one more peer than the scrape limit - for peer_number in 1..=74 + 1 { - let torrent_peer = TorrentPeerBuilder::default() - .with_peer_id(peer_id_from_i32(peer_number)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer); - } - - let peers = torrent_entry.get_peers(Some(TORRENT_PEERS_LIMIT)); - - assert_eq!(peers.len(), 74); - } - - #[test] - fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = EntrySingle::default(); - let torrent_seeder = a_torrent_seeder(); - - torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder - - assert_eq!(torrent_entry.get_stats().complete, 1); - } - - #[test] - fn 
torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = EntrySingle::default(); - let torrent_leecher = a_torrent_leecher(); - - torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher - - assert_eq!(torrent_entry.get_stats().incomplete, 1); - } - - #[test] - fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( - ) { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - // Announce "Completed" torrent download event. - torrent_peer.event = AnnounceEvent::Completed; - torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer - - let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().complete; - - assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); - } - - #[test] - fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); - - // Announce "Completed" torrent download event. - // It's the first event announced from this peer. 
- torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); // Add the peer - - let number_of_peers_with_completed_torrent = torrent_entry.get_stats().downloaded; - - assert_eq!(number_of_peers_with_completed_torrent, 0); - } - - #[test] - fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = EntrySingle::default(); - - let timeout = 120u32; - - let now = clock::Working::now(); - clock::Stopped::local_set(&now); - - let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); - let inactive_peer = TorrentPeerBuilder::default() - .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) - .into(); - torrent_entry.insert_or_update_peer(&inactive_peer); // Add the peer - - let current_cutoff = clock::Current::sub(&Duration::from_secs(u64::from(timeout))).unwrap_or_default(); - torrent_entry.remove_inactive_peers(current_cutoff); - - assert_eq!(torrent_entry.get_peers_len(), 0); - } - } -} +mod tests {} diff --git a/src/lib.rs b/src/lib.rs index b4ad298ac..064f50eb6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -469,6 +469,9 @@ //! //! In addition to the production code documentation you can find a lot of //! examples on the integration and unit tests. + +use torrust_tracker_clock::{clock, time_extent}; + pub mod app; pub mod bootstrap; pub mod console; @@ -478,3 +481,24 @@ pub mod shared; #[macro_use] extern crate lazy_static; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::WorkingTimeExtentMaker; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::StoppedTimeExtentMaker; diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs index 99e93aaf9..3671438c2 100644 --- a/src/servers/apis/v1/context/auth_key/resources.rs +++ b/src/servers/apis/v1/context/auth_key/resources.rs @@ -1,9 +1,9 @@ //! API resources for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. use serde::{Deserialize, Serialize}; +use torrust_tracker_clock::conv::convert_from_iso_8601_to_timestamp; use crate::core::auth::{self, Key}; -use crate::shared::clock::convert_from_iso_8601_to_timestamp; /// A resource that represents an authentication key. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -41,9 +41,12 @@ impl From for AuthKey { mod tests { use std::time::Duration; + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self, Time}; + use super::AuthKey; use crate::core::auth::{self, Key}; - use crate::shared::clock::{Current, TimeNow}; + use crate::CurrentClock; struct TestTime { pub timestamp: u64, @@ -65,6 +68,8 @@ mod tests { #[test] #[allow(deprecated)] fn it_should_be_convertible_into_an_auth_key() { + clock::Stopped::local_set_to_unix_epoch(); + let auth_key_resource = AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: one_hour_after_unix_epoch().timestamp, @@ -75,7 +80,7 @@ mod tests { auth::ExpiringKey::from(auth_key_resource), auth::ExpiringKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Current::add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap() + valid_until: CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap() } ); } @@ -83,9 +88,11 @@ mod tests { #[test] #[allow(deprecated)] fn it_should_be_convertible_from_an_auth_key() { + 
clock::Stopped::local_set_to_unix_epoch(); + let auth_key = auth::ExpiringKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Current::add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap(), + valid_until: CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap(), }; assert_eq!( diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index 6e8b5a40e..3ef85e600 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -71,7 +71,7 @@ //! is behind a reverse proxy. //! //! > **NOTICE**: the maximum number of peers that the tracker can return is -//! `74`. Defined with a hardcoded const [`TORRENT_PEERS_LIMIT`](crate::core::TORRENT_PEERS_LIMIT). +//! `74`. Defined with a hardcoded const [`TORRENT_PEERS_LIMIT`](torrust_tracker_configuration::TORRENT_PEERS_LIMIT). //! Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) //! for more information about this limitation. //! 
diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 215acbad8..e9198f20c 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -12,6 +12,7 @@ use std::sync::Arc; use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; +use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; @@ -25,7 +26,7 @@ use crate::servers::http::v1::requests::announce::{Announce, Compact, Event}; use crate::servers::http::v1::responses::{self}; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::servers::http::v1::services::{self, peer_ip_resolver}; -use crate::shared::clock::{Current, Time}; +use crate::CurrentClock; /// It handles the `announce` request when the HTTP tracker does not require /// authentication (no PATH `key` parameter required). @@ -134,7 +135,7 @@ fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> peer::Pee peer::Peer { peer_id: announce_request.peer_id, peer_addr: SocketAddr::new(*peer_ip, announce_request.port), - updated: Current::now(), + updated: CurrentClock::now(), uploaded: NumberOfBytes(announce_request.uploaded.unwrap_or(0)), downloaded: NumberOfBytes(announce_request.downloaded.unwrap_or(0)), left: NumberOfBytes(announce_request.left.unwrap_or(0)), diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index 19e61f14e..49ea6261b 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -70,9 +70,9 @@ use std::net::SocketAddr; use std::panic::Location; use aquatic_udp_protocol::ConnectionId; +use torrust_tracker_clock::time_extent::{Extent, TimeExtent}; use super::error::Error; -use crate::shared::clock::time_extent::{Extent, TimeExtent}; pub type Cookie = [u8; 8]; @@ -133,9 +133,11 @@ mod cookie_builder { use 
std::hash::{Hash, Hasher}; use std::net::SocketAddr; + use torrust_tracker_clock::time_extent::{Extent, Make, TimeExtent}; + use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; - use crate::shared::clock::time_extent::{DefaultTimeExtentMaker, Extent, Make, TimeExtent}; use crate::shared::crypto::keys::seeds::{Current, Keeper}; + use crate::DefaultTimeExtentMaker; pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { DefaultTimeExtentMaker::now(&COOKIE_LIFETIME.increment) @@ -162,10 +164,12 @@ mod cookie_builder { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self}; + use torrust_tracker_clock::time_extent::{self, Extent}; + use super::cookie_builder::{self}; use crate::servers::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; - use crate::shared::clock::time_extent::{self, Extent}; - use crate::shared::clock::{Stopped, StoppedTime}; // #![feature(const_socketaddr)] // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); @@ -176,6 +180,8 @@ mod tests { const ID_COOKIE_OLD: Cookie = [23, 204, 198, 29, 48, 180, 62, 19]; const ID_COOKIE_NEW: Cookie = [41, 166, 45, 246, 249, 24, 108, 203]; + clock::Stopped::local_set_to_unix_epoch(); + let cookie = make(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); assert!(cookie == ID_COOKIE_OLD || cookie == ID_COOKIE_NEW); @@ -276,7 +282,7 @@ mod tests { let cookie = make(&remote_address); - Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); + clock::Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); let cookie_next = make(&remote_address); @@ -298,7 +304,7 @@ mod tests { let cookie = make(&remote_address); - Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); + clock::Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); check(&remote_address, &cookie).unwrap(); } @@ -307,9 +313,11 @@ mod tests { fn 
it_should_be_valid_for_the_last_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + clock::Stopped::local_set_to_unix_epoch(); + let cookie = make(&remote_address); - Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); + clock::Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); check(&remote_address, &cookie).unwrap(); } @@ -321,7 +329,7 @@ mod tests { let cookie = make(&remote_address); - Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); + clock::Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); check(&remote_address, &cookie).unwrap(); } diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 8f6e6d8b4..59aec0ff3 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -318,6 +318,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; + use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; @@ -325,7 +326,7 @@ mod tests { use crate::core::services::tracker_factory; use crate::core::Tracker; - use crate::shared::clock::{Current, Time}; + use crate::CurrentClock; fn tracker_configuration() -> Configuration { default_testing_tracker_configuration() @@ -376,7 +377,7 @@ mod tests { let default_peer = peer::Peer { peer_id: peer::Id([255u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: Current::now(), + updated: CurrentClock::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index 8c8fa10a5..f7eb935a0 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -1,11 +1,12 @@ //! Logic to extract the peer info from the announce request. 
use std::net::{IpAddr, SocketAddr}; +use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; use super::request::AnnounceWrapper; -use crate::shared::clock::{Current, Time}; +use crate::CurrentClock; /// Extracts the [`peer::Peer`] info from the /// announce request. @@ -20,7 +21,7 @@ pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> pee peer::Peer { peer_id: peer::Id(announce_wrapper.announce_request.peer_id.0), peer_addr: SocketAddr::new(*peer_ip, announce_wrapper.announce_request.port.0), - updated: Current::now(), + updated: CurrentClock::now(), uploaded: NumberOfBytes(announce_wrapper.announce_request.bytes_uploaded.0), downloaded: NumberOfBytes(announce_wrapper.announce_request.bytes_downloaded.0), left: NumberOfBytes(announce_wrapper.announce_request.bytes_left.0), diff --git a/src/shared/clock/mod.rs b/src/shared/clock/mod.rs deleted file mode 100644 index a73878466..000000000 --- a/src/shared/clock/mod.rs +++ /dev/null @@ -1,393 +0,0 @@ -//! Time related functions and types. -//! -//! It's usually a good idea to control where the time comes from -//! in an application so that it can be mocked for testing and it can be -//! controlled in production so we get the intended behavior without -//! relying on the specific time zone for the underlying system. -//! -//! Clocks use the type `DurationSinceUnixEpoch` which is a -//! `std::time::Duration` since the Unix Epoch (timestamp). -//! -//! ```text -//! Local time: lun 2023-03-27 16:12:00 WEST -//! Universal time: lun 2023-03-27 15:12:00 UTC -//! Time zone: Atlantic/Canary (WEST, +0100) -//! Timestamp: 1679929914 -//! Duration: 1679929914.10167426 -//! ``` -//! -//! > **NOTICE**: internally the `Duration` is stores it's main unit as seconds in a `u64` and it will -//! overflow in 584.9 billion years. -//! -//! > **NOTICE**: the timestamp does not depend on the time zone. 
That gives you -//! the ability to use the clock regardless of the underlying system time zone -//! configuration. See [Unix time Wikipedia entry](https://en.wikipedia.org/wiki/Unix_time). -pub mod static_time; -pub mod time_extent; -pub mod utils; - -use std::num::IntErrorKind; -use std::str::FromStr; -use std::time::Duration; - -use chrono::{DateTime, Utc}; -use torrust_tracker_primitives::DurationSinceUnixEpoch; - -/// Clock types. -#[derive(Debug)] -pub enum Type { - /// Clock that returns the current time. - WorkingClock, - /// Clock that returns always the same fixed time. - StoppedClock, -} - -/// A generic structure that represents a clock. -/// -/// It can be either the working clock (production) or the stopped clock -/// (testing). It implements the `Time` trait, which gives you the current time. -#[derive(Debug)] -pub struct Clock; - -/// The working clock. It returns the current time. -pub type Working = Clock<{ Type::WorkingClock as usize }>; -/// The stopped clock. It returns always the same fixed time. -pub type Stopped = Clock<{ Type::StoppedClock as usize }>; - -/// The current clock. Defined at compilation time. -/// It can be either the working clock (production) or the stopped clock (testing). -#[cfg(not(test))] -pub type Current = Working; - -/// The current clock. Defined at compilation time. -/// It can be either the working clock (production) or the stopped clock (testing). -#[cfg(test)] -pub type Current = Stopped; - -/// Trait for types that can be used as a timestamp clock. -pub trait Time: Sized { - fn now() -> DurationSinceUnixEpoch; -} - -/// Trait for types that can be manipulate the current time in order to -/// get time in the future or in the past after or before a duration of time. 
-pub trait TimeNow: Time { - #[must_use] - fn add(add_time: &Duration) -> Option { - Self::now().checked_add(*add_time) - } - #[must_use] - fn sub(sub_time: &Duration) -> Option { - Self::now().checked_sub(*sub_time) - } -} - -/// It converts a string in ISO 8601 format to a timestamp. -/// For example, the string `1970-01-01T00:00:00.000Z` which is the Unix Epoch -/// will be converted to a timestamp of 0: `DurationSinceUnixEpoch::ZERO`. -/// -/// # Panics -/// -/// Will panic if the input time cannot be converted to `DateTime::`, internally using the `i64` type. -/// (this will naturally happen in 292.5 billion years) -#[must_use] -pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEpoch { - convert_from_datetime_utc_to_timestamp(&DateTime::::from_str(iso_8601).unwrap()) -} - -/// It converts a `DateTime::` to a timestamp. -/// For example, the `DateTime::` of the Unix Epoch will be converted to a -/// timestamp of 0: `DurationSinceUnixEpoch::ZERO`. -/// -/// # Panics -/// -/// Will panic if the input time overflows the `u64` type. -/// (this will naturally happen in 584.9 billion years) -#[must_use] -pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> DurationSinceUnixEpoch { - DurationSinceUnixEpoch::from_secs(u64::try_from(datetime_utc.timestamp()).expect("Overflow of u64 seconds, very future!")) -} - -/// It converts a timestamp to a `DateTime::`. -/// For example, the timestamp of 0: `DurationSinceUnixEpoch::ZERO` will be -/// converted to the `DateTime::` of the Unix Epoch. -/// -/// # Panics -/// -/// Will panic if the input time overflows the `u64` seconds overflows the `i64` type. 
-/// (this will naturally happen in 292.5 billion years) -#[must_use] -pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime { - DateTime::from_timestamp( - i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), - duration.subsec_nanos(), - ) - .unwrap() -} - -#[cfg(test)] -mod tests { - use std::any::TypeId; - - use crate::shared::clock::{Current, Stopped, Time, Working}; - - #[test] - fn it_should_be_the_stopped_clock_as_default_when_testing() { - // We are testing, so we should default to the fixed time. - assert_eq!(TypeId::of::(), TypeId::of::()); - assert_eq!(Stopped::now(), Current::now()); - } - - #[test] - fn it_should_have_different_times() { - assert_ne!(TypeId::of::(), TypeId::of::()); - assert_ne!(Stopped::now(), Working::now()); - } - - mod timestamp { - use chrono::DateTime; - - use crate::shared::clock::{ - convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc, - DurationSinceUnixEpoch, - }; - - #[test] - fn should_be_converted_to_datetime_utc() { - let timestamp = DurationSinceUnixEpoch::ZERO; - assert_eq!( - convert_from_timestamp_to_datetime_utc(timestamp), - DateTime::from_timestamp(0, 0).unwrap() - ); - } - - #[test] - fn should_be_converted_from_datetime_utc() { - let datetime = DateTime::from_timestamp(0, 0).unwrap(); - assert_eq!( - convert_from_datetime_utc_to_timestamp(&datetime), - DurationSinceUnixEpoch::ZERO - ); - } - - #[test] - fn should_be_converted_from_datetime_utc_in_iso_8601() { - let iso_8601 = "1970-01-01T00:00:00.000Z".to_string(); - assert_eq!(convert_from_iso_8601_to_timestamp(&iso_8601), DurationSinceUnixEpoch::ZERO); - } - } -} - -mod working_clock { - use std::time::SystemTime; - - use super::{DurationSinceUnixEpoch, Time, TimeNow, Working}; - - impl Time for Working { - fn now() -> DurationSinceUnixEpoch { - SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() - } - } - - impl 
TimeNow for Working {} -} - -/// Trait for types that can be used as a timestamp clock stopped -/// at a given time. -pub trait StoppedTime: TimeNow { - /// It sets the clock to a given time. - fn local_set(unix_time: &DurationSinceUnixEpoch); - - /// It sets the clock to the Unix Epoch. - fn local_set_to_unix_epoch() { - Self::local_set(&DurationSinceUnixEpoch::ZERO); - } - - /// It sets the clock to the time the application started. - fn local_set_to_app_start_time(); - - /// It sets the clock to the current system time. - fn local_set_to_system_time_now(); - - /// It adds a `Duration` to the clock. - /// - /// # Errors - /// - /// Will return `IntErrorKind` if `duration` would overflow the internal `Duration`. - fn local_add(duration: &Duration) -> Result<(), IntErrorKind>; - - /// It subtracts a `Duration` from the clock. - /// # Errors - /// - /// Will return `IntErrorKind` if `duration` would underflow the internal `Duration`. - fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>; - - /// It resets the clock to default fixed time that is application start time (or the unix epoch when testing). 
- fn local_reset(); -} - -mod stopped_clock { - use std::num::IntErrorKind; - use std::time::Duration; - - use super::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow}; - - impl Time for Stopped { - fn now() -> DurationSinceUnixEpoch { - detail::FIXED_TIME.with(|time| { - return *time.borrow(); - }) - } - } - - impl TimeNow for Stopped {} - - impl StoppedTime for Stopped { - fn local_set(unix_time: &DurationSinceUnixEpoch) { - detail::FIXED_TIME.with(|time| { - *time.borrow_mut() = *unix_time; - }); - } - - fn local_set_to_app_start_time() { - Self::local_set(&detail::get_app_start_time()); - } - - fn local_set_to_system_time_now() { - Self::local_set(&detail::get_app_start_time()); - } - - fn local_add(duration: &Duration) -> Result<(), IntErrorKind> { - detail::FIXED_TIME.with(|time| { - let time_borrowed = *time.borrow(); - *time.borrow_mut() = match time_borrowed.checked_add(*duration) { - Some(time) => time, - None => { - return Err(IntErrorKind::PosOverflow); - } - }; - Ok(()) - }) - } - - fn local_sub(duration: &Duration) -> Result<(), IntErrorKind> { - detail::FIXED_TIME.with(|time| { - let time_borrowed = *time.borrow(); - *time.borrow_mut() = match time_borrowed.checked_sub(*duration) { - Some(time) => time, - None => { - return Err(IntErrorKind::NegOverflow); - } - }; - Ok(()) - }) - } - - fn local_reset() { - Self::local_set(&detail::get_default_fixed_time()); - } - } - - #[cfg(test)] - mod tests { - use std::thread; - use std::time::Duration; - - use crate::shared::clock::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow, Working}; - - #[test] - fn it_should_default_to_zero_when_testing() { - assert_eq!(Stopped::now(), DurationSinceUnixEpoch::ZERO); - } - - #[test] - fn it_should_possible_to_set_the_time() { - // Check we start with ZERO. 
- assert_eq!(Stopped::now(), Duration::ZERO); - - // Set to Current Time and Check - let timestamp = Working::now(); - Stopped::local_set(×tamp); - assert_eq!(Stopped::now(), timestamp); - - // Elapse the Current Time and Check - Stopped::local_add(×tamp).unwrap(); - assert_eq!(Stopped::now(), timestamp + timestamp); - - // Reset to ZERO and Check - Stopped::local_reset(); - assert_eq!(Stopped::now(), Duration::ZERO); - } - - #[test] - fn it_should_default_to_zero_on_thread_exit() { - assert_eq!(Stopped::now(), Duration::ZERO); - let after5 = Working::add(&Duration::from_secs(5)).unwrap(); - Stopped::local_set(&after5); - assert_eq!(Stopped::now(), after5); - - let t = thread::spawn(move || { - // each thread starts out with the initial value of ZERO - assert_eq!(Stopped::now(), Duration::ZERO); - - // and gets set to the current time. - let timestamp = Working::now(); - Stopped::local_set(×tamp); - assert_eq!(Stopped::now(), timestamp); - }); - - // wait for the thread to complete and bail out on panic - t.join().unwrap(); - - // we retain our original value of current time + 5sec despite the child thread - assert_eq!(Stopped::now(), after5); - - // Reset to ZERO and Check - Stopped::local_reset(); - assert_eq!(Stopped::now(), Duration::ZERO); - } - } - - mod detail { - use std::cell::RefCell; - use std::time::SystemTime; - - use crate::shared::clock::{static_time, DurationSinceUnixEpoch}; - - pub fn get_app_start_time() -> DurationSinceUnixEpoch { - (*static_time::TIME_AT_APP_START) - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - } - - #[cfg(not(test))] - pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { - get_app_start_time() - } - - #[cfg(test)] - pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { - DurationSinceUnixEpoch::ZERO - } - - thread_local!(pub static FIXED_TIME: RefCell = RefCell::new(get_default_fixed_time())); - - #[cfg(test)] - mod tests { - use std::time::Duration; - - use 
crate::shared::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time}; - - #[test] - fn it_should_get_the_zero_start_time_when_testing() { - assert_eq!(get_default_fixed_time(), Duration::ZERO); - } - - #[test] - fn it_should_get_app_start_time() { - const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1_662_983_731, 22312); - assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); - } - } - } -} diff --git a/src/shared/clock/utils.rs b/src/shared/clock/utils.rs deleted file mode 100644 index 8b1378917..000000000 --- a/src/shared/clock/utils.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/src/shared/mod.rs b/src/shared/mod.rs index f016ba913..8c95effe1 100644 --- a/src/shared/mod.rs +++ b/src/shared/mod.rs @@ -1,8 +1,6 @@ //! Modules with generic logic used by several modules. //! //! - [`bit_torrent`]: `BitTorrent` protocol related logic. -//! - [`clock`]: Times services. //! - [`crypto`]: Encryption related logic. pub mod bit_torrent; -pub mod clock; pub mod crypto; diff --git a/tests/common/clock.rs b/tests/common/clock.rs new file mode 100644 index 000000000..5d94bb83d --- /dev/null +++ b/tests/common/clock.rs @@ -0,0 +1,16 @@ +use std::time::Duration; + +use torrust_tracker_clock::clock::Time; + +use crate::CurrentClock; + +#[test] +fn it_should_use_stopped_time_for_testing() { + assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); + + let time = CurrentClock::now(); + std::thread::sleep(Duration::from_millis(50)); + let time_2 = CurrentClock::now(); + + assert_eq!(time, time_2); +} diff --git a/tests/common/mod.rs b/tests/common/mod.rs index b57996292..281c1fb9c 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,3 +1,4 @@ +pub mod clock; pub mod fixtures; pub mod http; pub mod udp; diff --git a/tests/integration.rs b/tests/integration.rs index 5d66d9074..8e3d46826 100644 --- a/tests/integration.rs +++ b/tests/integration.rs @@ -3,5 +3,18 @@ //! ```text //! cargo test --test integration //! 
``` + +use torrust_tracker_clock::clock; mod common; mod servers; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; From e18cae46e74f2f38bdbd2ee064b3c986c01ed7f6 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 25 Mar 2024 12:12:46 +0800 Subject: [PATCH 8/9] dev: torrent repository cleanups --- cSpell.json | 1 + packages/configuration/src/lib.rs | 2 +- packages/primitives/src/announce_event.rs | 2 +- packages/primitives/src/info_hash.rs | 19 + packages/primitives/src/lib.rs | 5 +- packages/primitives/src/pagination.rs | 8 +- packages/primitives/src/peer.rs | 24 +- packages/primitives/src/torrent_metrics.rs | 12 +- packages/torrent-repository/src/entry/mod.rs | 31 +- .../torrent-repository/src/entry/mutex_std.rs | 17 +- .../src/entry/mutex_tokio.rs | 25 +- .../torrent-repository/src/entry/single.rs | 324 +----------------- .../torrent-repository/src/repository/mod.rs | 38 +- .../src/repository/rw_lock_std.rs | 12 +- .../src/repository/rw_lock_std_mutex_std.rs | 10 +- .../src/repository/rw_lock_std_mutex_tokio.rs | 30 +- .../src/repository/rw_lock_tokio.rs | 10 +- .../src/repository/rw_lock_tokio_mutex_std.rs | 10 +- .../repository/rw_lock_tokio_mutex_tokio.rs | 22 +- src/core/databases/mod.rs | 4 +- src/core/databases/mysql.rs | 6 +- src/core/databases/sqlite.rs | 11 +- src/core/mod.rs | 20 +- src/core/torrent/mod.rs | 3 - .../apis/v1/context/stats/resources.rs | 12 +- src/servers/udp/handlers.rs | 63 ++-- 26 files changed, 259 insertions(+), 462 deletions(-) diff --git a/cSpell.json b/cSpell.json index 1e276dbc2..bbcba98a7 100644 --- a/cSpell.json +++ b/cSpell.json @@ -100,6 +100,7 @@ "ostr", "Pando", "peekable", + "peerlist", "proot", "proto", "Quickstart", diff --git a/packages/configuration/src/lib.rs 
b/packages/configuration/src/lib.rs index 549c73a31..ca873f3cd 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -246,7 +246,7 @@ use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; /// The maximum number of returned peers for a torrent. pub const TORRENT_PEERS_LIMIT: usize = 74; -#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] +#[derive(Copy, Clone, Debug, PartialEq, Constructor)] pub struct TrackerPolicy { pub remove_peerless_torrents: bool, pub max_peer_timeout: u32, diff --git a/packages/primitives/src/announce_event.rs b/packages/primitives/src/announce_event.rs index 16e47da99..3bd560084 100644 --- a/packages/primitives/src/announce_event.rs +++ b/packages/primitives/src/announce_event.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; /// Announce events. Described on the /// [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Serialize, Deserialize)] +#[derive(Hash, Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] pub enum AnnounceEvent { /// The peer has started downloading the torrent. 
Started, diff --git a/packages/primitives/src/info_hash.rs b/packages/primitives/src/info_hash.rs index 46ae6283e..a07cc41a2 100644 --- a/packages/primitives/src/info_hash.rs +++ b/packages/primitives/src/info_hash.rs @@ -1,3 +1,4 @@ +use std::hash::{DefaultHasher, Hash, Hasher}; use std::panic::Location; use thiserror::Error; @@ -77,6 +78,24 @@ impl std::convert::From<&[u8]> for InfoHash { } } +/// for testing +impl std::convert::From<&DefaultHasher> for InfoHash { + fn from(data: &DefaultHasher) -> InfoHash { + let n = data.finish().to_le_bytes(); + InfoHash([ + n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[0], n[1], n[2], + n[3], + ]) + } +} + +impl std::convert::From<&i32> for InfoHash { + fn from(n: &i32) -> InfoHash { + let n = n.to_le_bytes(); + InfoHash([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, n[0], n[1], n[2], n[3]]) + } +} + impl std::convert::From<[u8; 20]> for InfoHash { fn from(val: [u8; 20]) -> Self { InfoHash(val) diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index 664c0c82d..aeb4d0d4e 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -4,6 +4,7 @@ //! which is a `BitTorrent` tracker server. These structures are used not only //! by the tracker server crate, but also by other crates in the Torrust //! ecosystem. +use std::collections::BTreeMap; use std::time::Duration; use info_hash::InfoHash; @@ -38,7 +39,7 @@ pub enum IPVersion { } /// Number of bytes downloaded, uploaded or pending to download (left) by the peer. -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Serialize, Deserialize)] +#[derive(Hash, Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] pub struct NumberOfBytes(pub i64); /// The database management system used by the tracker. 
@@ -58,7 +59,7 @@ pub enum DatabaseDriver { MySQL, } -pub type PersistentTorrents = Vec<(InfoHash, u32)>; +pub type PersistentTorrents = BTreeMap; /// The mode the tracker will run in. /// diff --git a/packages/primitives/src/pagination.rs b/packages/primitives/src/pagination.rs index ab7dcfe2b..96b5ad662 100644 --- a/packages/primitives/src/pagination.rs +++ b/packages/primitives/src/pagination.rs @@ -1,7 +1,8 @@ +use derive_more::Constructor; use serde::Deserialize; /// A struct to keep information about the page when results are being paginated -#[derive(Deserialize, Copy, Clone, Debug, PartialEq)] +#[derive(Deserialize, Copy, Clone, Debug, PartialEq, Constructor)] pub struct Pagination { /// The page number, starting at 0 pub offset: u32, @@ -10,11 +11,6 @@ pub struct Pagination { } impl Pagination { - #[must_use] - pub fn new(offset: u32, limit: u32) -> Self { - Self { offset, limit } - } - #[must_use] pub fn new_with_options(offset_option: Option, limit_option: Option) -> Self { let offset = match offset_option { diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index 5fb9e525f..f5b009f2a 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -51,7 +51,7 @@ use crate::{ser_unix_time_value, DurationSinceUnixEpoch, IPVersion, NumberOfByte /// event: AnnounceEvent::Started, /// }; /// ``` -#[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] +#[derive(Debug, Clone, Serialize, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Peer { /// ID used by the downloader peer pub peer_id: Id, @@ -173,6 +173,16 @@ impl From<[u8; 20]> for Id { } } +impl From for Id { + fn from(number: i32) -> Self { + let peer_id = number.to_le_bytes(); + Id::from([ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], peer_id[2], + peer_id[3], + ]) + } +} + impl TryFrom> for Id { type Error = IdConversionError; @@ -332,7 +342,7 @@ impl FromIterator for Vec

{ } pub mod fixture { - use std::net::SocketAddr; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use super::{Id, Peer}; use crate::announce_event::AnnounceEvent; @@ -396,8 +406,8 @@ pub mod fixture { impl Default for Peer { fn default() -> Self { Self { - peer_id: Id(*b"-qB00000000000000000"), - peer_addr: std::net::SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::new(126, 0, 0, 1)), 8080), + peer_id: Id::default(), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), @@ -406,6 +416,12 @@ pub mod fixture { } } } + + impl Default for Id { + fn default() -> Self { + Self(*b"-qB00000000000000000") + } + } } #[cfg(test)] diff --git a/packages/primitives/src/torrent_metrics.rs b/packages/primitives/src/torrent_metrics.rs index c60507171..02de02954 100644 --- a/packages/primitives/src/torrent_metrics.rs +++ b/packages/primitives/src/torrent_metrics.rs @@ -6,20 +6,20 @@ use std::ops::AddAssign; #[derive(Copy, Clone, Debug, PartialEq, Default)] pub struct TorrentsMetrics { /// Total number of seeders for all torrents - pub seeders: u64, + pub complete: u64, /// Total number of peers that have ever completed downloading for all torrents. - pub completed: u64, + pub downloaded: u64, /// Total number of leechers for all torrents. - pub leechers: u64, + pub incomplete: u64, /// Total number of torrents. 
pub torrents: u64, } impl AddAssign for TorrentsMetrics { fn add_assign(&mut self, rhs: Self) { - self.seeders += rhs.seeders; - self.completed += rhs.completed; - self.leechers += rhs.leechers; + self.complete += rhs.complete; + self.downloaded += rhs.downloaded; + self.incomplete += rhs.incomplete; self.torrents += rhs.torrents; } } diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index 11352a8fa..4c39af829 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -1,4 +1,5 @@ use std::fmt::Debug; +use std::net::SocketAddr; use std::sync::Arc; //use serde::{Deserialize, Serialize}; @@ -17,7 +18,7 @@ pub trait Entry { fn get_stats(&self) -> SwarmMetadata; /// Returns True if Still a Valid Entry according to the Tracker Policy - fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; + fn is_good(&self, policy: &TrackerPolicy) -> bool; /// Returns True if the Peers is Empty fn peers_is_empty(&self) -> bool; @@ -33,7 +34,7 @@ pub trait Entry { /// /// It filters out the input peer, typically because we want to return this /// list of peers to that client peer. - fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec>; /// It updates a peer and returns true if the number of complete downloads have increased. 
/// @@ -51,11 +52,11 @@ pub trait Entry { #[allow(clippy::module_name_repetitions)] pub trait EntrySync { fn get_stats(&self) -> SwarmMetadata; - fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; + fn is_good(&self, policy: &TrackerPolicy) -> bool; fn peers_is_empty(&self) -> bool; fn get_peers_len(&self) -> usize; fn get_peers(&self, limit: Option) -> Vec>; - fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec>; fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool; fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata); fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); @@ -63,16 +64,14 @@ pub trait EntrySync { #[allow(clippy::module_name_repetitions)] pub trait EntryAsync { - fn get_stats(self) -> impl std::future::Future + Send; - - #[allow(clippy::wrong_self_convention)] - fn is_not_zombie(self, policy: &TrackerPolicy) -> impl std::future::Future + Send; - fn peers_is_empty(self) -> impl std::future::Future + Send; - fn get_peers_len(self) -> impl std::future::Future + Send; - fn get_peers(self, limit: Option) -> impl std::future::Future>> + Send; - fn get_peers_for_peer( - self, - client: &peer::Peer, + fn get_stats(&self) -> impl std::future::Future + Send; + fn check_good(self, policy: &TrackerPolicy) -> impl std::future::Future + Send; + fn peers_is_empty(&self) -> impl std::future::Future + Send; + fn get_peers_len(&self) -> impl std::future::Future + Send; + fn get_peers(&self, limit: Option) -> impl std::future::Future>> + Send; + fn get_peers_for_client( + &self, + client: &SocketAddr, limit: Option, ) -> impl std::future::Future>> + Send; fn insert_or_update_peer(self, peer: &peer::Peer) -> impl std::future::Future + Send; @@ -88,11 +87,11 @@ pub trait EntryAsync { /// This is the tracker entry for a given torrent and contains the swarm data, /// that's the list of all the peers trying 
to download the same torrent. /// The tracker keeps one entry like this for every torrent. -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Torrent { /// The swarm: a network of peers that are all trying to download the torrent associated to this entry // #[serde(skip)] pub(crate) peers: std::collections::BTreeMap>, /// The number of peers that have ever completed downloading the torrent associated to this entry - pub(crate) completed: u32, + pub(crate) downloaded: u32, } diff --git a/packages/torrent-repository/src/entry/mutex_std.rs b/packages/torrent-repository/src/entry/mutex_std.rs index df6228317..b4b823909 100644 --- a/packages/torrent-repository/src/entry/mutex_std.rs +++ b/packages/torrent-repository/src/entry/mutex_std.rs @@ -1,3 +1,4 @@ +use std::net::SocketAddr; use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; @@ -5,15 +6,15 @@ use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use super::{Entry, EntrySync}; -use crate::EntryMutexStd; +use crate::{EntryMutexStd, EntrySingle}; impl EntrySync for EntryMutexStd { fn get_stats(&self) -> SwarmMetadata { self.lock().expect("it should get a lock").get_stats() } - fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { - self.lock().expect("it should get a lock").is_not_zombie(policy) + fn is_good(&self, policy: &TrackerPolicy) -> bool { + self.lock().expect("it should get a lock").is_good(policy) } fn peers_is_empty(&self) -> bool { @@ -28,8 +29,8 @@ impl EntrySync for EntryMutexStd { self.lock().expect("it should get lock").get_peers(limit) } - fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { - self.lock().expect("it should get lock").get_peers_for_peer(client, limit) + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers_for_client(client, 
limit) } fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { @@ -48,3 +49,9 @@ impl EntrySync for EntryMutexStd { .remove_inactive_peers(current_cutoff); } } + +impl From for EntryMutexStd { + fn from(entry: EntrySingle) -> Self { + Arc::new(std::sync::Mutex::new(entry)) + } +} diff --git a/packages/torrent-repository/src/entry/mutex_tokio.rs b/packages/torrent-repository/src/entry/mutex_tokio.rs index c4d13fb43..34f4a4e92 100644 --- a/packages/torrent-repository/src/entry/mutex_tokio.rs +++ b/packages/torrent-repository/src/entry/mutex_tokio.rs @@ -1,3 +1,4 @@ +use std::net::SocketAddr; use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; @@ -5,31 +6,31 @@ use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use super::{Entry, EntryAsync}; -use crate::EntryMutexTokio; +use crate::{EntryMutexTokio, EntrySingle}; impl EntryAsync for EntryMutexTokio { - async fn get_stats(self) -> SwarmMetadata { + async fn get_stats(&self) -> SwarmMetadata { self.lock().await.get_stats() } - async fn is_not_zombie(self, policy: &TrackerPolicy) -> bool { - self.lock().await.is_not_zombie(policy) + async fn check_good(self, policy: &TrackerPolicy) -> bool { + self.lock().await.is_good(policy) } - async fn peers_is_empty(self) -> bool { + async fn peers_is_empty(&self) -> bool { self.lock().await.peers_is_empty() } - async fn get_peers_len(self) -> usize { + async fn get_peers_len(&self) -> usize { self.lock().await.get_peers_len() } - async fn get_peers(self, limit: Option) -> Vec> { + async fn get_peers(&self, limit: Option) -> Vec> { self.lock().await.get_peers(limit) } - async fn get_peers_for_peer(self, client: &peer::Peer, limit: Option) -> Vec> { - self.lock().await.get_peers_for_peer(client, limit) + async fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.lock().await.get_peers_for_client(client, limit) } async fn insert_or_update_peer(self, 
peer: &peer::Peer) -> bool { @@ -44,3 +45,9 @@ impl EntryAsync for EntryMutexTokio { self.lock().await.remove_inactive_peers(current_cutoff); } } + +impl From for EntryMutexTokio { + fn from(entry: EntrySingle) -> Self { + Arc::new(tokio::sync::Mutex::new(entry)) + } +} diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs index 85fdc6cf0..c1041e9a2 100644 --- a/packages/torrent-repository/src/entry/single.rs +++ b/packages/torrent-repository/src/entry/single.rs @@ -1,3 +1,4 @@ +use std::net::SocketAddr; use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; @@ -16,14 +17,14 @@ impl Entry for EntrySingle { let incomplete: u32 = self.peers.len() as u32 - complete; SwarmMetadata { - downloaded: self.completed, + downloaded: self.downloaded, complete, incomplete, } } - fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { - if policy.persistent_torrent_completed_stat && self.completed > 0 { + fn is_good(&self, policy: &TrackerPolicy) -> bool { + if policy.persistent_torrent_completed_stat && self.downloaded > 0 { return true; } @@ -48,13 +49,13 @@ impl Entry for EntrySingle { } } - fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { match limit { Some(limit) => self .peers .values() // Take peers which are not the client peer - .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != peer::ReadInfo::get_address(client)) + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *client) // Limit the number of peers on the result .take(limit) .cloned() @@ -63,25 +64,25 @@ impl Entry for EntrySingle { .peers .values() // Take peers which are not the client peer - .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != peer::ReadInfo::get_address(client)) + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *client) .cloned() .collect(), } } fn insert_or_update_peer(&mut 
self, peer: &peer::Peer) -> bool { - let mut did_torrent_stats_change: bool = false; + let mut downloaded_stats_updated: bool = false; match peer::ReadInfo::get_event(peer) { AnnounceEvent::Stopped => { drop(self.peers.remove(&peer::ReadInfo::get_id(peer))); } AnnounceEvent::Completed => { - let peer_old = self.peers.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer)); + let previous = self.peers.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer)); // Don't count if peer was not previously known and not already completed. - if peer_old.is_some_and(|p| p.event != AnnounceEvent::Completed) { - self.completed += 1; - did_torrent_stats_change = true; + if previous.is_some_and(|p| p.event != AnnounceEvent::Completed) { + self.downloaded += 1; + downloaded_stats_updated = true; } } _ => { @@ -89,7 +90,7 @@ impl Entry for EntrySingle { } } - did_torrent_stats_change + downloaded_stats_updated } fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata) { @@ -103,302 +104,3 @@ impl Entry for EntrySingle { .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); } } - -#[cfg(test)] -mod tests { - mod torrent_entry { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::ops::Sub; - use std::sync::Arc; - use std::time::Duration; - - use torrust_tracker_clock::clock::stopped::Stopped as _; - use torrust_tracker_clock::clock::{self, Time}; - use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; - use torrust_tracker_primitives::announce_event::AnnounceEvent; - use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; - - use crate::entry::Entry; - use crate::{CurrentClock, EntrySingle}; - - struct TorrentPeerBuilder { - peer: peer::Peer, - } - - impl TorrentPeerBuilder { - pub fn default() -> TorrentPeerBuilder { - let default_peer = peer::Peer { - peer_id: peer::Id([0u8; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: CurrentClock::now(), - uploaded: 
NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - TorrentPeerBuilder { peer: default_peer } - } - - pub fn with_event_completed(mut self) -> Self { - self.peer.event = AnnounceEvent::Completed; - self - } - - pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { - self.peer.peer_id = peer_id; - self - } - - pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes(left); - self - } - - pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self - } - - pub fn into(self) -> peer::Peer { - self.peer - } - } - - /// A torrent seeder is a peer with 0 bytes left to download which - /// has not announced it has stopped - fn a_torrent_seeder() -> peer::Peer { - TorrentPeerBuilder::default() - .with_number_of_bytes_left(0) - .with_event_completed() - .into() - } - - /// A torrent leecher is a peer that is not a seeder. 
- /// Leecher: left > 0 OR event = Stopped - fn a_torrent_leecher() -> peer::Peer { - TorrentPeerBuilder::default() - .with_number_of_bytes_left(1) - .with_event_completed() - .into() - } - - #[test] - fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = EntrySingle::default(); - - assert_eq!(torrent_entry.get_peers(None).len(), 0); - } - - #[test] - fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - assert_eq!(*torrent_entry.get_peers(None)[0], torrent_peer); - assert_eq!(torrent_entry.get_peers(None).len(), 1); - } - - #[test] - fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - assert_eq!(torrent_entry.get_peers(None), vec![Arc::new(torrent_peer)]); - } - - #[test] - fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert_eq!(torrent_entry.get_peers(None)[0].event, AnnounceEvent::Completed); - } - - #[test] - fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Stopped; // Update the peer - 
torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert_eq!(torrent_entry.get_peers(None).len(), 0); - } - - #[test] - fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - let stats_have_changed = torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert!(stats_have_changed); - } - - #[test] - fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( - ) { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); - - // Add a peer that did not exist before in the entry - let torrent_stats_have_not_changed = !torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); - - assert!(torrent_stats_have_not_changed); - } - - #[test] - fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() - { - let mut torrent_entry = EntrySingle::default(); - let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer - - // Get peers excluding the one we have just added - let peers = torrent_entry.get_peers_for_peer(&torrent_peer, None); - - assert_eq!(peers.len(), 0); - } - - #[test] - fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { - let mut torrent_entry = EntrySingle::default(); - - let peer_ip = 
IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); - - // Add peer 1 - let torrent_peer_1 = TorrentPeerBuilder::default() - .with_peer_address(SocketAddr::new(peer_ip, 8080)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer_1); - - // Add peer 2 - let torrent_peer_2 = TorrentPeerBuilder::default() - .with_peer_address(SocketAddr::new(peer_ip, 8081)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer_2); - - // Get peers for peer 1 - let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1, None); - - // The peer 2 using the same IP but different port should be included - assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); - assert_eq!(peers[0].peer_addr.port(), 8081); - } - - fn peer_id_from_i32(number: i32) -> peer::Id { - let peer_id = number.to_le_bytes(); - peer::Id([ - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], - peer_id[2], peer_id[3], - ]) - } - - #[test] - fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = EntrySingle::default(); - - // We add one more peer than the scrape limit - for peer_number in 1..=74 + 1 { - let torrent_peer = TorrentPeerBuilder::default() - .with_peer_id(peer_id_from_i32(peer_number)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer); - } - - let peers = torrent_entry.get_peers(Some(TORRENT_PEERS_LIMIT)); - - assert_eq!(peers.len(), 74); - } - - #[test] - fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = EntrySingle::default(); - let torrent_seeder = a_torrent_seeder(); - - torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder - - assert_eq!(torrent_entry.get_stats().complete, 1); - } - - #[test] - fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = EntrySingle::default(); - let torrent_leecher = a_torrent_leecher(); - - 
torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher - - assert_eq!(torrent_entry.get_stats().incomplete, 1); - } - - #[test] - fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( - ) { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - // Announce "Completed" torrent download event. - torrent_peer.event = AnnounceEvent::Completed; - torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer - - let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().complete; - - assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); - } - - #[test] - fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); - - // Announce "Completed" torrent download event. - // It's the first event announced from this peer. 
- torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); // Add the peer - - let number_of_peers_with_completed_torrent = torrent_entry.get_stats().downloaded; - - assert_eq!(number_of_peers_with_completed_torrent, 0); - } - - #[test] - fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = EntrySingle::default(); - - let timeout = 120u32; - - let now = clock::Working::now(); - clock::Stopped::local_set(&now); - - let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); - let inactive_peer = TorrentPeerBuilder::default() - .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) - .into(); - torrent_entry.insert_or_update_peer(&inactive_peer); // Add the peer - - let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(timeout))).unwrap_or_default(); - torrent_entry.remove_inactive_peers(current_cutoff); - - assert_eq!(torrent_entry.get_peers_len(), 0); - } - } -} diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs index b46771163..494040c9d 100644 --- a/packages/torrent-repository/src/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -12,7 +12,9 @@ pub mod rw_lock_tokio; pub mod rw_lock_tokio_mutex_std; pub mod rw_lock_tokio_mutex_tokio; -pub trait Repository: Default + 'static { +use std::fmt::Debug; + +pub trait Repository: Debug + Default + Sized + 'static { fn get(&self, key: &InfoHash) -> Option; fn get_metrics(&self) -> TorrentsMetrics; fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>; @@ -24,7 +26,7 @@ pub trait Repository: Default + 'static { } #[allow(clippy::module_name_repetitions)] -pub trait RepositoryAsync: Default + 'static { +pub trait RepositoryAsync: Debug + Default + Sized + 'static { fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; fn get_metrics(&self) -> impl std::future::Future 
+ Send; fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; @@ -39,12 +41,36 @@ pub trait RepositoryAsync: Default + 'static { ) -> impl std::future::Future + Send; } -#[derive(Default)] +#[derive(Default, Debug)] +pub struct RwLockStd { + torrents: std::sync::RwLock>, +} + +#[derive(Default, Debug)] pub struct RwLockTokio { torrents: tokio::sync::RwLock>, } -#[derive(Default)] -pub struct RwLockStd { - torrents: std::sync::RwLock>, +impl RwLockStd { + /// # Panics + /// + /// Panics if unable to get a lock. + pub fn write( + &self, + ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { + self.torrents.write().expect("it should get lock") + } +} + +impl RwLockTokio { + pub fn write( + &self, + ) -> impl std::future::Future< + Output = tokio::sync::RwLockWriteGuard< + '_, + std::collections::BTreeMap, + >, + > { + self.torrents.write() + } } diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs index bacef623d..9d7f29416 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -49,9 +49,9 @@ where for entry in self.get_torrents().values() { let stats = entry.get_stats(); - metrics.seeders += u64::from(stats.complete); - metrics.completed += u64::from(stats.downloaded); - metrics.leechers += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); metrics.torrents += 1; } @@ -75,7 +75,7 @@ where fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { let mut torrents = self.get_torrents_mut(); - for (info_hash, completed) in persistent_torrents { + for (info_hash, downloaded) in persistent_torrents { // Skip if torrent entry already exists if torrents.contains_key(info_hash) { continue; @@ -83,7 +83,7 @@ where let 
entry = EntrySingle { peers: BTreeMap::default(), - completed: *completed, + downloaded: *downloaded, }; torrents.insert(*info_hash, entry); @@ -107,6 +107,6 @@ where fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { let mut db = self.get_torrents_mut(); - db.retain(|_, e| e.is_not_zombie(policy)); + db.retain(|_, e| e.is_good(policy)); } } diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs index 9fca82ba8..0b65234e3 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -57,9 +57,9 @@ where for entry in self.get_torrents().values() { let stats = entry.lock().expect("it should get a lock").get_stats(); - metrics.seeders += u64::from(stats.complete); - metrics.completed += u64::from(stats.downloaded); - metrics.leechers += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); metrics.torrents += 1; } @@ -92,7 +92,7 @@ where let entry = EntryMutexStd::new( EntrySingle { peers: BTreeMap::default(), - completed: *completed, + downloaded: *completed, } .into(), ); @@ -118,6 +118,6 @@ where fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { let mut db = self.get_torrents_mut(); - db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + db.retain(|_, e| e.lock().expect("it should lock entry").is_good(policy)); } } diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs index b9fb54469..5394abb6a 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -1,4 +1,5 @@ use std::collections::BTreeMap; 
+use std::iter::zip; use std::pin::Pin; use std::sync::Arc; @@ -75,9 +76,9 @@ where for entry in entries { let stats = entry.lock().await.get_stats(); - metrics.seeders += u64::from(stats.complete); - metrics.completed += u64::from(stats.downloaded); - metrics.leechers += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); metrics.torrents += 1; } @@ -96,7 +97,7 @@ where let entry = EntryMutexTokio::new( EntrySingle { peers: BTreeMap::default(), - completed: *completed, + downloaded: *completed, } .into(), ); @@ -124,8 +125,27 @@ where } async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let handles: Vec> + Send>>>; + + { + let db = self.get_torrents(); + + handles = zip(db.keys().copied(), db.values().cloned()) + .map(|(infohash, torrent)| { + torrent + .check_good(policy) + .map(move |good| if good { None } else { Some(infohash) }) + .boxed() + }) + .collect::>(); + } + + let not_good = join_all(handles).await; + let mut db = self.get_torrents_mut(); - db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + for remove in not_good.into_iter().flatten() { + drop(db.remove(&remove)); + } } } diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs index d0b7ec751..fa84e2451 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -64,9 +64,9 @@ where for entry in self.get_torrents().await.values() { let stats = entry.get_stats(); - metrics.seeders += u64::from(stats.complete); - metrics.completed += u64::from(stats.downloaded); - metrics.leechers += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); metrics.torrents += 1; } 
@@ -84,7 +84,7 @@ where let entry = EntrySingle { peers: BTreeMap::default(), - completed: *completed, + downloaded: *completed, }; torrents.insert(*info_hash, entry); @@ -108,6 +108,6 @@ where async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { let mut db = self.get_torrents_mut().await; - db.retain(|_, e| e.is_not_zombie(policy)); + db.retain(|_, e| e.is_good(policy)); } } diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs index f800d2001..fbbc51a09 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -72,9 +72,9 @@ where for entry in self.get_torrents().await.values() { let stats = entry.get_stats(); - metrics.seeders += u64::from(stats.complete); - metrics.completed += u64::from(stats.downloaded); - metrics.leechers += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); metrics.torrents += 1; } @@ -93,7 +93,7 @@ where let entry = EntryMutexStd::new( EntrySingle { peers: BTreeMap::default(), - completed: *completed, + downloaded: *completed, } .into(), ); @@ -119,6 +119,6 @@ where async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { let mut db = self.get_torrents_mut().await; - db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + db.retain(|_, e| e.lock().expect("it should lock entry").is_good(policy)); } } diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs index 7ce2cc74c..bc7fd61e8 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -70,11 +70,11 
@@ where async fn get_metrics(&self) -> TorrentsMetrics { let mut metrics = TorrentsMetrics::default(); - for entry in self.get_torrents().await.values().cloned() { + for entry in self.get_torrents().await.values() { let stats = entry.get_stats().await; - metrics.seeders += u64::from(stats.complete); - metrics.completed += u64::from(stats.downloaded); - metrics.leechers += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); metrics.torrents += 1; } @@ -93,7 +93,7 @@ where let entry = EntryMutexTokio::new( EntrySingle { peers: BTreeMap::default(), - completed: *completed, + downloaded: *completed, } .into(), ); @@ -119,6 +119,16 @@ where async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { let mut db = self.get_torrents_mut().await; - db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + let mut not_good = Vec::::default(); + + for (&infohash, torrent) in db.iter() { + if !torrent.clone().check_good(policy).await { + not_good.push(infohash); + } + } + + for remove in not_good { + drop(db.remove(&remove)); + } } } diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index 20a45cf83..c08aed76a 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -117,9 +117,9 @@ pub trait Database: Sync + Send { /// /// It returns an array of tuples with the torrent /// [`InfoHash`] and the - /// [`completed`](torrust_tracker_torrent_repository::entry::Torrent::completed) counter + /// [`downloaded`](torrust_tracker_torrent_repository::entry::Torrent::downloaded) counter /// which is the number of times the torrent has been downloaded. - /// See [`Entry::completed`](torrust_tracker_torrent_repository::entry::Torrent::completed). + /// See [`Entry::downloaded`](torrust_tracker_torrent_repository::entry::Torrent::downloaded). 
/// /// # Context: Torrent Metrics /// diff --git a/src/core/databases/mysql.rs b/src/core/databases/mysql.rs index e37cdd9bf..ca95fa0b9 100644 --- a/src/core/databases/mysql.rs +++ b/src/core/databases/mysql.rs @@ -9,7 +9,7 @@ use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::DatabaseDriver; +use torrust_tracker_primitives::{DatabaseDriver, PersistentTorrents}; use super::{Database, Error}; use crate::core::auth::{self, Key}; @@ -105,7 +105,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). - async fn load_persistent_torrents(&self) -> Result, Error> { + async fn load_persistent_torrents(&self) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let torrents = conn.query_map( @@ -116,7 +116,7 @@ impl Database for Mysql { }, )?; - Ok(torrents) + Ok(torrents.iter().copied().collect()) } /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). diff --git a/src/core/databases/sqlite.rs b/src/core/databases/sqlite.rs index 5a3ac144a..53a01f80c 100644 --- a/src/core/databases/sqlite.rs +++ b/src/core/databases/sqlite.rs @@ -6,7 +6,7 @@ use async_trait::async_trait; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; use torrust_tracker_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::{DatabaseDriver, DurationSinceUnixEpoch}; +use torrust_tracker_primitives::{DatabaseDriver, DurationSinceUnixEpoch, PersistentTorrents}; use super::{Database, Error}; use crate::core::auth::{self, Key}; @@ -89,7 +89,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). 
- async fn load_persistent_torrents(&self) -> Result, Error> { + async fn load_persistent_torrents(&self) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; @@ -101,12 +101,7 @@ impl Database for Sqlite { Ok((info_hash, completed)) })?; - //torrent_iter?; - //let torrent_iter = torrent_iter.unwrap(); - - let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(std::result::Result::ok).collect(); - - Ok(torrents) + Ok(torrent_iter.filter_map(std::result::Result::ok).collect()) } /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). diff --git a/src/core/mod.rs b/src/core/mod.rs index 21cd1b501..6628426c1 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -684,7 +684,7 @@ impl Tracker { fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> Vec> { match self.torrents.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers_for_peer(peer, Some(TORRENT_PEERS_LIMIT)), + Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(TORRENT_PEERS_LIMIT)), } } @@ -1115,9 +1115,9 @@ mod tests { assert_eq!( torrents_metrics, TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 0, + complete: 0, + downloaded: 0, + incomplete: 0, torrents: 0 } ); @@ -1164,9 +1164,9 @@ mod tests { assert_eq!( torrent_metrics, TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 1, + complete: 0, + downloaded: 0, + incomplete: 1, torrents: 1, } ); @@ -1191,9 +1191,9 @@ mod tests { assert_eq!( (torrent_metrics), (TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 1_000_000, + complete: 0, + downloaded: 0, + incomplete: 1_000_000, torrents: 1_000_000, }), "{result_a:?} {result_b:?}" diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 2b3f9cbf7..ab78de683 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -29,6 +29,3 @@ use 
torrust_tracker_torrent_repository::TorrentsRwLockStdMutexStd; pub type Torrents = TorrentsRwLockStdMutexStd; // Currently Used - -#[cfg(test)] -mod tests {} diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index 48ac660cf..9e8ab6bab 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -50,9 +50,9 @@ impl From for Stats { fn from(metrics: TrackerMetrics) -> Self { Self { torrents: metrics.torrents_metrics.torrents, - seeders: metrics.torrents_metrics.seeders, - completed: metrics.torrents_metrics.completed, - leechers: metrics.torrents_metrics.leechers, + seeders: metrics.torrents_metrics.complete, + completed: metrics.torrents_metrics.downloaded, + leechers: metrics.torrents_metrics.incomplete, tcp4_connections_handled: metrics.protocol_metrics.tcp4_connections_handled, tcp4_announces_handled: metrics.protocol_metrics.tcp4_announces_handled, tcp4_scrapes_handled: metrics.protocol_metrics.tcp4_scrapes_handled, @@ -82,9 +82,9 @@ mod tests { assert_eq!( Stats::from(TrackerMetrics { torrents_metrics: TorrentsMetrics { - seeders: 1, - completed: 2, - leechers: 3, + complete: 1, + downloaded: 2, + incomplete: 3, torrents: 4 }, protocol_metrics: Metrics { diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 59aec0ff3..2d5038ec3 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -320,7 +320,6 @@ mod tests { use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; @@ -368,39 +367,41 @@ mod tests { SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) } - struct TorrentPeerBuilder { + #[derive(Debug, Default)] + pub struct TorrentPeerBuilder { peer: peer::Peer, } impl TorrentPeerBuilder 
{ - pub fn default() -> TorrentPeerBuilder { - let default_peer = peer::Peer { - peer_id: peer::Id([255u8; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: CurrentClock::now(), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - TorrentPeerBuilder { peer: default_peer } + #[must_use] + pub fn new() -> Self { + Self { + peer: peer::Peer { + updated: CurrentClock::now(), + ..Default::default() + }, + } } - pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { - self.peer.peer_id = peer_id; + #[must_use] + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; self } - pub fn with_peer_addr(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; + #[must_use] + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { + self.peer.peer_id = peer_id; self } - pub fn with_bytes_left(mut self, left: i64) -> Self { + #[must_use] + pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { self.peer.left = NumberOfBytes(left); self } + #[must_use] pub fn into(self) -> peer::Peer { self.peer } @@ -640,9 +641,9 @@ mod tests { let peers = tracker.get_torrent_peers(&info_hash.0.into()); - let expected_peer = TorrentPeerBuilder::default() + let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip), client_port)) + .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip), client_port)) .into(); assert_eq!(peers[0], Arc::new(expected_peer)); @@ -712,9 +713,9 @@ mod tests { let client_port = 8080; let peer_id = AquaticPeerId([255u8; 20]); - let peer_using_ipv6 = TorrentPeerBuilder::default() + let peer_using_ipv6 = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + 
.with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); tracker @@ -808,9 +809,9 @@ mod tests { let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); - let expected_peer = TorrentPeerBuilder::default() + let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) + .with_peer_address(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) .into(); assert_eq!(peers[0], Arc::new(expected_peer)); @@ -863,9 +864,9 @@ mod tests { let peers = tracker.get_torrent_peers(&info_hash.0.into()); - let expected_peer = TorrentPeerBuilder::default() + let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); assert_eq!(peers[0], Arc::new(expected_peer)); @@ -938,9 +939,9 @@ mod tests { let client_port = 8080; let peer_id = AquaticPeerId([255u8; 20]); - let peer_using_ipv4 = TorrentPeerBuilder::default() + let peer_using_ipv4 = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) + .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); tracker @@ -1112,10 +1113,10 @@ mod tests { async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { let peer_id = peer::Id([255u8; 20]); - let peer = TorrentPeerBuilder::default() + let peer = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(*remote_addr) - .with_bytes_left(0) + .with_peer_address(*remote_addr) + .with_number_of_bytes_left(0) .into(); tracker From 3414e2abea16ff79a1150aa432c6563612735d79 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 25 Mar 2024 12:13:09 +0800 Subject: [PATCH 9/9] dev: torrent 
repository tests --- Cargo.lock | 419 ++++++++++++++- packages/torrent-repository/Cargo.toml | 1 + .../torrent-repository/tests/common/mod.rs | 3 + .../torrent-repository/tests/common/repo.rs | 147 +++++ .../tests/common/torrent.rs | 89 ++++ .../tests/common/torrent_peer_builder.rs | 88 +++ .../torrent-repository/tests/entry/mod.rs | 433 +++++++++++++++ .../torrent-repository/tests/integration.rs | 22 + .../tests/repository/mod.rs | 504 ++++++++++++++++++ 9 files changed, 1700 insertions(+), 6 deletions(-) create mode 100644 packages/torrent-repository/tests/common/mod.rs create mode 100644 packages/torrent-repository/tests/common/repo.rs create mode 100644 packages/torrent-repository/tests/common/torrent.rs create mode 100644 packages/torrent-repository/tests/common/torrent_peer_builder.rs create mode 100644 packages/torrent-repository/tests/entry/mod.rs create mode 100644 packages/torrent-repository/tests/integration.rs create mode 100644 packages/torrent-repository/tests/repository/mod.rs diff --git a/Cargo.lock b/Cargo.lock index e28278abb..0bdd83b9b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -167,6 +167,40 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3" +dependencies = [ + "concurrent-queue", + "event-listener 5.2.0", + "event-listener-strategy 0.5.0", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-compression" version = "0.4.6" @@ -183,6 +217,128 @@ dependencies = [ "zstd-safe", ] +[[package]] +name = "async-executor" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" +dependencies = [ + "async-lock 3.3.0", + "async-task", + "concurrent-queue", + "fastrand 2.0.1", + "futures-lite 2.3.0", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" +dependencies = [ + "async-channel 2.2.0", + "async-executor", + "async-io 2.3.2", + "async-lock 3.3.0", + "blocking", + "futures-lite 2.3.0", + "once_cell", + "tokio", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-lite 1.13.0", + "log", + "parking", + "polling 2.8.0", + "rustix 0.37.27", + "slab", + "socket2 0.4.10", + "waker-fn", +] + +[[package]] +name = "async-io" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcccb0f599cfa2f8ace422d3555572f47424da5648a4382a9dd0310ff8210884" +dependencies = [ + "async-lock 3.3.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.3.0", + "parking", + "polling 3.6.0", + "rustix 0.38.32", + "slab", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" +dependencies = [ + "event-listener 4.0.3", + "event-listener-strategy 0.4.0", + "pin-project-lite", +] + +[[package]] +name = "async-std" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +dependencies = [ + "async-attributes", + "async-channel 1.9.0", + "async-global-executor", + "async-io 1.13.0", + "async-lock 2.8.0", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite 1.13.0", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + +[[package]] +name = "async-task" +version = "4.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" + [[package]] name = "async-trait" version = "0.1.78" @@ -194,6 +350,12 @@ dependencies = [ "syn 2.0.53", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "autocfg" version = "1.1.0" @@ -418,6 +580,22 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blocking" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +dependencies = [ + "async-channel 2.2.0", + "async-lock 3.3.0", + "async-task", + "fastrand 2.0.1", + "futures-io", + "futures-lite 2.3.0", + "piper", + "tracing", +] + 
[[package]] name = "borsh" version = "1.3.1" @@ -662,6 +840,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "concurrent-queue" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "config" version = "0.14.0" @@ -996,6 +1183,54 @@ dependencies = [ "version_check", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.3", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feedafcaa9b749175d5ac357452a9d41ea2911da598fde46ce1fe02c37751291" +dependencies = [ + "event-listener 5.2.0", + "pin-project-lite", +] + [[package]] name = "fallible-iterator" version = "0.3.0" @@ -1008,6 +1243,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + [[package]] name = "fastrand" version = "2.0.1" @@ -1186,6 +1430,34 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "fastrand 2.0.1", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + [[package]] name = "futures-macro" version = "0.3.30" @@ -1266,6 +1538,18 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "h2" version = "0.4.3" @@ -1458,7 +1742,7 @@ dependencies = [ "http-body", "hyper", "pin-project-lite", - "socket2", + "socket2 0.5.6", "tokio", "tower", "tower-service", @@ -1526,6 +1810,15 @@ dependencies = [ "serde", ] +[[package]] +name = "instant" +version = 
"0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + [[package]] name = "io-enum" version = "1.1.3" @@ -1535,6 +1828,17 @@ dependencies = [ "derive_utils", ] +[[package]] +name = "io-lifetimes" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "ipnet" version = "2.9.0" @@ -1605,6 +1909,15 @@ dependencies = [ "serde", ] +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -1734,6 +2047,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linux-raw-sys" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + [[package]] name = "linux-raw-sys" version = "0.4.13" @@ -1767,6 +2086,9 @@ name = "log" version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +dependencies = [ + "value-bag", +] [[package]] name = "lru" @@ -1878,7 +2200,7 @@ dependencies = [ "percent-encoding", "serde", "serde_json", - "socket2", + "socket2 0.5.6", "twox-hash", "url", ] @@ -2127,6 +2449,12 @@ dependencies = [ "hashbrown 0.13.2", ] +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" + [[package]] name = "parking_lot" version = "0.12.1" @@ -2287,6 +2615,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.0.1", + "futures-io", +] + [[package]] name = "pkg-config" version = "0.3.30" @@ -2321,6 +2660,37 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + +[[package]] +name = "polling" +version = "3.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0c976a60b2d7e99d6f229e414670a9b85d13ac305cc6d1e9c134de58c5aaaf6" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi", + "pin-project-lite", + "rustix 0.38.32", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -2778,6 +3148,20 @@ dependencies = [ "semver", ] +[[package]] +name = "rustix" +version = "0.37.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + [[package]] name = "rustix" version = "0.38.32" @@ -2787,7 +3171,7 @@ dependencies = [ "bitflags 2.5.0", "errno", "libc", - "linux-raw-sys", + "linux-raw-sys 0.4.13", "windows-sys 
0.52.0", ] @@ -3133,6 +3517,16 @@ version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +[[package]] +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "socket2" version = "0.5.6" @@ -3268,8 +3662,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", - "fastrand", - "rustix", + "fastrand 2.0.1", + "rustix 0.38.32", "windows-sys 0.52.0", ] @@ -3386,7 +3780,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.5.6", "tokio-macros", "windows-sys 0.48.0", ] @@ -3610,6 +4004,7 @@ dependencies = [ name = "torrust-tracker-torrent-repository" version = "3.0.0-alpha.12-develop" dependencies = [ + "async-std", "criterion", "futures", "rstest", @@ -3801,6 +4196,12 @@ dependencies = [ "rand", ] +[[package]] +name = "value-bag" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74797339c3b98616c009c7c3eb53a0ce41e85c8ec66bd3db96ed132d20cfdee8" + [[package]] name = "vcpkg" version = "0.2.15" @@ -3813,6 +4214,12 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "waker-fn" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" + [[package]] name = "walkdir" version = "2.5.0" diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index c36ae1440..4cea8767f 100644 --- 
a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -25,6 +25,7 @@ torrust-tracker-clock = { version = "3.0.0-alpha.12-develop", path = "../clock" [dev-dependencies] criterion = { version = "0", features = ["async_tokio"] } rstest = "0" +async-std = {version = "1", features = ["attributes", "tokio1"] } [[bench]] harness = false diff --git a/packages/torrent-repository/tests/common/mod.rs b/packages/torrent-repository/tests/common/mod.rs new file mode 100644 index 000000000..efdf7f742 --- /dev/null +++ b/packages/torrent-repository/tests/common/mod.rs @@ -0,0 +1,3 @@ +pub mod repo; +pub mod torrent; +pub mod torrent_peer_builder; diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs new file mode 100644 index 000000000..3a4b53d2f --- /dev/null +++ b/packages/torrent-repository/tests/common/repo.rs @@ -0,0 +1,147 @@ +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_torrent_repository::repository::{Repository as _, RepositoryAsync as _}; +use torrust_tracker_torrent_repository::{ + EntrySingle, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, + TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, +}; + +#[derive(Debug)] +pub(crate) enum Repo { + Std(TorrentsRwLockStd), + StdMutexStd(TorrentsRwLockStdMutexStd), + StdMutexTokio(TorrentsRwLockStdMutexTokio), + Tokio(TorrentsRwLockTokio), + TokioMutexStd(TorrentsRwLockTokioMutexStd), + TokioMutexTokio(TorrentsRwLockTokioMutexTokio), +} + +impl Repo { + pub(crate) async fn get(&self, key: &InfoHash) -> Option { + match self { + 
Repo::Std(repo) => repo.get(key), + Repo::StdMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + Repo::StdMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), + Repo::Tokio(repo) => repo.get(key).await, + Repo::TokioMutexStd(repo) => Some(repo.get(key).await?.lock().unwrap().clone()), + Repo::TokioMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), + } + } + pub(crate) async fn get_metrics(&self) -> TorrentsMetrics { + match self { + Repo::Std(repo) => repo.get_metrics(), + Repo::StdMutexStd(repo) => repo.get_metrics(), + Repo::StdMutexTokio(repo) => repo.get_metrics().await, + Repo::Tokio(repo) => repo.get_metrics().await, + Repo::TokioMutexStd(repo) => repo.get_metrics().await, + Repo::TokioMutexTokio(repo) => repo.get_metrics().await, + } + } + pub(crate) async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + match self { + Repo::Std(repo) => repo.get_paginated(pagination), + Repo::StdMutexStd(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), + Repo::StdMutexTokio(repo) => { + let mut v: Vec<(InfoHash, EntrySingle)> = vec![]; + + for (i, t) in repo.get_paginated(pagination).await { + v.push((i, t.lock().await.clone())); + } + v + } + Repo::Tokio(repo) => repo.get_paginated(pagination).await, + Repo::TokioMutexStd(repo) => repo + .get_paginated(pagination) + .await + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), + Repo::TokioMutexTokio(repo) => { + let mut v: Vec<(InfoHash, EntrySingle)> = vec![]; + + for (i, t) in repo.get_paginated(pagination).await { + v.push((i, t.lock().await.clone())); + } + v + } + } + } + pub(crate) async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + match self { + Repo::Std(repo) => repo.import_persistent(persistent_torrents), + Repo::StdMutexStd(repo) => 
repo.import_persistent(persistent_torrents), + Repo::StdMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::Tokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::TokioMutexStd(repo) => repo.import_persistent(persistent_torrents).await, + Repo::TokioMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, + } + } + pub(crate) async fn remove(&self, key: &InfoHash) -> Option { + match self { + Repo::Std(repo) => repo.remove(key), + Repo::StdMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + Repo::StdMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), + Repo::Tokio(repo) => repo.remove(key).await, + Repo::TokioMutexStd(repo) => Some(repo.remove(key).await?.lock().unwrap().clone()), + Repo::TokioMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), + } + } + pub(crate) async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + match self { + Repo::Std(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::StdMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::StdMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::Tokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::TokioMutexStd(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::TokioMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + } + } + pub(crate) async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + match self { + Repo::Std(repo) => repo.remove_peerless_torrents(policy), + Repo::StdMutexStd(repo) => repo.remove_peerless_torrents(policy), + Repo::StdMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::Tokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::TokioMutexStd(repo) => repo.remove_peerless_torrents(policy).await, + Repo::TokioMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, + } + } + pub(crate) async 
fn update_torrent_with_peer_and_get_stats( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + ) -> (bool, SwarmMetadata) { + match self { + Repo::Std(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer), + Repo::StdMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer), + Repo::StdMutexTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, + Repo::Tokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, + Repo::TokioMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, + Repo::TokioMutexTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, + } + } + pub(crate) async fn insert(&self, info_hash: &InfoHash, torrent: EntrySingle) -> Option { + match self { + Repo::Std(repo) => repo.write().insert(*info_hash, torrent), + Repo::StdMutexStd(repo) => Some(repo.write().insert(*info_hash, torrent.into())?.lock().unwrap().clone()), + Repo::StdMutexTokio(repo) => { + let r = repo.write().insert(*info_hash, torrent.into()); + match r { + Some(t) => Some(t.lock().await.clone()), + None => None, + } + } + Repo::Tokio(repo) => repo.write().await.insert(*info_hash, torrent), + Repo::TokioMutexStd(repo) => Some(repo.write().await.insert(*info_hash, torrent.into())?.lock().unwrap().clone()), + Repo::TokioMutexTokio(repo) => Some(repo.write().await.insert(*info_hash, torrent.into())?.lock().await.clone()), + } + } +} diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs new file mode 100644 index 000000000..33264c443 --- /dev/null +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -0,0 +1,89 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; +use 
torrust_tracker_torrent_repository::entry::{Entry as _, EntryAsync as _, EntrySync as _}; +use torrust_tracker_torrent_repository::{EntryMutexStd, EntryMutexTokio, EntrySingle}; + +#[derive(Debug, Clone)] +pub(crate) enum Torrent { + Single(EntrySingle), + MutexStd(EntryMutexStd), + MutexTokio(EntryMutexTokio), +} + +impl Torrent { + pub(crate) async fn get_stats(&self) -> SwarmMetadata { + match self { + Torrent::Single(entry) => entry.get_stats(), + Torrent::MutexStd(entry) => entry.get_stats(), + Torrent::MutexTokio(entry) => entry.clone().get_stats().await, + } + } + + pub(crate) async fn is_good(&self, policy: &TrackerPolicy) -> bool { + match self { + Torrent::Single(entry) => entry.is_good(policy), + Torrent::MutexStd(entry) => entry.is_good(policy), + Torrent::MutexTokio(entry) => entry.clone().check_good(policy).await, + } + } + + pub(crate) async fn peers_is_empty(&self) -> bool { + match self { + Torrent::Single(entry) => entry.peers_is_empty(), + Torrent::MutexStd(entry) => entry.peers_is_empty(), + Torrent::MutexTokio(entry) => entry.clone().peers_is_empty().await, + } + } + + pub(crate) async fn get_peers_len(&self) -> usize { + match self { + Torrent::Single(entry) => entry.get_peers_len(), + Torrent::MutexStd(entry) => entry.get_peers_len(), + Torrent::MutexTokio(entry) => entry.clone().get_peers_len().await, + } + } + + pub(crate) async fn get_peers(&self, limit: Option) -> Vec> { + match self { + Torrent::Single(entry) => entry.get_peers(limit), + Torrent::MutexStd(entry) => entry.get_peers(limit), + Torrent::MutexTokio(entry) => entry.clone().get_peers(limit).await, + } + } + + pub(crate) async fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + match self { + Torrent::Single(entry) => entry.get_peers_for_client(client, limit), + Torrent::MutexStd(entry) => entry.get_peers_for_client(client, limit), + Torrent::MutexTokio(entry) => entry.clone().get_peers_for_client(client, limit).await, + } + } + + pub(crate) async fn 
insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { + match self { + Torrent::Single(entry) => entry.insert_or_update_peer(peer), + Torrent::MutexStd(entry) => entry.insert_or_update_peer(peer), + Torrent::MutexTokio(entry) => entry.clone().insert_or_update_peer(peer).await, + } + } + + pub(crate) async fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + match self { + Torrent::Single(entry) => entry.insert_or_update_peer_and_get_stats(peer), + Torrent::MutexStd(entry) => entry.insert_or_update_peer_and_get_stats(peer), + Torrent::MutexTokio(entry) => entry.clone().insert_or_update_peer_and_get_stats(peer).await, + } + } + + pub(crate) async fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + match self { + Torrent::Single(entry) => entry.remove_inactive_peers(current_cutoff), + Torrent::MutexStd(entry) => entry.remove_inactive_peers(current_cutoff), + Torrent::MutexTokio(entry) => entry.clone().remove_inactive_peers(current_cutoff).await, + } + } +} diff --git a/packages/torrent-repository/tests/common/torrent_peer_builder.rs b/packages/torrent-repository/tests/common/torrent_peer_builder.rs new file mode 100644 index 000000000..3a4e61ed2 --- /dev/null +++ b/packages/torrent-repository/tests/common/torrent_peer_builder.rs @@ -0,0 +1,88 @@ +use std::net::SocketAddr; + +use torrust_tracker_clock::clock::Time; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; + +use crate::CurrentClock; + +#[derive(Debug, Default)] +struct TorrentPeerBuilder { + peer: peer::Peer, +} + +#[allow(dead_code)] +impl TorrentPeerBuilder { + #[must_use] + fn new() -> Self { + Self { + peer: peer::Peer { + updated: CurrentClock::now(), + ..Default::default() + }, + } + } + + #[must_use] + fn with_event_completed(mut self) -> Self { + self.peer.event = AnnounceEvent::Completed; + self + } + + #[must_use] + fn 
with_event_started(mut self) -> Self { + self.peer.event = AnnounceEvent::Started; + self + } + + #[must_use] + fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + #[must_use] + fn with_peer_id(mut self, peer_id: peer::Id) -> Self { + self.peer.peer_id = peer_id; + self + } + + #[must_use] + fn with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + #[must_use] + fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + #[must_use] + fn into(self) -> peer::Peer { + self.peer + } +} + +/// A torrent seeder is a peer with 0 bytes left to download which +/// has not announced it has stopped +#[must_use] +pub fn a_completed_peer(id: i32) -> peer::Peer { + TorrentPeerBuilder::new() + .with_number_of_bytes_left(0) + .with_event_completed() + .with_peer_id(id.into()) + .into() +} + +/// A torrent leecher is a peer that is not a seeder. 
+/// Leecher: left > 0 OR event = Stopped +#[must_use] +pub fn a_started_peer(id: i32) -> peer::Peer { + TorrentPeerBuilder::new() + .with_number_of_bytes_left(1) + .with_event_started() + .with_peer_id(id.into()) + .into() +} diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs new file mode 100644 index 000000000..c39bef636 --- /dev/null +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -0,0 +1,433 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::ops::Sub; +use std::time::Duration; + +use rstest::{fixture, rstest}; +use torrust_tracker_clock::clock::stopped::Stopped as _; +use torrust_tracker_clock::clock::{self, Time as _}; +use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::{peer, NumberOfBytes}; +use torrust_tracker_torrent_repository::{EntryMutexStd, EntryMutexTokio, EntrySingle}; + +use crate::common::torrent::Torrent; +use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; +use crate::CurrentClock; + +#[fixture] +fn single() -> Torrent { + Torrent::Single(EntrySingle::default()) +} +#[fixture] +fn standard_mutex() -> Torrent { + Torrent::MutexStd(EntryMutexStd::default()) +} + +#[fixture] +fn mutex_tokio() -> Torrent { + Torrent::MutexTokio(EntryMutexTokio::default()) +} + +#[fixture] +fn policy_none() -> TrackerPolicy { + TrackerPolicy::new(false, 0, false) +} + +#[fixture] +fn policy_persist() -> TrackerPolicy { + TrackerPolicy::new(false, 0, true) +} + +#[fixture] +fn policy_remove() -> TrackerPolicy { + TrackerPolicy::new(true, 0, false) +} + +#[fixture] +fn policy_remove_persist() -> TrackerPolicy { + TrackerPolicy::new(true, 0, true) +} + +pub enum Makes { + Empty, + Started, + Completed, + Downloaded, + Three, +} + +async fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { + match makes { + 
Makes::Empty => vec![], + Makes::Started => { + let peer = a_started_peer(1); + torrent.insert_or_update_peer(&peer).await; + vec![peer] + } + Makes::Completed => { + let peer = a_completed_peer(2); + torrent.insert_or_update_peer(&peer).await; + vec![peer] + } + Makes::Downloaded => { + let mut peer = a_started_peer(3); + torrent.insert_or_update_peer(&peer).await; + peer.event = AnnounceEvent::Completed; + peer.left = NumberOfBytes(0); + torrent.insert_or_update_peer(&peer).await; + vec![peer] + } + Makes::Three => { + let peer_1 = a_started_peer(1); + torrent.insert_or_update_peer(&peer_1).await; + + let peer_2 = a_completed_peer(2); + torrent.insert_or_update_peer(&peer_2).await; + + let mut peer_3 = a_started_peer(3); + torrent.insert_or_update_peer(&peer_3).await; + peer_3.event = AnnounceEvent::Completed; + peer_3.left = NumberOfBytes(0); + torrent.insert_or_update_peer(&peer_3).await; + vec![peer_1, peer_2, peer_3] + } + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[tokio::test] +async fn it_should_be_empty_by_default( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + assert_eq!(torrent.get_peers_len().await, 0); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_check_if_entry_is_good( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, + #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, +) { + make(&mut torrent, makes).await; + + let has_peers = !torrent.peers_is_empty().await; + let has_downloads = torrent.get_stats().await.downloaded != 0; + + match (policy.remove_peerless_torrents, policy.persistent_torrent_completed_stat) { + // remove torrents without peers, and keep 
completed download stats + (true, true) => match (has_peers, has_downloads) { + // no peers, but has downloads + // peers, with or without downloads + (false, true) | (true, true | false) => assert!(torrent.is_good(&policy).await), + // no peers and no downloads + (false, false) => assert!(!torrent.is_good(&policy).await), + }, + // remove torrents without peers and drop completed download stats + (true, false) => match (has_peers, has_downloads) { + // peers, with or without downloads + (true, true | false) => assert!(torrent.is_good(&policy).await), + // no peers and with or without downloads + (false, true | false) => assert!(!torrent.is_good(&policy).await), + }, + // keep torrents without peers, but keep or drop completed download stats + (false, true | false) => assert!(torrent.is_good(&policy).await), + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_get_peers_for_torrent_entry( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + + let torrent_peers = torrent.get_peers(None).await; + + assert_eq!(torrent_peers.len(), peers.len()); + + for peer in torrent_peers { + assert!(peers.contains(&peer)); + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + // Make and insert a new peer. + let mut peer = a_started_peer(-1); + torrent.insert_or_update_peer(&peer).await; + + // Get the Inserted Peer by Id. 
+ let peers = torrent.get_peers(None).await; + let original = peers + .iter() + .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) + .expect("it should find peer by id"); + + assert_eq!(original.event, AnnounceEvent::Started, "it should be as created"); + + // Announce "Completed" torrent download event. + peer.event = AnnounceEvent::Completed; + torrent.insert_or_update_peer(&peer).await; + + // Get the Updated Peer by Id. + let peers = torrent.get_peers(None).await; + let updated = peers + .iter() + .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) + .expect("it should find peer by id"); + + assert_eq!(updated.event, AnnounceEvent::Completed, "it should be updated"); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_remove_a_peer_upon_stopped_announcement( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + use torrust_tracker_primitives::peer::ReadInfo as _; + + make(&mut torrent, makes).await; + + let mut peer = a_started_peer(-1); + + torrent.insert_or_update_peer(&peer).await; + + // The started peer should be inserted. + let peers = torrent.get_peers(None).await; + let original = peers + .iter() + .find(|p| p.get_id() == peer.get_id()) + .expect("it should find peer by id"); + + assert_eq!(original.event, AnnounceEvent::Started); + + // Change peer to "Stopped" and insert. + peer.event = AnnounceEvent::Stopped; + torrent.insert_or_update_peer(&peer).await; + + // It should be removed now. 
+ let peers = torrent.get_peers(None).await; + + assert_eq!( + peers.iter().find(|p| p.get_id() == peer.get_id()), + None, + "it should be removed" + ); +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + let downloaded = torrent.get_stats().await.downloaded; + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let is_already_completed = peer.event == AnnounceEvent::Completed; + + // Announce "Completed" torrent download event. + peer.event = AnnounceEvent::Completed; + + let (updated, stats) = torrent.insert_or_update_peer_and_get_stats(&peer).await; + + if is_already_completed { + assert!(!updated); + assert_eq!(stats.downloaded, downloaded); + } else { + assert!(updated); + assert_eq!(stats.downloaded, downloaded + 1); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer_as_a_seeder( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let is_already_non_left = peer.left == NumberOfBytes(0); + + // Set Bytes Left to Zero + peer.left = NumberOfBytes(0); + let (_, stats) = torrent.insert_or_update_peer_and_get_stats(&peer).await; // Add the peer + + if 
is_already_non_left { + // it was already complete + assert_eq!(stats.complete, completed); + } else { + // now it is complete + assert_eq!(stats.complete, completed + 1); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer_as_incomplete( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let completed_already = peer.left == NumberOfBytes(0); + + // Set Bytes Left to no Zero + peer.left = NumberOfBytes(1); + let (_, stats) = torrent.insert_or_update_peer_and_get_stats(&peer).await; // Add the peer + + if completed_already { + // now it is incomplete + assert_eq!(stats.incomplete, incomplete + 1); + } else { + // was already incomplete + assert_eq!(stats.incomplete, incomplete); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_get_peers_excluding_the_client_socket( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); + + // for this test, we should not already use this socket. + assert_ne!(peer.peer_addr, socket); + + // it should get the peer as it dose not share the socket. 
+ assert!(torrent.get_peers_for_client(&socket, None).await.contains(&peer.into())); + + // set the address to the socket. + peer.peer_addr = socket; + torrent.insert_or_update_peer(&peer).await; // Add peer + + // It should not include the peer that has the same socket. + assert!(!torrent.get_peers_for_client(&socket, None).await.contains(&peer.into())); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_limit_the_number_of_peers_returned( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + // We add one more peer than the scrape limit + for peer_number in 1..=74 + 1 { + let mut peer = a_started_peer(1); + peer.peer_id = peer::Id::from(peer_number); + torrent.insert_or_update_peer(&peer).await; + } + + let peers = torrent.get_peers(Some(TORRENT_PEERS_LIMIT)).await; + + assert_eq!(peers.len(), 74); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_remove_inactive_peers_beyond_cutoff( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + const TIMEOUT: Duration = Duration::from_secs(120); + const EXPIRE: Duration = Duration::from_secs(121); + + let peers = make(&mut torrent, makes).await; + + let mut peer = a_completed_peer(-1); + + let now = clock::Working::now(); + clock::Stopped::local_set(&now); + + peer.updated = now.sub(EXPIRE); + + torrent.insert_or_update_peer(&peer).await; + + assert_eq!(torrent.get_peers_len().await, peers.len() + 1); + + let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); + 
torrent.remove_inactive_peers(current_cutoff).await; + + assert_eq!(torrent.get_peers_len().await, peers.len()); +} diff --git a/packages/torrent-repository/tests/integration.rs b/packages/torrent-repository/tests/integration.rs new file mode 100644 index 000000000..5aab67b03 --- /dev/null +++ b/packages/torrent-repository/tests/integration.rs @@ -0,0 +1,22 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` + +use torrust_tracker_clock::clock; + +pub mod common; +mod entry; +mod repository; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs new file mode 100644 index 000000000..7ffe17dd7 --- /dev/null +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -0,0 +1,504 @@ +use std::collections::{BTreeMap, HashSet}; +use std::hash::{DefaultHasher, Hash, Hasher}; + +use rstest::{fixture, rstest}; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::{NumberOfBytes, PersistentTorrents}; +use torrust_tracker_torrent_repository::entry::Entry as _; +use torrust_tracker_torrent_repository::repository::{RwLockStd, RwLockTokio}; +use torrust_tracker_torrent_repository::EntrySingle; + +use crate::common::repo::Repo; +use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; + +#[fixture] +fn standard() -> Repo { + Repo::Std(RwLockStd::default()) +} +#[fixture] +fn standard_mutex() -> Repo { + Repo::StdMutexStd(RwLockStd::default()) +} + +#[fixture] +fn 
standard_tokio() -> Repo { + Repo::StdMutexTokio(RwLockStd::default()) +} + +#[fixture] +fn tokio_std() -> Repo { + Repo::Tokio(RwLockTokio::default()) +} +#[fixture] +fn tokio_mutex() -> Repo { + Repo::TokioMutexStd(RwLockTokio::default()) +} + +#[fixture] +fn tokio_tokio() -> Repo { + Repo::TokioMutexTokio(RwLockTokio::default()) +} + +type Entries = Vec<(InfoHash, EntrySingle)>; + +#[fixture] +fn empty() -> Entries { + vec![] +} + +#[fixture] +fn default() -> Entries { + vec![(InfoHash::default(), EntrySingle::default())] +} + +#[fixture] +fn started() -> Entries { + let mut torrent = EntrySingle::default(); + torrent.insert_or_update_peer(&a_started_peer(1)); + vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn completed() -> Entries { + let mut torrent = EntrySingle::default(); + torrent.insert_or_update_peer(&a_completed_peer(2)); + vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn downloaded() -> Entries { + let mut torrent = EntrySingle::default(); + let mut peer = a_started_peer(3); + torrent.insert_or_update_peer(&peer); + peer.event = AnnounceEvent::Completed; + peer.left = NumberOfBytes(0); + torrent.insert_or_update_peer(&peer); + vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn three() -> Entries { + let mut started = EntrySingle::default(); + let started_h = &mut DefaultHasher::default(); + started.insert_or_update_peer(&a_started_peer(1)); + started.hash(started_h); + + let mut completed = EntrySingle::default(); + let completed_h = &mut DefaultHasher::default(); + completed.insert_or_update_peer(&a_completed_peer(2)); + completed.hash(completed_h); + + let mut downloaded = EntrySingle::default(); + let downloaded_h = &mut DefaultHasher::default(); + let mut downloaded_peer = a_started_peer(3); + downloaded.insert_or_update_peer(&downloaded_peer); + downloaded_peer.event = AnnounceEvent::Completed; + downloaded_peer.left = NumberOfBytes(0); + downloaded.insert_or_update_peer(&downloaded_peer); + downloaded.hash(downloaded_h); 
+ + vec![ + (InfoHash::from(&started_h.clone()), started), + (InfoHash::from(&completed_h.clone()), completed), + (InfoHash::from(&downloaded_h.clone()), downloaded), + ] +} + +#[fixture] +fn many_out_of_order() -> Entries { + let mut entries: HashSet<(InfoHash, EntrySingle)> = HashSet::default(); + + for i in 0..408 { + let mut entry = EntrySingle::default(); + entry.insert_or_update_peer(&a_started_peer(i)); + + entries.insert((InfoHash::from(&i), entry)); + } + + // we keep the random order from the hashed set for the vector. + entries.iter().map(|(i, e)| (*i, e.clone())).collect() +} + +#[fixture] +fn many_hashed_in_order() -> Entries { + let mut entries: BTreeMap = BTreeMap::default(); + + for i in 0..408 { + let mut entry = EntrySingle::default(); + entry.insert_or_update_peer(&a_started_peer(i)); + + let hash: &mut DefaultHasher = &mut DefaultHasher::default(); + hash.write_i32(i); + + entries.insert(InfoHash::from(&hash.clone()), entry); + } + + // We return the entries in-order from from the b-tree map. 
+ entries.iter().map(|(i, e)| (*i, e.clone())).collect() +} + +#[fixture] +fn persistent_empty() -> PersistentTorrents { + PersistentTorrents::default() +} + +#[fixture] +fn persistent_single() -> PersistentTorrents { + let hash = &mut DefaultHasher::default(); + + hash.write_u8(1); + let t = [(InfoHash::from(&hash.clone()), 0_u32)]; + + t.iter().copied().collect() +} + +#[fixture] +fn persistent_three() -> PersistentTorrents { + let hash = &mut DefaultHasher::default(); + + hash.write_u8(1); + let info_1 = InfoHash::from(&hash.clone()); + hash.write_u8(2); + let info_2 = InfoHash::from(&hash.clone()); + hash.write_u8(3); + let info_3 = InfoHash::from(&hash.clone()); + + let t = [(info_1, 1_u32), (info_2, 2_u32), (info_3, 3_u32)]; + + t.iter().copied().collect() +} + +async fn make(repo: &Repo, entries: &Entries) { + for (info_hash, entry) in entries { + repo.insert(info_hash, entry.clone()).await; + } +} + +#[fixture] +fn paginated_limit_zero() -> Pagination { + Pagination::new(0, 0) +} + +#[fixture] +fn paginated_limit_one() -> Pagination { + Pagination::new(0, 1) +} + +#[fixture] +fn paginated_limit_one_offset_one() -> Pagination { + Pagination::new(1, 1) +} + +#[fixture] +fn policy_none() -> TrackerPolicy { + TrackerPolicy::new(false, 0, false) +} + +#[fixture] +fn policy_persist() -> TrackerPolicy { + TrackerPolicy::new(false, 0, true) +} + +#[fixture] +fn policy_remove() -> TrackerPolicy { + TrackerPolicy::new(true, 0, false) +} + +#[fixture] +fn policy_remove_persist() -> TrackerPolicy { + TrackerPolicy::new(true, 0, true) +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_a_torrent_entry( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] 
repo: Repo, + #[case] entries: Entries, +) { + make(&repo, &entries).await; + + if let Some((info_hash, torrent)) = entries.first() { + assert_eq!(repo.get(info_hash).await, Some(torrent.clone())); + } else { + assert_eq!(repo.get(&InfoHash::default()).await, None); + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, + many_out_of_order: Entries, +) { + make(&repo, &entries).await; + + let entries_a = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + + make(&repo, &many_out_of_order).await; + + let entries_b = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + + let is_equal = entries_b.iter().take(entries_a.len()).copied().collect::>() == entries_a; + + let is_sorted = entries_b.windows(2).all(|w| w[0] <= w[1]); + + assert!( + is_equal || is_sorted, + "The order is unstable: {is_equal}, or is sorted {is_sorted}." 
+ ); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_paginated( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, + #[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination, +) { + make(&repo, &entries).await; + + let mut info_hashes = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + info_hashes.sort(); + + match paginated { + // it should return empty if limit is zero. + Pagination { limit: 0, .. } => assert_eq!(repo.get_paginated(Some(&paginated)).await, vec![]), + + // it should return a single entry if the limit is one. + Pagination { limit: 1, offset: 0 } => { + if info_hashes.is_empty() { + assert_eq!(repo.get_paginated(Some(&paginated)).await.len(), 0); + } else { + let page = repo.get_paginated(Some(&paginated)).await; + assert_eq!(page.len(), 1); + assert_eq!(page.first().map(|(i, _)| i), info_hashes.first()); + } + } + + // it should return the only the second entry if both the limit and the offset are one. + Pagination { limit: 1, offset: 1 } => { + if info_hashes.len() > 1 { + let page = repo.get_paginated(Some(&paginated)).await; + assert_eq!(page.len(), 1); + assert_eq!(page[0].0, info_hashes[1]); + } + } + // the other cases are not yet tested. 
+ _ => {} + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_metrics( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, +) { + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + + make(&repo, &entries).await; + + let mut metrics = TorrentsMetrics::default(); + + for (_, torrent) in entries { + let stats = torrent.get_stats(); + + metrics.torrents += 1; + metrics.incomplete += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + } + + assert_eq!(repo.get_metrics().await, metrics); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_import_persistent_torrents( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, + #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, +) { + make(&repo, &entries).await; + + let mut downloaded = repo.get_metrics().await.downloaded; + persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); + + repo.import_persistent(&persistent_torrents).await; + + assert_eq!(repo.get_metrics().await.downloaded, downloaded); + + for (entry, _) in persistent_torrents { + assert!(repo.get(&entry).await.is_some()); + } +} + +#[rstest] +#[case::empty(empty())] 
+#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_an_entry( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, +) { + make(&repo, &entries).await; + + for (info_hash, torrent) in entries { + assert_eq!(repo.get(&info_hash).await, Some(torrent.clone())); + assert_eq!(repo.remove(&info_hash).await, Some(torrent)); + + assert_eq!(repo.get(&info_hash).await, None); + assert_eq!(repo.remove(&info_hash).await, None); + } + + assert_eq!(repo.get_metrics().await.torrents, 0); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_inactive_peers( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, +) { + use std::ops::Sub as _; + use std::time::Duration; + + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self, Time as _}; + use torrust_tracker_primitives::peer; + + use crate::CurrentClock; + + const TIMEOUT: Duration = Duration::from_secs(120); + const EXPIRE: Duration = Duration::from_secs(121); + + make(&repo, &entries).await; + + let info_hash: InfoHash; + let mut peer: peer::Peer; + + // Generate a new infohash and peer. + { + let hash = &mut DefaultHasher::default(); + hash.write_u8(255); + info_hash = InfoHash::from(&hash.clone()); + peer = a_completed_peer(-1); + } + + // Set the last updated time of the peer to be 121 seconds ago. 
+ { + let now = clock::Working::now(); + clock::Stopped::local_set(&now); + + peer.updated = now.sub(EXPIRE); + } + + // Insert the infohash and peer into the repository + // and verify there is an extra torrent entry. + { + repo.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + assert_eq!(repo.get_metrics().await.torrents, entries.len() as u64 + 1); + } + + // Verify that this new peer was inserted into the repository. + { + let entry = repo.get(&info_hash).await.expect("it_should_get_some"); + assert!(entry.get_peers(None).contains(&peer.into())); + } + + // Remove peers that have not been updated since the timeout (120 seconds ago). + { + repo.remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")) + .await; + } + + // Verify that the this peer was removed from the repository. + { + let entry = repo.get(&info_hash).await.expect("it_should_get_some"); + assert!(!entry.get_peers(None).contains(&peer.into())); + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_peerless_torrents( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, + #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, +) { + make(&repo, &entries).await; + + repo.remove_peerless_torrents(&policy).await; + + let torrents = repo.get_paginated(None).await; + + for (_, entry) in torrents { + assert!(entry.is_good(&policy)); + } +}