
Commit 4af6c0b

Committed Apr 15, 2024

refactor: use generics for peer list implementation with SkipMap

1 parent bc95b84 · commit 4af6c0b

18 files changed · +338 −312 lines changed
 

packages/torrent-repository/src/entry/mod.rs
+2 −14

@@ -2,16 +2,13 @@ use std::fmt::Debug;
 use std::net::SocketAddr;
 use std::sync::Arc;
 
-use crossbeam_skiplist::SkipMap;
 use torrust_tracker_configuration::TrackerPolicy;
 use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
 use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch};
 
 pub mod mutex_std;
 pub mod mutex_tokio;
 pub mod single;
-pub mod skip_map;
-pub mod skip_map_mutex_std;
 
 pub trait Entry {
     /// It returns the swarm metadata (statistics) as a struct:
@@ -82,19 +79,10 @@ pub trait EntryAsync {
 /// that's the list of all the peers trying to download the same torrent.
 /// The tracker keeps one entry like this for every torrent.
 #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct Torrent {
+pub struct Torrent<T> {
     /// The swarm: a network of peers that are all trying to download the torrent associated to this entry
     // #[serde(skip)]
-    pub(crate) peers: std::collections::BTreeMap<peer::Id, Arc<peer::Peer>>,
-    /// The number of peers that have ever completed downloading the torrent associated to this entry
-    pub(crate) downloaded: u32,
-}
-
-#[derive(Debug, Default)]
-pub struct SkipMapTorrent {
-    /// The swarm: a network of peers that are all trying to download the torrent associated to this entry
-    // #[serde(skip)]
-    pub(crate) peers: SkipMap<peer::Id, Arc<peer::Peer>>,
+    pub(crate) peers: T,
     /// The number of peers that have ever completed downloading the torrent associated to this entry
     pub(crate) downloaded: u32,
 }
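
The BTreeMapPeerList and SkipMapPeerList aliases used by the impls in the files below are introduced elsewhere in this commit (the crate root is among the 18 changed files but is not shown in this excerpt). A minimal sketch of what they plausibly look like, assuming Torrent<T> is re-exported as EntrySingle<T> and wrapped in the usual mutex aliases; every path and name here is an assumption, not the commit's code:

// Sketch (assumed): crate-root type aliases that would make the impls
// in the following files compile. Not shown in this excerpt.
use std::collections::BTreeMap;
use std::sync::Arc;

use crossbeam_skiplist::SkipMap;
use torrust_tracker_primitives::peer;

use crate::entry::Torrent;

// The two concrete peer-list backends the generic Torrent<T> is used with.
pub type BTreeMapPeerList = BTreeMap<peer::Id, Arc<peer::Peer>>;
pub type SkipMapPeerList = SkipMap<peer::Id, Arc<peer::Peer>>;

// Entry flavors: plain, std-mutex-guarded, and tokio-mutex-guarded.
pub type EntrySingle<T> = Torrent<T>;
pub type EntryMutexStd<T> = Arc<std::sync::Mutex<EntrySingle<T>>>;
pub type EntryMutexTokio<T> = Arc<tokio::sync::Mutex<EntrySingle<T>>>;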

packages/torrent-repository/src/entry/mutex_std.rs
+40 −4

@@ -6,9 +6,9 @@ use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
 use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch};
 
 use super::{Entry, EntrySync};
-use crate::{EntryMutexStd, EntrySingle};
+use crate::{BTreeMapPeerList, EntryMutexStd, EntrySingle, SkipMapPeerList};
 
-impl EntrySync for EntryMutexStd {
+impl EntrySync for EntryMutexStd<BTreeMapPeerList> {
     fn get_swarm_metadata(&self) -> SwarmMetadata {
         self.lock().expect("it should get a lock").get_swarm_metadata()
     }
@@ -44,8 +44,44 @@ impl EntrySync for EntryMutexStd {
     }
 }
 
-impl From<EntrySingle> for EntryMutexStd {
-    fn from(entry: EntrySingle) -> Self {
+impl EntrySync for EntryMutexStd<SkipMapPeerList> {
+    fn get_swarm_metadata(&self) -> SwarmMetadata {
+        self.lock().expect("it should get a lock").get_swarm_metadata()
+    }
+
+    fn is_good(&self, policy: &TrackerPolicy) -> bool {
+        self.lock().expect("it should get a lock").is_good(policy)
+    }
+
+    fn peers_is_empty(&self) -> bool {
+        self.lock().expect("it should get a lock").peers_is_empty()
+    }
+
+    fn get_peers_len(&self) -> usize {
+        self.lock().expect("it should get a lock").get_peers_len()
+    }
+
+    fn get_peers(&self, limit: Option<usize>) -> Vec<Arc<peer::Peer>> {
+        self.lock().expect("it should get lock").get_peers(limit)
+    }
+
+    fn get_peers_for_client(&self, client: &SocketAddr, limit: Option<usize>) -> Vec<Arc<peer::Peer>> {
+        self.lock().expect("it should get lock").get_peers_for_client(client, limit)
+    }
+
+    fn upsert_peer(&self, peer: &peer::Peer) -> bool {
+        self.lock().expect("it should lock the entry").upsert_peer(peer)
+    }
+
+    fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) {
+        self.lock()
+            .expect("it should lock the entry")
+            .remove_inactive_peers(current_cutoff);
+    }
+}
+
+impl<T> From<EntrySingle<T>> for EntryMutexStd<T> {
+    fn from(entry: EntrySingle<T>) -> Self {
         Arc::new(std::sync::Mutex::new(entry))
     }
 }
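
As a usage illustration (not part of the commit), an entry can be wrapped for lock-guarded sharing through the generic From impl above. The crate and module paths here are assumptions; Default on Torrent<T> only needs T: Default, which both peer-list backends satisfy:

// Sketch (assumed paths): building a shared sync entry and calling the
// EntrySync methods, which lock internally.
use torrust_tracker_torrent_repository::entry::EntrySync;
use torrust_tracker_torrent_repository::{EntryMutexStd, EntrySingle, SkipMapPeerList};
use torrust_tracker_primitives::peer;

fn demo(peer: &peer::Peer) {
    // Start from a plain entry, then wrap it in Arc<Mutex<..>> via From.
    let entry: EntrySingle<SkipMapPeerList> = EntrySingle::default();
    let shared: EntryMutexStd<SkipMapPeerList> = entry.into();

    // Callers never touch the mutex directly; EntrySync hides the locking.
    shared.upsert_peer(peer);
    // Holds as long as the peer's announce event is not Stopped.
    assert_eq!(shared.get_peers_len(), 1);
}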

packages/torrent-repository/src/entry/mutex_tokio.rs
+38 −4

@@ -6,9 +6,9 @@ use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
 use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch};
 
 use super::{Entry, EntryAsync};
-use crate::{EntryMutexTokio, EntrySingle};
+use crate::{BTreeMapPeerList, EntryMutexTokio, EntrySingle, SkipMapPeerList};
 
-impl EntryAsync for EntryMutexTokio {
+impl EntryAsync for EntryMutexTokio<BTreeMapPeerList> {
     async fn get_swarm_metadata(&self) -> SwarmMetadata {
         self.lock().await.get_swarm_metadata()
     }
@@ -42,8 +42,42 @@ impl EntryAsync for EntryMutexTokio {
     }
 }
 
-impl From<EntrySingle> for EntryMutexTokio {
-    fn from(entry: EntrySingle) -> Self {
+impl EntryAsync for EntryMutexTokio<SkipMapPeerList> {
+    async fn get_swarm_metadata(&self) -> SwarmMetadata {
+        self.lock().await.get_swarm_metadata()
+    }
+
+    async fn check_good(self, policy: &TrackerPolicy) -> bool {
+        self.lock().await.is_good(policy)
+    }
+
+    async fn peers_is_empty(&self) -> bool {
+        self.lock().await.peers_is_empty()
+    }
+
+    async fn get_peers_len(&self) -> usize {
+        self.lock().await.get_peers_len()
+    }
+
+    async fn get_peers(&self, limit: Option<usize>) -> Vec<Arc<peer::Peer>> {
+        self.lock().await.get_peers(limit)
+    }
+
+    async fn get_peers_for_client(&self, client: &SocketAddr, limit: Option<usize>) -> Vec<Arc<peer::Peer>> {
+        self.lock().await.get_peers_for_client(client, limit)
+    }
+
+    async fn upsert_peer(self, peer: &peer::Peer) -> bool {
+        self.lock().await.upsert_peer(peer)
+    }
+
+    async fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) {
+        self.lock().await.remove_inactive_peers(current_cutoff);
+    }
+}
+
+impl<T> From<EntrySingle<T>> for EntryMutexTokio<T> {
+    fn from(entry: EntrySingle<T>) -> Self {
        Arc::new(tokio::sync::Mutex::new(entry))
     }
 }
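
Note that check_good, upsert_peer, and remove_inactive_peers take self by value in the async trait. With EntryMutexTokio<T> being an Arc (per the alias sketched earlier), passing self only moves a cheap handle clone, which suits spawned tasks. A sketch under the same assumed paths:

// Sketch (assumed paths): each task consumes its own Arc clone; the
// tokio mutex serializes access to the inner entry.
use torrust_tracker_torrent_repository::entry::EntryAsync;
use torrust_tracker_torrent_repository::{EntryMutexTokio, SkipMapPeerList};
use torrust_tracker_primitives::peer::Peer;

async fn announce(entry: EntryMutexTokio<SkipMapPeerList>, peer: Peer) {
    let handle = entry.clone();
    let _task = tokio::spawn(async move {
        // upsert_peer(self, ..) consumes the cloned handle, not the original.
        handle.upsert_peer(&peer).await;
    });
}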

packages/torrent-repository/src/entry/single.rs
+108 −3

@@ -3,14 +3,14 @@ use std::sync::Arc;
 
 use torrust_tracker_configuration::TrackerPolicy;
 use torrust_tracker_primitives::announce_event::AnnounceEvent;
-use torrust_tracker_primitives::peer::{self};
+use torrust_tracker_primitives::peer::{self, ReadInfo};
 use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
 use torrust_tracker_primitives::DurationSinceUnixEpoch;
 
 use super::Entry;
-use crate::EntrySingle;
+use crate::{BTreeMapPeerList, EntrySingle, SkipMapPeerList};
 
-impl Entry for EntrySingle {
+impl Entry for EntrySingle<BTreeMapPeerList> {
     #[allow(clippy::cast_possible_truncation)]
     fn get_swarm_metadata(&self) -> SwarmMetadata {
         let complete: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32;
@@ -98,3 +98,108 @@ impl Entry for EntrySingle {
             .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff);
     }
 }
+
+impl Entry for EntrySingle<SkipMapPeerList> {
+    #[allow(clippy::cast_possible_truncation)]
+    fn get_swarm_metadata(&self) -> SwarmMetadata {
+        let complete: u32 = self.peers.iter().filter(|entry| entry.value().is_seeder()).count() as u32;
+        let incomplete: u32 = self.peers.len() as u32 - complete;
+
+        SwarmMetadata {
+            downloaded: self.downloaded,
+            complete,
+            incomplete,
+        }
+    }
+
+    fn is_good(&self, policy: &TrackerPolicy) -> bool {
+        if policy.persistent_torrent_completed_stat && self.downloaded > 0 {
+            return true;
+        }
+
+        if policy.remove_peerless_torrents && self.peers.is_empty() {
+            return false;
+        }
+
+        true
+    }
+
+    fn peers_is_empty(&self) -> bool {
+        self.peers.is_empty()
+    }
+
+    fn get_peers_len(&self) -> usize {
+        self.peers.len()
+    }
+    fn get_peers(&self, limit: Option<usize>) -> Vec<Arc<peer::Peer>> {
+        match limit {
+            Some(limit) => self.peers.iter().take(limit).map(|entry| entry.value().clone()).collect(),
+            None => self.peers.iter().map(|entry| entry.value().clone()).collect(),
+        }
+    }
+
+    fn get_peers_for_client(&self, client: &SocketAddr, limit: Option<usize>) -> Vec<Arc<peer::Peer>> {
+        match limit {
+            Some(limit) => self
+                .peers
+                .iter()
+                // Take peers which are not the client peer
+                .filter(|entry| peer::ReadInfo::get_address(entry.value().as_ref()) != *client)
+                // Limit the number of peers on the result
+                .take(limit)
+                .map(|entry| entry.value().clone())
+                .collect(),
+            None => self
+                .peers
+                .iter()
+                // Take peers which are not the client peer
+                .filter(|entry| peer::ReadInfo::get_address(entry.value().as_ref()) != *client)
+                .map(|entry| entry.value().clone())
+                .collect(),
+        }
+    }
+
+    fn upsert_peer(&mut self, peer: &peer::Peer) -> bool {
+        let mut downloaded_stats_updated: bool = false;
+
+        match peer::ReadInfo::get_event(peer) {
+            AnnounceEvent::Stopped => {
+                drop(self.peers.remove(&peer::ReadInfo::get_id(peer)));
+            }
+            AnnounceEvent::Completed => {
+                let previous = self.peers.get(&peer.get_id());
+
+                let increase_downloads = match previous {
+                    Some(entry) => {
+                        // Don't count if peer was already completed.
+                        entry.value().event != AnnounceEvent::Completed
+                    }
+                    None => {
+                        // Don't count if peer was not previously known
+                        false
+                    }
+                };
+
+                self.peers.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer));
+
+                if increase_downloads {
+                    self.downloaded += 1;
+                    downloaded_stats_updated = true;
+                }
+            }
+            _ => {
+                drop(self.peers.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer)));
+            }
+        }
+
+        downloaded_stats_updated
+    }
+
+    fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) {
+        for entry in &self.peers {
+            if entry.value().get_updated() <= current_cutoff {
+                entry.remove();
+            }
+        }
+    }
+}
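
The SkipMap-backed impl reads peers through Entry handles (iter().map(|entry| entry.value())) where the BTreeMap impl uses values(), and it can insert and remove through &self. A standalone sketch of the crossbeam_skiplist calls the impl above relies on; the key and value types here are placeholders:

// Sketch: the crossbeam_skiplist::SkipMap API surface used above.
use std::sync::Arc;

use crossbeam_skiplist::SkipMap;

fn main() {
    let peers: SkipMap<u32, Arc<&'static str>> = SkipMap::new();

    // insert and remove take &self: the map supports mutation through
    // shared references, unlike BTreeMap.
    peers.insert(1, Arc::new("peer-1"));
    peers.insert(2, Arc::new("peer-2"));

    // Iteration yields Entry handles; values are read via entry.value().
    let all: Vec<_> = peers.iter().map(|entry| entry.value().clone()).collect();
    assert_eq!(all.len(), 2);

    // An Entry can remove itself, as remove_inactive_peers does.
    if let Some(entry) = peers.get(&1) {
        entry.remove();
    }
    assert_eq!(peers.len(), 1);
}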
