diff --git a/.vscode/settings.json b/.vscode/settings.json index 701e89ccf..caa48dd01 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -31,4 +31,5 @@ "evenBetterToml.formatter.trailingNewline": true, "evenBetterToml.formatter.reorderKeys": true, "evenBetterToml.formatter.reorderArrays": true, + } \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 5722032b8..0bdd83b9b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -167,6 +167,40 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3" +dependencies = [ + "concurrent-queue", + "event-listener 5.2.0", + "event-listener-strategy 0.5.0", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-compression" version = "0.4.6" @@ -183,6 +217,128 @@ dependencies = [ "zstd-safe", ] +[[package]] +name = "async-executor" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" +dependencies = [ + "async-lock 3.3.0", + "async-task", + "concurrent-queue", + "fastrand 2.0.1", + "futures-lite 2.3.0", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" +dependencies = [ + "async-channel 2.2.0", + "async-executor", + "async-io 2.3.2", + "async-lock 3.3.0", + "blocking", + "futures-lite 2.3.0", + "once_cell", + "tokio", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-lite 1.13.0", + "log", + "parking", + "polling 2.8.0", + "rustix 0.37.27", + "slab", + "socket2 0.4.10", + "waker-fn", +] + +[[package]] +name = "async-io" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcccb0f599cfa2f8ace422d3555572f47424da5648a4382a9dd0310ff8210884" +dependencies = [ + "async-lock 3.3.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.3.0", + "parking", + "polling 3.6.0", + "rustix 0.38.32", + "slab", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" +dependencies = [ + "event-listener 4.0.3", + "event-listener-strategy 0.4.0", + "pin-project-lite", +] + +[[package]] +name = "async-std" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +dependencies = [ + "async-attributes", + "async-channel 1.9.0", + "async-global-executor", + "async-io 1.13.0", + "async-lock 2.8.0", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite 1.13.0", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + +[[package]] +name = "async-task" +version = "4.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" + [[package]] name = "async-trait" version = "0.1.78" @@ -194,6 +350,12 @@ dependencies = [ "syn 2.0.53", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "autocfg" version = "1.1.0" @@ -418,6 +580,22 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blocking" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +dependencies = [ + "async-channel 2.2.0", + "async-lock 3.3.0", + "async-task", + "fastrand 2.0.1", + "futures-io", + "futures-lite 2.3.0", + "piper", + "tracing", +] + [[package]] name = "borsh" version = "1.3.1" @@ -662,6 +840,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "concurrent-queue" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "config" version = "0.14.0" @@ -762,6 +949,7 @@ dependencies = [ "ciborium", "clap", "criterion-plot", + "futures", "is-terminal", "itertools 0.10.5", "num-traits", @@ -774,6 +962,7 @@ dependencies = [ "serde_derive", "serde_json", "tinytemplate", + "tokio", "walkdir", ] @@ -994,6 +1183,54 @@ dependencies = [ "version_check", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.3", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" 
+version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feedafcaa9b749175d5ac357452a9d41ea2911da598fde46ce1fe02c37751291" +dependencies = [ + "event-listener 5.2.0", + "pin-project-lite", +] + [[package]] name = "fallible-iterator" version = "0.3.0" @@ -1006,6 +1243,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + [[package]] name = "fastrand" version = "2.0.1" @@ -1184,6 +1430,34 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "fastrand 2.0.1", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + [[package]] name = "futures-macro" version = "0.3.30" @@ -1207,6 +1481,12 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + [[package]] name = "futures-util" version = "0.3.30" @@ -1258,6 +1538,18 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "h2" version = "0.4.3" @@ -1450,7 +1742,7 @@ dependencies = [ "http-body", "hyper", "pin-project-lite", - "socket2", + "socket2 0.5.6", "tokio", "tower", "tower-service", @@ -1518,6 +1810,15 @@ dependencies = [ "serde", ] +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + [[package]] name = "io-enum" version = "1.1.3" @@ -1527,6 +1828,17 @@ dependencies = [ "derive_utils", ] +[[package]] +name = "io-lifetimes" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "ipnet" version = "2.9.0" @@ -1597,6 +1909,15 @@ dependencies = [ "serde", ] +[[package]] +name = "kv-log-macro" +version 
= "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -1726,6 +2047,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linux-raw-sys" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + [[package]] name = "linux-raw-sys" version = "0.4.13" @@ -1759,6 +2086,9 @@ name = "log" version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +dependencies = [ + "value-bag", +] [[package]] name = "lru" @@ -1870,7 +2200,7 @@ dependencies = [ "percent-encoding", "serde", "serde_json", - "socket2", + "socket2 0.5.6", "twox-hash", "url", ] @@ -2119,6 +2449,12 @@ dependencies = [ "hashbrown 0.13.2", ] +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" + [[package]] name = "parking_lot" version = "0.12.1" @@ -2279,6 +2615,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.0.1", + "futures-io", +] + [[package]] name = "pkg-config" version = "0.3.30" @@ -2313,6 +2660,37 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + +[[package]] +name = "polling" +version = "3.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0c976a60b2d7e99d6f229e414670a9b85d13ac305cc6d1e9c134de58c5aaaf6" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi", + "pin-project-lite", + "rustix 0.38.32", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -2558,6 +2936,12 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +[[package]] +name = "relative-path" +version = "1.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e898588f33fdd5b9420719948f9f2a32c922a246964576f71ba7f24f80610fbc" + [[package]] name = "rend" version = "0.4.2" @@ -2674,6 +3058,35 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "rstest" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97eeab2f3c0a199bc4be135c36c924b6590b88c377d416494288c14f2db30199" +dependencies = [ + "futures", + "futures-timer", + "rstest_macros", + "rustc_version", +] + +[[package]] +name = "rstest_macros" +version = "0.18.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" +dependencies = [ + "cfg-if", + "glob", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version", + "syn 2.0.53", + "unicode-ident", +] + [[package]] name = "rusqlite" version = "0.31.0" @@ -2735,6 +3148,20 @@ dependencies = [ "semver", ] +[[package]] +name = "rustix" +version = "0.37.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + [[package]] name = "rustix" version = "0.38.32" @@ -2744,7 +3171,7 @@ dependencies = [ "bitflags 2.5.0", "errno", "libc", - "linux-raw-sys", + "linux-raw-sys 0.4.13", "windows-sys 0.52.0", ] @@ -3086,9 +3513,19 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + +[[package]] +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +dependencies = [ + "libc", + "winapi", +] [[package]] name = "socket2" @@ -3225,8 +3662,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", - "fastrand", - "rustix", + "fastrand 2.0.1", + "rustix 0.38.32", "windows-sys 0.52.0", ] @@ -3343,7 +3780,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.5.6", "tokio-macros", "windows-sys 0.48.0", ] @@ -3449,17 +3886,6 @@ dependencies = [ "winnow 0.6.5", ] -[[package]] -name = "torrust-torrent-repository-benchmarks" -version = "3.0.0-alpha.12-develop" -dependencies = [ - "aquatic_udp_protocol", - "clap", - "futures", - "tokio", - "torrust-tracker", -] - [[package]] name = "torrust-tracker" version = "3.0.0-alpha.12-develop" @@ -3471,7 +3897,6 @@ dependencies = [ "axum-client-ip", "axum-extra", "axum-server", - "binascii", "chrono", "clap", "colored", @@ -3498,15 +3923,15 @@ dependencies = [ "serde_bytes", "serde_json", "serde_repr", - "tdyne-peer-id", - "tdyne-peer-id-registry", "thiserror", "tokio", + "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-contrib-bencode", "torrust-tracker-located-error", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "tower-http", "trace", "tracing", @@ -3514,6 +3939,15 @@ dependencies = [ "uuid", ] +[[package]] +name = "torrust-tracker-clock" +version = "3.0.0-alpha.12-develop" +dependencies = [ + "chrono", + "lazy_static", + "torrust-tracker-primitives", +] + [[package]] name = "torrust-tracker-configuration" version = "3.0.0-alpha.12-develop" @@ -3549,8 +3983,12 @@ dependencies = [ name = "torrust-tracker-primitives" version = "3.0.0-alpha.12-develop" dependencies = [ + "binascii", "derive_more", "serde", + "tdyne-peer-id", + "tdyne-peer-id-registry", + "thiserror", ] [[package]] @@ -3562,6 +4000,20 @@ dependencies = [ "torrust-tracker-primitives", ] +[[package]] +name = "torrust-tracker-torrent-repository" +version = "3.0.0-alpha.12-develop" 
+dependencies = [
+ "async-std",
+ "criterion",
+ "futures",
+ "rstest",
+ "tokio",
+ "torrust-tracker-clock",
+ "torrust-tracker-configuration",
+ "torrust-tracker-primitives",
+]
+
 [[package]]
 name = "tower"
 version = "0.4.13"
@@ -3744,6 +4196,12 @@ dependencies = [
  "rand",
 ]
 
+[[package]]
+name = "value-bag"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "74797339c3b98616c009c7c3eb53a0ce41e85c8ec66bd3db96ed132d20cfdee8"
+
 [[package]]
 name = "vcpkg"
 version = "0.2.15"
@@ -3756,6 +4214,12 @@ version = "0.9.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
 
+[[package]]
+name = "waker-fn"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690"
+
 [[package]]
 name = "walkdir"
 version = "2.5.0"
diff --git a/Cargo.toml b/Cargo.toml
index e6f196583..99b7a334a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -37,7 +37,6 @@ axum = { version = "0", features = ["macros"] }
 axum-client-ip = "0"
 axum-extra = { version = "0", features = ["query"] }
 axum-server = { version = "0", features = ["tls-rustls"] }
-binascii = "0"
 chrono = { version = "0", default-features = false, features = ["clock"] }
 clap = { version = "4", features = ["derive", "env"] }
 colored = "2"
@@ -62,14 +61,14 @@ serde_bencode = "0"
 serde_bytes = "0"
 serde_json = "1"
 serde_repr = "0"
-tdyne-peer-id = "1"
-tdyne-peer-id-registry = "0"
 thiserror = "1"
 tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] }
 torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "packages/configuration" }
+torrust-tracker-clock = { version = "3.0.0-alpha.12-develop", path = "packages/clock" }
 torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = "contrib/bencode" }
 torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "packages/located-error" }
 torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" }
+torrust-tracker-torrent-repository = { version = "3.0.0-alpha.12-develop", path = "packages/torrent-repository" }
 tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] }
 trace = "0"
 tracing = "0"
@@ -91,7 +90,7 @@ members = [
     "packages/located-error",
     "packages/primitives",
     "packages/test-helpers",
-    "packages/torrent-repository-benchmarks",
+    "packages/torrent-repository"
 ]
 
 [profile.dev]
diff --git a/cSpell.json b/cSpell.json
index 16dff714e..bbcba98a7 100644
--- a/cSpell.json
+++ b/cSpell.json
@@ -5,6 +5,7 @@
     "alekitto",
     "appuser",
     "Arvid",
+    "asyn",
     "autoclean",
     "AUTOINCREMENT",
     "automock",
@@ -33,8 +34,10 @@
     "completei",
     "connectionless",
     "Containerfile",
+    "conv",
     "curr",
     "Cyberneering",
+    "dashmap",
     "datagram",
     "datetime",
     "debuginfo",
@@ -48,6 +51,7 @@
     "filesd",
     "flamegraph",
     "Freebox",
+    "Frostegård",
     "gecos",
     "Grcov",
     "hasher",
@@ -66,6 +70,7 @@
     "Intermodal",
     "intervali",
     "kcachegrind",
+    "Joakim",
     "keyout",
     "lcov",
     "leecher",
@@ -94,6 +99,8 @@
     "oneshot",
     "ostr",
     "Pando",
+    "peekable",
+    "peerlist",
     "proot",
     "proto",
     "Quickstart",
@@ -107,9 +114,11 @@
     "reqwest",
     "rerequests",
     "ringbuf",
+    "ringsize",
     "rngs",
     "rosegment",
     "routable",
+    "rstest",
     "rusqlite",
     "RUSTDOCFLAGS",
     "RUSTFLAGS",
diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml
new file mode 100644
index 000000000..d7192b6e4
--- /dev/null
+++ b/packages/clock/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+description = "A library providing a clock for the Torrust Tracker."
+keywords = ["library", "clock", "torrents"]
+name = "torrust-tracker-clock"
+readme = "README.md"
+
+authors.workspace = true
+categories.workspace = true
+documentation.workspace = true
+edition.workspace = true
+homepage.workspace = true
+license.workspace = true
+publish.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+version.workspace = true
+
+[dependencies]
+lazy_static = "1"
+chrono = { version = "0", default-features = false, features = ["clock"] }
+
+torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" }
+
+[dev-dependencies]
diff --git a/packages/clock/README.md b/packages/clock/README.md
new file mode 100644
index 000000000..bfdd7808f
--- /dev/null
+++ b/packages/clock/README.md
@@ -0,0 +1,11 @@
+# Torrust Tracker Clock
+
+A library to provide a working and mockable clock for the [Torrust Tracker](https://github.com/torrust/torrust-tracker).
+
+## Documentation
+
+[Crate documentation](https://docs.rs/torrust-tracker-clock).
+
+## License
+
+The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE).
diff --git a/packages/clock/src/clock/mod.rs b/packages/clock/src/clock/mod.rs
new file mode 100644
index 000000000..50afbc9db
--- /dev/null
+++ b/packages/clock/src/clock/mod.rs
@@ -0,0 +1,74 @@
+use std::time::Duration;
+
+use torrust_tracker_primitives::DurationSinceUnixEpoch;
+
+use self::stopped::StoppedClock;
+use self::working::WorkingClock;
+
+pub mod stopped;
+pub mod working;
+
+/// A generic structure that represents a clock.
+///
+/// It can be either the working clock (production) or the stopped clock
+/// (testing). It implements the `Time` trait, which gives you the current time.
+#[derive(Debug)]
+pub struct Clock<T> {
+    clock: std::marker::PhantomData<T>,
+}
+
+/// The working clock. It returns the current time.
+pub type Working = Clock<WorkingClock>;
+/// The stopped clock. It always returns the same fixed time.
+pub type Stopped = Clock<StoppedClock>;
+
+/// Trait for types that can be used as a timestamp clock.
+pub trait Time: Sized {
+    fn now() -> DurationSinceUnixEpoch;
+
+    fn dbg_clock_type() -> String;
+
+    #[must_use]
+    fn now_add(add_time: &Duration) -> Option<DurationSinceUnixEpoch> {
+        Self::now().checked_add(*add_time)
+    }
+    #[must_use]
+    fn now_sub(sub_time: &Duration) -> Option<DurationSinceUnixEpoch> {
+        Self::now().checked_sub(*sub_time)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::any::TypeId;
+    use std::time::Duration;
+
+    use crate::clock::{self, Stopped, Time, Working};
+    use crate::CurrentClock;
+
+    #[test]
+    fn it_should_be_the_stopped_clock_as_default_when_testing() {
+        // We are testing, so we should default to the fixed time.
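+        // (`CurrentClock` is the crate-level alias defined in `lib.rs`; it
+        // resolves to `Stopped` under `#[cfg(test)]` and to `Working` otherwise.)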
+        assert_eq!(TypeId::of::<Stopped>(), TypeId::of::<CurrentClock>());
+        assert_eq!(Stopped::now(), CurrentClock::now());
+    }
+
+    #[test]
+    fn it_should_have_different_times() {
+        assert_ne!(TypeId::of::<Stopped>(), TypeId::of::<Working>());
+        assert_ne!(Stopped::now(), Working::now());
+    }
+
+    #[test]
+    fn it_should_use_stopped_time_for_testing() {
+        assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned());
+
+        let time = CurrentClock::now();
+        std::thread::sleep(Duration::from_millis(50));
+        let time_2 = CurrentClock::now();
+
+        assert_eq!(time, time_2);
+    }
+}
diff --git a/packages/clock/src/clock/stopped/mod.rs b/packages/clock/src/clock/stopped/mod.rs
new file mode 100644
index 000000000..57655ab75
--- /dev/null
+++ b/packages/clock/src/clock/stopped/mod.rs
@@ -0,0 +1,230 @@
+/// Trait for types that can be used as a timestamp clock stopped
+/// at a given time.
+
+#[allow(clippy::module_name_repetitions)]
+pub struct StoppedClock {}
+
+#[allow(clippy::module_name_repetitions)]
+pub trait Stopped: clock::Time {
+    /// It sets the clock to a given time.
+    fn local_set(unix_time: &DurationSinceUnixEpoch);
+
+    /// It sets the clock to the Unix Epoch.
+    fn local_set_to_unix_epoch() {
+        Self::local_set(&DurationSinceUnixEpoch::ZERO);
+    }
+
+    /// It sets the clock to the time the application started.
+    fn local_set_to_app_start_time();
+
+    /// It sets the clock to the current system time.
+    fn local_set_to_system_time_now();
+
+    /// It adds a `Duration` to the clock.
+    ///
+    /// # Errors
+    ///
+    /// Will return `IntErrorKind` if `duration` would overflow the internal `Duration`.
+    fn local_add(duration: &Duration) -> Result<(), IntErrorKind>;
+
+    /// It subtracts a `Duration` from the clock.
+    /// # Errors
+    ///
+    /// Will return `IntErrorKind` if `duration` would underflow the internal `Duration`.
+    fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>;
+
+    /// It resets the clock to the default fixed time, which is the application
+    /// start time (or the Unix Epoch when testing).
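+    ///
+    /// A usage sketch (assuming the crate's stopped clock, which implements
+    /// this trait; the values are illustrative):
+    ///
+    /// ```rust,no_run
+    /// use std::time::Duration;
+    ///
+    /// use torrust_tracker_clock::clock::stopped::Stopped as _;
+    /// use torrust_tracker_clock::clock::{Stopped, Time};
+    ///
+    /// Stopped::local_set(&Duration::from_secs(42));
+    /// assert_eq!(Stopped::now(), Duration::from_secs(42));
+    /// Stopped::local_reset(); // back to the default fixed time
+    /// ```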
+    fn local_reset();
+}
+
+use std::num::IntErrorKind;
+use std::time::Duration;
+
+use super::{DurationSinceUnixEpoch, Time};
+use crate::clock;
+
+impl Time for clock::Stopped {
+    fn now() -> DurationSinceUnixEpoch {
+        detail::FIXED_TIME.with(|time| {
+            return *time.borrow();
+        })
+    }
+
+    fn dbg_clock_type() -> String {
+        "Stopped".to_owned()
+    }
+}
+
+impl Stopped for clock::Stopped {
+    fn local_set(unix_time: &DurationSinceUnixEpoch) {
+        detail::FIXED_TIME.with(|time| {
+            *time.borrow_mut() = *unix_time;
+        });
+    }
+
+    fn local_set_to_app_start_time() {
+        Self::local_set(&detail::get_app_start_time());
+    }
+
+    fn local_set_to_system_time_now() {
+        let now = std::time::SystemTime::now()
+            .duration_since(std::time::SystemTime::UNIX_EPOCH)
+            .unwrap();
+        Self::local_set(&now);
+    }
+
+    fn local_add(duration: &Duration) -> Result<(), IntErrorKind> {
+        detail::FIXED_TIME.with(|time| {
+            let time_borrowed = *time.borrow();
+            *time.borrow_mut() = match time_borrowed.checked_add(*duration) {
+                Some(time) => time,
+                None => {
+                    return Err(IntErrorKind::PosOverflow);
+                }
+            };
+            Ok(())
+        })
+    }
+
+    fn local_sub(duration: &Duration) -> Result<(), IntErrorKind> {
+        detail::FIXED_TIME.with(|time| {
+            let time_borrowed = *time.borrow();
+            *time.borrow_mut() = match time_borrowed.checked_sub(*duration) {
+                Some(time) => time,
+                None => {
+                    return Err(IntErrorKind::NegOverflow);
+                }
+            };
+            Ok(())
+        })
+    }
+
+    fn local_reset() {
+        Self::local_set(&detail::get_default_fixed_time());
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::thread;
+    use std::time::Duration;
+
+    use torrust_tracker_primitives::DurationSinceUnixEpoch;
+
+    use crate::clock::stopped::Stopped as _;
+    use crate::clock::{Stopped, Time, Working};
+
+    #[test]
+    fn it_should_default_to_zero_when_testing() {
+        assert_eq!(Stopped::now(), DurationSinceUnixEpoch::ZERO);
+    }
+
+    #[test]
+    fn it_should_be_possible_to_set_the_time() {
+        // Check we start with ZERO.
+        assert_eq!(Stopped::now(), Duration::ZERO);
+
+        // Set to Current Time and Check
+        let timestamp = Working::now();
+        Stopped::local_set(&timestamp);
+        assert_eq!(Stopped::now(), timestamp);
+
+        // Elapse the Current Time and Check
+        Stopped::local_add(&timestamp).unwrap();
+        assert_eq!(Stopped::now(), timestamp + timestamp);
+
+        // Reset to ZERO and Check
+        Stopped::local_reset();
+        assert_eq!(Stopped::now(), Duration::ZERO);
+    }
+
+    #[test]
+    fn it_should_default_to_zero_on_thread_exit() {
+        assert_eq!(Stopped::now(), Duration::ZERO);
+        let after5 = Working::now_add(&Duration::from_secs(5)).unwrap();
+        Stopped::local_set(&after5);
+        assert_eq!(Stopped::now(), after5);
+
+        let t = thread::spawn(move || {
+            // each thread starts out with the initial value of ZERO
+            assert_eq!(Stopped::now(), Duration::ZERO);
+
+            // and gets set to the current time.
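+            // (`FIXED_TIME` in the `detail` module below is a `thread_local!`,
+            // so this write is invisible to the spawning thread.)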
+            let timestamp = Working::now();
+            Stopped::local_set(&timestamp);
+            assert_eq!(Stopped::now(), timestamp);
+        });
+
+        // wait for the thread to complete and bail out on panic
+        t.join().unwrap();
+
+        // we retain our original value of current time + 5sec despite the child thread
+        assert_eq!(Stopped::now(), after5);
+
+        // Reset to ZERO and Check
+        Stopped::local_reset();
+        assert_eq!(Stopped::now(), Duration::ZERO);
+    }
+}
+
+mod detail {
+    use std::cell::RefCell;
+    use std::time::SystemTime;
+
+    use torrust_tracker_primitives::DurationSinceUnixEpoch;
+
+    use crate::static_time;
+
+    thread_local!(pub static FIXED_TIME: RefCell<DurationSinceUnixEpoch> = RefCell::new(get_default_fixed_time()));
+
+    pub fn get_app_start_time() -> DurationSinceUnixEpoch {
+        (*static_time::TIME_AT_APP_START)
+            .duration_since(SystemTime::UNIX_EPOCH)
+            .unwrap()
+    }
+
+    #[cfg(not(test))]
+    pub fn get_default_fixed_time() -> DurationSinceUnixEpoch {
+        get_app_start_time()
+    }
+
+    #[cfg(test)]
+    pub fn get_default_fixed_time() -> DurationSinceUnixEpoch {
+        DurationSinceUnixEpoch::ZERO
+    }
+
+    #[cfg(test)]
+    mod tests {
+        use std::time::Duration;
+
+        use crate::clock::stopped::detail::{get_app_start_time, get_default_fixed_time};
+
+        #[test]
+        fn it_should_get_the_zero_start_time_when_testing() {
+            assert_eq!(get_default_fixed_time(), Duration::ZERO);
+        }
+
+        #[test]
+        fn it_should_get_app_start_time() {
+            const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1_662_983_731, 22312);
+            assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST);
+        }
+    }
+}
diff --git a/packages/clock/src/clock/working/mod.rs b/packages/clock/src/clock/working/mod.rs
new file mode 100644
index 000000000..6d0b4dcf7
--- /dev/null
+++ b/packages/clock/src/clock/working/mod.rs
@@ -0,0 +1,18 @@
+use std::time::SystemTime;
+
+use torrust_tracker_primitives::DurationSinceUnixEpoch;
+
+use crate::clock;
+
+#[allow(clippy::module_name_repetitions)]
+pub struct WorkingClock;
+
+impl clock::Time for clock::Working {
+    fn now() -> DurationSinceUnixEpoch {
+        SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap()
+    }
+
+    fn dbg_clock_type() -> String {
+        "Working".to_owned()
+    }
+}
diff --git a/packages/clock/src/conv/mod.rs b/packages/clock/src/conv/mod.rs
new file mode 100644
index 000000000..f70950c38
--- /dev/null
+++ b/packages/clock/src/conv/mod.rs
@@ -0,0 +1,81 @@
+use std::str::FromStr;
+
+use chrono::{DateTime, Utc};
+use torrust_tracker_primitives::DurationSinceUnixEpoch;
+
+/// It converts a string in ISO 8601 format to a timestamp.
+/// For example, the string `1970-01-01T00:00:00.000Z`, which is the Unix Epoch,
+/// will be converted to a timestamp of 0: `DurationSinceUnixEpoch::ZERO`.
+///
+/// # Panics
+///
+/// Will panic if the input time cannot be converted to a `DateTime::<Utc>`, which internally uses the `i64` type.
+/// (this will naturally happen in 292.5 billion years)
+#[must_use]
+pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEpoch {
+    convert_from_datetime_utc_to_timestamp(&DateTime::<Utc>::from_str(iso_8601).unwrap())
+}
+
+/// It converts a `DateTime::<Utc>` to a timestamp.
+/// For example, the `DateTime::<Utc>` of the Unix Epoch will be converted to a
+/// timestamp of 0: `DurationSinceUnixEpoch::ZERO`.
+///
+/// # Panics
+///
+/// Will panic if the input time overflows the `u64` type.
+/// (this will naturally happen in 584.9 billion years) +#[must_use] +pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::from_secs(u64::try_from(datetime_utc.timestamp()).expect("Overflow of u64 seconds, very future!")) +} + +/// It converts a timestamp to a `DateTime::`. +/// For example, the timestamp of 0: `DurationSinceUnixEpoch::ZERO` will be +/// converted to the `DateTime::` of the Unix Epoch. +/// +/// # Panics +/// +/// Will panic if the input time overflows the `u64` seconds overflows the `i64` type. +/// (this will naturally happen in 292.5 billion years) +#[must_use] +pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime { + DateTime::from_timestamp( + i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), + duration.subsec_nanos(), + ) + .unwrap() +} + +#[cfg(test)] + +mod tests { + use chrono::DateTime; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::conv::{ + convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc, + }; + + #[test] + fn should_be_converted_to_datetime_utc() { + let timestamp = DurationSinceUnixEpoch::ZERO; + assert_eq!( + convert_from_timestamp_to_datetime_utc(timestamp), + DateTime::from_timestamp(0, 0).unwrap() + ); + } + + #[test] + fn should_be_converted_from_datetime_utc() { + let datetime = DateTime::from_timestamp(0, 0).unwrap(); + assert_eq!( + convert_from_datetime_utc_to_timestamp(&datetime), + DurationSinceUnixEpoch::ZERO + ); + } + + #[test] + fn should_be_converted_from_datetime_utc_in_iso_8601() { + let iso_8601 = "1970-01-01T00:00:00.000Z".to_string(); + assert_eq!(convert_from_iso_8601_to_timestamp(&iso_8601), DurationSinceUnixEpoch::ZERO); + } +} diff --git a/packages/clock/src/lib.rs b/packages/clock/src/lib.rs new file mode 100644 index 000000000..9fc67cb54 --- /dev/null +++ b/packages/clock/src/lib.rs @@ -0,0 +1,53 @@ +//! Time related functions and types. +//! +//! It's usually a good idea to control where the time comes from +//! in an application so that it can be mocked for testing and it can be +//! controlled in production so we get the intended behavior without +//! relying on the specific time zone for the underlying system. +//! +//! Clocks use the type `DurationSinceUnixEpoch` which is a +//! `std::time::Duration` since the Unix Epoch (timestamp). +//! +//! ```text +//! Local time: lun 2023-03-27 16:12:00 WEST +//! Universal time: lun 2023-03-27 15:12:00 UTC +//! Time zone: Atlantic/Canary (WEST, +0100) +//! Timestamp: 1679929914 +//! Duration: 1679929914.10167426 +//! ``` +//! +//! > **NOTICE**: internally the `Duration` is stores it's main unit as seconds in a `u64` and it will +//! overflow in 584.9 billion years. +//! +//! > **NOTICE**: the timestamp does not depend on the time zone. That gives you +//! the ability to use the clock regardless of the underlying system time zone +//! configuration. See [Unix time Wikipedia entry](https://en.wikipedia.org/wiki/Unix_time). + +pub mod clock; +pub mod conv; +pub mod static_time; +pub mod time_extent; + +#[macro_use] +extern crate lazy_static; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. 
+#[cfg(test)]
+#[allow(dead_code)]
+pub(crate) type CurrentClock = clock::Stopped;
+
+/// Working version, for production.
+#[cfg(not(test))]
+#[allow(dead_code)]
+pub(crate) type DefaultTimeExtentMaker = time_extent::WorkingTimeExtentMaker;
+
+/// Stopped version, for testing.
+#[cfg(test)]
+#[allow(dead_code)]
+pub(crate) type DefaultTimeExtentMaker = time_extent::StoppedTimeExtentMaker;
diff --git a/src/shared/clock/static_time.rs b/packages/clock/src/static_time/mod.rs
similarity index 100%
rename from src/shared/clock/static_time.rs
rename to packages/clock/src/static_time/mod.rs
diff --git a/src/shared/clock/time_extent.rs b/packages/clock/src/time_extent/mod.rs
similarity index 85%
rename from src/shared/clock/time_extent.rs
rename to packages/clock/src/time_extent/mod.rs
index a5a359e52..c51849f21 100644
--- a/src/shared/clock/time_extent.rs
+++ b/packages/clock/src/time_extent/mod.rs
@@ -65,7 +65,7 @@ use std::num::{IntErrorKind, TryFromIntError};
 use std::time::Duration;
 
-use super::{Stopped, TimeNow, Type, Working};
+use crate::clock::{self, Stopped, Working};
 
 /// This trait defines the operations that can be performed on a `TimeExtent`.
 pub trait Extent: Sized + Default {
@@ -199,10 +199,10 @@ impl Extent for TimeExtent {
 /// It gives you the time in time extents.
 pub trait Make<Clock>: Sized
 where
-    Clock: TimeNow,
+    Clock: clock::Time,
 {
     /// It gives you the current time extent (with a certain increment) for
-    /// the current time. It gets the current timestamp front he `Clock`.
+    /// the current time. It gets the current timestamp from the `Clock`.
     ///
     /// For example:
     ///
@@ -223,12 +223,12 @@ where
         })
     }
 
-    /// Same as [`now`](crate::shared::clock::time_extent::Make::now), but it
+    /// Same as [`now`](crate::time_extent::Make::now), but it
     /// will add an extra duration to the current time before calculating the
     /// time extent. It gives you a time extent for a time in the future.
     #[must_use]
     fn now_after(increment: &Base, add_time: &Duration) -> Option<Result<TimeExtent, TryFromIntError>> {
-        match Clock::add(add_time) {
+        match Clock::now_add(add_time) {
             None => None,
             Some(time) => time
                 .as_nanos()
@@ -240,12 +240,12 @@ where
         }
     }
 
-    /// Same as [`now`](crate::shared::clock::time_extent::Make::now), but it
+    /// Same as [`now`](crate::time_extent::Make::now), but it
     /// will subtract a duration to the current time before calculating the
     /// time extent. It gives you a time extent for a time in the past.
     #[must_use]
     fn now_before(increment: &Base, sub_time: &Duration) -> Option<Result<TimeExtent, TryFromIntError>> {
-        match Clock::sub(sub_time) {
+        match Clock::now_sub(sub_time) {
             None => None,
             Some(time) => time
                 .as_nanos()
@@ -262,38 +262,30 @@ where
 ///
 /// It's a clock which measures time in `TimeExtents`.
 #[derive(Debug)]
-pub struct Maker<const CLOCK_TYPE: usize> {}
+pub struct Maker<Clock> {
+    clock: std::marker::PhantomData<Clock>,
+}
 
 /// A `TimeExtent` maker which makes `TimeExtents` from the `Working` clock.
-pub type WorkingTimeExtentMaker = Maker<{ Type::WorkingClock as usize }>;
+pub type WorkingTimeExtentMaker = Maker<Working>;
 
 /// A `TimeExtent` maker which makes `TimeExtents` from the `Stopped` clock.
-pub type StoppedTimeExtentMaker = Maker<{ Type::StoppedClock as usize }>;
-
-impl Make<Working> for WorkingTimeExtentMaker {}
-impl Make<Stopped> for StoppedTimeExtentMaker {}
+pub type StoppedTimeExtentMaker = Maker<Stopped>;
 
-/// The default `TimeExtent` maker. It is `WorkingTimeExtentMaker` in production
-/// and `StoppedTimeExtentMaker` in tests.
-#[cfg(not(test))]
-pub type DefaultTimeExtentMaker = WorkingTimeExtentMaker;
-
-/// The default `TimeExtent` maker. It is `WorkingTimeExtentMaker` in production
-/// and `StoppedTimeExtentMaker` in tests.
-#[cfg(test)]
-pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker;
+impl Make<Working> for WorkingTimeExtentMaker {}
+impl Make<Stopped> for StoppedTimeExtentMaker {}
 
 #[cfg(test)]
 mod test {
-    use crate::shared::clock::time_extent::TimeExtent;
+    use crate::time_extent::TimeExtent;
 
     const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239_812_388_723);
 
     mod fn_checked_duration_from_nanos {
         use std::time::Duration;
 
-        use crate::shared::clock::time_extent::checked_duration_from_nanos;
-        use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL;
+        use crate::time_extent::checked_duration_from_nanos;
+        use crate::time_extent::test::TIME_EXTENT_VAL;
 
         const NANOS_PER_SEC: u32 = 1_000_000_000;
 
@@ -334,7 +326,7 @@ mod test {
 
     mod time_extent {
         mod fn_default {
-            use crate::shared::clock::time_extent::{TimeExtent, ZERO};
+            use crate::time_extent::{TimeExtent, ZERO};
 
             #[test]
             fn it_should_default_initialize_to_zero() {
@@ -343,8 +335,8 @@ mod test {
         }
 
         mod fn_from_sec {
-            use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL;
-            use crate::shared::clock::time_extent::{Multiplier, TimeExtent, ZERO};
+            use crate::time_extent::test::TIME_EXTENT_VAL;
+            use crate::time_extent::{Multiplier, TimeExtent, ZERO};
 
             #[test]
             fn it_should_make_empty_for_zero() {
@@ -360,8 +352,8 @@ mod test {
         }
 
         mod fn_new {
-            use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL;
-            use crate::shared::clock::time_extent::{Base, Extent, Multiplier, TimeExtent, ZERO};
+            use crate::time_extent::test::TIME_EXTENT_VAL;
+            use crate::time_extent::{Base, Extent, Multiplier, TimeExtent, ZERO};
 
             #[test]
             fn it_should_make_empty_for_zero() {
@@ -383,8 +375,8 @@ mod test {
     mod fn_increase {
         use std::num::IntErrorKind;
 
-        use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL;
-        use crate::shared::clock::time_extent::{Extent, TimeExtent, ZERO};
+        use crate::time_extent::test::TIME_EXTENT_VAL;
+        use crate::time_extent::{Extent, TimeExtent, ZERO};
 
         #[test]
         fn it_should_not_increase_for_zero() {
@@ -411,8 +403,8 @@ mod test {
     mod fn_decrease {
         use std::num::IntErrorKind;
 
-        use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL;
-        use crate::shared::clock::time_extent::{Extent, TimeExtent, ZERO};
+        use crate::time_extent::test::TIME_EXTENT_VAL;
+        use crate::time_extent::{Extent, TimeExtent, ZERO};
 
         #[test]
         fn it_should_not_decrease_for_zero() {
@@ -437,8 +429,8 @@ mod test {
     }
 
     mod fn_total {
-        use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL;
-        use crate::shared::clock::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO};
+        use crate::time_extent::test::TIME_EXTENT_VAL;
+        use crate::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO};
 
         #[test]
         fn it_should_be_zero_for_zero() {
@@ -485,8 +477,8 @@ mod test {
     }
 
     mod fn_total_next {
-        use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL;
-        use crate::shared::clock::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO};
+        use crate::time_extent::test::TIME_EXTENT_VAL;
+        use crate::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO};
 
         #[test]
         fn it_should_be_zero_for_zero() {
@@ -542,9 +534,12 @@ mod test {
 
     mod make_time_extent {
         mod fn_now {
-            use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL;
-            use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make, TimeExtent};
-            use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime};
+            use torrust_tracker_primitives::DurationSinceUnixEpoch;
+
+            use crate::clock::stopped::Stopped as _;
+            use crate::time_extent::test::TIME_EXTENT_VAL;
+            use crate::time_extent::{Base, Make, TimeExtent};
+            use crate::{CurrentClock, DefaultTimeExtentMaker};
 
             #[test]
             fn it_should_give_a_time_extent() {
@@ -556,7 +551,7 @@ mod test {
                 }
             );
 
-                Current::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2));
+                CurrentClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2));
 
                 assert_eq!(
                     DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(),
@@ -571,7 +566,7 @@ mod test {
 
             #[test]
             fn it_should_fail_if_amount_exceeds_bounds() {
-                Current::local_set(&DurationSinceUnixEpoch::MAX);
+                CurrentClock::local_set(&DurationSinceUnixEpoch::MAX);
                 assert_eq!(
                     DefaultTimeExtentMaker::now(&Base::from_millis(1)).unwrap().unwrap_err(),
                     u64::try_from(u128::MAX).unwrap_err()
@@ -582,9 +577,12 @@ mod test {
         mod fn_now_after {
             use std::time::Duration;
 
-            use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL;
-            use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make};
-            use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime};
+            use torrust_tracker_primitives::DurationSinceUnixEpoch;
+
+            use crate::clock::stopped::Stopped as _;
+            use crate::time_extent::test::TIME_EXTENT_VAL;
+            use crate::time_extent::{Base, Make};
+            use crate::{CurrentClock, DefaultTimeExtentMaker};
 
             #[test]
             fn it_should_give_a_time_extent() {
@@ -603,13 +601,13 @@ mod test {
             fn it_should_fail_for_zero() {
                 assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::ZERO), None);
 
-                Current::local_set(&DurationSinceUnixEpoch::MAX);
+                CurrentClock::local_set(&DurationSinceUnixEpoch::MAX);
                 assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::MAX), None);
             }
 
             #[test]
             fn it_should_fail_if_amount_exceeds_bounds() {
-                Current::local_set(&DurationSinceUnixEpoch::MAX);
+                CurrentClock::local_set(&DurationSinceUnixEpoch::MAX);
                 assert_eq!(
                     DefaultTimeExtentMaker::now_after(&Base::from_millis(1), &Duration::ZERO)
                         .unwrap()
@@ -621,12 +619,15 @@ mod test {
         mod fn_now_before {
             use std::time::Duration;
 
-            use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make, TimeExtent};
-            use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime};
+            use torrust_tracker_primitives::DurationSinceUnixEpoch;
+
+            use crate::clock::stopped::Stopped as _;
+            use crate::time_extent::{Base, Make, TimeExtent};
+            use crate::{CurrentClock, DefaultTimeExtentMaker};
 
             #[test]
             fn it_should_give_a_time_extent() {
-                Current::local_set(&DurationSinceUnixEpoch::MAX);
+                CurrentClock::local_set(&DurationSinceUnixEpoch::MAX);
 
                 assert_eq!(
                     DefaultTimeExtentMaker::now_before(
@@ -651,7 +652,7 @@ mod test {
 
             #[test]
             fn it_should_fail_if_amount_exceeds_bounds() {
-                Current::local_set(&DurationSinceUnixEpoch::MAX);
+                CurrentClock::local_set(&DurationSinceUnixEpoch::MAX);
                 assert_eq!(
                     DefaultTimeExtentMaker::now_before(&Base::from_millis(1), &Duration::ZERO)
                         .unwrap()
diff --git a/packages/clock/tests/clock/mod.rs b/packages/clock/tests/clock/mod.rs
new file mode 100644
index 000000000..5d94bb83d
--- /dev/null
+++ b/packages/clock/tests/clock/mod.rs
@@ -0,0 +1,16 @@
+use std::time::Duration;
+
+use torrust_tracker_clock::clock::Time;
+
+use crate::CurrentClock;
+
+#[test]
+fn it_should_use_stopped_time_for_testing() {
+    assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned());
+
+    let time = CurrentClock::now();
+    std::thread::sleep(Duration::from_millis(50));
+    let time_2 = CurrentClock::now();
+
+    assert_eq!(time, time_2);
+}
diff --git a/packages/clock/tests/integration.rs b/packages/clock/tests/integration.rs
new file mode 100644
index 000000000..fa500227a
--- /dev/null
+++ b/packages/clock/tests/integration.rs
@@ -0,0 +1,19 @@
+//! Integration tests.
+//!
+//! ```text
+//! cargo test --test integration
+//! ```
+
+//mod common;
+mod clock;
+
+/// This code needs to be copied into each crate.
+/// Working version, for production.
+#[cfg(not(test))]
+#[allow(dead_code)]
+pub(crate) type CurrentClock = torrust_tracker_clock::clock::Working;
+
+/// Stopped version, for testing.
+#[cfg(test)]
+#[allow(dead_code)]
+pub(crate) type CurrentClock = torrust_tracker_clock::clock::Stopped;
diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs
index 4068c046f..ca873f3cd 100644
--- a/packages/configuration/src/lib.rs
+++ b/packages/configuration/src/lib.rs
@@ -243,6 +243,16 @@ use thiserror::Error;
 use torrust_tracker_located_error::{DynError, Located, LocatedError};
 use torrust_tracker_primitives::{DatabaseDriver, TrackerMode};
 
+/// The maximum number of returned peers for a torrent.
+pub const TORRENT_PEERS_LIMIT: usize = 74;
+
+#[derive(Copy, Clone, Debug, PartialEq, Constructor)]
+pub struct TrackerPolicy {
+    pub remove_peerless_torrents: bool,
+    pub max_peer_timeout: u32,
+    pub persistent_torrent_completed_stat: bool,
+}
+
 /// Information required for loading config
 #[derive(Debug, Default, Clone)]
 pub struct Info {
diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml
index efcce71a9..3b2406a69 100644
--- a/packages/primitives/Cargo.toml
+++ b/packages/primitives/Cargo.toml
@@ -16,4 +16,8 @@ version.workspace = true
 
 [dependencies]
 derive_more = "0"
+thiserror = "1"
+binascii = "0"
 serde = { version = "1", features = ["derive"] }
+tdyne-peer-id = "1"
+tdyne-peer-id-registry = "0"
\ No newline at end of file
diff --git a/packages/primitives/src/announce_event.rs b/packages/primitives/src/announce_event.rs
new file mode 100644
index 000000000..3bd560084
--- /dev/null
+++ b/packages/primitives/src/announce_event.rs
@@ -0,0 +1,45 @@
+//! Copyright (c) 2020-2023 Joakim Frostegård and The Torrust Developers
+//!
+//! Distributed under Apache 2.0 license
+
+use serde::{Deserialize, Serialize};
+
+/// Announce events. Described on the
+/// [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html)
+#[derive(Hash, Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
+pub enum AnnounceEvent {
+    /// The peer has started downloading the torrent.
+    Started,
+    /// The peer has ceased downloading the torrent.
+    Stopped,
+    /// The peer has completed downloading the torrent.
+    Completed,
+    /// This is one of the announcements done at regular intervals.
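+    /// (the integer wire values used by the UDP tracker protocol, BEP 15,
+    /// are produced and parsed by `to_i32` and `from_i32` below)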
+    None,
+}
+
+impl AnnounceEvent {
+    #[inline]
+    #[must_use]
+    pub fn from_i32(i: i32) -> Self {
+        match i {
+            1 => Self::Completed,
+            2 => Self::Started,
+            3 => Self::Stopped,
+            _ => Self::None,
+        }
+    }
+
+    #[inline]
+    #[must_use]
+    pub fn to_i32(&self) -> i32 {
+        match self {
+            AnnounceEvent::None => 0,
+            AnnounceEvent::Completed => 1,
+            AnnounceEvent::Started => 2,
+            AnnounceEvent::Stopped => 3,
+        }
+    }
+}
diff --git a/packages/primitives/src/info_hash.rs b/packages/primitives/src/info_hash.rs
new file mode 100644
index 000000000..a07cc41a2
--- /dev/null
+++ b/packages/primitives/src/info_hash.rs
@@ -0,0 +1,184 @@
+use std::hash::{DefaultHasher, Hash, Hasher};
+use std::panic::Location;
+
+use thiserror::Error;
+
+/// `BitTorrent` Info Hash v1
+#[derive(PartialEq, Eq, Hash, Clone, Copy, Default, Debug)]
+pub struct InfoHash(pub [u8; 20]);
+
+pub const INFO_HASH_BYTES_LEN: usize = 20;
+
+impl InfoHash {
+    /// Create a new `InfoHash` from a byte slice.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if the byte slice does not contain the exact number of bytes needed for the `InfoHash`.
+    #[must_use]
+    pub fn from_bytes(bytes: &[u8]) -> Self {
+        assert_eq!(bytes.len(), INFO_HASH_BYTES_LEN);
+        let mut ret = Self([0u8; INFO_HASH_BYTES_LEN]);
+        ret.0.clone_from_slice(bytes);
+        ret
+    }
+
+    /// Returns the `InfoHash` internal byte array.
+    #[must_use]
+    pub fn bytes(&self) -> [u8; 20] {
+        self.0
+    }
+
+    /// Returns the `InfoHash` as a hex string.
+    #[must_use]
+    pub fn to_hex_string(&self) -> String {
+        self.to_string()
+    }
+}
+
+impl Ord for InfoHash {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.0.cmp(&other.0)
+    }
+}
+
+impl std::cmp::PartialOrd for InfoHash {
+    fn partial_cmp(&self, other: &InfoHash) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl std::fmt::Display for InfoHash {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let mut chars = [0u8; 40];
+        binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify");
+        write!(f, "{}", std::str::from_utf8(&chars).unwrap())
+    }
+}
+
+impl std::str::FromStr for InfoHash {
+    type Err = binascii::ConvertError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let mut i = Self([0u8; 20]);
+        if s.len() != 40 {
+            return Err(binascii::ConvertError::InvalidInputLength);
+        }
+        binascii::hex2bin(s.as_bytes(), &mut i.0)?;
+        Ok(i)
+    }
+}
+
+impl std::convert::From<&[u8]> for InfoHash {
+    fn from(data: &[u8]) -> InfoHash {
+        assert_eq!(data.len(), 20);
+        let mut ret = InfoHash([0u8; 20]);
+        ret.0.clone_from_slice(data);
+        ret
+    }
+}
+
+/// for testing
+impl std::convert::From<&DefaultHasher> for InfoHash {
+    fn from(data: &DefaultHasher) -> InfoHash {
+        let n = data.finish().to_le_bytes();
+        InfoHash([
+            n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[0], n[1], n[2],
+            n[3],
+        ])
+    }
+}
+
+impl std::convert::From<&i32> for InfoHash {
+    fn from(n: &i32) -> InfoHash {
+        let n = n.to_le_bytes();
+        InfoHash([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, n[0], n[1], n[2], n[3]])
+    }
+}
+
+impl std::convert::From<[u8; 20]> for InfoHash {
+    fn from(val: [u8; 20]) -> Self {
+        InfoHash(val)
+    }
+}
+
+/// Errors that can occur when converting from a `Vec<u8>` to an `InfoHash`.
+#[derive(Error, Debug)]
+pub enum ConversionError {
+    /// Not enough bytes for infohash. An infohash is 20 bytes.
+    #[error("not enough bytes for infohash: {message} {location}")]
+    NotEnoughBytes {
+        location: &'static Location<'static>,
+        message: String,
+    },
+    /// Too many bytes for infohash. An infohash is 20 bytes.
+    #[error("too many bytes for infohash: {message} {location}")]
+    TooManyBytes {
+        location: &'static Location<'static>,
+        message: String,
+    },
+}
+
+impl TryFrom<Vec<u8>> for InfoHash {
+    type Error = ConversionError;
+
+    fn try_from(bytes: Vec<u8>) -> Result<Self, Self::Error> {
+        if bytes.len() < INFO_HASH_BYTES_LEN {
+            return Err(ConversionError::NotEnoughBytes {
+                location: Location::caller(),
+                message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN},
+            });
+        }
+        if bytes.len() > INFO_HASH_BYTES_LEN {
+            return Err(ConversionError::TooManyBytes {
+                location: Location::caller(),
+                message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN},
+            });
+        }
+        Ok(Self::from_bytes(&bytes))
+    }
+}
+
+impl serde::ser::Serialize for InfoHash {
+    fn serialize<S: serde::ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+        let mut buffer = [0u8; 40];
+        let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap();
+        let str_out = std::str::from_utf8(bytes_out).unwrap();
+        serializer.serialize_str(str_out)
+    }
+}
+
+impl<'de> serde::de::Deserialize<'de> for InfoHash {
+    fn deserialize<D: serde::de::Deserializer<'de>>(des: D) -> Result<Self, D::Error> {
+        des.deserialize_str(InfoHashVisitor)
+    }
+}
+
+struct InfoHashVisitor;
+
+impl<'v> serde::de::Visitor<'v> for InfoHashVisitor {
+    type Value = InfoHash;
+
+    fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(formatter, "a 40 character long hash")
+    }
+
+    fn visit_str<E: serde::de::Error>(self, v: &str) -> Result<Self::Value, E> {
+        if v.len() != 40 {
+            return Err(serde::de::Error::invalid_value(
+                serde::de::Unexpected::Str(v),
+                &"a 40 character long string",
+            ));
+        }
+
+        let mut res = InfoHash([0u8; 20]);
+
+        if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() {
+            return Err(serde::de::Error::invalid_value(
+                serde::de::Unexpected::Str(v),
+                &"a hexadecimal string",
+            ));
+        };
+        Ok(res)
+    }
+}
diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs
index f6a14b9e8..aeb4d0d4e 100644
--- a/packages/primitives/src/lib.rs
+++ b/packages/primitives/src/lib.rs
@@ -4,8 +4,44 @@
 //! which is a `BitTorrent` tracker server. These structures are used not only
 //! by the tracker server crate, but also by other crates in the Torrust
 //! ecosystem.
+use std::collections::BTreeMap;
+use std::time::Duration;
+
+use info_hash::InfoHash;
 use serde::{Deserialize, Serialize};
 
+pub mod announce_event;
+pub mod info_hash;
+pub mod pagination;
+pub mod peer;
+pub mod swarm_metadata;
+pub mod torrent_metrics;
+
+/// Duration since the Unix Epoch.
+pub type DurationSinceUnixEpoch = Duration;
+
+/// Serializes a `DurationSinceUnixEpoch` as a Unix timestamp in milliseconds.
+/// # Errors
+///
+/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`.
+pub fn ser_unix_time_value<S: serde::Serializer>(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result<S::Ok, S::Error> {
+    #[allow(clippy::cast_possible_truncation)]
+    ser.serialize_u64(unix_time_value.as_millis() as u64)
+}
+
+/// IP version used by the peer to connect to the tracker: IPv4 or IPv6
+#[derive(PartialEq, Eq, Debug)]
+pub enum IPVersion {
+    ///
+    IPv4,
+    ///
+    IPv6,
+}
+
+/// Number of bytes downloaded, uploaded or pending to download (left) by the peer.
+#[derive(Hash, Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
+pub struct NumberOfBytes(pub i64);
+
 /// The database management system used by the tracker.
 ///
 /// Refer to:
@@ -23,6 +59,8 @@ pub enum DatabaseDriver {
     MySQL,
 }
 
+pub type PersistentTorrents = BTreeMap<InfoHash, u32>;
+
 /// The mode the tracker will run in.
 ///
 /// Refer to [Torrust Tracker Configuration](https://docs.rs/torrust-tracker-configuration)
diff --git a/packages/primitives/src/pagination.rs b/packages/primitives/src/pagination.rs
new file mode 100644
index 000000000..96b5ad662
--- /dev/null
+++ b/packages/primitives/src/pagination.rs
@@ -0,0 +1,46 @@
+use derive_more::Constructor;
+use serde::Deserialize;
+
+/// A struct to keep information about the page when results are being paginated
+#[derive(Deserialize, Copy, Clone, Debug, PartialEq, Constructor)]
+pub struct Pagination {
+    /// The page number, starting at 0
+    pub offset: u32,
+    /// Page size. The number of results per page
+    pub limit: u32,
+}
+
+impl Pagination {
+    #[must_use]
+    pub fn new_with_options(offset_option: Option<u32>, limit_option: Option<u32>) -> Self {
+        let offset = match offset_option {
+            Some(offset) => offset,
+            None => Pagination::default_offset(),
+        };
+        let limit = match limit_option {
+            Some(limit) => limit,
+            None => Pagination::default_limit(),
+        };
+
+        Self { offset, limit }
+    }
+
+    #[must_use]
+    pub fn default_offset() -> u32 {
+        0
+    }
+
+    #[must_use]
+    pub fn default_limit() -> u32 {
+        4000
+    }
+}
+
+impl Default for Pagination {
+    fn default() -> Self {
+        Self {
+            offset: Self::default_offset(),
+            limit: Self::default_limit(),
+        }
+    }
+}
diff --git a/src/core/peer.rs b/packages/primitives/src/peer.rs
similarity index 81%
rename from src/core/peer.rs
rename to packages/primitives/src/peer.rs
index 16aa1fe56..f5b009f2a 100644
--- a/src/core/peer.rs
+++ b/packages/primitives/src/peer.rs
@@ -3,12 +3,13 @@
 //! A sample peer:
 //!
 //! ```rust,no_run
-//! use torrust_tracker::core::peer;
+//! use torrust_tracker_primitives::peer;
 //! use std::net::SocketAddr;
 //! use std::net::IpAddr;
 //! use std::net::Ipv4Addr;
-//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch;
-//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes};
+//! use torrust_tracker_primitives::DurationSinceUnixEpoch;
+//! use torrust_tracker_primitives::announce_event::AnnounceEvent;
+//! use torrust_tracker_primitives::NumberOfBytes;
 //!
 //! peer::Peer {
 //!     peer_id: peer::Id(*b"-qB00000000000000000"),
@@ -20,37 +21,26 @@
 //!     event: AnnounceEvent::Started,
 //! };
 //! ```
+
 use std::net::{IpAddr, SocketAddr};
-use std::panic::Location;
+use std::sync::Arc;
 
-use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes};
 use serde::Serialize;
-use thiserror::Error;
 
-use crate::shared::bit_torrent::common::{AnnounceEventDef, NumberOfBytesDef};
-use crate::shared::clock::utils::ser_unix_time_value;
-use crate::shared::clock::DurationSinceUnixEpoch;
-
-/// IP version used by the peer to connect to the tracker: IPv4 or IPv6
-#[derive(PartialEq, Eq, Debug)]
-pub enum IPVersion {
-    ///
-    IPv4,
-    ///
-    IPv6,
-}
+use crate::announce_event::AnnounceEvent;
+use crate::{ser_unix_time_value, DurationSinceUnixEpoch, IPVersion, NumberOfBytes};
 
 /// Peer struct used by the core `Tracker`.
/// Peer struct used by the core `Tracker`. /// /// A sample peer: /// /// ```rust,no_run -/// use torrust_tracker::core::peer; +/// use torrust_tracker_primitives::peer; /// use std::net::SocketAddr; /// use std::net::IpAddr; /// use std::net::Ipv4Addr; -/// use torrust_tracker::shared::clock::DurationSinceUnixEpoch; -/// use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +/// use torrust_tracker_primitives::DurationSinceUnixEpoch; +/// use torrust_tracker_primitives::announce_event::AnnounceEvent; +/// use torrust_tracker_primitives::NumberOfBytes; /// /// peer::Peer { /// peer_id: peer::Id(*b"-qB00000000000000000"), @@ -62,7 +51,7 @@ pub enum IPVersion { /// event: AnnounceEvent::Started, /// }; /// ``` -#[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] +#[derive(Debug, Clone, Serialize, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Peer { /// ID used by the downloader peer pub peer_id: Id, @@ -72,19 +61,67 @@ pub struct Peer { #[serde(serialize_with = "ser_unix_time_value")] pub updated: DurationSinceUnixEpoch, /// The total amount of bytes uploaded by this peer so far - #[serde(with = "NumberOfBytesDef")] pub uploaded: NumberOfBytes, /// The total amount of bytes downloaded by this peer so far - #[serde(with = "NumberOfBytesDef")] pub downloaded: NumberOfBytes, /// The number of bytes this peer still has to download - #[serde(with = "NumberOfBytesDef")] pub left: NumberOfBytes, /// This is an optional key which maps to started, completed, or stopped (or empty, which is the same as not being present). - #[serde(with = "AnnounceEventDef")] pub event: AnnounceEvent, } +pub trait ReadInfo { + fn is_seeder(&self) -> bool; + fn get_event(&self) -> AnnounceEvent; + fn get_id(&self) -> Id; + fn get_updated(&self) -> DurationSinceUnixEpoch; + fn get_address(&self) -> SocketAddr; +} + +impl ReadInfo for Peer { + fn is_seeder(&self) -> bool { + self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + } + + fn get_event(&self) -> AnnounceEvent { + self.event + } + + fn get_id(&self) -> Id { + self.peer_id + } + + fn get_updated(&self) -> DurationSinceUnixEpoch { + self.updated + } + + fn get_address(&self) -> SocketAddr { + self.peer_addr + } +} + +impl ReadInfo for Arc<Peer> { + fn is_seeder(&self) -> bool { + self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + } + + fn get_event(&self) -> AnnounceEvent { + self.event + } + + fn get_id(&self) -> Id { + self.peer_id + } + + fn get_updated(&self) -> DurationSinceUnixEpoch { + self.updated + } + + fn get_address(&self) -> SocketAddr { + self.peer_addr + } +} + impl Peer { #[must_use] pub fn is_seeder(&self) -> bool { @@ -109,22 +146,9 @@ impl Peer { } } -/// Peer ID. A 20-byte array. -/// -/// A string of length 20 which this downloader uses as its id. -/// Each downloader generates its own id at random at the start of a new download. -/// -/// A sample peer ID: -/// -/// ```rust,no_run -/// use torrust_tracker::core::peer; -/// -/// let peer_id = peer::Id(*b"-qB00000000000000000"); -/// ``` -#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] -pub struct Id(pub [u8; 20]); +use std::panic::Location; -const PEER_ID_BYTES_LEN: usize = 20; +use thiserror::Error; /// Error returned when trying to convert an invalid peer id from another type. /// @@ -143,36 +167,22 @@ pub enum IdConversionError { }, }
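Because `ReadInfo` above is implemented for both `Peer` and `Arc<Peer>`, helpers can stay agnostic about ownership; a sketch relying on the fixture `Default` impl defined later in this file:

```rust
use std::sync::Arc;
use torrust_tracker_primitives::peer::{Peer, ReadInfo};

// Counts seeders in any collection whose items expose `ReadInfo`.
fn count_seeders<P: ReadInfo>(peers: &[P]) -> usize {
    peers.iter().filter(|p| p.is_seeder()).count()
}

fn main() {
    let peer = Peer::default(); // fixture default: left == 0, event == Started
    assert_eq!(count_seeders(&[peer]), 1);
    assert_eq!(count_seeders(&[Arc::new(peer)]), 1);
}
```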
-impl Id { - /// # Panics - /// - /// Will panic if byte slice does not contains the exact amount of bytes need for the `Id`. - #[must_use] - pub fn from_bytes(bytes: &[u8]) -> Self { - assert_eq!( - PEER_ID_BYTES_LEN, - bytes.len(), - "we are testing the equality of the constant: `PEER_ID_BYTES_LEN` ({}) and the supplied `bytes` length: {}", - PEER_ID_BYTES_LEN, - bytes.len(), - ); - let mut ret = Self([0u8; PEER_ID_BYTES_LEN]); - ret.0.clone_from_slice(bytes); - ret - } - - #[must_use] - pub fn to_bytes(&self) -> [u8; 20] { - self.0 - } -} - impl From<[u8; 20]> for Id { fn from(bytes: [u8; 20]) -> Self { Id(bytes) } } +impl From<i32> for Id { + fn from(number: i32) -> Self { + let peer_id = number.to_le_bytes(); + Id::from([ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], peer_id[2], + peer_id[3], + ]) + } +} + impl TryFrom<Vec<u8>> for Id { type Error = IdConversionError; @@ -210,7 +220,47 @@ impl std::fmt::Display for Id { } } +/// Peer ID. A 20-byte array. +/// +/// A string of length 20 which this downloader uses as its id. +/// Each downloader generates its own id at random at the start of a new download. +/// +/// A sample peer ID: +/// +/// ```rust,no_run +/// use torrust_tracker_primitives::peer; +/// +/// let peer_id = peer::Id(*b"-qB00000000000000000"); +/// ``` +/// +#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] +pub struct Id(pub [u8; 20]); + +pub const PEER_ID_BYTES_LEN: usize = 20; + impl Id { + /// # Panics + /// + /// Will panic if the byte slice does not contain the exact number of bytes needed for the `Id`. + #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!( + PEER_ID_BYTES_LEN, + bytes.len(), + "we are testing the equality of the constant: `PEER_ID_BYTES_LEN` ({}) and the supplied `bytes` length: {}", + PEER_ID_BYTES_LEN, + bytes.len(), + ); + let mut ret = Self([0u8; PEER_ID_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } + + #[must_use] + pub fn to_bytes(&self) -> [u8; 20] { + self.0 + } + #[must_use] /// Converts to hex string. /// @@ -276,12 +326,27 @@ impl Serialize for Id { } } -pub mod fixture { - use std::net::SocketAddr; +/// Marker Trait for Peer Vectors +pub trait Encoding: From<Peer> + PartialEq {} + +impl<P: Encoding> FromIterator<Peer> for Vec<P> { + fn from_iter<T: IntoIterator<Item = Peer>>(iter: T) -> Self { + let mut peers: Vec<P>
= vec![]; + + for peer in iter { + peers.push(peer.into()); + } - use aquatic_udp_protocol::NumberOfBytes; + peers + } +} + +pub mod fixture { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use super::{Id, Peer}; + use crate::announce_event::AnnounceEvent; + use crate::{DurationSinceUnixEpoch, NumberOfBytes}; #[derive(PartialEq, Debug)] @@ -341,22 +406,28 @@ pub mod fixture { impl Default for Peer { fn default() -> Self { Self { - peer_id: Id(*b"-qB00000000000000000"), - peer_addr: std::net::SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: crate::shared::clock::DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + peer_id: Id::default(), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), - event: aquatic_udp_protocol::AnnounceEvent::Started, + event: AnnounceEvent::Started, } } } + + impl Default for Id { + fn default() -> Self { + Self(*b"-qB00000000000000000") + } + } } #[cfg(test)] pub mod test { mod torrent_peer_id { - use crate::core::peer; + use crate::peer; #[test] fn should_be_instantiated_from_a_byte_slice() { @@ -465,50 +536,4 @@ pub mod test { assert_eq!(peer::Id(*b"-qB00000000000000000").to_bytes(), *b"-qB00000000000000000"); } } - - mod torrent_peer { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use serde_json::Value; - - use crate::core::peer::{self, Peer}; - use crate::shared::clock::{Current, Time}; - - #[test] - fn it_should_be_serializable() { - let torrent_peer = Peer { - peer_id: peer::Id(*b"-qB0000-000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: Current::now(), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - - let raw_json = serde_json::to_string(&torrent_peer).unwrap(); - - let expected_raw_json = r#" - { - "peer_id": { - "id": "0x2d7142303030302d303030303030303030303030", - "client": "qBittorrent" - }, - "peer_addr":"126.0.0.1:8080", - "updated":0, - "uploaded":0, - "downloaded":0, - "left":0, - "event":"Started" - } - "#; - - assert_eq!( - serde_json::from_str::(&raw_json).unwrap(), - serde_json::from_str::(expected_raw_json).unwrap() - ); - } - } } diff --git a/packages/primitives/src/swarm_metadata.rs b/packages/primitives/src/swarm_metadata.rs new file mode 100644 index 000000000..ca880b54d --- /dev/null +++ b/packages/primitives/src/swarm_metadata.rs @@ -0,0 +1,22 @@ +use derive_more::Constructor; + +/// Swarm statistics for one torrent. +/// Swarm metadata dictionary in the scrape response. 
+/// +/// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] +pub struct SwarmMetadata { + /// (i.e `completed`): The number of peers that have ever completed downloading + pub downloaded: u32, // + /// (i.e `seeders`): The number of active peers that have completed downloading (seeders) + pub complete: u32, //seeders + /// (i.e `leechers`): The number of active peers that have not completed downloading (leechers) + pub incomplete: u32, +} + +impl SwarmMetadata { + #[must_use] + pub fn zeroed() -> Self { + Self::default() + } +} diff --git a/packages/primitives/src/torrent_metrics.rs b/packages/primitives/src/torrent_metrics.rs new file mode 100644 index 000000000..02de02954 --- /dev/null +++ b/packages/primitives/src/torrent_metrics.rs @@ -0,0 +1,25 @@ +use std::ops::AddAssign; + +/// Structure that holds general `Tracker` torrents metrics. +/// +/// Metrics are aggregate values for all torrents. +#[derive(Copy, Clone, Debug, PartialEq, Default)] +pub struct TorrentsMetrics { + /// Total number of seeders for all torrents + pub complete: u64, + /// Total number of peers that have ever completed downloading for all torrents. + pub downloaded: u64, + /// Total number of leechers for all torrents. + pub incomplete: u64, + /// Total number of torrents. + pub torrents: u64, +} + +impl AddAssign for TorrentsMetrics { + fn add_assign(&mut self, rhs: Self) { + self.complete += rhs.complete; + self.downloaded += rhs.downloaded; + self.incomplete += rhs.incomplete; + self.torrents += rhs.torrents; + } +} diff --git a/packages/torrent-repository-benchmarks/Cargo.toml b/packages/torrent-repository-benchmarks/Cargo.toml deleted file mode 100644 index e8b22f52f..000000000 --- a/packages/torrent-repository-benchmarks/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -description = "A set of benchmarks for the torrent repository" -keywords = ["benchmarking", "library", "repository", "torrent"] -name = "torrust-torrent-repository-benchmarks" -readme = "README.md" - -authors.workspace = true -documentation.workspace = true -edition.workspace = true -homepage.workspace = true -license.workspace = true -publish.workspace = true -repository.workspace = true -rust-version.workspace = true -version.workspace = true - -[dependencies] -aquatic_udp_protocol = "0.8.0" -clap = { version = "4.4.8", features = ["derive"] } -futures = "0.3.29" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker = { path = "../../" } diff --git a/packages/torrent-repository-benchmarks/README.md b/packages/torrent-repository-benchmarks/README.md deleted file mode 100644 index 14183ea69..000000000 --- a/packages/torrent-repository-benchmarks/README.md +++ /dev/null @@ -1 +0,0 @@ -# Benchmarks of the torrent repository diff --git a/packages/torrent-repository-benchmarks/src/args.rs b/packages/torrent-repository-benchmarks/src/args.rs deleted file mode 100644 index 3a38c55a7..000000000 --- a/packages/torrent-repository-benchmarks/src/args.rs +++ /dev/null @@ -1,15 +0,0 @@ -use clap::Parser; - -#[derive(Parser, Debug)] -#[command(author, version, about, long_about = None)] -pub struct Args { - /// Amount of benchmark worker threads - #[arg(short, long)] - pub threads: usize, - /// Amount of time in ns a thread will sleep to simulate a client response after handling a task - #[arg(short, long)] - pub sleep: Option, - /// Compare with old implementations of the torrent 
repository - #[arg(short, long)] - pub compare: Option, -} diff --git a/packages/torrent-repository-benchmarks/src/benches/asyn.rs b/packages/torrent-repository-benchmarks/src/benches/asyn.rs deleted file mode 100644 index 33f9e85fa..000000000 --- a/packages/torrent-repository-benchmarks/src/benches/asyn.rs +++ /dev/null @@ -1,176 +0,0 @@ -use std::sync::Arc; -use std::time::Duration; - -use clap::Parser; -use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::TRepositoryAsync; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - -use crate::args::Args; -use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; - -pub async fn async_add_one_torrent(samples: usize) -> (Duration, Duration) { - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); - - let info_hash = InfoHash([0; 20]); - - let start_time = std::time::Instant::now(); - - torrent_repository - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; - - let result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} - -// Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn async_update_one_torrent_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); - let info_hash: &'static InfoHash = &InfoHash([0; 20]); - let handles = FuturesUnordered::new(); - - // Add the torrent/peer to the torrent repository - torrent_repository - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; - - let start_time = std::time::Instant::now(); - - for _ in 0..10_000 { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - let result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} - -// Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn async_add_multiple_torrents_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); - let info_hashes = generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); - - let start_time = std::time::Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all 
tasks - futures::future::join_all(handles).await; - - let result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} - -// Async update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn async_update_multiple_torrents_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); - let info_hashes = generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); - - // Add the torrents/peers to the torrent repository - for info_hash in &info_hashes { - torrent_repository - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; - } - - let start_time = std::time::Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - let result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} diff --git a/packages/torrent-repository-benchmarks/src/benches/sync.rs b/packages/torrent-repository-benchmarks/src/benches/sync.rs deleted file mode 100644 index dac7ab810..000000000 --- a/packages/torrent-repository-benchmarks/src/benches/sync.rs +++ /dev/null @@ -1,166 +0,0 @@ -use std::sync::Arc; -use std::time::Duration; - -use clap::Parser; -use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::Repository; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - -use crate::args::Args; -use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; - -// Simply add one torrent -#[must_use] -pub fn add_one_torrent(samples: usize) -> (Duration, Duration) { - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); - - let info_hash = InfoHash([0; 20]); - - let start_time = std::time::Instant::now(); - - torrent_repository.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); - - let result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} - -// Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); - let info_hash: &'static InfoHash = &InfoHash([0; 20]); - let handles = FuturesUnordered::new(); - - // Add the torrent/peer to the torrent repository - torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); - - let start_time = std::time::Instant::now(); - - for _ in 0..10_000 { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - 
torrent_repository_clone.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - let result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} - -// Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); - let info_hashes = generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); - - let start_time = std::time::Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - let result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} - -// Update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn update_multiple_torrents_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); - let info_hashes = generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); - - // Add the torrents/peers to the torrent repository - for info_hash in &info_hashes { - torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); - } - - let start_time = std::time::Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - let result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} diff --git a/packages/torrent-repository-benchmarks/src/benches/utils.rs b/packages/torrent-repository-benchmarks/src/benches/utils.rs deleted file mode 100644 index ef1640038..000000000 --- a/packages/torrent-repository-benchmarks/src/benches/utils.rs +++ /dev/null @@ -1,73 +0,0 @@ -use std::collections::HashSet; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::time::Duration; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use torrust_tracker::core::peer::{Id, Peer}; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -use 
torrust_tracker::shared::clock::DurationSinceUnixEpoch; - -pub const DEFAULT_PEER: Peer = Peer { - peer_id: Id([0; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::from_secs(0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, -}; - -#[must_use] -#[allow(clippy::missing_panics_doc)] -pub fn generate_unique_info_hashes(size: usize) -> Vec { - let mut result = HashSet::new(); - - let mut bytes = [0u8; 20]; - - #[allow(clippy::cast_possible_truncation)] - for i in 0..size { - bytes[0] = (i & 0xFF) as u8; - bytes[1] = ((i >> 8) & 0xFF) as u8; - bytes[2] = ((i >> 16) & 0xFF) as u8; - bytes[3] = ((i >> 24) & 0xFF) as u8; - - let info_hash = InfoHash(bytes); - result.insert(info_hash); - } - - assert_eq!(result.len(), size); - - result.into_iter().collect() -} - -#[must_use] -pub fn within_acceptable_range(test: &Duration, norm: &Duration) -> bool { - let test_secs = test.as_secs_f64(); - let norm_secs = norm.as_secs_f64(); - - // Calculate the upper and lower bounds for the 10% tolerance - let tolerance = norm_secs * 0.1; - - // Calculate the upper and lower limits - let upper_limit = norm_secs + tolerance; - let lower_limit = norm_secs - tolerance; - - test_secs < upper_limit && test_secs > lower_limit -} - -#[must_use] -pub fn get_average_and_adjusted_average_from_results(mut results: Vec) -> (Duration, Duration) { - #[allow(clippy::cast_possible_truncation)] - let average = results.iter().sum::() / results.len() as u32; - - results.retain(|result| within_acceptable_range(result, &average)); - - let mut adjusted_average = Duration::from_nanos(0); - - #[allow(clippy::cast_possible_truncation)] - if results.len() > 1 { - adjusted_average = results.iter().sum::() / results.len() as u32; - } - - (average, adjusted_average) -} diff --git a/packages/torrent-repository-benchmarks/src/lib.rs b/packages/torrent-repository-benchmarks/src/lib.rs deleted file mode 100644 index 58ebc2057..000000000 --- a/packages/torrent-repository-benchmarks/src/lib.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod args; -pub mod benches; diff --git a/packages/torrent-repository-benchmarks/src/main.rs b/packages/torrent-repository-benchmarks/src/main.rs deleted file mode 100644 index 0d9db73ac..000000000 --- a/packages/torrent-repository-benchmarks/src/main.rs +++ /dev/null @@ -1,139 +0,0 @@ -use clap::Parser; -use torrust_torrent_repository_benchmarks::args::Args; -use torrust_torrent_repository_benchmarks::benches::asyn::{ - async_add_multiple_torrents_in_parallel, async_add_one_torrent, async_update_multiple_torrents_in_parallel, - async_update_one_torrent_in_parallel, -}; -use torrust_torrent_repository_benchmarks::benches::sync::{ - add_multiple_torrents_in_parallel, add_one_torrent, update_multiple_torrents_in_parallel, update_one_torrent_in_parallel, -}; -use torrust_tracker::core::torrent::repository::{AsyncSync, RepositoryAsync, RepositoryAsyncSingle, Sync, SyncSingle}; - -#[allow(clippy::too_many_lines)] -#[allow(clippy::print_literal)] -fn main() { - let args = Args::parse(); - - // Add 1 to worker_threads since we need a thread that awaits the benchmark - let rt = tokio::runtime::Builder::new_multi_thread() - .worker_threads(args.threads + 1) - .enable_time() - .build() - .unwrap(); - - println!("tokio::sync::RwLock>"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - rt.block_on(async_add_one_torrent::(1_000_000)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - 
"update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) - ); - - if let Some(true) = args.compare { - println!(); - - println!("std::sync::RwLock>"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - add_one_torrent::(1_000_000) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) - ); - - println!(); - - println!("std::sync::RwLock>>>"); - println!("{}: Avg/AdjAvg: {:?}", "add_one_torrent", add_one_torrent::(1_000_000)); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) - ); - - println!(); - - println!("tokio::sync::RwLock>>>"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - rt.block_on(async_add_one_torrent::(1_000_000)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) - ); - - println!(); - - println!("tokio::sync::RwLock>>>"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - rt.block_on(async_add_one_torrent::(1_000_000)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) - ); - } -} diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml new file mode 100644 index 000000000..4cea8767f --- /dev/null +++ b/packages/torrent-repository/Cargo.toml @@ -0,0 +1,32 @@ +[package] +description = "A library that provides a repository of torrents files and their peers." 
+keywords = ["torrents", "repository", "library"] +name = "torrust-tracker-torrent-repository" +readme = "README.md" + +authors.workspace = true +categories.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +futures = "0.3.29" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } +torrust-tracker-clock = { version = "3.0.0-alpha.12-develop", path = "../clock" } + +[dev-dependencies] +criterion = { version = "0", features = ["async_tokio"] } +rstest = "0" +async-std = {version = "1", features = ["attributes", "tokio1"] } + +[[bench]] +harness = false +name = "repository_benchmark" diff --git a/packages/torrent-repository/README.md b/packages/torrent-repository/README.md new file mode 100644 index 000000000..98d7d922b --- /dev/null +++ b/packages/torrent-repository/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Torrent Repository + +A library that provides a torrent repository for the [Torrust Tracker](https://github.com/torrust/torrust-tracker). + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-torrent-repository). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/torrent-repository/benches/helpers/asyn.rs b/packages/torrent-repository/benches/helpers/asyn.rs new file mode 100644 index 000000000..80f70cdc2 --- /dev/null +++ b/packages/torrent-repository/benches/helpers/asyn.rs @@ -0,0 +1,155 @@ +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use futures::stream::FuturesUnordered; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_torrent_repository::repository::RepositoryAsync; + +use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; + +pub async fn add_one_torrent<V, T>(samples: u64) -> Duration +where + V: RepositoryAsync<T> + Default, +{ + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository = V::default(); + + let info_hash = InfoHash([0; 20]); + + torrent_repository + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; + } + + start.elapsed() +} + +// Add one torrent ten thousand times in parallel (depending on the set worker threads) +pub async fn update_one_torrent_in_parallel<V, T>(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option<u64>) -> Duration +where + V: RepositoryAsync<T> + Default, + Arc<V>: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::<V>::default(); + let info_hash: &'static InfoHash = &InfoHash([0; 20]); + let handles = FuturesUnordered::new(); + + // Add the torrent/peer to the torrent repository + torrent_repository + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; + + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle);
+ } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Add ten thousand torrents in parallel (depending on the set worker threads) +pub async fn add_multiple_torrents_in_parallel<V, T>( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option<u64>, +) -> Duration +where + V: RepositoryAsync<T> + Default, + Arc<V>: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::<V>::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize")); + let handles = FuturesUnordered::new(); + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Async update ten thousand torrents in parallel (depending on the set worker threads) +pub async fn update_multiple_torrents_in_parallel<V, T>( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option<u64>, +) -> Duration +where + V: RepositoryAsync<T> + Default, + Arc<V>: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::<V>::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize")); + let handles = FuturesUnordered::new(); + + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; + } + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} diff --git a/packages/torrent-repository-benchmarks/src/benches/mod.rs b/packages/torrent-repository/benches/helpers/mod.rs similarity index 100% rename from packages/torrent-repository-benchmarks/src/benches/mod.rs rename to packages/torrent-repository/benches/helpers/mod.rs diff --git a/packages/torrent-repository/benches/helpers/sync.rs b/packages/torrent-repository/benches/helpers/sync.rs new file mode 100644 index 000000000..0523f4141 --- /dev/null +++ b/packages/torrent-repository/benches/helpers/sync.rs @@ -0,0 +1,145 @@ +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use futures::stream::FuturesUnordered; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_torrent_repository::repository::Repository; + +use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; + +// Simply add one torrent +#[must_use] +pub fn add_one_torrent<V, T>(samples: u64) -> Duration +where + V: Repository<T> + Default, +{ + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository = V::default(); + + let info_hash = InfoHash([0; 20]); + + torrent_repository.update_torrent_with_peer_and_get_stats(&info_hash,
&DEFAULT_PEER); + } + + start.elapsed() +} + +// Add one torrent ten thousand times in parallel (depending on the set worker threads) +pub async fn update_one_torrent_in_parallel<V, T>(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option<u64>) -> Duration +where + V: Repository<T> + Default, + Arc<V>: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::<V>::default(); + let info_hash: &'static InfoHash = &InfoHash([0; 20]); + let handles = FuturesUnordered::new(); + + // Add the torrent/peer to the torrent repository + torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); + + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Add ten thousand torrents in parallel (depending on the set worker threads) +pub async fn add_multiple_torrents_in_parallel<V, T>( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option<u64>, +) -> Duration +where + V: Repository<T> + Default, + Arc<V>: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::<V>::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize")); + let handles = FuturesUnordered::new(); + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Update ten thousand torrents in parallel (depending on the set worker threads) +pub async fn update_multiple_torrents_in_parallel<V, T>( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option<u64>, +) -> Duration +where + V: Repository<T> + Default, + Arc<V>: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::<V>::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize")); + let handles = FuturesUnordered::new(); + + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); + } + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +}
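The `sleep` busy-wait in the helpers above simulates per-request client work without yielding to the scheduler; condensed to its core, the pattern is:

```rust
use std::time::Instant;

// Spin for `sleep_time` nanoseconds to keep the worker thread busy,
// unlike `tokio::time::sleep`, which would yield the task instead.
fn simulate_client_work(sleep_time: u64) {
    let start_time = Instant::now();
    while start_time.elapsed().as_nanos() < u128::from(sleep_time) {}
}

fn main() {
    simulate_client_work(10_000); // roughly 10 microseconds of busy-waiting
}
```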
diff --git a/packages/torrent-repository/benches/helpers/utils.rs b/packages/torrent-repository/benches/helpers/utils.rs new file mode 100644 index 000000000..170194806 --- /dev/null +++ b/packages/torrent-repository/benches/helpers/utils.rs @@ -0,0 +1,40 @@ +use std::collections::HashSet; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::{Id, Peer}; +use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; + +pub const DEFAULT_PEER: Peer = Peer { + peer_id: Id([0; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::from_secs(0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, +}; + +#[must_use] +#[allow(clippy::missing_panics_doc)] +pub fn generate_unique_info_hashes(size: usize) -> Vec<InfoHash> { + let mut result = HashSet::new(); + + let mut bytes = [0u8; 20]; + + #[allow(clippy::cast_possible_truncation)] + for i in 0..size { + bytes[0] = (i & 0xFF) as u8; + bytes[1] = ((i >> 8) & 0xFF) as u8; + bytes[2] = ((i >> 16) & 0xFF) as u8; + bytes[3] = ((i >> 24) & 0xFF) as u8; + + let info_hash = InfoHash(bytes); + result.insert(info_hash); + } + + assert_eq!(result.len(), size); + + result.into_iter().collect() +} diff --git a/packages/torrent-repository/benches/repository_benchmark.rs b/packages/torrent-repository/benches/repository_benchmark.rs new file mode 100644 index 000000000..a3684c8e2 --- /dev/null +++ b/packages/torrent-repository/benches/repository_benchmark.rs @@ -0,0 +1,191 @@ +use std::time::Duration; + +mod helpers; + +use criterion::{criterion_group, criterion_main, Criterion}; +use torrust_tracker_torrent_repository::{ + EntryMutexStd, EntryMutexTokio, EntrySingle, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, + TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, +}; + +use crate::helpers::{asyn, sync}; + +fn add_one_torrent(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("add_one_torrent"); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.iter_custom(sync::add_one_torrent::<TorrentsRwLockStd, EntrySingle>); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.iter_custom(sync::add_one_torrent::<TorrentsRwLockStdMutexStd, EntryMutexStd>); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::<TorrentsRwLockStdMutexTokio, EntryMutexTokio>); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt).iter_custom(asyn::add_one_torrent::<TorrentsRwLockTokio, EntrySingle>); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::<TorrentsRwLockTokioMutexStd, EntryMutexStd>); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::<TorrentsRwLockTokioMutexTokio, EntryMutexTokio>); + }); + + group.finish(); +} + +fn add_multiple_torrents_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("add_multiple_torrents_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::<TorrentsRwLockStd, EntrySingle>(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) +
.iter_custom(|iters| sync::add_multiple_torrents_in_parallel::<TorrentsRwLockStdMutexStd, EntryMutexStd>(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::<TorrentsRwLockStdMutexTokio, EntryMutexTokio>(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::<TorrentsRwLockTokio, EntrySingle>(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::<TorrentsRwLockTokioMutexStd, EntryMutexStd>(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::<TorrentsRwLockTokioMutexTokio, EntryMutexTokio>(&rt, iters, None)); + }); + + group.finish(); +} + +fn update_one_torrent_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("update_one_torrent_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::<TorrentsRwLockStd, EntrySingle>(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::<TorrentsRwLockStdMutexStd, EntryMutexStd>(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::<TorrentsRwLockStdMutexTokio, EntryMutexTokio>(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::<TorrentsRwLockTokio, EntrySingle>(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::<TorrentsRwLockTokioMutexStd, EntryMutexStd>(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::<TorrentsRwLockTokioMutexTokio, EntryMutexTokio>(&rt, iters, None)); + }); + + group.finish(); +} + +fn update_multiple_torrents_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("update_multiple_torrents_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::<TorrentsRwLockStd, EntrySingle>(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::<TorrentsRwLockStdMutexStd, EntryMutexStd>(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::<TorrentsRwLockStdMutexTokio, EntryMutexTokio>(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::<TorrentsRwLockTokio, EntrySingle>(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::<TorrentsRwLockTokioMutexStd, EntryMutexStd>(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt).iter_custom(|iters| { + asyn::update_multiple_torrents_in_parallel::<TorrentsRwLockTokioMutexTokio, EntryMutexTokio>(&rt, iters, None) + }); + }); +
group.finish(); +} + +criterion_group!( + benches, + add_one_torrent, + add_multiple_torrents_in_parallel, + update_one_torrent_in_parallel, + update_multiple_torrents_in_parallel +); +criterion_main!(benches);
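`iter_custom` hands the routine an iteration count and expects the measured `Duration` back, which is what lets the helpers above time whole batches themselves; a minimal sketch of that calling convention (the `spin` routine is hypothetical):

```rust
use std::time::{Duration, Instant};

use criterion::{criterion_group, criterion_main, Criterion};

// `iter_custom` routines receive the iteration count and return the
// total elapsed time for exactly that many iterations.
fn spin(iters: u64) -> Duration {
    let start = Instant::now();
    for _ in 0..iters {
        std::hint::black_box(2 + 2);
    }
    start.elapsed()
}

fn bench(c: &mut Criterion) {
    c.bench_function("example", |b| b.iter_custom(spin));
}

criterion_group!(benches, bench);
criterion_main!(benches);
```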
diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs new file mode 100644 index 000000000..4c39af829 --- /dev/null +++ b/packages/torrent-repository/src/entry/mod.rs @@ -0,0 +1,97 @@ +use std::fmt::Debug; +use std::net::SocketAddr; +use std::sync::Arc; + +//use serde::{Deserialize, Serialize}; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +pub mod mutex_std; +pub mod mutex_tokio; +pub mod single; + +pub trait Entry { + /// It returns the swarm metadata (statistics) as a struct: + /// + /// `(seeders, completed, leechers)` + fn get_stats(&self) -> SwarmMetadata; + + /// Returns true if the entry is still valid according to the tracker policy + fn is_good(&self, policy: &TrackerPolicy) -> bool; + + /// Returns true if the entry has no peers + fn peers_is_empty(&self) -> bool; + + /// Returns the number of peers + fn get_peers_len(&self) -> usize; + + /// Get all swarm peers, optionally limiting the result. + fn get_peers(&self, limit: Option<usize>) -> Vec<Arc<peer::Peer>>; + + /// It returns the list of peers for a given peer client, optionally limiting the + /// result. + /// + /// It filters out the input peer, typically because we want to return this + /// list of peers to that client peer. + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option<usize>) -> Vec<Arc<peer::Peer>>; + + /// It updates a peer and returns true if the number of complete downloads has increased. + /// + /// The number of peers that have completed downloading is synchronously updated when peers are updated. + /// That's the total torrent downloads counter. + fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool; + + /// It performs a combined operation of `insert_or_update_peer` and `get_stats`. + fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata); + + /// It removes peers from the swarm that have not been updated since `current_cutoff` + fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch); +} + +#[allow(clippy::module_name_repetitions)] +pub trait EntrySync { + fn get_stats(&self) -> SwarmMetadata; + fn is_good(&self, policy: &TrackerPolicy) -> bool; + fn peers_is_empty(&self) -> bool; + fn get_peers_len(&self) -> usize; + fn get_peers(&self, limit: Option<usize>) -> Vec<Arc<peer::Peer>>; + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option<usize>) -> Vec<Arc<peer::Peer>>; + fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool; + fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata); + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); +} + +#[allow(clippy::module_name_repetitions)] +pub trait EntryAsync { + fn get_stats(&self) -> impl std::future::Future<Output = SwarmMetadata> + Send; + fn check_good(self, policy: &TrackerPolicy) -> impl std::future::Future<Output = bool> + Send; + fn peers_is_empty(&self) -> impl std::future::Future<Output = bool> + Send; + fn get_peers_len(&self) -> impl std::future::Future<Output = usize> + Send; + fn get_peers(&self, limit: Option<usize>) -> impl std::future::Future<Output = Vec<Arc<peer::Peer>>> + Send; + fn get_peers_for_client( + &self, + client: &SocketAddr, + limit: Option<usize>, + ) -> impl std::future::Future<Output = Vec<Arc<peer::Peer>>> + Send; + fn insert_or_update_peer(self, peer: &peer::Peer) -> impl std::future::Future<Output = bool> + Send; + fn insert_or_update_peer_and_get_stats( + self, + peer: &peer::Peer, + ) -> impl std::future::Future<Output = (bool, SwarmMetadata)> + std::marker::Send; + fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future<Output = ()> + Send; +}
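A sketch of how a caller might drive the `Entry` trait above; the announce-handling wrapper (`handle_announce`) is hypothetical glue, not part of this patch:

```rust
use torrust_tracker_primitives::peer;
use torrust_tracker_torrent_repository::entry::Entry;
use torrust_tracker_torrent_repository::EntrySingle;

// Hypothetical announce handler: update the swarm and report its size.
fn handle_announce(entry: &mut EntrySingle, peer: &peer::Peer) -> u32 {
    // `true` means the completed-downloads counter advanced.
    let (counter_advanced, stats) = entry.insert_or_update_peer_and_get_stats(peer);
    if counter_advanced {
        // e.g. persist `stats.downloaded` for this torrent (hypothetical hook)
    }
    stats.complete + stats.incomplete // active swarm size
}

fn main() {
    let mut entry = EntrySingle::default();
    // Fixture peer: left == 0 and event == Started, so it counts as a seeder.
    let swarm_size = handle_announce(&mut entry, &peer::Peer::default());
    assert_eq!(swarm_size, 1);
}
```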
+/// A data structure containing all the information about a torrent in the tracker. +/// +/// This is the tracker entry for a given torrent and contains the swarm data, +/// that's the list of all the peers trying to download the same torrent. +/// The tracker keeps one entry like this for every torrent. +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Torrent { + /// The swarm: a network of peers that are all trying to download the torrent associated to this entry + // #[serde(skip)] + pub(crate) peers: std::collections::BTreeMap<peer::Id, Arc<peer::Peer>>, + /// The number of peers that have ever completed downloading the torrent associated to this entry + pub(crate) downloaded: u32, +} diff --git a/packages/torrent-repository/src/entry/mutex_std.rs b/packages/torrent-repository/src/entry/mutex_std.rs new file mode 100644 index 000000000..b4b823909 --- /dev/null +++ b/packages/torrent-repository/src/entry/mutex_std.rs @@ -0,0 +1,57 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle}; + +impl EntrySync for EntryMutexStd { + fn get_stats(&self) -> SwarmMetadata { + self.lock().expect("it should get a lock").get_stats() + } + + fn is_good(&self, policy: &TrackerPolicy) -> bool { + self.lock().expect("it should get a lock").is_good(policy) + } + + fn peers_is_empty(&self) -> bool { + self.lock().expect("it should get a lock").peers_is_empty() + } + + fn get_peers_len(&self) -> usize { + self.lock().expect("it should get a lock").get_peers_len() + } + + fn get_peers(&self, limit: Option<usize>) -> Vec<Arc<peer::Peer>> { + self.lock().expect("it should get lock").get_peers(limit) + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option<usize>) -> Vec<Arc<peer::Peer>> { + self.lock().expect("it should get lock").get_peers_for_client(client, limit) + } + + fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { + self.lock().expect("it should lock the entry").insert_or_update_peer(peer) + } + + fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.lock() + .expect("it should lock the entry") + .insert_or_update_peer_and_get_stats(peer) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.lock() + .expect("it should lock the entry") + .remove_inactive_peers(current_cutoff); + } +} + +impl From<EntrySingle> for EntryMutexStd { + fn from(entry: EntrySingle) -> Self { + Arc::new(std::sync::Mutex::new(entry)) + } +}
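A small usage sketch for the `EntrySync` wrapper just defined, assuming the fixture `Peer::default()` from the primitives package:

```rust
use torrust_tracker_primitives::peer::Peer;
use torrust_tracker_torrent_repository::entry::EntrySync;
use torrust_tracker_torrent_repository::{EntryMutexStd, EntrySingle};

fn main() {
    // Built via the `From<EntrySingle>` impl above.
    let entry: EntryMutexStd = EntrySingle::default().into();
    entry.insert_or_update_peer(&Peer::default());
    assert_eq!(entry.get_peers_len(), 1);
}
```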
diff --git a/packages/torrent-repository/src/entry/mutex_tokio.rs b/packages/torrent-repository/src/entry/mutex_tokio.rs new file mode 100644 index 000000000..34f4a4e92 --- /dev/null +++ b/packages/torrent-repository/src/entry/mutex_tokio.rs @@ -0,0 +1,53 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntryAsync}; +use crate::{EntryMutexTokio, EntrySingle}; + +impl EntryAsync for EntryMutexTokio { + async fn get_stats(&self) -> SwarmMetadata { + self.lock().await.get_stats() + } + + async fn check_good(self, policy: &TrackerPolicy) -> bool { + self.lock().await.is_good(policy) + } + + async fn peers_is_empty(&self) -> bool { + self.lock().await.peers_is_empty() + } + + async fn get_peers_len(&self) -> usize { + self.lock().await.get_peers_len() + } + + async fn get_peers(&self, limit: Option<usize>) -> Vec<Arc<peer::Peer>> { + self.lock().await.get_peers(limit) + } + + async fn get_peers_for_client(&self, client: &SocketAddr, limit: Option<usize>) -> Vec<Arc<peer::Peer>> { + self.lock().await.get_peers_for_client(client, limit) + } + + async fn insert_or_update_peer(self, peer: &peer::Peer) -> bool { + self.lock().await.insert_or_update_peer(peer) + } + + async fn insert_or_update_peer_and_get_stats(self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.lock().await.insert_or_update_peer_and_get_stats(peer) + } + + async fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) { + self.lock().await.remove_inactive_peers(current_cutoff); + } +} + +impl From<EntrySingle> for EntryMutexTokio { + fn from(entry: EntrySingle) -> Self { + Arc::new(tokio::sync::Mutex::new(entry)) + } +}
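The tokio-mutex variant is used the same way but awaited, and the consuming `self` receivers in `EntryAsync` mean the caller goes through a clone of the `Arc`; a sketch assuming a tokio runtime:

```rust
use torrust_tracker_primitives::peer::Peer;
use torrust_tracker_torrent_repository::entry::EntryAsync;
use torrust_tracker_torrent_repository::{EntryMutexTokio, EntrySingle};

#[tokio::main]
async fn main() {
    let entry: EntryMutexTokio = EntrySingle::default().into();
    // `insert_or_update_peer` takes `self`, so call it on a clone of the Arc.
    entry.clone().insert_or_update_peer(&Peer::default()).await;
    assert_eq!(entry.get_peers_len().await, 1);
}
```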
diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs
new file mode 100644
index 000000000..8bb1b6def
--- /dev/null
+++ b/packages/torrent-repository/src/lib.rs
@@ -0,0 +1,28 @@
+use std::sync::Arc;
+
+use torrust_tracker_clock::clock;
+
+pub mod entry;
+pub mod repository;
+
+pub type EntrySingle = entry::Torrent;
+pub type EntryMutexStd = Arc<std::sync::Mutex<entry::Torrent>>;
+pub type EntryMutexTokio = Arc<tokio::sync::Mutex<entry::Torrent>>;
+
+pub type TorrentsRwLockStd = repository::RwLockStd<EntrySingle>;
+pub type TorrentsRwLockStdMutexStd = repository::RwLockStd<EntryMutexStd>;
+pub type TorrentsRwLockStdMutexTokio = repository::RwLockStd<EntryMutexTokio>;
+pub type TorrentsRwLockTokio = repository::RwLockTokio<EntrySingle>;
+pub type TorrentsRwLockTokioMutexStd = repository::RwLockTokio<EntryMutexStd>;
+pub type TorrentsRwLockTokioMutexTokio = repository::RwLockTokio<EntryMutexTokio>;
+
+/// This code needs to be copied into each crate.
+/// Working version, for production.
+#[cfg(not(test))]
+#[allow(dead_code)]
+pub(crate) type CurrentClock = clock::Working;
+
+/// Stopped version, for testing.
+#[cfg(test)]
+#[allow(dead_code)]
+pub(crate) type CurrentClock = clock::Stopped;
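A small illustration (not in the patch) of what the alias matrix expands to in practice: the outer alias segment picks the map-level lock, the inner one the per-entry lock.

    use torrust_tracker_torrent_repository::{TorrentsRwLockStd, TorrentsRwLockStdMutexTokio};

    fn sketch() {
        // std `RwLock` over plain entries: peer updates need the map's write lock.
        let _coarse = TorrentsRwLockStd::default();

        // std `RwLock` over `Arc<tokio::sync::Mutex<_>>` entries: per-torrent updates
        // can run while the map itself is only read-locked.
        let _fine = TorrentsRwLockStdMutexTokio::default();
    }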
diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs
new file mode 100644
index 000000000..494040c9d
--- /dev/null
+++ b/packages/torrent-repository/src/repository/mod.rs
@@ -0,0 +1,76 @@
+use torrust_tracker_configuration::TrackerPolicy;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::pagination::Pagination;
+use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
+use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics;
+use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents};
+
+pub mod rw_lock_std;
+pub mod rw_lock_std_mutex_std;
+pub mod rw_lock_std_mutex_tokio;
+pub mod rw_lock_tokio;
+pub mod rw_lock_tokio_mutex_std;
+pub mod rw_lock_tokio_mutex_tokio;
+
+use std::fmt::Debug;
+
+pub trait Repository<T>: Debug + Default + Sized + 'static {
+    fn get(&self, key: &InfoHash) -> Option<T>;
+    fn get_metrics(&self) -> TorrentsMetrics;
+    fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>;
+    fn import_persistent(&self, persistent_torrents: &PersistentTorrents);
+    fn remove(&self, key: &InfoHash) -> Option<T>;
+    fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch);
+    fn remove_peerless_torrents(&self, policy: &TrackerPolicy);
+    fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata);
+}
+
+#[allow(clippy::module_name_repetitions)]
+pub trait RepositoryAsync<T>: Debug + Default + Sized + 'static {
+    fn get(&self, key: &InfoHash) -> impl std::future::Future<Output = Option<T>> + Send;
+    fn get_metrics(&self) -> impl std::future::Future<Output = TorrentsMetrics> + Send;
+    fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future<Output = Vec<(InfoHash, T)>> + Send;
+    fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> impl std::future::Future<Output = ()> + Send;
+    fn remove(&self, key: &InfoHash) -> impl std::future::Future<Output = Option<T>> + Send;
+    fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future<Output = ()> + Send;
+    fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future<Output = ()> + Send;
+    fn update_torrent_with_peer_and_get_stats(
+        &self,
+        info_hash: &InfoHash,
+        peer: &peer::Peer,
+    ) -> impl std::future::Future<Output = (bool, SwarmMetadata)> + Send;
+}
+
+#[derive(Default, Debug)]
+pub struct RwLockStd<T> {
+    torrents: std::sync::RwLock<std::collections::BTreeMap<InfoHash, T>>,
+}
+
+#[derive(Default, Debug)]
+pub struct RwLockTokio<T> {
+    torrents: tokio::sync::RwLock<std::collections::BTreeMap<InfoHash, T>>,
+}
+
+impl<T> RwLockStd<T> {
+    /// # Panics
+    ///
+    /// Panics if unable to get a lock.
+    pub fn write(
+        &self,
+    ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap<InfoHash, T>> {
+        self.torrents.write().expect("it should get lock")
+    }
+}
+
+impl<T> RwLockTokio<T> {
+    pub fn write(
+        &self,
+    ) -> impl std::future::Future<
+        Output = tokio::sync::RwLockWriteGuard<
+            '_,
+            std::collections::BTreeMap<InfoHash, T>,
+        >,
+    > {
+        self.torrents.write()
+    }
+}
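Callers are expected to program against these two traits rather than a concrete flavour. A hypothetical helper, generic over any synchronous repository (`torrents` is the `u64` counter filled in by the `get_metrics` implementations below):

    use torrust_tracker_primitives::info_hash::InfoHash;
    use torrust_tracker_torrent_repository::repository::Repository;

    // Hypothetical: works with any entry type `T` and any repository storing it.
    fn count_and_lookup<T, R: Repository<T>>(repo: &R, key: &InfoHash) -> (u64, Option<T>) {
        (repo.get_metrics().torrents, repo.get(key))
    }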
} + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, downloaded) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntrySingle { + peers: BTreeMap::default(), + downloaded: *downloaded, + }; + + torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let mut db = self.get_torrents_mut(); + let entries = db.values_mut(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.is_good(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs new file mode 100644 index 000000000..0b65234e3 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -0,0 +1,123 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockStdMutexStd}; + +impl TorrentsRwLockStdMutexStd { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl Repository for TorrentsRwLockStdMutexStd +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer) + } + + fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().values() { + let stats = entry.lock().expect("it should get a lock").get_stats(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + 
diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs
new file mode 100644
index 000000000..0b65234e3
--- /dev/null
+++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs
@@ -0,0 +1,123 @@
+use std::collections::BTreeMap;
+use std::sync::Arc;
+
+use torrust_tracker_configuration::TrackerPolicy;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::pagination::Pagination;
+use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
+use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics;
+use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents};
+
+use super::Repository;
+use crate::entry::{Entry, EntrySync};
+use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockStdMutexStd};
+
+impl TorrentsRwLockStdMutexStd {
+    fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap<InfoHash, EntryMutexStd>>
+    where
+        std::collections::BTreeMap<InfoHash, EntryMutexStd>: 'a,
+    {
+        self.torrents.read().expect("unable to get torrent list")
+    }
+
+    fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap<InfoHash, EntryMutexStd>>
+    where
+        std::collections::BTreeMap<InfoHash, EntryMutexStd>: 'a,
+    {
+        self.torrents.write().expect("unable to get writable torrent list")
+    }
+}
+
+impl Repository<EntryMutexStd> for TorrentsRwLockStdMutexStd
+where
+    EntryMutexStd: EntrySync,
+    EntrySingle: Entry,
+{
+    fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) {
+        let maybe_entry = self.get_torrents().get(info_hash).cloned();
+
+        let entry = if let Some(entry) = maybe_entry {
+            entry
+        } else {
+            let mut db = self.get_torrents_mut();
+            let entry = db.entry(*info_hash).or_insert(Arc::default());
+            entry.clone()
+        };
+
+        entry.insert_or_update_peer_and_get_stats(peer)
+    }
+
+    fn get(&self, key: &InfoHash) -> Option<EntryMutexStd> {
+        let db = self.get_torrents();
+        db.get(key).cloned()
+    }
+
+    fn get_metrics(&self) -> TorrentsMetrics {
+        let mut metrics = TorrentsMetrics::default();
+
+        for entry in self.get_torrents().values() {
+            let stats = entry.lock().expect("it should get a lock").get_stats();
+            metrics.complete += u64::from(stats.complete);
+            metrics.downloaded += u64::from(stats.downloaded);
+            metrics.incomplete += u64::from(stats.incomplete);
+            metrics.torrents += 1;
+        }
+
+        metrics
+    }
+
+    fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> {
+        let db = self.get_torrents();
+
+        match pagination {
+            Some(pagination) => db
+                .iter()
+                .skip(pagination.offset as usize)
+                .take(pagination.limit as usize)
+                .map(|(a, b)| (*a, b.clone()))
+                .collect(),
+            None => db.iter().map(|(a, b)| (*a, b.clone())).collect(),
+        }
+    }
+
+    fn import_persistent(&self, persistent_torrents: &PersistentTorrents) {
+        let mut torrents = self.get_torrents_mut();
+
+        for (info_hash, completed) in persistent_torrents {
+            // Skip if torrent entry already exists
+            if torrents.contains_key(info_hash) {
+                continue;
+            }
+
+            let entry = EntryMutexStd::new(
+                EntrySingle {
+                    peers: BTreeMap::default(),
+                    downloaded: *completed,
+                }
+                .into(),
+            );
+
+            torrents.insert(*info_hash, entry);
+        }
+    }
+
+    fn remove(&self, key: &InfoHash) -> Option<EntryMutexStd> {
+        let mut db = self.get_torrents_mut();
+        db.remove(key)
+    }
+
+    fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) {
+        let db = self.get_torrents();
+        let entries = db.values().cloned();
+
+        for entry in entries {
+            entry.remove_inactive_peers(current_cutoff);
+        }
+    }
+
+    fn remove_peerless_torrents(&self, policy: &TrackerPolicy) {
+        let mut db = self.get_torrents_mut();
+
+        db.retain(|_, e| e.lock().expect("it should lock entry").is_good(policy));
+    }
+}
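Note the shape of `update_torrent_with_peer_and_get_stats` here: it first tries a cheap read-locked lookup and only upgrades to the write lock when the entry is missing. Between dropping the read guard and acquiring the write guard another writer may insert the same entry, which is why the slow path goes through `entry(...).or_insert(...)` rather than a plain insert. Reduced to std types, the pattern looks roughly like this:

    use std::collections::BTreeMap;
    use std::sync::{Arc, Mutex, RwLock};

    // Hypothetical reduction of the optimistic lookup above.
    fn get_or_insert<K: Ord + Copy, V: Default>(
        map: &RwLock<BTreeMap<K, Arc<Mutex<V>>>>,
        key: K,
    ) -> Arc<Mutex<V>> {
        if let Some(entry) = map.read().expect("it should get a read lock").get(&key).cloned() {
            return entry; // fast path: shared lock only
        }
        // Slow path: `or_insert_with` re-checks under the exclusive lock.
        map.write()
            .expect("it should get a write lock")
            .entry(key)
            .or_insert_with(Arc::default)
            .clone()
    }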
diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs
new file mode 100644
index 000000000..5394abb6a
--- /dev/null
+++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs
@@ -0,0 +1,151 @@
+use std::collections::BTreeMap;
+use std::iter::zip;
+use std::pin::Pin;
+use std::sync::Arc;
+
+use futures::future::join_all;
+use futures::{Future, FutureExt};
+use torrust_tracker_configuration::TrackerPolicy;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::pagination::Pagination;
+use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
+use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics;
+use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents};
+
+use super::RepositoryAsync;
+use crate::entry::{Entry, EntryAsync};
+use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockStdMutexTokio};
+
+impl TorrentsRwLockStdMutexTokio {
+    fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap<InfoHash, EntryMutexTokio>>
+    where
+        std::collections::BTreeMap<InfoHash, EntryMutexTokio>: 'a,
+    {
+        self.torrents.read().expect("unable to get torrent list")
+    }
+
+    fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap<InfoHash, EntryMutexTokio>>
+    where
+        std::collections::BTreeMap<InfoHash, EntryMutexTokio>: 'a,
+    {
+        self.torrents.write().expect("unable to get writable torrent list")
+    }
+}
+
+impl RepositoryAsync<EntryMutexTokio> for TorrentsRwLockStdMutexTokio
+where
+    EntryMutexTokio: EntryAsync,
+    EntrySingle: Entry,
+{
+    async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) {
+        let maybe_entry = self.get_torrents().get(info_hash).cloned();
+
+        let entry = if let Some(entry) = maybe_entry {
+            entry
+        } else {
+            let mut db = self.get_torrents_mut();
+            let entry = db.entry(*info_hash).or_insert(Arc::default());
+            entry.clone()
+        };
+
+        entry.insert_or_update_peer_and_get_stats(peer).await
+    }
+    async fn get(&self, key: &InfoHash) -> Option<EntryMutexTokio> {
+        let db = self.get_torrents();
+        db.get(key).cloned()
+    }
+
+    async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexTokio)> {
+        let db = self.get_torrents();
+
+        match pagination {
+            Some(pagination) => db
+                .iter()
+                .skip(pagination.offset as usize)
+                .take(pagination.limit as usize)
+                .map(|(a, b)| (*a, b.clone()))
+                .collect(),
+            None => db.iter().map(|(a, b)| (*a, b.clone())).collect(),
+        }
+    }
+
+    async fn get_metrics(&self) -> TorrentsMetrics {
+        let mut metrics = TorrentsMetrics::default();
+
+        let entries: Vec<_> = self.get_torrents().values().cloned().collect();
+
+        for entry in entries {
+            let stats = entry.lock().await.get_stats();
+            metrics.complete += u64::from(stats.complete);
+            metrics.downloaded += u64::from(stats.downloaded);
+            metrics.incomplete += u64::from(stats.incomplete);
+            metrics.torrents += 1;
+        }
+
+        metrics
+    }
+
+    async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) {
+        let mut db = self.get_torrents_mut();
+
+        for (info_hash, completed) in persistent_torrents {
+            // Skip if torrent entry already exists
+            if db.contains_key(info_hash) {
+                continue;
+            }
+
+            let entry = EntryMutexTokio::new(
+                EntrySingle {
+                    peers: BTreeMap::default(),
+                    downloaded: *completed,
+                }
+                .into(),
+            );
+
+            db.insert(*info_hash, entry);
+        }
+    }
+
+    async fn remove(&self, key: &InfoHash) -> Option<EntryMutexTokio> {
+        let mut db = self.get_torrents_mut();
+        db.remove(key)
+    }
+
+    async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) {
+        let handles: Vec<Pin<Box<dyn Future<Output = ()> + Send>>>;
+        {
+            let db = self.get_torrents();
+            handles = db
+                .values()
+                .cloned()
+                .map(|e| e.remove_inactive_peers(current_cutoff).boxed())
+                .collect();
+        }
+        join_all(handles).await;
+    }
+
+    async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) {
+        let handles: Vec<Pin<Box<dyn Future<Output = Option<InfoHash>> + Send>>>;
+
+        {
+            let db = self.get_torrents();
+
+            handles = zip(db.keys().copied(), db.values().cloned())
+                .map(|(infohash, torrent)| {
+                    torrent
+                        .check_good(policy)
+                        .map(move |good| if good { None } else { Some(infohash) })
+                        .boxed()
+                })
+                .collect::<Vec<_>>();
+        }
+
+        let not_good = join_all(handles).await;
+
+        let mut db = self.get_torrents_mut();
+
+        for remove in not_good.into_iter().flatten() {
+            drop(db.remove(&remove));
+        }
+    }
+}
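The two removal methods above have to work around the fact that a std `RwLock` guard must not live across an `.await`: the boxed futures are collected inside a block so the guard is dropped before `join_all` runs. The same shape, reduced to a self-contained sketch:

    use std::sync::{Arc, RwLock};

    use futures::future::{join_all, BoxFuture};
    use futures::FutureExt;
    use tokio::sync::Mutex;

    // Hypothetical reduction: the std read guard is confined to the inner block,
    // so nothing is awaited while it is held.
    async fn touch_all(shared: &RwLock<Vec<Arc<Mutex<u32>>>>) {
        let handles: Vec<BoxFuture<'static, ()>> = {
            let guard = shared.read().expect("it should get a read lock");
            guard
                .iter()
                .cloned()
                .map(|cell| async move { *cell.lock().await += 1 }.boxed())
                .collect()
        };
        join_all(handles).await;
    }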
diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs
new file mode 100644
index 000000000..fa84e2451
--- /dev/null
+++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs
@@ -0,0 +1,113 @@
+use std::collections::BTreeMap;
+
+use torrust_tracker_configuration::TrackerPolicy;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::pagination::Pagination;
+use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
+use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics;
+use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents};
+
+use super::RepositoryAsync;
+use crate::entry::Entry;
+use crate::{EntrySingle, TorrentsRwLockTokio};
+
+impl TorrentsRwLockTokio {
+    async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap<InfoHash, EntrySingle>>
+    where
+        std::collections::BTreeMap<InfoHash, EntrySingle>: 'a,
+    {
+        self.torrents.read().await
+    }
+
+    async fn get_torrents_mut<'a>(
+        &'a self,
+    ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap<InfoHash, EntrySingle>>
+    where
+        std::collections::BTreeMap<InfoHash, EntrySingle>: 'a,
+    {
+        self.torrents.write().await
+    }
+}
+
+impl RepositoryAsync<EntrySingle> for TorrentsRwLockTokio
+where
+    EntrySingle: Entry,
+{
+    async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) {
+        let mut db = self.get_torrents_mut().await;
+
+        let entry = db.entry(*info_hash).or_insert(EntrySingle::default());
+
+        entry.insert_or_update_peer_and_get_stats(peer)
+    }
+    async fn get(&self, key: &InfoHash) -> Option<EntrySingle> {
+        let db = self.get_torrents().await;
+        db.get(key).cloned()
+    }
+
+    async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> {
+        let db = self.get_torrents().await;
+
+        match pagination {
+            Some(pagination) => db
+                .iter()
+                .skip(pagination.offset as usize)
+                .take(pagination.limit as usize)
+                .map(|(a, b)| (*a, b.clone()))
+                .collect(),
+            None => db.iter().map(|(a, b)| (*a, b.clone())).collect(),
+        }
+    }
+
+    async fn get_metrics(&self) -> TorrentsMetrics {
+        let mut metrics = TorrentsMetrics::default();
+
+        for entry in self.get_torrents().await.values() {
+            let stats = entry.get_stats();
+            metrics.complete += u64::from(stats.complete);
+            metrics.downloaded += u64::from(stats.downloaded);
+            metrics.incomplete += u64::from(stats.incomplete);
+            metrics.torrents += 1;
+        }
+
+        metrics
+    }
+
+    async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) {
+        let mut torrents = self.get_torrents_mut().await;
+
+        for (info_hash, completed) in persistent_torrents {
+            // Skip if torrent entry already exists
+            if torrents.contains_key(info_hash) {
+                continue;
+            }
+
+            let entry = EntrySingle {
+                peers: BTreeMap::default(),
+                downloaded: *completed,
+            };
+
+            torrents.insert(*info_hash, entry);
+        }
+    }
+
+    async fn remove(&self, key: &InfoHash) -> Option<EntrySingle> {
+        let mut db = self.get_torrents_mut().await;
+        db.remove(key)
+    }
+
+    async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) {
+        let mut db = self.get_torrents_mut().await;
+        let entries = db.values_mut();
+
+        for entry in entries {
+            entry.remove_inactive_peers(current_cutoff);
+        }
+    }
+
+    async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) {
+        let mut db = self.get_torrents_mut().await;
+
+        db.retain(|_, e| e.is_good(policy));
+    }
+}
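Usage mirrors the std flavour, with every call awaited. A sketch (assumes a tokio runtime):

    use torrust_tracker_primitives::info_hash::InfoHash;
    use torrust_tracker_primitives::peer;
    use torrust_tracker_torrent_repository::repository::RepositoryAsync as _;
    use torrust_tracker_torrent_repository::TorrentsRwLockTokio;

    async fn sketch(info_hash: &InfoHash, peer: &peer::Peer) {
        let repo = TorrentsRwLockTokio::default();

        let (_stats_changed, _stats) = repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await;
        let _entry = repo.get(info_hash).await;
    }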
diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs
new file mode 100644
index 000000000..fbbc51a09
--- /dev/null
+++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs
@@ -0,0 +1,124 @@
+use std::collections::BTreeMap;
+use std::sync::Arc;
+
+use torrust_tracker_configuration::TrackerPolicy;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::pagination::Pagination;
+use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
+use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics;
+use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents};
+
+use super::RepositoryAsync;
+use crate::entry::{Entry, EntrySync};
+use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockTokioMutexStd};
+
+impl TorrentsRwLockTokioMutexStd {
+    async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap<InfoHash, EntryMutexStd>>
+    where
+        std::collections::BTreeMap<InfoHash, EntryMutexStd>: 'a,
+    {
+        self.torrents.read().await
+    }
+
+    async fn get_torrents_mut<'a>(
+        &'a self,
+    ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap<InfoHash, EntryMutexStd>>
+    where
+        std::collections::BTreeMap<InfoHash, EntryMutexStd>: 'a,
+    {
+        self.torrents.write().await
+    }
+}
+
+impl RepositoryAsync<EntryMutexStd> for TorrentsRwLockTokioMutexStd
+where
+    EntryMutexStd: EntrySync,
+    EntrySingle: Entry,
+{
+    async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) {
+        let maybe_entry = self.get_torrents().await.get(info_hash).cloned();
+
+        let entry = if let Some(entry) = maybe_entry {
+            entry
+        } else {
+            let mut db = self.get_torrents_mut().await;
+            let entry = db.entry(*info_hash).or_insert(Arc::default());
+            entry.clone()
+        };
+
+        entry.insert_or_update_peer_and_get_stats(peer)
+    }
+    async fn get(&self, key: &InfoHash) -> Option<EntryMutexStd> {
+        let db = self.get_torrents().await;
+        db.get(key).cloned()
+    }
+
+    async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> {
+        let db = self.get_torrents().await;
+
+        match pagination {
+            Some(pagination) => db
+                .iter()
+                .skip(pagination.offset as usize)
+                .take(pagination.limit as usize)
+                .map(|(a, b)| (*a, b.clone()))
+                .collect(),
+            None => db.iter().map(|(a, b)| (*a, b.clone())).collect(),
+        }
+    }
+
+    async fn get_metrics(&self) -> TorrentsMetrics {
+        let mut metrics = TorrentsMetrics::default();
+
+        for entry in self.get_torrents().await.values() {
+            let stats = entry.get_stats();
+            metrics.complete += u64::from(stats.complete);
+            metrics.downloaded += u64::from(stats.downloaded);
+            metrics.incomplete += u64::from(stats.incomplete);
+            metrics.torrents += 1;
+        }
+
+        metrics
+    }
+
+    async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) {
+        let mut torrents = self.get_torrents_mut().await;
+
+        for (info_hash, completed) in persistent_torrents {
+            // Skip if torrent entry already exists
+            if torrents.contains_key(info_hash) {
+                continue;
+            }
+
+            let entry = EntryMutexStd::new(
+                EntrySingle {
+                    peers: BTreeMap::default(),
+                    downloaded: *completed,
+                }
+                .into(),
+            );
+
+            torrents.insert(*info_hash, entry);
+        }
+    }
+
+    async fn remove(&self, key: &InfoHash) -> Option<EntryMutexStd> {
+        let mut db = self.get_torrents_mut().await;
+        db.remove(key)
+    }
+
+    async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) {
+        let db = self.get_torrents().await;
+        let entries = db.values().cloned();
+
+        for entry in entries {
+            entry.remove_inactive_peers(current_cutoff);
+        }
+    }
+
+    async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) {
+        let mut db = self.get_torrents_mut().await;
+
+        db.retain(|_, e| e.lock().expect("it should lock entry").is_good(policy));
+    }
+}
diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs
new file mode 100644
index 000000000..bc7fd61e8
--- /dev/null
+++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs
@@ -0,0 +1,134 @@
+use std::collections::BTreeMap;
+use std::sync::Arc;
+
+use torrust_tracker_configuration::TrackerPolicy;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::pagination::Pagination;
+use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
+use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics;
+use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents};
+
+use super::RepositoryAsync;
+use crate::entry::{Entry, EntryAsync};
+use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockTokioMutexTokio};
+
+impl TorrentsRwLockTokioMutexTokio {
+    async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap<InfoHash, EntryMutexTokio>>
+    where
+        std::collections::BTreeMap<InfoHash, EntryMutexTokio>: 'a,
+    {
+        self.torrents.read().await
+    }
+
+    async fn get_torrents_mut<'a>(
+        &'a self,
+    ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap<InfoHash, EntryMutexTokio>>
+    where
+        std::collections::BTreeMap<InfoHash, EntryMutexTokio>: 'a,
+    {
+        self.torrents.write().await
+    }
+}
+
+impl RepositoryAsync<EntryMutexTokio> for TorrentsRwLockTokioMutexTokio
+where
+    EntryMutexTokio: EntryAsync,
+    EntrySingle: Entry,
+{
+    async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) {
+        let maybe_entry = self.get_torrents().await.get(info_hash).cloned();
+
+        let entry = if let Some(entry) = maybe_entry {
+            entry
+        } else {
+            let mut db = self.get_torrents_mut().await;
+            let entry = db.entry(*info_hash).or_insert(Arc::default());
+            entry.clone()
+        };
+
+        entry.insert_or_update_peer_and_get_stats(peer).await
+    }
+    async fn get(&self, key: &InfoHash) -> Option<EntryMutexTokio> {
+        let db = self.get_torrents().await;
+        db.get(key).cloned()
+    }
+
+    async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexTokio)> {
+        let db = self.get_torrents().await;
+
+        match pagination {
+            Some(pagination) => db
+                .iter()
+                .skip(pagination.offset as usize)
+                .take(pagination.limit as usize)
+                .map(|(a, b)| (*a, b.clone()))
+                .collect(),
+            None => db.iter().map(|(a, b)| (*a, b.clone())).collect(),
+        }
+    }
+
+    async fn get_metrics(&self) -> TorrentsMetrics {
+        let mut metrics = TorrentsMetrics::default();
+
+        for entry in self.get_torrents().await.values() {
+            let stats = entry.get_stats().await;
+            metrics.complete += u64::from(stats.complete);
+            metrics.downloaded += u64::from(stats.downloaded);
+            metrics.incomplete += u64::from(stats.incomplete);
+            metrics.torrents += 1;
+        }
+
+        metrics
+    }
+
+    async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) {
+        let mut db = self.get_torrents_mut().await;
+
+        for (info_hash, completed) in persistent_torrents {
+            // Skip if torrent entry already exists
+            if db.contains_key(info_hash) {
+                continue;
+            }
+
+            let entry = EntryMutexTokio::new(
+                EntrySingle {
+                    peers: BTreeMap::default(),
+                    downloaded: *completed,
+                }
+                .into(),
+            );
+
+            db.insert(*info_hash, entry);
+        }
+    }
+
+    async fn remove(&self, key: &InfoHash) -> Option<EntryMutexTokio> {
+        let mut db = self.get_torrents_mut().await;
+        db.remove(key)
+    }
+
+    async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) {
+        let db = self.get_torrents().await;
+        let entries = db.values().cloned();
+
+        for entry in entries {
+            entry.remove_inactive_peers(current_cutoff).await;
+        }
+    }
+
+    async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) {
+        let mut db = self.get_torrents_mut().await;
+
+        let mut not_good = Vec::<InfoHash>::default();
+
+        for (&infohash, torrent) in db.iter() {
+            if !torrent.clone().check_good(policy).await {
+                not_good.push(infohash);
+            }
+        }
+
+        for remove in not_good {
+            drop(db.remove(&remove));
+        }
+    }
+}
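Unlike the std-locked variants, this flavour may hold the tokio write guard across `.await`, so `remove_peerless_torrents` can simply do two passes: collect the keys of bad entries, then remove them (necessary because `BTreeMap::retain` cannot take an async predicate). A generalized, hypothetical reduction of that shape:

    use std::collections::BTreeMap;

    // Hypothetical helper: an async two-pass `retain` over a B-tree map.
    async fn retain_async<K: Ord + Copy, V, F, Fut>(map: &mut BTreeMap<K, V>, mut keep: F)
    where
        F: FnMut(&V) -> Fut,
        Fut: std::future::Future<Output = bool>,
    {
        let mut remove = Vec::new();
        for (key, value) in map.iter() {
            if !keep(value).await {
                remove.push(*key);
            }
        }
        for key in remove {
            map.remove(&key);
        }
    }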
diff --git a/packages/torrent-repository/tests/common/mod.rs b/packages/torrent-repository/tests/common/mod.rs
new file mode 100644
index 000000000..efdf7f742
--- /dev/null
+++ b/packages/torrent-repository/tests/common/mod.rs
@@ -0,0 +1,3 @@
+pub mod repo;
+pub mod torrent;
+pub mod torrent_peer_builder;
diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs
new file mode 100644
index 000000000..3a4b53d2f
--- /dev/null
+++ b/packages/torrent-repository/tests/common/repo.rs
@@ -0,0 +1,147 @@
+use torrust_tracker_configuration::TrackerPolicy;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::pagination::Pagination;
+use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
+use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics;
+use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents};
+use torrust_tracker_torrent_repository::repository::{Repository as _, RepositoryAsync as _};
+use torrust_tracker_torrent_repository::{
+    EntrySingle, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio,
+    TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio,
+};
+
+#[derive(Debug)]
+pub(crate) enum Repo {
+    Std(TorrentsRwLockStd),
+    StdMutexStd(TorrentsRwLockStdMutexStd),
+    StdMutexTokio(TorrentsRwLockStdMutexTokio),
+    Tokio(TorrentsRwLockTokio),
+    TokioMutexStd(TorrentsRwLockTokioMutexStd),
+    TokioMutexTokio(TorrentsRwLockTokioMutexTokio),
+}
+
+impl Repo {
+    pub(crate) async fn get(&self, key: &InfoHash) -> Option<EntrySingle> {
+        match self {
+            Repo::Std(repo) => repo.get(key),
+            Repo::StdMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()),
+            Repo::StdMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()),
+            Repo::Tokio(repo) => repo.get(key).await,
+            Repo::TokioMutexStd(repo) => Some(repo.get(key).await?.lock().unwrap().clone()),
+            Repo::TokioMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()),
+        }
+    }
+    pub(crate) async fn get_metrics(&self) -> TorrentsMetrics {
+        match self {
+            Repo::Std(repo) => repo.get_metrics(),
+            Repo::StdMutexStd(repo) => repo.get_metrics(),
+            Repo::StdMutexTokio(repo) => repo.get_metrics().await,
+            Repo::Tokio(repo) => repo.get_metrics().await,
+            Repo::TokioMutexStd(repo) => repo.get_metrics().await,
+            Repo::TokioMutexTokio(repo) => repo.get_metrics().await,
+        }
+    }
+    pub(crate) async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> {
+        match self {
+            Repo::Std(repo) => repo.get_paginated(pagination),
+            Repo::StdMutexStd(repo) => repo
+                .get_paginated(pagination)
+                .iter()
+                .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone()))
+                .collect(),
+            Repo::StdMutexTokio(repo) => {
+                let mut v: Vec<(InfoHash, EntrySingle)> = vec![];
+
+                for (i, t) in repo.get_paginated(pagination).await {
+                    v.push((i, t.lock().await.clone()));
+                }
+                v
+            }
+            Repo::Tokio(repo) => repo.get_paginated(pagination).await,
+            Repo::TokioMutexStd(repo) => repo
+                .get_paginated(pagination)
+                .await
+                .iter()
+                .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone()))
+                .collect(),
+            Repo::TokioMutexTokio(repo) => {
+                let mut v: Vec<(InfoHash, EntrySingle)> = vec![];
+
+                for (i, t) in repo.get_paginated(pagination).await {
+                    v.push((i, t.lock().await.clone()));
+                }
+                v
+            }
+        }
+    }
+    pub(crate) async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) {
+        match self {
+            Repo::Std(repo) => repo.import_persistent(persistent_torrents),
+            Repo::StdMutexStd(repo) => repo.import_persistent(persistent_torrents),
+            Repo::StdMutexTokio(repo) => repo.import_persistent(persistent_torrents).await,
+            Repo::Tokio(repo) => repo.import_persistent(persistent_torrents).await,
+            Repo::TokioMutexStd(repo) => repo.import_persistent(persistent_torrents).await,
+            Repo::TokioMutexTokio(repo) => repo.import_persistent(persistent_torrents).await,
+        }
+    }
+    pub(crate) async fn remove(&self, key: &InfoHash) -> Option<EntrySingle> {
+        match self {
+            Repo::Std(repo) => repo.remove(key),
+            Repo::StdMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()),
+            Repo::StdMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()),
+            Repo::Tokio(repo) => repo.remove(key).await,
+            Repo::TokioMutexStd(repo) => Some(repo.remove(key).await?.lock().unwrap().clone()),
+            Repo::TokioMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()),
+        }
+    }
+    pub(crate) async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) {
+        match self {
+            Repo::Std(repo) => repo.remove_inactive_peers(current_cutoff),
+            Repo::StdMutexStd(repo) => repo.remove_inactive_peers(current_cutoff),
+            Repo::StdMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await,
+            Repo::Tokio(repo) => repo.remove_inactive_peers(current_cutoff).await,
+            Repo::TokioMutexStd(repo) => repo.remove_inactive_peers(current_cutoff).await,
+            Repo::TokioMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await,
+        }
+    }
+    pub(crate) async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) {
+        match self {
+            Repo::Std(repo) => repo.remove_peerless_torrents(policy),
+            Repo::StdMutexStd(repo) => repo.remove_peerless_torrents(policy),
+            Repo::StdMutexTokio(repo) => repo.remove_peerless_torrents(policy).await,
+            Repo::Tokio(repo) => repo.remove_peerless_torrents(policy).await,
+            Repo::TokioMutexStd(repo) => repo.remove_peerless_torrents(policy).await,
+            Repo::TokioMutexTokio(repo) => repo.remove_peerless_torrents(policy).await,
+        }
+    }
+    pub(crate) async fn update_torrent_with_peer_and_get_stats(
+        &self,
+        info_hash: &InfoHash,
+        peer: &peer::Peer,
+    ) -> (bool, SwarmMetadata) {
+        match self {
+            Repo::Std(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer),
+            Repo::StdMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer),
+            Repo::StdMutexTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await,
+            Repo::Tokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await,
+            Repo::TokioMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await,
+            Repo::TokioMutexTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await,
+        }
+    }
+    pub(crate) async fn insert(&self, info_hash: &InfoHash, torrent: EntrySingle) -> Option<EntrySingle> {
+        match self {
+            Repo::Std(repo) => repo.write().insert(*info_hash, torrent),
+            Repo::StdMutexStd(repo) => Some(repo.write().insert(*info_hash, torrent.into())?.lock().unwrap().clone()),
+            Repo::StdMutexTokio(repo) => {
+                let r = repo.write().insert(*info_hash, torrent.into());
+                match r {
+                    Some(t) => Some(t.lock().await.clone()),
+                    None => None,
+                }
+            }
+            Repo::Tokio(repo) => repo.write().await.insert(*info_hash, torrent),
+            Repo::TokioMutexStd(repo) => Some(repo.write().await.insert(*info_hash, torrent.into())?.lock().unwrap().clone()),
+            Repo::TokioMutexTokio(repo) => Some(repo.write().await.insert(*info_hash, torrent.into())?.lock().await.clone()),
+        }
+    }
+}
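A hypothetical test body showing why this enum exists: one async code path can exercise all six repository flavours behind the same awaited calls (`Repo` here is the enum defined above, so this would live in the same test crate):

    use torrust_tracker_primitives::info_hash::InfoHash;

    use crate::common::repo::Repo;

    // Hypothetical: assumes a freshly built, still empty repository.
    async fn exercise(repo: &Repo) {
        assert_eq!(repo.get_metrics().await.torrents, 0);
        assert!(repo.get(&InfoHash::default()).await.is_none());
    }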
diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs
new file mode 100644
index 000000000..33264c443
--- /dev/null
+++ b/packages/torrent-repository/tests/common/torrent.rs
@@ -0,0 +1,89 @@
+use std::net::SocketAddr;
+use std::sync::Arc;
+
+use torrust_tracker_configuration::TrackerPolicy;
+use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
+use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch};
+use torrust_tracker_torrent_repository::entry::{Entry as _, EntryAsync as _, EntrySync as _};
+use torrust_tracker_torrent_repository::{EntryMutexStd, EntryMutexTokio, EntrySingle};
+
+#[derive(Debug, Clone)]
+pub(crate) enum Torrent {
+    Single(EntrySingle),
+    MutexStd(EntryMutexStd),
+    MutexTokio(EntryMutexTokio),
+}
+
+impl Torrent {
+    pub(crate) async fn get_stats(&self) -> SwarmMetadata {
+        match self {
+            Torrent::Single(entry) => entry.get_stats(),
+            Torrent::MutexStd(entry) => entry.get_stats(),
+            Torrent::MutexTokio(entry) => entry.clone().get_stats().await,
+        }
+    }
+
+    pub(crate) async fn is_good(&self, policy: &TrackerPolicy) -> bool {
+        match self {
+            Torrent::Single(entry) => entry.is_good(policy),
+            Torrent::MutexStd(entry) => entry.is_good(policy),
+            Torrent::MutexTokio(entry) => entry.clone().check_good(policy).await,
+        }
+    }
+
+    pub(crate) async fn peers_is_empty(&self) -> bool {
+        match self {
+            Torrent::Single(entry) => entry.peers_is_empty(),
+            Torrent::MutexStd(entry) => entry.peers_is_empty(),
+            Torrent::MutexTokio(entry) => entry.clone().peers_is_empty().await,
+        }
+    }
+
+    pub(crate) async fn get_peers_len(&self) -> usize {
+        match self {
+            Torrent::Single(entry) => entry.get_peers_len(),
+            Torrent::MutexStd(entry) => entry.get_peers_len(),
+            Torrent::MutexTokio(entry) => entry.clone().get_peers_len().await,
+        }
+    }
+
+    pub(crate) async fn get_peers(&self, limit: Option<usize>) -> Vec<Arc<peer::Peer>> {
+        match self {
+            Torrent::Single(entry) => entry.get_peers(limit),
+            Torrent::MutexStd(entry) => entry.get_peers(limit),
+            Torrent::MutexTokio(entry) => entry.clone().get_peers(limit).await,
+        }
+    }
+
+    pub(crate) async fn get_peers_for_client(&self, client: &SocketAddr, limit: Option<usize>) -> Vec<Arc<peer::Peer>> {
+        match self {
+            Torrent::Single(entry) => entry.get_peers_for_client(client, limit),
+            Torrent::MutexStd(entry) => entry.get_peers_for_client(client, limit),
+            Torrent::MutexTokio(entry) => entry.clone().get_peers_for_client(client, limit).await,
+        }
+    }
+
+    pub(crate) async fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool {
+        match self {
+            Torrent::Single(entry) => entry.insert_or_update_peer(peer),
+            Torrent::MutexStd(entry) => entry.insert_or_update_peer(peer),
+            Torrent::MutexTokio(entry) => entry.clone().insert_or_update_peer(peer).await,
+        }
+    }
+
+    pub(crate) async fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata) {
+        match self {
+            Torrent::Single(entry) => entry.insert_or_update_peer_and_get_stats(peer),
+            Torrent::MutexStd(entry) => entry.insert_or_update_peer_and_get_stats(peer),
+            Torrent::MutexTokio(entry) => entry.clone().insert_or_update_peer_and_get_stats(peer).await,
+        }
+    }
+
+    pub(crate) async fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) {
+        match self {
+            Torrent::Single(entry) => entry.remove_inactive_peers(current_cutoff),
+            Torrent::MutexStd(entry) => entry.remove_inactive_peers(current_cutoff),
+            Torrent::MutexTokio(entry) => entry.clone().remove_inactive_peers(current_cutoff).await,
+        }
+    }
+}
diff --git a/packages/torrent-repository/tests/common/torrent_peer_builder.rs b/packages/torrent-repository/tests/common/torrent_peer_builder.rs
new file mode 100644
index 000000000..3a4e61ed2
--- /dev/null
+++ b/packages/torrent-repository/tests/common/torrent_peer_builder.rs
@@ -0,0 +1,88 @@
+use std::net::SocketAddr;
+
+use torrust_tracker_clock::clock::Time;
+use torrust_tracker_primitives::announce_event::AnnounceEvent;
+use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes};
+
+use crate::CurrentClock;
+
+#[derive(Debug, Default)]
+struct TorrentPeerBuilder {
+    peer: peer::Peer,
+}
+
+#[allow(dead_code)]
+impl TorrentPeerBuilder {
+    #[must_use]
+    fn new() -> Self {
+        Self {
+            peer: peer::Peer {
+                updated: CurrentClock::now(),
+                ..Default::default()
+            },
+        }
+    }
+
+    #[must_use]
+    fn with_event_completed(mut self) -> Self {
+        self.peer.event = AnnounceEvent::Completed;
+        self
+    }
+
+    #[must_use]
+    fn with_event_started(mut self) -> Self {
+        self.peer.event = AnnounceEvent::Started;
+        self
+    }
+
+    #[must_use]
+    fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self {
+        self.peer.peer_addr = peer_addr;
+        self
+    }
+
+    #[must_use]
+    fn with_peer_id(mut self, peer_id: peer::Id) -> Self {
+        self.peer.peer_id = peer_id;
+        self
+    }
+
+    #[must_use]
+    fn with_number_of_bytes_left(mut self, left: i64) -> Self {
+        self.peer.left = NumberOfBytes(left);
+        self
+    }
+
+    #[must_use]
+    fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self {
+        self.peer.updated = updated;
+        self
+    }
+
+    #[must_use]
+    fn into(self) -> peer::Peer {
+        self.peer
+    }
+}
+
+/// A torrent seeder is a peer with 0 bytes left to download which
+/// has not announced it has stopped
+#[must_use]
+pub fn a_completed_peer(id: i32) -> peer::Peer {
+    TorrentPeerBuilder::new()
+        .with_number_of_bytes_left(0)
+        .with_event_completed()
+        .with_peer_id(id.into())
+        .into()
+}
+
+/// A torrent leecher is a peer that is not a seeder.
+/// Leecher: left > 0 OR event = Stopped
+#[must_use]
+pub fn a_started_peer(id: i32) -> peer::Peer {
+    TorrentPeerBuilder::new()
+        .with_number_of_bytes_left(1)
+        .with_event_started()
+        .with_peer_id(id.into())
+        .into()
+}
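If a test ever needs a peer shape the two helpers do not cover, the builder composes. A hypothetical extra helper in the same module (assuming, as the tests below do, that `DurationSinceUnixEpoch` behaves like `std::time::Duration`):

    use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch};

    // Hypothetical: a seeder whose last announce is at the epoch, i.e. long expired.
    fn an_old_completed_peer(id: i32) -> peer::Peer {
        TorrentPeerBuilder::new()
            .with_peer_id(id.into())
            .with_number_of_bytes_left(0)
            .with_event_completed()
            .updated_at(DurationSinceUnixEpoch::from_secs(0))
            .into()
    }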
diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs
new file mode 100644
index 000000000..c39bef636
--- /dev/null
+++ b/packages/torrent-repository/tests/entry/mod.rs
@@ -0,0 +1,433 @@
+use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+use std::ops::Sub;
+use std::time::Duration;
+
+use rstest::{fixture, rstest};
+use torrust_tracker_clock::clock::stopped::Stopped as _;
+use torrust_tracker_clock::clock::{self, Time as _};
+use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT};
+use torrust_tracker_primitives::announce_event::AnnounceEvent;
+use torrust_tracker_primitives::peer::Peer;
+use torrust_tracker_primitives::{peer, NumberOfBytes};
+use torrust_tracker_torrent_repository::{EntryMutexStd, EntryMutexTokio, EntrySingle};
+
+use crate::common::torrent::Torrent;
+use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer};
+use crate::CurrentClock;
+
+#[fixture]
+fn single() -> Torrent {
+    Torrent::Single(EntrySingle::default())
+}
+#[fixture]
+fn standard_mutex() -> Torrent {
+    Torrent::MutexStd(EntryMutexStd::default())
+}
+
+#[fixture]
+fn mutex_tokio() -> Torrent {
+    Torrent::MutexTokio(EntryMutexTokio::default())
+}
+
+#[fixture]
+fn policy_none() -> TrackerPolicy {
+    TrackerPolicy::new(false, 0, false)
+}
+
+#[fixture]
+fn policy_persist() -> TrackerPolicy {
+    TrackerPolicy::new(false, 0, true)
+}
+
+#[fixture]
+fn policy_remove() -> TrackerPolicy {
+    TrackerPolicy::new(true, 0, false)
+}
+
+#[fixture]
+fn policy_remove_persist() -> TrackerPolicy {
+    TrackerPolicy::new(true, 0, true)
+}
+
+pub enum Makes {
+    Empty,
+    Started,
+    Completed,
+    Downloaded,
+    Three,
+}
+
+async fn make(torrent: &mut Torrent, makes: &Makes) -> Vec<Peer> {
+    match makes {
+        Makes::Empty => vec![],
+        Makes::Started => {
+            let peer = a_started_peer(1);
+            torrent.insert_or_update_peer(&peer).await;
+            vec![peer]
+        }
+        Makes::Completed => {
+            let peer = a_completed_peer(2);
+            torrent.insert_or_update_peer(&peer).await;
+            vec![peer]
+        }
+        Makes::Downloaded => {
+            let mut peer = a_started_peer(3);
+            torrent.insert_or_update_peer(&peer).await;
+            peer.event = AnnounceEvent::Completed;
+            peer.left = NumberOfBytes(0);
+            torrent.insert_or_update_peer(&peer).await;
+            vec![peer]
+        }
+        Makes::Three => {
+            let peer_1 = a_started_peer(1);
+            torrent.insert_or_update_peer(&peer_1).await;
+
+            let peer_2 = a_completed_peer(2);
+            torrent.insert_or_update_peer(&peer_2).await;
+
+            let mut peer_3 = a_started_peer(3);
+            torrent.insert_or_update_peer(&peer_3).await;
+            peer_3.event = AnnounceEvent::Completed;
+            peer_3.left = NumberOfBytes(0);
+            torrent.insert_or_update_peer(&peer_3).await;
+            vec![peer_1, peer_2, peer_3]
+        }
+    }
+}
+
+#[rstest]
+#[case::empty(&Makes::Empty)]
+#[tokio::test]
+async fn it_should_be_empty_by_default(
+    #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent,
+    #[case] makes: &Makes,
+) {
+    make(&mut torrent, makes).await;
+
+    assert_eq!(torrent.get_peers_len().await, 0);
+}
+
+#[rstest]
+#[case::empty(&Makes::Empty)]
+#[case::started(&Makes::Started)]
+#[case::completed(&Makes::Completed)]
+#[case::downloaded(&Makes::Downloaded)]
+#[case::three(&Makes::Three)]
+#[tokio::test]
+async fn it_should_check_if_entry_is_good(
+    #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent,
+    #[case] makes: &Makes,
+    #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy,
+) {
+    make(&mut torrent, makes).await;
+
+    let has_peers = !torrent.peers_is_empty().await;
+    let has_downloads = torrent.get_stats().await.downloaded != 0;
+
+    match (policy.remove_peerless_torrents, policy.persistent_torrent_completed_stat) {
+        // remove torrents without peers, and keep completed download stats
+        (true, true) => match (has_peers, has_downloads) {
+            // no peers, but has downloads
+            // peers, with or without downloads
+            (false, true) | (true, true | false) => assert!(torrent.is_good(&policy).await),
+            // no peers and no downloads
+            (false, false) => assert!(!torrent.is_good(&policy).await),
+        },
+        // remove torrents without peers and drop completed download stats
+        (true, false) => match (has_peers, has_downloads) {
+            // peers, with or without downloads
+            (true, true | false) => assert!(torrent.is_good(&policy).await),
+            // no peers and with or without downloads
+            (false, true | false) => assert!(!torrent.is_good(&policy).await),
+        },
+        // keep torrents without peers, but keep or drop completed download stats
+        (false, true | false) => assert!(torrent.is_good(&policy).await),
+    }
+}
+
+#[rstest]
+#[case::empty(&Makes::Empty)]
+#[case::started(&Makes::Started)]
+#[case::completed(&Makes::Completed)]
+#[case::downloaded(&Makes::Downloaded)]
+#[case::three(&Makes::Three)]
+#[tokio::test]
+async fn it_should_get_peers_for_torrent_entry(
+    #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent,
+    #[case] makes: &Makes,
+) {
+    let peers = make(&mut torrent, makes).await;
+
+    let torrent_peers = torrent.get_peers(None).await;
+
+    assert_eq!(torrent_peers.len(), peers.len());
+
+    for peer in torrent_peers {
+        assert!(peers.contains(&peer));
+    }
+}
+
+#[rstest]
+#[case::empty(&Makes::Empty)]
+#[case::started(&Makes::Started)]
+#[case::completed(&Makes::Completed)]
+#[case::downloaded(&Makes::Downloaded)]
+#[case::three(&Makes::Three)]
+#[tokio::test]
+async fn it_should_update_a_peer(
+    #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent,
+    #[case] makes: &Makes,
+) {
+    make(&mut torrent, makes).await;
+
+    // Make and insert a new peer.
+    let mut peer = a_started_peer(-1);
+    torrent.insert_or_update_peer(&peer).await;
+
+    // Get the Inserted Peer by Id.
+    let peers = torrent.get_peers(None).await;
+    let original = peers
+        .iter()
+        .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer))
+        .expect("it should find peer by id");
+
+    assert_eq!(original.event, AnnounceEvent::Started, "it should be as created");
+
+    // Announce "Completed" torrent download event.
+    peer.event = AnnounceEvent::Completed;
+    torrent.insert_or_update_peer(&peer).await;
+
+    // Get the Updated Peer by Id.
+    let peers = torrent.get_peers(None).await;
+    let updated = peers
+        .iter()
+        .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer))
+        .expect("it should find peer by id");
+
+    assert_eq!(updated.event, AnnounceEvent::Completed, "it should be updated");
+}
+
+#[rstest]
+#[case::empty(&Makes::Empty)]
+#[case::started(&Makes::Started)]
+#[case::completed(&Makes::Completed)]
+#[case::downloaded(&Makes::Downloaded)]
+#[case::three(&Makes::Three)]
+#[tokio::test]
+async fn it_should_remove_a_peer_upon_stopped_announcement(
+    #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent,
+    #[case] makes: &Makes,
+) {
+    use torrust_tracker_primitives::peer::ReadInfo as _;
+
+    make(&mut torrent, makes).await;
+
+    let mut peer = a_started_peer(-1);
+
+    torrent.insert_or_update_peer(&peer).await;
+
+    // The started peer should be inserted.
+    let peers = torrent.get_peers(None).await;
+    let original = peers
+        .iter()
+        .find(|p| p.get_id() == peer.get_id())
+        .expect("it should find peer by id");
+
+    assert_eq!(original.event, AnnounceEvent::Started);
+
+    // Change peer to "Stopped" and insert.
+    peer.event = AnnounceEvent::Stopped;
+    torrent.insert_or_update_peer(&peer).await;
+
+    // It should be removed now.
+    let peers = torrent.get_peers(None).await;
+
+    assert_eq!(
+        peers.iter().find(|p| p.get_id() == peer.get_id()),
+        None,
+        "it should be removed"
+    );
+}
+
+#[rstest]
+#[case::started(&Makes::Started)]
+#[case::completed(&Makes::Completed)]
+#[case::downloaded(&Makes::Downloaded)]
+#[case::three(&Makes::Three)]
+#[tokio::test]
+async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic(
+    #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent,
+    #[case] makes: &Makes,
+) {
+    make(&mut torrent, makes).await;
+    let downloaded = torrent.get_stats().await.downloaded;
+
+    let peers = torrent.get_peers(None).await;
+    let mut peer = **peers.first().expect("there should be a peer");
+
+    let is_already_completed = peer.event == AnnounceEvent::Completed;
+
+    // Announce "Completed" torrent download event.
+    peer.event = AnnounceEvent::Completed;
+
+    let (updated, stats) = torrent.insert_or_update_peer_and_get_stats(&peer).await;
+
+    if is_already_completed {
+        assert!(!updated);
+        assert_eq!(stats.downloaded, downloaded);
+    } else {
+        assert!(updated);
+        assert_eq!(stats.downloaded, downloaded + 1);
+    }
+}
+
+#[rstest]
+#[case::started(&Makes::Started)]
+#[case::completed(&Makes::Completed)]
+#[case::downloaded(&Makes::Downloaded)]
+#[case::three(&Makes::Three)]
+#[tokio::test]
+async fn it_should_update_a_peer_as_a_seeder(
+    #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent,
+    #[case] makes: &Makes,
+) {
+    let peers = make(&mut torrent, makes).await;
+    let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it should not be so many");
+
+    let peers = torrent.get_peers(None).await;
+    let mut peer = **peers.first().expect("there should be a peer");
+
+    let is_already_non_left = peer.left == NumberOfBytes(0);
+
+    // Set Bytes Left to Zero
+    peer.left = NumberOfBytes(0);
+    let (_, stats) = torrent.insert_or_update_peer_and_get_stats(&peer).await; // Add the peer
+
+    if is_already_non_left {
+        // it was already complete
+        assert_eq!(stats.complete, completed);
+    } else {
+        // now it is complete
+        assert_eq!(stats.complete, completed + 1);
+    }
+}
+
+#[rstest]
+#[case::started(&Makes::Started)]
+#[case::completed(&Makes::Completed)]
+#[case::downloaded(&Makes::Downloaded)]
+#[case::three(&Makes::Three)]
+#[tokio::test]
+async fn it_should_update_a_peer_as_incomplete(
+    #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent,
+    #[case] makes: &Makes,
+) {
+    let peers = make(&mut torrent, makes).await;
+    let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many");
+
+    let peers = torrent.get_peers(None).await;
+    let mut peer = **peers.first().expect("there should be a peer");
+
+    let completed_already = peer.left == NumberOfBytes(0);
+
+    // Set Bytes Left to a non-zero value
+    peer.left = NumberOfBytes(1);
+    let (_, stats) = torrent.insert_or_update_peer_and_get_stats(&peer).await; // Add the peer
+
+    if completed_already {
+        // now it is incomplete
+        assert_eq!(stats.incomplete, incomplete + 1);
+    } else {
+        // it was already incomplete
+        assert_eq!(stats.incomplete, incomplete);
+    }
+}
+
+#[rstest]
+#[case::started(&Makes::Started)]
+#[case::completed(&Makes::Completed)]
+#[case::downloaded(&Makes::Downloaded)]
+#[case::three(&Makes::Three)]
+#[tokio::test]
+async fn it_should_get_peers_excluding_the_client_socket(
+    #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent,
+    #[case] makes: &Makes,
+) {
+    make(&mut torrent, makes).await;
+
+    let peers = torrent.get_peers(None).await;
+    let mut peer = **peers.first().expect("there should be a peer");
+
+    let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081);
+
+    // for this test, we should not already use this socket.
+    assert_ne!(peer.peer_addr, socket);
+
+    // it should get the peer as it does not share the socket.
+    assert!(torrent.get_peers_for_client(&socket, None).await.contains(&peer.into()));
+
+    // set the address to the socket.
+    peer.peer_addr = socket;
+    torrent.insert_or_update_peer(&peer).await; // Add peer
+
+    // It should not include the peer that has the same socket.
+    assert!(!torrent.get_peers_for_client(&socket, None).await.contains(&peer.into()));
+}
+
+#[rstest]
+#[case::empty(&Makes::Empty)]
+#[case::started(&Makes::Started)]
+#[case::completed(&Makes::Completed)]
+#[case::downloaded(&Makes::Downloaded)]
+#[case::three(&Makes::Three)]
+#[tokio::test]
+async fn it_should_limit_the_number_of_peers_returned(
+    #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent,
+    #[case] makes: &Makes,
+) {
+    make(&mut torrent, makes).await;
+
+    // We add one more peer than the scrape limit
+    for peer_number in 1..=74 + 1 {
+        let mut peer = a_started_peer(1);
+        peer.peer_id = peer::Id::from(peer_number);
+        torrent.insert_or_update_peer(&peer).await;
+    }
+
+    let peers = torrent.get_peers(Some(TORRENT_PEERS_LIMIT)).await;
+
+    assert_eq!(peers.len(), 74);
+}
+
+#[rstest]
+#[case::empty(&Makes::Empty)]
+#[case::started(&Makes::Started)]
+#[case::completed(&Makes::Completed)]
+#[case::downloaded(&Makes::Downloaded)]
+#[case::three(&Makes::Three)]
+#[tokio::test]
+async fn it_should_remove_inactive_peers_beyond_cutoff(
+    #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent,
+    #[case] makes: &Makes,
+) {
+    const TIMEOUT: Duration = Duration::from_secs(120);
+    const EXPIRE: Duration = Duration::from_secs(121);
+
+    let peers = make(&mut torrent, makes).await;
+
+    let mut peer = a_completed_peer(-1);
+
+    let now = clock::Working::now();
+    clock::Stopped::local_set(&now);
+
+    peer.updated = now.sub(EXPIRE);
+
+    torrent.insert_or_update_peer(&peer).await;
+
+    assert_eq!(torrent.get_peers_len().await, peers.len() + 1);
+
+    let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default();
+    torrent.remove_inactive_peers(current_cutoff).await;
+
+    assert_eq!(torrent.get_peers_len().await, peers.len());
+}
diff --git a/packages/torrent-repository/tests/integration.rs b/packages/torrent-repository/tests/integration.rs
new file mode 100644
index 000000000..5aab67b03
--- /dev/null
+++ b/packages/torrent-repository/tests/integration.rs
@@ -0,0 +1,22 @@
+//! Integration tests.
+//!
+//! ```text
+//! cargo test --test integration
+//! ```
+
+use torrust_tracker_clock::clock;
+
+pub mod common;
+mod entry;
+mod repository;
+
+/// This code needs to be copied into each crate.
+/// Working version, for production.
+#[cfg(not(test))]
+#[allow(dead_code)]
+pub(crate) type CurrentClock = clock::Working;
+
+/// Stopped version, for testing.
+#[cfg(test)]
+#[allow(dead_code)]
+pub(crate) type CurrentClock = clock::Stopped;
diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs
new file mode 100644
index 000000000..7ffe17dd7
--- /dev/null
+++ b/packages/torrent-repository/tests/repository/mod.rs
@@ -0,0 +1,504 @@
+use std::collections::{BTreeMap, HashSet};
+use std::hash::{DefaultHasher, Hash, Hasher};
+
+use rstest::{fixture, rstest};
+use torrust_tracker_configuration::TrackerPolicy;
+use torrust_tracker_primitives::announce_event::AnnounceEvent;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::pagination::Pagination;
+use torrust_tracker_primitives::{NumberOfBytes, PersistentTorrents};
+use torrust_tracker_torrent_repository::entry::Entry as _;
+use torrust_tracker_torrent_repository::repository::{RwLockStd, RwLockTokio};
+use torrust_tracker_torrent_repository::EntrySingle;
+
+use crate::common::repo::Repo;
+use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer};
+
+#[fixture]
+fn standard() -> Repo {
+    Repo::Std(RwLockStd::default())
+}
+#[fixture]
+fn standard_mutex() -> Repo {
+    Repo::StdMutexStd(RwLockStd::default())
+}
+
+#[fixture]
+fn standard_tokio() -> Repo {
+    Repo::StdMutexTokio(RwLockStd::default())
+}
+
+#[fixture]
+fn tokio_std() -> Repo {
+    Repo::Tokio(RwLockTokio::default())
+}
+#[fixture]
+fn tokio_mutex() -> Repo {
+    Repo::TokioMutexStd(RwLockTokio::default())
+}
+
+#[fixture]
+fn tokio_tokio() -> Repo {
+    Repo::TokioMutexTokio(RwLockTokio::default())
+}
+
+type Entries = Vec<(InfoHash, EntrySingle)>;
+
+#[fixture]
+fn empty() -> Entries {
+    vec![]
+}
+
+#[fixture]
+fn default() -> Entries {
+    vec![(InfoHash::default(), EntrySingle::default())]
+}
+
+#[fixture]
+fn started() -> Entries {
+    let mut torrent = EntrySingle::default();
+    torrent.insert_or_update_peer(&a_started_peer(1));
+    vec![(InfoHash::default(), torrent)]
+}
+
+#[fixture]
+fn completed() -> Entries {
+    let mut torrent = EntrySingle::default();
+    torrent.insert_or_update_peer(&a_completed_peer(2));
+    vec![(InfoHash::default(), torrent)]
+}
+
+#[fixture]
+fn downloaded() -> Entries {
+    let mut torrent = EntrySingle::default();
+    let mut peer = a_started_peer(3);
+    torrent.insert_or_update_peer(&peer);
+    peer.event = AnnounceEvent::Completed;
+    peer.left = NumberOfBytes(0);
+    torrent.insert_or_update_peer(&peer);
+    vec![(InfoHash::default(), torrent)]
+}
+
+#[fixture]
+fn three() -> Entries {
+    let mut started = EntrySingle::default();
+    let started_h = &mut DefaultHasher::default();
+    started.insert_or_update_peer(&a_started_peer(1));
+    started.hash(started_h);
+
+    let mut completed = EntrySingle::default();
+    let completed_h = &mut DefaultHasher::default();
+    completed.insert_or_update_peer(&a_completed_peer(2));
+    completed.hash(completed_h);
+
+    let mut downloaded = EntrySingle::default();
+    let downloaded_h = &mut DefaultHasher::default();
+    let mut downloaded_peer = a_started_peer(3);
+    downloaded.insert_or_update_peer(&downloaded_peer);
+    downloaded_peer.event = AnnounceEvent::Completed;
+    downloaded_peer.left = NumberOfBytes(0);
+    downloaded.insert_or_update_peer(&downloaded_peer);
+    downloaded.hash(downloaded_h);
+
+    vec![
+        (InfoHash::from(&started_h.clone()), started),
+        (InfoHash::from(&completed_h.clone()), completed),
+        (InfoHash::from(&downloaded_h.clone()), downloaded),
+    ]
+}
+
+#[fixture]
+fn many_out_of_order() -> Entries {
+    let mut entries: HashSet<(InfoHash, EntrySingle)> = HashSet::default();
+
i in 0..408 {
+        let mut entry = EntrySingle::default();
+        entry.insert_or_update_peer(&a_started_peer(i));
+
+        entries.insert((InfoHash::from(&i), entry));
+    }
+
+    // We keep the random order of the hash set for the vector.
+    entries.iter().map(|(i, e)| (*i, e.clone())).collect()
+}
+
+#[fixture]
+fn many_hashed_in_order() -> Entries {
+    let mut entries: BTreeMap<InfoHash, EntrySingle> = BTreeMap::default();
+
+    for i in 0..408 {
+        let mut entry = EntrySingle::default();
+        entry.insert_or_update_peer(&a_started_peer(i));
+
+        let hash: &mut DefaultHasher = &mut DefaultHasher::default();
+        hash.write_i32(i);
+
+        entries.insert(InfoHash::from(&hash.clone()), entry);
+    }
+
+    // We return the entries in order from the b-tree map.
+    entries.iter().map(|(i, e)| (*i, e.clone())).collect()
+}
+
+#[fixture]
+fn persistent_empty() -> PersistentTorrents {
+    PersistentTorrents::default()
+}
+
+#[fixture]
+fn persistent_single() -> PersistentTorrents {
+    let hash = &mut DefaultHasher::default();
+
+    hash.write_u8(1);
+    let t = [(InfoHash::from(&hash.clone()), 0_u32)];
+
+    t.iter().copied().collect()
+}
+
+#[fixture]
+fn persistent_three() -> PersistentTorrents {
+    let hash = &mut DefaultHasher::default();
+
+    hash.write_u8(1);
+    let info_1 = InfoHash::from(&hash.clone());
+    hash.write_u8(2);
+    let info_2 = InfoHash::from(&hash.clone());
+    hash.write_u8(3);
+    let info_3 = InfoHash::from(&hash.clone());
+
+    let t = [(info_1, 1_u32), (info_2, 2_u32), (info_3, 3_u32)];
+
+    t.iter().copied().collect()
+}
+
+async fn make(repo: &Repo, entries: &Entries) {
+    for (info_hash, entry) in entries {
+        repo.insert(info_hash, entry.clone()).await;
+    }
+}
+
+#[fixture]
+fn paginated_limit_zero() -> Pagination {
+    Pagination::new(0, 0)
+}
+
+#[fixture]
+fn paginated_limit_one() -> Pagination {
+    Pagination::new(0, 1)
+}
+
+#[fixture]
+fn paginated_limit_one_offset_one() -> Pagination {
+    Pagination::new(1, 1)
+}
+
+#[fixture]
+fn policy_none() -> TrackerPolicy {
+    TrackerPolicy::new(false, 0, false)
+}
+
+#[fixture]
+fn policy_persist() -> TrackerPolicy {
+    TrackerPolicy::new(false, 0, true)
+}
+
+#[fixture]
+fn policy_remove() -> TrackerPolicy {
+    TrackerPolicy::new(true, 0, false)
+}
+
+#[fixture]
+fn policy_remove_persist() -> TrackerPolicy {
+    TrackerPolicy::new(true, 0, true)
+}
+
+#[rstest]
+#[case::empty(empty())]
+#[case::default(default())]
+#[case::started(started())]
+#[case::completed(completed())]
+#[case::downloaded(downloaded())]
+#[case::three(three())]
+#[case::out_of_order(many_out_of_order())]
+#[case::in_order(many_hashed_in_order())]
+#[tokio::test]
+async fn it_should_get_a_torrent_entry(
+    #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
+    #[case] entries: Entries,
+) {
+    make(&repo, &entries).await;
+
+    if let Some((info_hash, torrent)) = entries.first() {
+        assert_eq!(repo.get(info_hash).await, Some(torrent.clone()));
+    } else {
+        assert_eq!(repo.get(&InfoHash::default()).await, None);
+    }
+}
+
+#[rstest]
+#[case::empty(empty())]
+#[case::default(default())]
+#[case::started(started())]
+#[case::completed(completed())]
+#[case::downloaded(downloaded())]
+#[case::three(three())]
+#[case::out_of_order(many_out_of_order())]
+#[case::in_order(many_hashed_in_order())]
+#[tokio::test]
+async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order(
+    #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
+    #[case] entries: Entries,
+    many_out_of_order: Entries,
+) {
+    make(&repo, &entries).await;
+
+    let entries_a = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::<Vec<_>>();
+
+    make(&repo, &many_out_of_order).await;
+
+    let entries_b = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::<Vec<_>>();
+
+    // The order is "stable" if inserting more entries does not change the
+    // relative order of the entries that were already present.
+    let is_equal = entries_b.iter().take(entries_a.len()).copied().collect::<Vec<_>>() == entries_a;
+
+    let is_sorted = entries_b.windows(2).all(|w| w[0] <= w[1]);
+
+    assert!(
+        is_equal || is_sorted,
+        "The paginated order is neither stable (is_equal: {is_equal}) nor sorted (is_sorted: {is_sorted})."
+    );
+}
+
+#[rstest]
+#[case::empty(empty())]
+#[case::default(default())]
+#[case::started(started())]
+#[case::completed(completed())]
+#[case::downloaded(downloaded())]
+#[case::three(three())]
+#[case::out_of_order(many_out_of_order())]
+#[case::in_order(many_hashed_in_order())]
+#[tokio::test]
+async fn it_should_get_paginated(
+    #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
+    #[case] entries: Entries,
+    #[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination,
+) {
+    make(&repo, &entries).await;
+
+    let mut info_hashes = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::<Vec<_>>();
+    info_hashes.sort();
+
+    match paginated {
+        // It should return an empty result if the limit is zero.
+        Pagination { limit: 0, .. } => assert_eq!(repo.get_paginated(Some(&paginated)).await, vec![]),
+
+        // It should return a single entry if the limit is one.
+        Pagination { limit: 1, offset: 0 } => {
+            if info_hashes.is_empty() {
+                assert_eq!(repo.get_paginated(Some(&paginated)).await.len(), 0);
+            } else {
+                let page = repo.get_paginated(Some(&paginated)).await;
+                assert_eq!(page.len(), 1);
+                assert_eq!(page.first().map(|(i, _)| i), info_hashes.first());
+            }
+        }
+
+        // It should return only the second entry if both the limit and the offset are one.
+        Pagination { limit: 1, offset: 1 } => {
+            if info_hashes.len() > 1 {
+                let page = repo.get_paginated(Some(&paginated)).await;
+                assert_eq!(page.len(), 1);
+                assert_eq!(page[0].0, info_hashes[1]);
+            }
+        }
+        // The other cases are not yet tested.
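+        // For example: limits greater than one, or offsets beyond the number of stored entries.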
+ _ => {} + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_metrics( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, +) { + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + + make(&repo, &entries).await; + + let mut metrics = TorrentsMetrics::default(); + + for (_, torrent) in entries { + let stats = torrent.get_stats(); + + metrics.torrents += 1; + metrics.incomplete += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + } + + assert_eq!(repo.get_metrics().await, metrics); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_import_persistent_torrents( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, + #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, +) { + make(&repo, &entries).await; + + let mut downloaded = repo.get_metrics().await.downloaded; + persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); + + repo.import_persistent(&persistent_torrents).await; + + assert_eq!(repo.get_metrics().await.downloaded, downloaded); + + for (entry, _) in persistent_torrents { + assert!(repo.get(&entry).await.is_some()); + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_an_entry( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, +) { + make(&repo, &entries).await; + + for (info_hash, torrent) in entries { + assert_eq!(repo.get(&info_hash).await, Some(torrent.clone())); + assert_eq!(repo.remove(&info_hash).await, Some(torrent)); + + assert_eq!(repo.get(&info_hash).await, None); + assert_eq!(repo.remove(&info_hash).await, None); + } + + assert_eq!(repo.get_metrics().await.torrents, 0); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_inactive_peers( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, +) { + use std::ops::Sub as _; + use std::time::Duration; + + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self, Time as _}; + use torrust_tracker_primitives::peer; + + use crate::CurrentClock; + + const TIMEOUT: Duration = Duration::from_secs(120); + 
const EXPIRE: Duration = Duration::from_secs(121); // one second past TIMEOUT, so the peer below is guaranteed to be inactive
+
+    make(&repo, &entries).await;
+
+    let info_hash: InfoHash;
+    let mut peer: peer::Peer;
+
+    // Generate a new infohash and peer.
+    {
+        let hash = &mut DefaultHasher::default();
+        hash.write_u8(255);
+        info_hash = InfoHash::from(&hash.clone());
+        peer = a_completed_peer(-1);
+    }
+
+    // Set the last updated time of the peer to be 121 seconds ago.
+    {
+        let now = clock::Working::now();
+        clock::Stopped::local_set(&now);
+
+        peer.updated = now.sub(EXPIRE);
+    }
+
+    // Insert the infohash and peer into the repository
+    // and verify there is an extra torrent entry.
+    {
+        repo.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await;
+        assert_eq!(repo.get_metrics().await.torrents, entries.len() as u64 + 1);
+    }
+
+    // Verify that this new peer was inserted into the repository.
+    {
+        let entry = repo.get(&info_hash).await.expect("it_should_get_some");
+        assert!(entry.get_peers(None).contains(&peer.into()));
+    }
+
+    // Remove peers that have not been updated since the timeout (120 seconds ago).
+    {
+        repo.remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed"))
+            .await;
+    }
+
+    // Verify that this peer was removed from the repository.
+    {
+        let entry = repo.get(&info_hash).await.expect("it_should_get_some");
+        assert!(!entry.get_peers(None).contains(&peer.into()));
+    }
+}
+
+#[rstest]
+#[case::empty(empty())]
+#[case::default(default())]
+#[case::started(started())]
+#[case::completed(completed())]
+#[case::downloaded(downloaded())]
+#[case::three(three())]
+#[case::out_of_order(many_out_of_order())]
+#[case::in_order(many_hashed_in_order())]
+#[tokio::test]
+async fn it_should_remove_peerless_torrents(
+    #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo,
+    #[case] entries: Entries,
+    #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy,
+) {
+    make(&repo, &entries).await;
+
+    repo.remove_peerless_torrents(&policy).await;
+
+    let torrents = repo.get_paginated(None).await;
+
+    for (_, entry) in torrents {
+        assert!(entry.is_good(&policy));
+    }
+}
diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs
index 09b624566..396e63682 100644
--- a/src/bootstrap/app.rs
+++ b/src/bootstrap/app.rs
@@ -13,13 +13,13 @@
 //! 4. Initialize the domain tracker.
 use std::sync::Arc;
+use torrust_tracker_clock::static_time;
 use torrust_tracker_configuration::Configuration;
 use super::config::initialize_configuration;
 use crate::bootstrap;
 use crate::core::services::tracker_factory;
 use crate::core::Tracker;
-use crate::shared::clock::static_time;
 use crate::shared::crypto::ephemeral_instance_keys;
 /// It loads the configuration from the environment and builds the main domain [`Tracker`] struct.
diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs
index 6647e0249..300813430 100644
--- a/src/bootstrap/jobs/torrent_cleanup.rs
+++ b/src/bootstrap/jobs/torrent_cleanup.rs
@@ -44,7 +44,7 @@ pub fn start_job(config: &Configuration, tracker: &Arc<Tracker>) -> JoinHa
             if let Some(tracker) = weak_tracker.upgrade() {
                 let start_time = Utc::now().time();
                 info!("Cleaning up torrents..");
-                tracker.cleanup_torrents().await;
+                tracker.cleanup_torrents();
                 info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds());
             } else {
                 break;
diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs
index df1e9bc9a..501696df4 100644
--- a/src/console/clients/checker/checks/http.rs
+++ b/src/console/clients/checker/checks/http.rs
@@ -3,12 +3,12 @@ use std::str::FromStr;
 use colored::Colorize;
 use log::debug;
 use reqwest::Url as ServiceUrl;
+use torrust_tracker_primitives::info_hash::InfoHash;
 use url::Url;
 use crate::console::clients::checker::console::Console;
 use crate::console::clients::checker::printer::Printer;
 use crate::console::clients::checker::service::{CheckError, CheckResult};
-use crate::shared::bit_torrent::info_hash::InfoHash;
 use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder;
 use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce;
 use crate::shared::bit_torrent::tracker::http::client::responses::scrape;
diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs
index 890375b75..47a2a1a00 100644
--- a/src/console/clients/checker/checks/udp.rs
+++ b/src/console/clients/checker/checks/udp.rs
@@ -4,12 +4,12 @@ use aquatic_udp_protocol::{Port, TransactionId};
 use colored::Colorize;
 use hex_literal::hex;
 use log::debug;
+use torrust_tracker_primitives::info_hash::InfoHash;
 use crate::console::clients::checker::console::Console;
 use crate::console::clients::checker::printer::Printer;
 use crate::console::clients::checker::service::{CheckError, CheckResult};
 use crate::console::clients::udp::checker;
-use crate::shared::bit_torrent::info_hash::InfoHash;
 const ASSIGNED_BY_OS: u16 = 0;
 const RANDOM_TRANSACTION_ID: i32 = -888_840_697;
diff --git a/src/console/clients/http/app.rs b/src/console/clients/http/app.rs
index 80db07231..511fb6628 100644
--- a/src/console/clients/http/app.rs
+++ b/src/console/clients/http/app.rs
@@ -18,8 +18,8 @@ use std::str::FromStr;
 use anyhow::Context;
 use clap::{Parser, Subcommand};
 use reqwest::Url;
+use torrust_tracker_primitives::info_hash::InfoHash;
-use crate::shared::bit_torrent::info_hash::InfoHash;
 use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder;
 use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce;
 use crate::shared::bit_torrent::tracker::http::client::responses::scrape;
diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs
index b9e31155d..540a25f30 100644
--- a/src/console/clients/udp/app.rs
+++ b/src/console/clients/udp/app.rs
@@ -64,11 +64,11 @@ use aquatic_udp_protocol::Response::{self, AnnounceIpv4, AnnounceIpv6, Scrape};
 use aquatic_udp_protocol::{Port, TransactionId};
 use clap::{Parser, Subcommand};
 use log::{debug, LevelFilter};
+use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash;
 use url::Url;
 use crate::console::clients::udp::checker;
 use crate::console::clients::udp::responses::{AnnounceResponseDto, ScrapeResponseDto};
-use crate::shared::bit_torrent::info_hash::InfoHash as TorrustInfoHash;
 const ASSIGNED_BY_OS: u16 = 0;
 const RANDOM_TRANSACTION_ID: i32 = -888_840_697;
diff --git a/src/console/clients/udp/checker.rs b/src/console/clients/udp/checker.rs
index b35139e49..12b8d764c 100644
--- a/src/console/clients/udp/checker.rs
+++ b/src/console/clients/udp/checker.rs
@@ -8,8 +8,8 @@ use aquatic_udp_protocol::{
 };
 use log::debug;
 use thiserror::Error;
+use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash;
-use crate::shared::bit_torrent::info_hash::InfoHash as TorrustInfoHash;
 use crate::shared::bit_torrent::tracker::udp::client::{UdpClient, UdpTrackerClient};
 #[derive(Error, Debug)]
diff --git a/src/core/auth.rs b/src/core/auth.rs
index 9fc9d6e7b..b5326a373 100644
--- a/src/core/auth.rs
+++ b/src/core/auth.rs
@@ -13,7 +13,7 @@
 //!
 //! ```rust,no_run
 //! use torrust_tracker::core::auth::Key;
-//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch;
+//! use torrust_tracker_primitives::DurationSinceUnixEpoch;
 //!
 //! pub struct ExpiringKey {
 //!     /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ`
@@ -47,10 +47,13 @@ use rand::distributions::Alphanumeric;
 use rand::{thread_rng, Rng};
 use serde::{Deserialize, Serialize};
 use thiserror::Error;
+use torrust_tracker_clock::clock::Time;
+use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc;
 use torrust_tracker_located_error::{DynError, LocatedError};
+use torrust_tracker_primitives::DurationSinceUnixEpoch;
 use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH;
-use crate::shared::clock::{convert_from_timestamp_to_datetime_utc, Current, DurationSinceUnixEpoch, Time, TimeNow};
+use crate::CurrentClock;
 #[must_use]
 /// It generates a new random 32-char authentication [`ExpiringKey`]
@@ -69,7 +72,7 @@ pub fn generate(lifetime: Duration) -> ExpiringKey {
     ExpiringKey {
         key: random_id.parse::<Key>().unwrap(),
-        valid_until: Current::add(&lifetime).unwrap(),
+        valid_until: CurrentClock::now_add(&lifetime).unwrap(),
     }
 }
@@ -81,7 +84,7 @@ pub fn generate(lifetime: Duration) -> ExpiringKey {
 ///
 /// Will return `Error::KeyExpired` if `auth_key.valid_until` is in the past.
 pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> {
-    let current_time: DurationSinceUnixEpoch = Current::now();
+    let current_time: DurationSinceUnixEpoch = CurrentClock::now();
     if auth_key.valid_until < current_time {
         Err(Error::KeyExpired {
@@ -212,8 +215,10 @@ mod tests {
     use std::str::FromStr;
     use std::time::Duration;
+    use torrust_tracker_clock::clock;
+    use torrust_tracker_clock::clock::stopped::Stopped as _;
+
     use crate::core::auth;
-    use crate::shared::clock::{Current, StoppedTime};
     #[test]
     fn should_be_parsed_from_an_string() {
@@ -227,7 +232,7 @@
     #[test]
     fn should_be_displayed() {
         // Set the time to the current time.
-        Current::local_set_to_unix_epoch();
+        clock::Stopped::local_set_to_unix_epoch();
         let expiring_key = auth::generate(Duration::from_secs(0));
@@ -247,18 +252,18 @@
     #[test]
     fn should_be_generate_and_verified() {
         // Set the time to the current time.
-        Current::local_set_to_system_time_now();
+        clock::Stopped::local_set_to_system_time_now();
         // Make key that is valid for 19 seconds.
         let expiring_key = auth::generate(Duration::from_secs(19));
         // Mock the time has passed 10 sec.
-        Current::local_add(&Duration::from_secs(10)).unwrap();
+        clock::Stopped::local_add(&Duration::from_secs(10)).unwrap();
         assert!(auth::verify(&expiring_key).is_ok());
         // Mock the time has passed another 10 sec.
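        // The key was valid for 19 seconds and 20 seconds have now elapsed, so verification must fail.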
-        Current::local_add(&Duration::from_secs(10)).unwrap();
+        clock::Stopped::local_add(&Duration::from_secs(10)).unwrap();
         assert!(auth::verify(&expiring_key).is_err());
     }
diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs
index b80b11987..c08aed76a 100644
--- a/src/core/databases/mod.rs
+++ b/src/core/databases/mod.rs
@@ -22,7 +22,7 @@
 //! ---|---|---
 //! `id` | 1 | Autoincrement id
 //! `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1
-//! `completed` | 20 | The number of peers that have ever completed downloading the torrent associated to this entry. See [`Entry`](crate::core::torrent::Entry) for more information.
+//! `completed` | 20 | The number of peers that have ever completed downloading the torrent associated to this entry. See [`Entry`](torrust_tracker_torrent_repository::entry::Entry) for more information.
 //!
 //! > **NOTICE**: The peer list for a torrent is not persisted. Since peers have to re-announce themselves at intervals, the data is
 //! regenerated again after some minutes.
@@ -51,10 +51,11 @@ pub mod sqlite;
 use std::marker::PhantomData;
 use async_trait::async_trait;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::PersistentTorrents;
 use self::error::Error;
 use crate::core::auth::{self, Key};
-use crate::shared::bit_torrent::info_hash::InfoHash;
 struct Builder<T>
 where
@@ -116,16 +117,16 @@ pub trait Database: Sync + Send {
     ///
     /// It returns an array of tuples with the torrent
     /// [`InfoHash`] and the
-    /// [`completed`](crate::core::torrent::Entry::completed) counter
+    /// [`downloaded`](torrust_tracker_torrent_repository::entry::Torrent::downloaded) counter
     /// which is the number of times the torrent has been downloaded.
-    /// See [`Entry::completed`](crate::core::torrent::Entry::completed).
+    /// See [`Entry::downloaded`](torrust_tracker_torrent_repository::entry::Torrent::downloaded).
     ///
     /// # Context: Torrent Metrics
     ///
     /// # Errors
     ///
     /// Will return `Err` if unable to load.
-    async fn load_persistent_torrents(&self) -> Result<Vec<(InfoHash, u32)>, Error>;
+    async fn load_persistent_torrents(&self) -> Result<PersistentTorrents, Error>;
     /// It saves the torrent metrics data into the database.
     ///
diff --git a/src/core/databases/mysql.rs b/src/core/databases/mysql.rs
index c46300829..ca95fa0b9 100644
--- a/src/core/databases/mysql.rs
+++ b/src/core/databases/mysql.rs
@@ -8,12 +8,12 @@ use r2d2::Pool;
 use r2d2_mysql::mysql::prelude::Queryable;
 use r2d2_mysql::mysql::{params, Opts, OptsBuilder};
 use r2d2_mysql::MySqlConnectionManager;
-use torrust_tracker_primitives::DatabaseDriver;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::{DatabaseDriver, PersistentTorrents};
 use super::{Database, Error};
 use crate::core::auth::{self, Key};
 use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH;
-use crate::shared::bit_torrent::info_hash::InfoHash;
 const DRIVER: DatabaseDriver = DatabaseDriver::MySQL;
@@ -105,7 +105,7 @@ impl Database for Mysql {
     }
     /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents).
-    async fn load_persistent_torrents(&self) -> Result<Vec<(InfoHash, u32)>, Error> {
+    async fn load_persistent_torrents(&self) -> Result<PersistentTorrents, Error> {
         let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?;
         let torrents = conn.query_map(
@@ -116,7 +116,7 @@ impl Database for Mysql {
             },
         )?;
-        Ok(torrents)
+        Ok(torrents.iter().copied().collect())
     }
     /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys).
diff --git a/src/core/databases/sqlite.rs b/src/core/databases/sqlite.rs
index bf2d6b8b9..53a01f80c 100644
--- a/src/core/databases/sqlite.rs
+++ b/src/core/databases/sqlite.rs
@@ -5,12 +5,11 @@ use std::str::FromStr;
 use async_trait::async_trait;
 use r2d2::Pool;
 use r2d2_sqlite::SqliteConnectionManager;
-use torrust_tracker_primitives::DatabaseDriver;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::{DatabaseDriver, DurationSinceUnixEpoch, PersistentTorrents};
 use super::{Database, Error};
 use crate::core::auth::{self, Key};
-use crate::shared::bit_torrent::info_hash::InfoHash;
-use crate::shared::clock::DurationSinceUnixEpoch;
 const DRIVER: DatabaseDriver = DatabaseDriver::Sqlite3;
@@ -90,7 +89,7 @@ impl Database for Sqlite {
     }
     /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents).
-    async fn load_persistent_torrents(&self) -> Result<Vec<(InfoHash, u32)>, Error> {
+    async fn load_persistent_torrents(&self) -> Result<PersistentTorrents, Error> {
         let conn = self.pool.get().map_err(|e| (e, DRIVER))?;
         let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?;
@@ -102,12 +101,7 @@
             Ok((info_hash, completed))
         })?;
-        //torrent_iter?;
-        //let torrent_iter = torrent_iter.unwrap();
-
-        let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(std::result::Result::ok).collect();
-
-        Ok(torrents)
+        Ok(torrent_iter.filter_map(std::result::Result::ok).collect())
     }
     /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys).
diff --git a/src/core/error.rs b/src/core/error.rs
index f1e622673..a826de349 100644
--- a/src/core/error.rs
+++ b/src/core/error.rs
@@ -9,6 +9,7 @@
 use std::panic::Location;
 use torrust_tracker_located_error::LocatedError;
+use torrust_tracker_primitives::info_hash::InfoHash;
 /// Authentication or authorization error returned by the core `Tracker`
 #[derive(thiserror::Error, Debug, Clone)]
@@ -25,7 +26,7 @@ pub enum Error {
     // Authorization errors
     #[error("The torrent: {info_hash}, is not whitelisted, {location}")]
     TorrentNotWhitelisted {
-        info_hash: crate::shared::bit_torrent::info_hash::InfoHash,
+        info_hash: InfoHash,
         location: &'static Location<'static>,
     },
 }
diff --git a/src/core/mod.rs b/src/core/mod.rs
index dac298462..6628426c1 100644
--- a/src/core/mod.rs
+++ b/src/core/mod.rs
@@ -52,13 +52,13 @@
 //! The tracker responds to the peer with the list of other peers in the swarm so that
 //! the peer can contact them to start downloading pieces of the file from them.
 //!
-//! Once you have instantiated the `Tracker` you can `announce` a new [`Peer`] with:
+//! Once you have instantiated the `Tracker` you can `announce` a new [`peer::Peer`] with:
 //!
 //! ```rust,no_run
-//! use torrust_tracker::core::peer;
-//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
-//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch;
-//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes};
+//! use torrust_tracker_primitives::peer;
+//! use torrust_tracker_primitives::info_hash::InfoHash;
+//! use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes};
+//! use torrust_tracker_primitives::announce_event::AnnounceEvent;
 //! use std::net::SocketAddr;
 //! use std::net::IpAddr;
 //! use std::net::Ipv4Addr;
@@ -97,16 +97,16 @@
 //! The returned struct is:
 //!
 //! ```rust,no_run
-//! use torrust_tracker::core::peer::Peer;
+//! use torrust_tracker_primitives::peer;
 //! use torrust_tracker_configuration::AnnouncePolicy;
 //!
 //! pub struct AnnounceData {
-//!     pub peers: Vec<Peer>,
-//!     pub swarm_stats: SwarmStats,
+//!     pub peers: Vec<peer::Peer>,
+//!     pub swarm_stats: SwarmMetadata,
 //!     pub policy: AnnouncePolicy, // the tracker announce policy.
 //! }
 //!
-//! pub struct SwarmStats {
+//! pub struct SwarmMetadata {
 //!     pub completed: u32, // The number of peers that have ever completed downloading
 //!     pub seeders: u32, // The number of active peers that have completed downloading (seeders)
 //!     pub leechers: u32, // The number of active peers that have not completed downloading (leechers)
@@ -136,7 +136,7 @@
 //! The returned struct is:
 //!
 //! ```rust,no_run
-//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
+//! use torrust_tracker_primitives::info_hash::InfoHash;
 //! use std::collections::HashMap;
 //!
 //! pub struct ScrapeData {
@@ -165,7 +165,7 @@
 //! There are two data structures for infohashes: byte arrays and hex strings:
 //!
 //! ```rust,no_run
-//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
+//! use torrust_tracker_primitives::info_hash::InfoHash;
 //! use std::str::FromStr;
 //!
 //! let info_hash: InfoHash = [255u8; 20].into();
@@ -232,16 +232,11 @@
 //!     pub incomplete: u32, // The number of active peers that have not completed downloading (leechers)
 //! }
 //!
-//! pub struct SwarmStats {
-//!     pub completed: u32, // The number of peers that have ever completed downloading
-//!     pub seeders: u32, // The number of active peers that have completed downloading (seeders)
-//!     pub leechers: u32, // The number of active peers that have not completed downloading (leechers)
-//! }
 //! ```
 //!
 //! > **NOTICE**: `complete` or `completed` peers are the peers that have completed downloading, but only the active ones are considered "seeders".
 //!
-//! `SwarmStats` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmStats`
+//! The `SwarmMetadata` struct follows the naming conventions for `scrape` responses (see [BEP 48](https://www.bittorrent.org/beps/bep_0048.html)), and the same struct
 //! is used for the rest of cases.
 //!
 //! Refer to [`torrent`] module for more details about these data structures.
@@ -251,14 +246,14 @@
 //! A `Peer` is the struct used by the `Tracker` to keep peers data:
 //!
 //! ```rust,no_run
-//! use torrust_tracker::core::peer::Id;
+//! use torrust_tracker_primitives::peer;
 //! use std::net::SocketAddr;
-//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch;
+//! use torrust_tracker_primitives::DurationSinceUnixEpoch;
 //! use aquatic_udp_protocol::NumberOfBytes;
 //! use aquatic_udp_protocol::AnnounceEvent;
 //!
 //! pub struct Peer {
-//!     pub peer_id: Id, // The peer ID
+//!     pub peer_id: peer::Id, // The peer ID
 //!     pub peer_addr: SocketAddr, // Peer socket address
 //!     pub updated: DurationSinceUnixEpoch, // Last time (timestamp) when the peer was updated
 //!     pub uploaded: NumberOfBytes, // Number of bytes the peer has uploaded so far
@@ -434,34 +429,35 @@ pub mod auth;
 pub mod databases;
 pub mod error;
-pub mod peer;
 pub mod services;
 pub mod statistics;
 pub mod torrent;
-use std::collections::{BTreeMap, HashMap};
+pub mod peer_tests;
+
+use std::collections::HashMap;
 use std::net::IpAddr;
 use std::panic::Location;
 use std::sync::Arc;
 use std::time::Duration;
 use derive_more::Constructor;
-use futures::future::join_all;
 use log::debug;
 use tokio::sync::mpsc::error::SendError;
-use torrust_tracker_configuration::{AnnouncePolicy, Configuration};
-use torrust_tracker_primitives::TrackerMode;
+use torrust_tracker_clock::clock::Time;
+use torrust_tracker_configuration::{AnnouncePolicy, Configuration, TrackerPolicy, TORRENT_PEERS_LIMIT};
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
+use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics;
+use torrust_tracker_primitives::{peer, TrackerMode};
+use torrust_tracker_torrent_repository::entry::EntrySync;
+use torrust_tracker_torrent_repository::repository::Repository;
 use self::auth::Key;
 use self::error::Error;
-use self::peer::Peer;
-use self::torrent::repository::{RepositoryAsyncSingle, TRepositoryAsync};
+use self::torrent::Torrents;
 use crate::core::databases::Database;
-use crate::core::torrent::{SwarmMetadata, SwarmStats};
-use crate::shared::bit_torrent::info_hash::InfoHash;
-
-/// The maximum number of returned peers for a torrent.
-pub const TORRENT_PEERS_LIMIT: usize = 74;
+use crate::CurrentClock;
 /// The domain layer tracker service.
 ///
@@ -481,42 +477,21 @@ pub struct Tracker {
     policy: TrackerPolicy,
     keys: tokio::sync::RwLock<std::collections::HashMap<Key, auth::ExpiringKey>>,
     whitelist: tokio::sync::RwLock<std::collections::HashSet<InfoHash>>,
-    pub torrents: Arc<RepositoryAsyncSingle>,
+    pub torrents: Arc<Torrents>,
     stats_event_sender: Option<Box<dyn statistics::EventSender>>,
     stats_repository: statistics::Repo,
    external_ip: Option<IpAddr>,
     on_reverse_proxy: bool,
 }
-/// Structure that holds general `Tracker` torrents metrics.
-///
-/// Metrics are aggregate values for all torrents.
-#[derive(Copy, Clone, Debug, PartialEq, Default)]
-pub struct TorrentsMetrics {
-    /// Total number of seeders for all torrents
-    pub seeders: u64,
-    /// Total number of peers that have ever completed downloading for all torrents.
-    pub completed: u64,
-    /// Total number of leechers for all torrents.
-    pub leechers: u64,
-    /// Total number of torrents.
-    pub torrents: u64,
-}
-
-#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)]
-pub struct TrackerPolicy {
-    pub remove_peerless_torrents: bool,
-    pub max_peer_timeout: u32,
-    pub persistent_torrent_completed_stat: bool,
-}
 /// Structure that holds the data returned by the `announce` request.
 #[derive(Clone, Debug, PartialEq, Constructor, Default)]
 pub struct AnnounceData {
     /// The list of peers that are downloading the same torrent.
     /// It excludes the peer that made the request.
-    pub peers: Vec<Peer>,
+    pub peers: Vec<Arc<peer::Peer>>,
     /// Swarm statistics
-    pub stats: SwarmStats,
+    pub stats: SwarmMetadata,
     pub policy: AnnouncePolicy,
 }
@@ -579,7 +554,7 @@ impl Tracker {
             mode,
             keys: tokio::sync::RwLock::new(std::collections::HashMap::new()),
             whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()),
-            torrents: Arc::new(RepositoryAsyncSingle::new()),
+            torrents: Arc::default(),
             stats_event_sender,
             stats_repository,
             database,
@@ -631,7 +606,7 @@ impl Tracker {
     /// # Context: Tracker
     ///
     /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html).
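     /// The returned peer list never includes the peer that is announcing.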
-    pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceData {
+    pub async fn announce(&self, info_hash: &InfoHash, peer: &mut peer::Peer, remote_client_ip: &IpAddr) -> AnnounceData {
         // code-review: maybe instead of mutating the peer we could just return
         // a tuple with the new peer and the announce data: (Peer, AnnounceData).
         // It could even be a different struct: `StoredPeer` or `PublicPeer`.
@@ -654,7 +629,7 @@
         // we should update the torrent and get the stats before we get the peer list.
         let stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await;
-        let peers = self.get_torrent_peers_for_peer(info_hash, peer).await;
+        let peers = self.get_torrent_peers_for_peer(info_hash, peer);
         AnnounceData {
             peers,
@@ -673,7 +648,7 @@
         for info_hash in info_hashes {
             let swarm_metadata = match self.authorize(info_hash).await {
-                Ok(()) => self.get_swarm_metadata(info_hash).await,
+                Ok(()) => self.get_swarm_metadata(info_hash),
                 Err(_) => SwarmMetadata::zeroed(),
             };
             scrape_data.add_file(info_hash, swarm_metadata);
@@ -683,11 +658,9 @@
     }
     /// It returns the data for a `scrape` response.
-    async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata {
-        let torrents = self.torrents.get_torrents().await;
-
-        match torrents.get(info_hash) {
-            Some(torrent_entry) => torrent_entry.get_swarm_metadata(),
+    fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata {
+        match self.torrents.get(info_hash) {
+            Some(torrent_entry) => torrent_entry.get_stats(),
             None => SwarmMetadata::default(),
         }
     }
@@ -703,47 +676,25 @@
     pub async fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> {
         let persistent_torrents = self.database.load_persistent_torrents().await?;
-        let mut torrents = self.torrents.get_torrents_mut().await;
-
-        for (info_hash, completed) in persistent_torrents {
-            // Skip if torrent entry already exists
-            if torrents.contains_key(&info_hash) {
-                continue;
-            }
-
-            let torrent_entry = torrent::Entry {
-                peers: BTreeMap::default(),
-                completed,
-            };
-
-            torrents.insert(info_hash, torrent_entry);
-        }
+        self.torrents.import_persistent(&persistent_torrents);
         Ok(())
     }
-    async fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec<Peer> {
-        let read_lock = self.torrents.get_torrents().await;
-
-        match read_lock.get(info_hash) {
+    fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> Vec<Arc<peer::Peer>> {
+        match self.torrents.get(info_hash) {
             None => vec![],
-            Some(entry) => entry
-                .get_peers_for_peer(peer, TORRENT_PEERS_LIMIT)
-                .into_iter()
-                .copied()
-                .collect(),
+            Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(TORRENT_PEERS_LIMIT)),
         }
     }
     /// # Context: Tracker
     ///
     /// Get all torrent peers for a given torrent
-    pub async fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec<Peer> {
-        let read_lock = self.torrents.get_torrents().await;
-
-        match read_lock.get(info_hash) {
+    pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec<Arc<peer::Peer>> {
+        match self.torrents.get(info_hash) {
             None => vec![],
-            Some(entry) => entry.get_peers(TORRENT_PEERS_LIMIT).into_iter().copied().collect(),
+            Some(entry) => entry.get_peers(Some(TORRENT_PEERS_LIMIT)),
         }
     }
@@ -752,11 +703,11 @@ impl Tracker {
     /// needed for a `announce` request response.
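    /// When the tracker policy enables `persistent_torrent_completed_stat`, the updated completed-downloads counter is also written to the database.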
     ///
     /// # Context: Tracker
-    pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwarmStats {
+    pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata {
         // code-review: consider splitting the function in two (command and query segregation).
         // `update_torrent_with_peer` and `get_stats`
-        let (stats, stats_updated) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer).await;
+        let (stats_updated, stats) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer);
         if self.policy.persistent_torrent_completed_stat && stats_updated {
             let completed = stats.downloaded;
@@ -775,72 +726,21 @@
     ///
     /// # Panics
     /// Panics if unable to get the torrent metrics.
-    pub async fn get_torrents_metrics(&self) -> TorrentsMetrics {
-        let arc_torrents_metrics = Arc::new(tokio::sync::Mutex::new(TorrentsMetrics {
-            seeders: 0,
-            completed: 0,
-            leechers: 0,
-            torrents: 0,
-        }));
-
-        let db = self.torrents.get_torrents().await.clone();
-
-        let futures = db
-            .values()
-            .map(|torrent_entry| {
-                let torrent_entry = torrent_entry.clone();
-                let torrents_metrics = arc_torrents_metrics.clone();
-
-                async move {
-                    tokio::spawn(async move {
-                        let (seeders, completed, leechers) = torrent_entry.get_stats();
-                        torrents_metrics.lock().await.seeders += u64::from(seeders);
-                        torrents_metrics.lock().await.completed += u64::from(completed);
-                        torrents_metrics.lock().await.leechers += u64::from(leechers);
-                        torrents_metrics.lock().await.torrents += 1;
-                    })
-                    .await
-                    .expect("Error torrent_metrics spawn");
-                }
-            })
-            .collect::<Vec<_>>();
-
-        join_all(futures).await;
-
-        let torrents_metrics = Arc::try_unwrap(arc_torrents_metrics).expect("Could not unwrap arc_torrents_metrics");
-
-        torrents_metrics.into_inner()
+    pub fn get_torrents_metrics(&self) -> TorrentsMetrics {
+        self.torrents.get_metrics()
     }
     /// Remove inactive peers and (optionally) peerless torrents
     ///
     /// # Context: Tracker
-    pub async fn cleanup_torrents(&self) {
-        let mut torrents_lock = self.torrents.get_torrents_mut().await;
-
+    pub fn cleanup_torrents(&self) {
         // If we don't need to remove torrents we will use the faster iter
         if self.policy.remove_peerless_torrents {
-            let mut cleaned_torrents_map: BTreeMap<InfoHash, torrent::Entry> = BTreeMap::new();
-
-            for (info_hash, torrent_entry) in &mut *torrents_lock {
-                torrent_entry.remove_inactive_peers(self.policy.max_peer_timeout);
-
-                if torrent_entry.peers.is_empty() {
-                    continue;
-                }
-
-                if self.policy.persistent_torrent_completed_stat && torrent_entry.completed == 0 {
-                    continue;
-                }
-
-                cleaned_torrents_map.insert(*info_hash, torrent_entry.clone());
-            }
-
-            *torrents_lock = cleaned_torrents_map;
+            self.torrents.remove_peerless_torrents(&self.policy);
         } else {
-            for torrent_entry in (*torrents_lock).values_mut() {
-                torrent_entry.remove_inactive_peers(self.policy.max_peer_timeout);
-            }
+            let current_cutoff =
+                CurrentClock::now_sub(&Duration::from_secs(u64::from(self.policy.max_peer_timeout))).unwrap_or_default();
+            self.torrents.remove_inactive_peers(current_cutoff);
         }
     }
@@ -1092,15 +992,17 @@ mod tests {
     use std::net::{IpAddr, Ipv4Addr, SocketAddr};
     use std::str::FromStr;
+    use std::sync::Arc;
-    use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes};
+    use torrust_tracker_primitives::announce_event::AnnounceEvent;
+    use torrust_tracker_primitives::info_hash::InfoHash;
+    use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes};
    use 
torrust_tracker_test_helpers::configuration; use crate::core::peer::{self, Peer}; use crate::core::services::tracker_factory; use crate::core::{TorrentsMetrics, Tracker}; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; + use crate::shared::bit_torrent::info_hash::fixture::gen_seeded_infohash; fn public_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_public()) @@ -1208,14 +1110,14 @@ mod tests { async fn should_collect_torrent_metrics() { let tracker = public_tracker(); - let torrents_metrics = tracker.get_torrents_metrics().await; + let torrents_metrics = tracker.get_torrents_metrics(); assert_eq!( torrents_metrics, TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 0, + complete: 0, + downloaded: 0, + incomplete: 0, torrents: 0 } ); @@ -1230,9 +1132,9 @@ mod tests { tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - let peers = tracker.get_torrent_peers(&info_hash).await; + let peers = tracker.get_torrent_peers(&info_hash); - assert_eq!(peers, vec![peer]); + assert_eq!(peers, vec![Arc::new(peer)]); } #[tokio::test] @@ -1244,7 +1146,7 @@ mod tests { tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - let peers = tracker.get_torrent_peers_for_peer(&info_hash, &peer).await; + let peers = tracker.get_torrent_peers_for_peer(&info_hash, &peer); assert_eq!(peers, vec![]); } @@ -1257,23 +1159,53 @@ mod tests { .update_torrent_with_peer_and_get_stats(&sample_info_hash(), &leecher()) .await; - let torrent_metrics = tracker.get_torrents_metrics().await; + let torrent_metrics = tracker.get_torrents_metrics(); assert_eq!( torrent_metrics, TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 1, + complete: 0, + downloaded: 0, + incomplete: 1, torrents: 1, } ); } + #[tokio::test] + async fn it_should_get_many_the_torrent_metrics() { + let tracker = public_tracker(); + + let start_time = std::time::Instant::now(); + for i in 0..1_000_000 { + tracker + .update_torrent_with_peer_and_get_stats(&gen_seeded_infohash(&i), &leecher()) + .await; + } + let result_a = start_time.elapsed(); + + let start_time = std::time::Instant::now(); + let torrent_metrics = tracker.get_torrents_metrics(); + let result_b = start_time.elapsed(); + + assert_eq!( + (torrent_metrics), + (TorrentsMetrics { + complete: 0, + downloaded: 0, + incomplete: 1_000_000, + torrents: 1_000_000, + }), + "{result_a:?} {result_b:?}" + ); + } + mod for_all_config_modes { mod handling_an_announce_request { + use std::sync::Arc; + use crate::core::tests::the_tracker::{ peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2, }; @@ -1399,7 +1331,7 @@ mod tests { let mut peer = sample_peer_2(); let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; - assert_eq!(announce_data.peers, vec![previously_announced_peer]); + assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]); } mod it_should_update_the_swarm_stats_for_the_torrent { @@ -1450,9 +1382,10 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; + use torrust_tracker_primitives::info_hash::InfoHash; + use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; use crate::core::{ScrapeData, SwarmMetadata}; - use crate::shared::bit_torrent::info_hash::InfoHash; #[tokio::test] async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent( @@ -1607,12 +1540,13 @@ mod tests { mod handling_an_scrape_request 
{ + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use crate::core::tests::the_tracker::{ complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, }; - use crate::core::torrent::SwarmMetadata; use crate::core::ScrapeData; - use crate::shared::bit_torrent::info_hash::InfoHash; #[test] fn it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() { @@ -1656,8 +1590,11 @@ mod tests { use std::str::FromStr; use std::time::Duration; + use torrust_tracker_clock::clock::Time; + use crate::core::auth; use crate::core::tests::the_tracker::private_tracker; + use crate::CurrentClock; #[tokio::test] async fn it_should_generate_the_expiring_authentication_keys() { @@ -1665,7 +1602,7 @@ mod tests { let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - assert_eq!(key.valid_until, Duration::from_secs(100)); + assert_eq!(key.valid_until, CurrentClock::now_add(&Duration::from_secs(100)).unwrap()); } #[tokio::test] @@ -1751,7 +1688,10 @@ mod tests { } mod handling_torrent_persistence { - use aquatic_udp_protocol::AnnounceEvent; + + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_torrent_repository::entry::EntrySync; + use torrust_tracker_torrent_repository::repository::Repository; use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; @@ -1772,20 +1712,17 @@ mod tests { assert_eq!(swarm_stats.downloaded, 1); // Remove the newly updated torrent from memory - tracker.torrents.get_torrents_mut().await.remove(&info_hash); + tracker.torrents.remove(&info_hash); tracker.load_torrents_from_database().await.unwrap(); - let torrents = tracker.torrents.get_torrents().await; - assert!(torrents.contains_key(&info_hash)); - - let torrent_entry = torrents.get(&info_hash).unwrap(); + let torrent_entry = tracker.torrents.get(&info_hash).expect("it should be able to get entry"); // It persists the number of completed peers. 
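        // (the counter is restored from the database even though the in-memory entry was removed)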
-        assert_eq!(torrent_entry.completed, 1);
+        assert_eq!(torrent_entry.get_stats().downloaded, 1);
            // It does not persist the peers
-        assert!(torrent_entry.peers.is_empty());
+        assert!(torrent_entry.peers_is_empty());
        }
    }
}
diff --git a/src/core/peer_tests.rs b/src/core/peer_tests.rs
new file mode 100644
index 000000000..d30d73db3
--- /dev/null
+++ b/src/core/peer_tests.rs
@@ -0,0 +1,47 @@
+#![cfg(test)]
+
+use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+
+use torrust_tracker_clock::clock::stopped::Stopped as _;
+use torrust_tracker_clock::clock::{self, Time};
+use torrust_tracker_primitives::announce_event::AnnounceEvent;
+use torrust_tracker_primitives::{peer, NumberOfBytes};
+
+use crate::CurrentClock;
+
+#[test]
+fn it_should_be_serializable() {
+    clock::Stopped::local_set_to_unix_epoch();
+
+    let torrent_peer = peer::Peer {
+        peer_id: peer::Id(*b"-qB0000-000000000000"),
+        peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080),
+        updated: CurrentClock::now(),
+        uploaded: NumberOfBytes(0),
+        downloaded: NumberOfBytes(0),
+        left: NumberOfBytes(0),
+        event: AnnounceEvent::Started,
+    };
+
+    let raw_json = serde_json::to_string(&torrent_peer).unwrap();
+
+    let expected_raw_json = r#"
+        {
+            "peer_id": {
+                "id": "0x2d7142303030302d303030303030303030303030",
+                "client": "qBittorrent"
+            },
+            "peer_addr":"126.0.0.1:8080",
+            "updated":0,
+            "uploaded":0,
+            "downloaded":0,
+            "left":0,
+            "event":"Started"
+        }
+    "#;
+
+    assert_eq!(
+        serde_json::from_str::<serde_json::Value>(&raw_json).unwrap(),
+        serde_json::from_str::<serde_json::Value>(expected_raw_json).unwrap()
+    );
+}
diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs
index 3578c53aa..ee1c0c4fa 100644
--- a/src/core/services/statistics/mod.rs
+++ b/src/core/services/statistics/mod.rs
@@ -40,8 +40,10 @@ pub mod setup;
 use std::sync::Arc;
+use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics;
+
 use crate::core::statistics::Metrics;
-use crate::core::{TorrentsMetrics, Tracker};
+use crate::core::Tracker;
 /// All the metrics collected by the tracker.
 #[derive(Debug, PartialEq)]
@@ -59,7 +61,7 @@ pub struct TrackerMetrics {
 /// It returns all the [`TrackerMetrics`]
 pub async fn get_metrics(tracker: Arc<Tracker>) -> TrackerMetrics {
-    let torrents_metrics = tracker.get_torrents_metrics().await;
+    let torrents_metrics = tracker.get_torrents_metrics();
     let stats = tracker.get_stats().await;
     TrackerMetrics {
@@ -86,6 +88,7 @@ mod tests {
     use std::sync::Arc;
     use torrust_tracker_configuration::Configuration;
+    use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics;
     use torrust_tracker_test_helpers::configuration;
     use crate::core;
@@ -105,7 +108,7 @@
         assert_eq!(
             tracker_metrics,
             TrackerMetrics {
-                torrents_metrics: core::TorrentsMetrics::default(),
+                torrents_metrics: TorrentsMetrics::default(),
                 protocol_metrics: core::statistics::Metrics::default(),
             }
         );
diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs
index fc24e7c4c..ce44af3a8 100644
--- a/src/core/services/torrent.rs
+++ b/src/core/services/torrent.rs
@@ -6,11 +6,13 @@
 //! - [`get_torrents`]: it returns data about some torrent in bulk excluding the peer list.
 use std::sync::Arc;
-use serde::Deserialize;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::pagination::Pagination;
+use torrust_tracker_primitives::peer;
+use torrust_tracker_torrent_repository::entry::EntrySync;
+use torrust_tracker_torrent_repository::repository::Repository;
-use crate::core::peer::Peer;
 use crate::core::Tracker;
-use crate::shared::bit_torrent::info_hash::InfoHash;
 /// It contains all the information the tracker has about a torrent
 #[derive(Debug, PartialEq)]
@@ -24,7 +26,7 @@ pub struct Info {
     /// The total number of leechers for this torrent. Peers that actively downloading this torrent
     pub leechers: u64,
     /// The swarm: the list of peers that are actively trying to download or serving this torrent
-    pub peers: Option<Vec<Peer>>,
+    pub peers: Option<Vec<peer::Peer>>,
 }
 /// It contains only part of the information the tracker has about a torrent
@@ -42,92 +44,39 @@ pub struct BasicInfo {
     pub leechers: u64,
 }
-/// A struct to keep information about the page when results are being paginated
-#[derive(Deserialize)]
-pub struct Pagination {
-    /// The page number, starting at 0
-    pub offset: u32,
-    /// Page size. The number of results per page
-    pub limit: u32,
-}
-
-impl Pagination {
-    #[must_use]
-    pub fn new(offset: u32, limit: u32) -> Self {
-        Self { offset, limit }
-    }
-
-    #[must_use]
-    pub fn new_with_options(offset_option: Option<u32>, limit_option: Option<u32>) -> Self {
-        let offset = match offset_option {
-            Some(offset) => offset,
-            None => Pagination::default_offset(),
-        };
-        let limit = match limit_option {
-            Some(offset) => offset,
-            None => Pagination::default_limit(),
-        };
-
-        Self { offset, limit }
-    }
-
-    #[must_use]
-    pub fn default_offset() -> u32 {
-        0
-    }
-
-    #[must_use]
-    pub fn default_limit() -> u32 {
-        4000
-    }
-}
-
-impl Default for Pagination {
-    fn default() -> Self {
-        Self {
-            offset: Self::default_offset(),
-            limit: Self::default_limit(),
-        }
-    }
-}
-
 /// It returns all the information the tracker has about one torrent in a [Info] struct.
 pub async fn get_torrent_info(tracker: Arc<Tracker>, info_hash: &InfoHash) -> Option<Info> {
-    let db = tracker.torrents.get_torrents().await;
-
-    let torrent_entry_option = db.get(info_hash);
+    let torrent_entry_option = tracker.torrents.get(info_hash);
     let torrent_entry = torrent_entry_option?;
-    let (seeders, completed, leechers) = torrent_entry.get_stats();
+    let stats = torrent_entry.get_stats();
-    let peers = torrent_entry.get_all_peers();
+    let peers = torrent_entry.get_peers(None);
     let peers = Some(peers.iter().map(|peer| (**peer)).collect());
     Some(Info {
         info_hash: *info_hash,
-        seeders: u64::from(seeders),
-        completed: u64::from(completed),
-        leechers: u64::from(leechers),
+        seeders: u64::from(stats.complete),
+        completed: u64::from(stats.downloaded),
+        leechers: u64::from(stats.incomplete),
         peers,
     })
 }
 /// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list.
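 /// Passing `None` as the pagination returns the complete, unpaginated list.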
-pub async fn get_torrents_page(tracker: Arc<Tracker>, pagination: &Pagination) -> Vec<BasicInfo> {
-    let db = tracker.torrents.get_torrents().await;
-
+pub async fn get_torrents_page(tracker: Arc<Tracker>, pagination: Option<&Pagination>) -> Vec<BasicInfo> {
     let mut basic_infos: Vec<BasicInfo> = vec![];
-    for (info_hash, torrent_entry) in db.iter().skip(pagination.offset as usize).take(pagination.limit as usize) {
-        let (seeders, completed, leechers) = torrent_entry.get_stats();
+    for (info_hash, torrent_entry) in tracker.torrents.get_paginated(pagination) {
+        let stats = torrent_entry.get_stats();
         basic_infos.push(BasicInfo {
-            info_hash: *info_hash,
-            seeders: u64::from(seeders),
-            completed: u64::from(completed),
-            leechers: u64::from(leechers),
+            info_hash,
+            seeders: u64::from(stats.complete),
+            completed: u64::from(stats.downloaded),
+            leechers: u64::from(stats.incomplete),
         });
     }
@@ -136,19 +85,15 @@
 /// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list.
 pub async fn get_torrents(tracker: Arc<Tracker>, info_hashes: &[InfoHash]) -> Vec<BasicInfo> {
-    let db = tracker.torrents.get_torrents().await;
-
     let mut basic_infos: Vec<BasicInfo> = vec![];
     for info_hash in info_hashes {
-        if let Some(entry) = db.get(info_hash) {
-            let (seeders, completed, leechers) = entry.get_stats();
-
+        if let Some(stats) = tracker.torrents.get(info_hash).map(|t| t.get_stats()) {
             basic_infos.push(BasicInfo {
                 info_hash: *info_hash,
-                seeders: u64::from(seeders),
-                completed: u64::from(completed),
-                leechers: u64::from(leechers),
+                seeders: u64::from(stats.complete),
+                completed: u64::from(stats.downloaded),
+                leechers: u64::from(stats.incomplete),
             });
         }
     }
@@ -160,10 +105,8 @@
 mod tests {
     use std::net::{IpAddr, Ipv4Addr, SocketAddr};
-    use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes};
-
-    use crate::core::peer;
-    use crate::shared::clock::DurationSinceUnixEpoch;
+    use torrust_tracker_primitives::announce_event::AnnounceEvent;
+    use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes};
     fn sample_peer() -> peer::Peer {
         peer::Peer {
@@ -183,12 +126,12 @@
         use std::sync::Arc;
         use torrust_tracker_configuration::Configuration;
+        use torrust_tracker_primitives::info_hash::InfoHash;
         use torrust_tracker_test_helpers::configuration;
         use crate::core::services::torrent::tests::sample_peer;
         use crate::core::services::torrent::{get_torrent_info, Info};
         use crate::core::services::tracker_factory;
-        use crate::shared::bit_torrent::info_hash::InfoHash;
         pub fn tracker_configuration() -> Configuration {
             configuration::ephemeral()
@@ -238,12 +181,12 @@
         use std::sync::Arc;
         use torrust_tracker_configuration::Configuration;
+        use torrust_tracker_primitives::info_hash::InfoHash;
         use torrust_tracker_test_helpers::configuration;
         use crate::core::services::torrent::tests::sample_peer;
         use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination};
         use crate::core::services::tracker_factory;
-        use crate::shared::bit_torrent::info_hash::InfoHash;
         pub fn tracker_configuration() -> Configuration {
             configuration::ephemeral()
         }
@@ -253,7 +196,7 @@
         async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() {
             let tracker = Arc::new(tracker_factory(&tracker_configuration()));
-            let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await;
+            let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await;
             assert_eq!(torrents, vec![]);
         }
@@ -269,7 +212,7 @@
                 .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer())
                 .await;
-            let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await;
+            let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await;
             assert_eq!(
                 torrents,
@@ -301,7 +244,7 @@
             let offset = 0;
             let limit = 1;
-            let torrents = get_torrents_page(tracker.clone(), &Pagination::new(offset, limit)).await;
+            let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::new(offset, limit))).await;
             assert_eq!(torrents.len(), 1);
         }
@@ -325,7 +268,7 @@
             let offset = 1;
             let limit = 4000;
-            let torrents = get_torrents_page(tracker.clone(), &Pagination::new(offset, limit)).await;
+            let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::new(offset, limit))).await;
             assert_eq!(torrents.len(), 1);
             assert_eq!(
@@ -355,7 +298,7 @@
                 .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer())
                 .await;
-            let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await;
+            let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await;
             assert_eq!(
                 torrents,
diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs
index c4a1b0df9..ab78de683 100644
--- a/src/core/torrent/mod.rs
+++ b/src/core/torrent/mod.rs
@@ -2,8 +2,8 @@
 //!
 //! There are two main data structures:
 //!
-//! - A torrent [`Entry`]: it contains all the information stored by the tracker for one torrent.
-//! - The [`SwarmMetadata`]: it contains aggregate information that can be derived from the torrent entries.
+//! - A torrent [`Entry`](torrust_tracker_torrent_repository::entry::Entry): it contains all the information stored by the tracker for one torrent.
+//! - The [`SwarmMetadata`](torrust_tracker_primitives::swarm_metadata::SwarmMetadata): it contains aggregate information that can be derived from the torrent entries.
 //!
 //! A "swarm" is a network of peers that are trying to download the same torrent.
 //!
@@ -11,8 +11,6 @@
 //! That's the most valuable information peers want to get from the tracker, because it allows them to
 //! start downloading the torrent from those peers.
 //!
-//! > **NOTICE**: that both swarm data (torrent entries) and swarm metadata (aggregate counters) are related to only one torrent.
-//!
 //! The "swarm metadata" contains aggregate data derived from the torrent entries. There are two types of data:
 //!
 //! - For **active peers**: metrics related to the current active peers in the swarm.
@@ -27,460 +25,7 @@
 //! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network.
 //!   Peers that do not have a full copy of the torrent data are called "leechers".
 //!
-//! > **NOTICE**: that both [`SwarmMetadata`] and [`SwarmStats`] contain the same information. [`SwarmMetadata`] is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html).
-pub mod repository;
-
-use std::time::Duration;
-
-use aquatic_udp_protocol::AnnounceEvent;
-use derive_more::Constructor;
-use serde::{Deserialize, Serialize};
-
-use super::peer::{self, Peer};
-use crate::shared::clock::{Current, TimeNow};
-
-/// A data structure containing all the information about a torrent in the tracker.
-/// -/// This is the tracker entry for a given torrent and contains the swarm data, -/// that's the list of all the peers trying to download the same torrent. -/// The tracker keeps one entry like this for every torrent. -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct Entry { - /// The swarm: a network of peers that are all trying to download the torrent associated to this entry - #[serde(skip)] - pub peers: std::collections::BTreeMap, - /// The number of peers that have ever completed downloading the torrent associated to this entry - pub completed: u32, -} - -/// Swarm statistics for one torrent. -/// Swarm metadata dictionary in the scrape response. -/// -/// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) -#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] -pub struct SwarmMetadata { - /// (i.e `completed`): The number of peers that have ever completed downloading - pub downloaded: u32, // - /// (i.e `seeders`): The number of active peers that have completed downloading (seeders) - pub complete: u32, //seeders - /// (i.e `leechers`): The number of active peers that have not completed downloading (leechers) - pub incomplete: u32, -} - -impl SwarmMetadata { - #[must_use] - pub fn zeroed() -> Self { - Self::default() - } -} - -/// [`SwarmStats`] has the same form as [`SwarmMetadata`] -pub type SwarmStats = SwarmMetadata; - -impl Entry { - #[must_use] - pub fn new() -> Entry { - Entry { - peers: std::collections::BTreeMap::new(), - completed: 0, - } - } - - /// It updates a peer and returns true if the number of complete downloads have increased. - /// - /// The number of peers that have complete downloading is synchronously updated when peers are updated. - /// That's the total torrent downloads counter. - pub fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { - let mut did_torrent_stats_change: bool = false; - - match peer.event { - AnnounceEvent::Stopped => { - let _: Option = self.peers.remove(&peer.peer_id); - } - AnnounceEvent::Completed => { - let peer_old = self.peers.insert(peer.peer_id, *peer); - // Don't count if peer was not previously known and not already completed. - if peer_old.is_some_and(|p| p.event != AnnounceEvent::Completed) { - self.completed += 1; - did_torrent_stats_change = true; - } - } - _ => { - let _: Option = self.peers.insert(peer.peer_id, *peer); - } - } - - did_torrent_stats_change - } - - /// Get all swarm peers. - #[must_use] - pub fn get_all_peers(&self) -> Vec<&peer::Peer> { - self.peers.values().collect() - } - - /// Get swarm peers, limiting the result. - #[must_use] - pub fn get_peers(&self, limit: usize) -> Vec<&peer::Peer> { - self.peers.values().take(limit).collect() - } - - /// It returns the list of peers for a given peer client. - /// - /// It filters out the input peer, typically because we want to return this - /// list of peers to that client peer. - #[must_use] - pub fn get_all_peers_for_peer(&self, client: &Peer) -> Vec<&peer::Peer> { - self.peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer.peer_addr != client.peer_addr) - .collect() - } - - /// It returns the list of peers for a given peer client, limiting the - /// result. - /// - /// It filters out the input peer, typically because we want to return this - /// list of peers to that client peer. 
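As a usage sketch of the `get_peers_for_peer` method declared just below (the method belongs to the old `Entry` API being deleted here; the sketch assumes the pre-refactor `crate::core` paths and the `TorrentPeerBuilder` helper from the tests further down in this file):

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

// Two peers announce on the same torrent entry.
let mut entry = Entry::new();

let alice = TorrentPeerBuilder::default()
    .with_peer_id(peer::Id([1u8; 20]))
    .with_peer_address(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080))
    .into();
let bob = TorrentPeerBuilder::default()
    .with_peer_id(peer::Id([2u8; 20]))
    .with_peer_address(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 8080))
    .into();

entry.insert_or_update_peer(&alice);
entry.insert_or_update_peer(&bob);

// Bob gets Alice back but never himself: the requesting client is
// filtered out by socket address. 74 is the hardcoded TORRENT_PEERS_LIMIT.
let peers = entry.get_peers_for_peer(&bob, 74);
assert_eq!(peers, vec![&alice]);
```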
- #[must_use] - pub fn get_peers_for_peer(&self, client: &Peer, limit: usize) -> Vec<&peer::Peer> { - self.peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer.peer_addr != client.peer_addr) - // Limit the number of peers on the result - .take(limit) - .collect() - } - - /// It returns the swarm metadata (statistics) as a tuple: - /// - /// `(seeders, completed, leechers)` - #[allow(clippy::cast_possible_truncation)] - #[must_use] - pub fn get_stats(&self) -> (u32, u32, u32) { - let seeders: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; - let leechers: u32 = self.peers.len() as u32 - seeders; - (seeders, self.completed, leechers) - } - - /// It returns the swarm metadata (statistics) as an struct - #[must_use] - pub fn get_swarm_metadata(&self) -> SwarmMetadata { - // code-review: consider using always this function instead of `get_stats`. - let (seeders, completed, leechers) = self.get_stats(); - SwarmMetadata { - complete: seeders, - downloaded: completed, - incomplete: leechers, - } - } - - /// It removes peer from the swarm that have not been updated for more than `max_peer_timeout` seconds - pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); - self.peers.retain(|_, peer| peer.updated > current_cutoff); - } -} - -impl Default for Entry { - fn default() -> Self { - Self::new() - } -} - -#[cfg(test)] -mod tests { - - mod torrent_entry { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::ops::Sub; - use std::time::Duration; - - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - - use crate::core::torrent::Entry; - use crate::core::{peer, TORRENT_PEERS_LIMIT}; - use crate::shared::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; - - struct TorrentPeerBuilder { - peer: peer::Peer, - } - - impl TorrentPeerBuilder { - pub fn default() -> TorrentPeerBuilder { - let default_peer = peer::Peer { - peer_id: peer::Id([0u8; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: Current::now(), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - TorrentPeerBuilder { peer: default_peer } - } - - pub fn with_event_completed(mut self) -> Self { - self.peer.event = AnnounceEvent::Completed; - self - } - - pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { - self.peer.peer_id = peer_id; - self - } - - pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes(left); - self - } - - pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self - } - - pub fn into(self) -> peer::Peer { - self.peer - } - } - - /// A torrent seeder is a peer with 0 bytes left to download which - /// has not announced it has stopped - fn a_torrent_seeder() -> peer::Peer { - TorrentPeerBuilder::default() - .with_number_of_bytes_left(0) - .with_event_completed() - .into() - } - - /// A torrent leecher is a peer that is not a seeder. 
- /// Leecher: left > 0 OR event = Stopped - fn a_torrent_leecher() -> peer::Peer { - TorrentPeerBuilder::default() - .with_number_of_bytes_left(1) - .with_event_completed() - .into() - } - - #[test] - fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = Entry::new(); - - assert_eq!(torrent_entry.get_all_peers().len(), 0); - } - - #[test] - fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = Entry::new(); - let torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - assert_eq!(*torrent_entry.get_all_peers()[0], torrent_peer); - assert_eq!(torrent_entry.get_all_peers().len(), 1); - } - - #[test] - fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = Entry::new(); - let torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - assert_eq!(torrent_entry.get_all_peers(), vec![&torrent_peer]); - } - - #[test] - fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert_eq!(torrent_entry.get_all_peers()[0].event, AnnounceEvent::Completed); - } - - #[test] - fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Stopped; // Update the peer - torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert_eq!(torrent_entry.get_all_peers().len(), 0); - } - - #[test] - fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - let stats_have_changed = torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert!(stats_have_changed); - } - - #[test] - fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( - ) { - let mut torrent_entry = Entry::new(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); - - // Add a peer that did not exist before in the entry - let torrent_stats_have_not_changed = !torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); - - assert!(torrent_stats_have_not_changed); - } - - #[test] - fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() - { - let mut torrent_entry = Entry::new(); - let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer - - // Get peers excluding 
the one we have just added - let peers = torrent_entry.get_all_peers_for_peer(&torrent_peer); - - assert_eq!(peers.len(), 0); - } - - #[test] - fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { - let mut torrent_entry = Entry::new(); - - let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); - - // Add peer 1 - let torrent_peer_1 = TorrentPeerBuilder::default() - .with_peer_address(SocketAddr::new(peer_ip, 8080)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer_1); - - // Add peer 2 - let torrent_peer_2 = TorrentPeerBuilder::default() - .with_peer_address(SocketAddr::new(peer_ip, 8081)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer_2); - - // Get peers for peer 1 - let peers = torrent_entry.get_all_peers_for_peer(&torrent_peer_1); - - // The peer 2 using the same IP but different port should be included - assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); - assert_eq!(peers[0].peer_addr.port(), 8081); - } - - fn peer_id_from_i32(number: i32) -> peer::Id { - let peer_id = number.to_le_bytes(); - peer::Id([ - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], - peer_id[2], peer_id[3], - ]) - } - - #[test] - fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = Entry::new(); - - // We add one more peer than the scrape limit - for peer_number in 1..=74 + 1 { - let torrent_peer = TorrentPeerBuilder::default() - .with_peer_id(peer_id_from_i32(peer_number)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer); - } - - let peers = torrent_entry.get_peers(TORRENT_PEERS_LIMIT); - - assert_eq!(peers.len(), 74); - } - - #[test] - fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = Entry::new(); - let torrent_seeder = a_torrent_seeder(); - - torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder - - assert_eq!(torrent_entry.get_stats().0, 1); - } - - #[test] - fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = Entry::new(); - let torrent_leecher = a_torrent_leecher(); - - torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher - - assert_eq!(torrent_entry.get_stats().2, 1); - } - - #[test] - fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( - ) { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - // Announce "Completed" torrent download event. - torrent_peer.event = AnnounceEvent::Completed; - torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer - - let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().1; - - assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); - } - - #[test] - fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = Entry::new(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); - - // Announce "Completed" torrent download event. - // It's the first event announced from this peer. 
- torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); // Add the peer - - let number_of_peers_with_completed_torrent = torrent_entry.get_stats().1; - - assert_eq!(number_of_peers_with_completed_torrent, 0); - } - - #[test] - fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = Entry::new(); - - let timeout = 120u32; - - let now = Working::now(); - Stopped::local_set(&now); - - let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); - let inactive_peer = TorrentPeerBuilder::default() - .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) - .into(); - torrent_entry.insert_or_update_peer(&inactive_peer); // Add the peer - torrent_entry.remove_inactive_peers(timeout); +use torrust_tracker_torrent_repository::TorrentsRwLockStdMutexStd; - assert_eq!(torrent_entry.peers.len(), 0); - } - } -} +pub type Torrents = TorrentsRwLockStdMutexStd; // Currently Used diff --git a/src/core/torrent/repository.rs b/src/core/torrent/repository.rs deleted file mode 100644 index d4f8ee5e3..000000000 --- a/src/core/torrent/repository.rs +++ /dev/null @@ -1,301 +0,0 @@ -use std::sync::Arc; - -use crate::core::peer; -use crate::core::torrent::{Entry, SwarmStats}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -pub trait Repository { - fn new() -> Self; - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool); -} - -pub trait TRepositoryAsync { - fn new() -> Self; - fn update_torrent_with_peer_and_get_stats( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - ) -> impl std::future::Future + Send; -} - -/// Structure that holds all torrents. Using `std::sync` locks. -pub struct Sync { - torrents: std::sync::RwLock>>>, -} - -impl Sync { - /// Returns the get torrents of this [`Sync`]. - /// - /// # Panics - /// - /// Panics if unable to read the torrent. - pub fn get_torrents( - &self, - ) -> std::sync::RwLockReadGuard<'_, std::collections::BTreeMap>>> { - self.torrents.read().expect("unable to get torrent list") - } - - /// Returns the mutable get torrents of this [`Sync`]. - /// - /// # Panics - /// - /// Panics if unable to write to the torrents list. - pub fn get_torrents_mut( - &self, - ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap>>> { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Repository for Sync { - fn new() -> Self { - Self { - torrents: std::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut(); - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().unwrap(); - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -/// Structure that holds all torrents. Using `std::sync` locks. 
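All of the removed repository variants are driven the same way through the `Repository` trait. A rough sketch under the old module paths, assuming a `sample_peer()` helper like the one in the services tests (the info-hash literal is the one used in doc examples elsewhere in this diff):

```rust
use crate::core::torrent::repository::{Repository, Sync};
use crate::shared::bit_torrent::info_hash::InfoHash;

let repo = Sync::new();
let info_hash: InfoHash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse().unwrap();

// One announce: the torrent entry is created on demand, then the peer is
// upserted under the per-entry mutex once the outer map guard is dropped.
let (stats, stats_changed) = repo.update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer());

// `stats` follows the BEP 48 names: complete (seeders), downloaded, incomplete (leechers).
println!(
    "complete={} downloaded={} incomplete={} changed={stats_changed}",
    stats.complete, stats.downloaded, stats.incomplete
);
```

The `SyncSingle` variant that the preceding doc line introduces takes the coarser route: a single `RwLock` write guard over the whole map per announce, trading lock granularity for simplicity.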
-pub struct SyncSingle { - torrents: std::sync::RwLock>, -} - -impl SyncSingle { - /// Returns the get torrents of this [`SyncSingle`]. - /// - /// # Panics - /// - /// Panics if unable to get torrent list. - pub fn get_torrents(&self) -> std::sync::RwLockReadGuard<'_, std::collections::BTreeMap> { - self.torrents.read().expect("unable to get torrent list") - } - - /// Returns the get torrents of this [`SyncSingle`]. - /// - /// # Panics - /// - /// Panics if unable to get writable torrent list. - pub fn get_torrents_mut(&self) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Repository for SyncSingle { - fn new() -> Self { - Self { - torrents: std::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let mut torrents = self.torrents.write().unwrap(); - - let torrent_entry = match torrents.entry(*info_hash) { - std::collections::btree_map::Entry::Vacant(vacant) => vacant.insert(Entry::new()), - std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), - }; - - let stats_updated = torrent_entry.insert_or_update_peer(peer); - let stats = torrent_entry.get_stats(); - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -/// Structure that holds all torrents. Using `tokio::sync` locks. -#[allow(clippy::module_name_repetitions)] -pub struct RepositoryAsync { - torrents: tokio::sync::RwLock>>>, -} - -impl TRepositoryAsync for RepositoryAsync { - fn new() -> Self { - Self { - torrents: tokio::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut().await; - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(tokio::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().await; - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsync { - pub async fn get_torrents( - &self, - ) -> tokio::sync::RwLockReadGuard<'_, std::collections::BTreeMap>>> { - self.torrents.read().await - } - - pub async fn get_torrents_mut( - &self, - ) -> tokio::sync::RwLockWriteGuard<'_, std::collections::BTreeMap>>> { - self.torrents.write().await - } -} - -/// Structure that holds all torrents. Using a `tokio::sync` lock for the torrents map an`std::sync`nc lock for the inner torrent entry. 
-pub struct AsyncSync { - torrents: tokio::sync::RwLock>>>, -} - -impl TRepositoryAsync for AsyncSync { - fn new() -> Self { - Self { - torrents: tokio::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut().await; - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().unwrap(); - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl AsyncSync { - pub async fn get_torrents( - &self, - ) -> tokio::sync::RwLockReadGuard<'_, std::collections::BTreeMap>>> { - self.torrents.read().await - } - - pub async fn get_torrents_mut( - &self, - ) -> tokio::sync::RwLockWriteGuard<'_, std::collections::BTreeMap>>> { - self.torrents.write().await - } -} - -#[allow(clippy::module_name_repetitions)] -pub struct RepositoryAsyncSingle { - torrents: tokio::sync::RwLock>, -} - -impl TRepositoryAsync for RepositoryAsyncSingle { - fn new() -> Self { - Self { - torrents: tokio::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let (stats, stats_updated) = { - let mut torrents_lock = self.torrents.write().await; - let torrent_entry = torrents_lock.entry(*info_hash).or_insert(Entry::new()); - let stats_updated = torrent_entry.insert_or_update_peer(peer); - let stats = torrent_entry.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsyncSingle { - pub async fn get_torrents(&self) -> tokio::sync::RwLockReadGuard<'_, std::collections::BTreeMap> { - self.torrents.read().await - } - - pub async fn get_torrents_mut(&self) -> tokio::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { - self.torrents.write().await - } -} diff --git a/src/lib.rs b/src/lib.rs index b4ad298ac..064f50eb6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -469,6 +469,9 @@ //! //! In addition to the production code documentation you can find a lot of //! examples on the integration and unit tests. + +use torrust_tracker_clock::{clock, time_extent}; + pub mod app; pub mod bootstrap; pub mod console; @@ -478,3 +481,24 @@ pub mod shared; #[macro_use] extern crate lazy_static; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::WorkingTimeExtentMaker; + +/// Stopped version, for testing. 
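The `#[cfg(test)]` half of that alias follows right below. The point of these `cfg`-switched aliases is that production builds read the real system clock while test builds get a frozen, settable one, so time-dependent code never branches on `cfg` itself. A minimal sketch of the idea, using only the `torrust_tracker_clock` calls that already appear in this diff:

```rust
use torrust_tracker_clock::clock::Time;

// Resolves to `clock::Working` in production builds and to the settable
// `clock::Stopped` under `#[cfg(test)]`, so callers never branch on cfg.
fn seconds_since_epoch() -> u64 {
    crate::CurrentClock::now().as_secs()
}

#[cfg(test)]
mod clock_sketch_tests {
    use torrust_tracker_clock::clock;
    use torrust_tracker_clock::clock::stopped::Stopped as _;

    #[test]
    fn time_is_deterministic_in_tests() {
        clock::Stopped::local_set_to_unix_epoch();
        assert_eq!(super::seconds_since_epoch(), 0);
    }
}
```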
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::StoppedTimeExtentMaker; diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs index 99e93aaf9..3671438c2 100644 --- a/src/servers/apis/v1/context/auth_key/resources.rs +++ b/src/servers/apis/v1/context/auth_key/resources.rs @@ -1,9 +1,9 @@ //! API resources for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. use serde::{Deserialize, Serialize}; +use torrust_tracker_clock::conv::convert_from_iso_8601_to_timestamp; use crate::core::auth::{self, Key}; -use crate::shared::clock::convert_from_iso_8601_to_timestamp; /// A resource that represents an authentication key. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -41,9 +41,12 @@ impl From for AuthKey { mod tests { use std::time::Duration; + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self, Time}; + use super::AuthKey; use crate::core::auth::{self, Key}; - use crate::shared::clock::{Current, TimeNow}; + use crate::CurrentClock; struct TestTime { pub timestamp: u64, @@ -65,6 +68,8 @@ mod tests { #[test] #[allow(deprecated)] fn it_should_be_convertible_into_an_auth_key() { + clock::Stopped::local_set_to_unix_epoch(); + let auth_key_resource = AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: one_hour_after_unix_epoch().timestamp, @@ -75,7 +80,7 @@ mod tests { auth::ExpiringKey::from(auth_key_resource), auth::ExpiringKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Current::add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap() + valid_until: CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap() } ); } @@ -83,9 +88,11 @@ mod tests { #[test] #[allow(deprecated)] fn it_should_be_convertible_from_an_auth_key() { + clock::Stopped::local_set_to_unix_epoch(); + let auth_key = auth::ExpiringKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Current::add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap(), + valid_until: CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap(), }; assert_eq!( diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index b241c469c..9e8ab6bab 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -50,9 +50,9 @@ impl From for Stats { fn from(metrics: TrackerMetrics) -> Self { Self { torrents: metrics.torrents_metrics.torrents, - seeders: metrics.torrents_metrics.seeders, - completed: metrics.torrents_metrics.completed, - leechers: metrics.torrents_metrics.leechers, + seeders: metrics.torrents_metrics.complete, + completed: metrics.torrents_metrics.downloaded, + leechers: metrics.torrents_metrics.incomplete, tcp4_connections_handled: metrics.protocol_metrics.tcp4_connections_handled, tcp4_announces_handled: metrics.protocol_metrics.tcp4_announces_handled, tcp4_scrapes_handled: metrics.protocol_metrics.tcp4_scrapes_handled, @@ -71,19 +71,20 @@ impl From for Stats { #[cfg(test)] mod tests { + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use super::Stats; use crate::core::services::statistics::TrackerMetrics; use crate::core::statistics::Metrics; - use crate::core::TorrentsMetrics; #[test] fn 
stats_resource_should_be_converted_from_tracker_metrics() { assert_eq!( Stats::from(TrackerMetrics { torrents_metrics: TorrentsMetrics { - seeders: 1, - completed: 2, - leechers: 3, + complete: 1, + downloaded: 2, + incomplete: 3, torrents: 4 }, protocol_metrics: Metrics { diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index dcb92dec3..15f70c8b6 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -10,13 +10,14 @@ use axum_extra::extract::Query; use log::debug; use serde::{de, Deserialize, Deserializer}; use thiserror::Error; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; -use crate::core::services::torrent::{get_torrent_info, get_torrents, get_torrents_page, Pagination}; +use crate::core::services::torrent::{get_torrent_info, get_torrents, get_torrents_page}; use crate::core::Tracker; use crate::servers::apis::v1::responses::invalid_info_hash_param_response; use crate::servers::apis::InfoHashParam; -use crate::shared::bit_torrent::info_hash::InfoHash; /// It handles the request to get the torrent data. /// @@ -82,7 +83,7 @@ pub async fn get_torrents_handler(State(tracker): State>, paginatio torrent_list_response( &get_torrents_page( tracker.clone(), - &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), + Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), ) .await, ) diff --git a/src/servers/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs index 752694393..e7a0802c1 100644 --- a/src/servers/apis/v1/context/torrent/resources/peer.rs +++ b/src/servers/apis/v1/context/torrent/resources/peer.rs @@ -1,7 +1,7 @@ //! `Peer` and Peer `Id` API resources. +use derive_more::From; use serde::{Deserialize, Serialize}; - -use crate::core; +use torrust_tracker_primitives::peer; /// `Peer` API resource. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -22,7 +22,7 @@ pub struct Peer { /// The peer's left bytes (pending to download). pub left: i64, /// The peer's event: `started`, `stopped`, `completed`. - /// See [`AnnounceEventDef`](crate::shared::bit_torrent::common::AnnounceEventDef). + /// See [`AnnounceEvent`](torrust_tracker_primitives::announce_event::AnnounceEvent). 
pub event: String, } @@ -35,8 +35,8 @@ pub struct Id { pub client: Option, } -impl From for Id { - fn from(peer_id: core::peer::Id) -> Self { +impl From for Id { + fn from(peer_id: peer::Id) -> Self { Id { id: peer_id.to_hex_string(), client: peer_id.get_client_name(), @@ -44,18 +44,32 @@ impl From for Id { } } -impl From for Peer { - #[allow(deprecated)] - fn from(peer: core::peer::Peer) -> Self { +impl From for Peer { + fn from(value: peer::Peer) -> Self { + #[allow(deprecated)] Peer { - peer_id: Id::from(peer.peer_id), - peer_addr: peer.peer_addr.to_string(), - updated: peer.updated.as_millis(), - updated_milliseconds_ago: peer.updated.as_millis(), - uploaded: peer.uploaded.0, - downloaded: peer.downloaded.0, - left: peer.left.0, - event: format!("{:?}", peer.event), + peer_id: Id::from(value.peer_id), + peer_addr: value.peer_addr.to_string(), + updated: value.updated.as_millis(), + updated_milliseconds_ago: value.updated.as_millis(), + uploaded: value.uploaded.0, + downloaded: value.downloaded.0, + left: value.left.0, + event: format!("{:?}", value.event), + } + } +} + +#[derive(From, PartialEq, Default)] +pub struct Vector(pub Vec); + +impl FromIterator for Vector { + fn from_iter>(iter: T) -> Self { + let mut peers = Vector::default(); + + for i in iter { + peers.0.push(i.into()); } + peers } } diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index fc43fbb7a..2f1ace5c9 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -6,7 +6,6 @@ //! the JSON response. use serde::{Deserialize, Serialize}; -use super::peer; use crate::core::services::torrent::{BasicInfo, Info}; /// `Torrent` API resource. 
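The `Vector` newtype added in `peer.rs` above exists so a list of domain peers can be collected straight into API `Peer` resources; the `From<Info> for Torrent` hunk below relies on exactly that. A minimal sketch of the call shape (the `domain_peers` binding is illustrative, not taken from the diff):

```rust
use torrust_tracker_primitives::peer;

// Collect domain peers into API resources through the `FromIterator` impl
// on `Vector`, then unwrap the inner `Vec<Peer>` for the response body.
let domain_peers: Vec<peer::Peer> = vec![peer::Peer::default()];

let resources: Vector = domain_peers.into_iter().collect();
let peers: Vec<Peer> = resources.0;

assert_eq!(peers.len(), 1);
```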
@@ -68,14 +67,16 @@ pub fn to_resource(basic_info_vec: &[BasicInfo]) -> Vec { impl From for Torrent { fn from(info: Info) -> Self { + let peers: Option = info.peers.map(|peers| peers.into_iter().collect()); + + let peers: Option> = peers.map(|peers| peers.0); + Self { info_hash: info.info_hash.to_string(), seeders: info.seeders, completed: info.completed, leechers: info.leechers, - peers: info - .peers - .map(|peers| peers.iter().map(|peer| peer::Peer::from(*peer)).collect()), + peers, } } } @@ -96,15 +97,14 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; use super::Torrent; - use crate::core::peer; use crate::core::services::torrent::{BasicInfo, Info}; use crate::servers::apis::v1::context::torrent::resources::peer::Peer; use crate::servers::apis::v1::context::torrent::resources::torrent::ListItem; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; fn sample_peer() -> peer::Peer { peer::Peer { diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index fc32f667b..c88f8cc1d 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -5,6 +5,7 @@ use std::sync::Arc; use axum::extract::{Path, State}; use axum::response::Response; +use torrust_tracker_primitives::info_hash::InfoHash; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, @@ -12,7 +13,6 @@ use super::responses::{ use crate::core::Tracker; use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; use crate::servers::apis::InfoHashParam; -use crate::shared::bit_torrent::info_hash::InfoHash; /// It handles the request to add a torrent to the whitelist. /// diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index 08a59ef90..3ef85e600 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -71,7 +71,7 @@ //! is behind a reverse proxy. //! //! > **NOTICE**: the maximum number of peers that the tracker can return is -//! `74`. Defined with a hardcoded const [`TORRENT_PEERS_LIMIT`](crate::core::TORRENT_PEERS_LIMIT). +//! `74`. Defined with a hardcoded const [`TORRENT_PEERS_LIMIT`](torrust_tracker_configuration::TORRENT_PEERS_LIMIT). //! Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) //! for more information about this limitation. //! @@ -206,15 +206,15 @@ //! //! ### Scrape //! -//! The `scrape` request allows a peer to get [swarm metadata](crate::core::torrent::SwarmMetadata) +//! The `scrape` request allows a peer to get [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) //! for multiple torrents at the same time. //! -//! The response contains the [swarm metadata](crate::core::torrent::SwarmMetadata) +//! The response contains the [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) //! for that torrent: //! -//! - [complete](crate::core::torrent::SwarmMetadata::complete) -//! - [downloaded](crate::core::torrent::SwarmMetadata::downloaded) -//! - [incomplete](crate::core::torrent::SwarmMetadata::incomplete) +//! 
- [complete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::complete) +//! - [downloaded](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::downloaded) +//! - [incomplete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::incomplete) //! //! **Query parameters** //! @@ -266,7 +266,7 @@ //! Where the `files` key contains a dictionary of dictionaries. The first //! dictionary key is the `info_hash` of the torrent (`iiiiiiiiiiiiiiiiiiii` in //! the example). The second level dictionary contains the -//! [swarm metadata](crate::core::torrent::SwarmMetadata) for that torrent. +//! [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) for that torrent. //! //! If you save the response as a file and you open it with a program that //! can handle binary data you would see: diff --git a/src/servers/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs index 472b1e724..90f4b9a43 100644 --- a/src/servers/http/percent_encoding.rs +++ b/src/servers/http/percent_encoding.rs @@ -15,8 +15,8 @@ //! - //! - //! - -use crate::core::peer::{self, IdConversionError}; -use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; +use torrust_tracker_primitives::peer; /// Percent decodes a percent encoded infohash. Internally an /// [`InfoHash`] is a 20-byte array. @@ -27,8 +27,8 @@ use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; /// ```rust /// use std::str::FromStr; /// use torrust_tracker::servers::http::percent_encoding::percent_decode_info_hash; -/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::core::peer; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::peer; /// /// let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; /// @@ -44,12 +44,12 @@ use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; /// /// Will return `Err` if the decoded bytes do not represent a valid /// [`InfoHash`]. -pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { +pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); InfoHash::try_from(bytes) } -/// Percent decodes a percent encoded peer id. Internally a peer [`Id`](crate::core::peer::Id) +/// Percent decodes a percent encoded peer id. Internally a peer [`Id`](peer::Id) /// is a 20-byte array. /// /// For example, given the peer id `*b"-qB00000000000000000"`, @@ -58,8 +58,8 @@ pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result Result Result { +pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result { let bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); peer::Id::try_from(bytes) } @@ -80,9 +80,10 @@ pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result) -> Result) -> Result R /// /// It ignores the peer address in the announce request params. 
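A rough sketch of calling the rewritten builder defined next (the `announce_request` value and the IP literal are illustrative; the function is private to the handler module):

```rust
use std::net::{IpAddr, SocketAddr};

// `announce_request` is assumed to be an already-parsed `Announce` request.
let client_ip: IpAddr = "203.0.113.195".parse().unwrap();

let peer = peer_from_request(&announce_request, &client_ip);

// The peer keeps the announced port but takes the resolved client IP, and
// its `updated` field is stamped with the cfg-switched `CurrentClock`.
assert_eq!(peer.peer_addr, SocketAddr::new(client_ip, announce_request.port));
```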
#[must_use] -fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { - Peer { +fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> peer::Peer { + peer::Peer { peer_id: announce_request.peer_id, peer_addr: SocketAddr::new(*peer_ip, announce_request.port), - updated: Current::now(), + updated: CurrentClock::now(), uploaded: NumberOfBytes(announce_request.uploaded.unwrap_or(0)), downloaded: NumberOfBytes(announce_request.downloaded.unwrap_or(0)), left: NumberOfBytes(announce_request.left.unwrap_or(0)), - event: map_to_aquatic_event(&announce_request.event), + event: map_to_torrust_event(&announce_request.event), } } -fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { +#[must_use] +pub fn map_to_aquatic_event(event: &Option) -> aquatic_udp_protocol::AnnounceEvent { match event { Some(event) => match &event { Event::Started => aquatic_udp_protocol::AnnounceEvent::Started, @@ -153,17 +155,30 @@ fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { } } +#[must_use] +pub fn map_to_torrust_event(event: &Option) -> AnnounceEvent { + match event { + Some(event) => match &event { + Event::Started => AnnounceEvent::Started, + Event::Stopped => AnnounceEvent::Stopped, + Event::Completed => AnnounceEvent::Completed, + }, + None => AnnounceEvent::None, + } +} + #[cfg(test)] mod tests { + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; - use crate::core::{peer, Tracker}; + use crate::core::Tracker; use crate::servers::http::v1::requests::announce::Announce; use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; - use crate::shared::bit_torrent::info_hash::InfoHash; fn private_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_private()) diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 49b1aebc7..d6b39cc53 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -111,6 +111,7 @@ mod tests { use std::net::IpAddr; use std::str::FromStr; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; @@ -118,7 +119,6 @@ mod tests { use crate::servers::http::v1::requests::scrape::Scrape; use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; - use crate::shared::bit_torrent::info_hash::InfoHash; fn private_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_private()) diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index 08dd9da29..39a6c1846 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -7,12 +7,12 @@ use std::str::FromStr; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; +use torrust_tracker_primitives::peer; -use crate::core::peer::{self, IdConversionError}; use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::servers::http::v1::query::{ParseQueryError, Query}; use crate::servers::http::v1::responses; -use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; /// The number of bytes `downloaded`, `uploaded` or `left`. 
It's used in the /// `Announce` request for parameters that represent a number of bytes. @@ -33,8 +33,8 @@ const COMPACT: &str = "compact"; /// /// ```rust /// use torrust_tracker::servers::http::v1::requests::announce::{Announce, Compact, Event}; -/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::core::peer; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::peer; /// /// let request = Announce { /// // Mandatory params @@ -119,14 +119,14 @@ pub enum ParseAnnounceQueryError { InvalidInfoHashParam { param_name: String, param_value: String, - source: LocatedError<'static, ConversionError>, + source: LocatedError<'static, info_hash::ConversionError>, }, /// The `peer_id` is invalid. #[error("invalid param value {param_value} for {param_name} in {source}")] InvalidPeerIdParam { param_name: String, param_value: String, - source: LocatedError<'static, IdConversionError>, + source: LocatedError<'static, peer::IdConversionError>, }, } @@ -355,12 +355,13 @@ mod tests { mod announce_request { - use crate::core::peer; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::announce::{ Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, }; - use crate::shared::bit_torrent::info_hash::InfoHash; #[test] fn should_be_instantiated_from_the_url_query_with_only_the_mandatory_params() { diff --git a/src/servers/http/v1/requests/scrape.rs b/src/servers/http/v1/requests/scrape.rs index 7c52b9fc4..19f6e35a6 100644 --- a/src/servers/http/v1/requests/scrape.rs +++ b/src/servers/http/v1/requests/scrape.rs @@ -5,11 +5,11 @@ use std::panic::Location; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; use crate::servers::http::percent_encoding::percent_decode_info_hash; use crate::servers::http::v1::query::Query; use crate::servers::http::v1::responses; -use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; pub type NumberOfBytes = i64; @@ -34,7 +34,7 @@ pub enum ParseScrapeQueryError { InvalidInfoHashParam { param_name: String, param_value: String, - source: LocatedError<'static, ConversionError>, + source: LocatedError<'static, info_hash::ConversionError>, }, } @@ -86,9 +86,10 @@ mod tests { mod scrape_request { + use torrust_tracker_primitives::info_hash::InfoHash; + use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; - use crate::shared::bit_torrent::info_hash::InfoHash; #[test] fn should_be_instantiated_from_the_url_query_with_only_one_infohash() { diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index b1b474ea9..134da919e 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -7,10 +7,10 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use axum::http::StatusCode; use derive_more::{AsRef, Constructor, From}; use torrust_tracker_contrib_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; +use torrust_tracker_primitives::peer; use super::Response; -use crate::core::peer::Peer; -use crate::core::{self, AnnounceData}; +use crate::core::AnnounceData; use crate::servers::http::v1::responses; /// An [`Announce`] response, that can be anything that is convertible from 
[`AnnounceData`]. @@ -79,7 +79,7 @@ impl From for Normal { incomplete: data.stats.incomplete.into(), interval: data.policy.interval.into(), min_interval: data.policy.interval_min.into(), - peers: data.peers.into_iter().collect(), + peers: data.peers.iter().map(AsRef::as_ref).copied().collect(), } } } @@ -116,7 +116,7 @@ pub struct Compact { impl From for Compact { fn from(data: AnnounceData) -> Self { - let compact_peers: Vec = data.peers.into_iter().collect(); + let compact_peers: Vec = data.peers.iter().map(AsRef::as_ref).copied().collect(); let (peers, peers6): (Vec>, Vec>) = compact_peers.into_iter().collect(); @@ -150,21 +150,6 @@ impl Into> for Compact { } } -/// Marker Trait for Peer Vectors -pub trait PeerEncoding: From + PartialEq {} - -impl FromIterator for Vec
<P> { - fn from_iter<T: IntoIterator<Item = Peer>>(iter: T) -> Self { - let mut peers: Vec<P>
= vec![]; - - for peer in iter { - peers.push(peer.into()); - } - - peers - } -} - /// A [`NormalPeer`], for the [`Normal`] form. /// /// ```rust @@ -188,10 +173,10 @@ pub struct NormalPeer { pub port: u16, } -impl PeerEncoding for NormalPeer {} +impl peer::Encoding for NormalPeer {} -impl From for NormalPeer { - fn from(peer: core::peer::Peer) -> Self { +impl From for NormalPeer { + fn from(peer: peer::Peer) -> Self { NormalPeer { peer_id: peer.peer_id.to_bytes(), ip: peer.peer_addr.ip(), @@ -240,10 +225,10 @@ pub enum CompactPeer { V6(CompactPeerData), } -impl PeerEncoding for CompactPeer {} +impl peer::Encoding for CompactPeer {} -impl From for CompactPeer { - fn from(peer: core::peer::Peer) -> Self { +impl From for CompactPeer { + fn from(peer: peer::Peer) -> Self { match (peer.peer_addr.ip(), peer.peer_addr.port()) { (IpAddr::V4(ip), port) => Self::V4(CompactPeerData { ip, port }), (IpAddr::V6(ip), port) => Self::V6(CompactPeerData { ip, port }), @@ -313,12 +298,13 @@ impl FromIterator> for CompactPeersEncoded { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; use torrust_tracker_configuration::AnnouncePolicy; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::core::peer::fixture::PeerBuilder; - use crate::core::peer::Id; - use crate::core::torrent::SwarmStats; use crate::core::AnnounceData; use crate::servers::http::v1::responses::announce::{Announce, Compact, Normal, Response}; @@ -338,20 +324,20 @@ mod tests { let policy = AnnouncePolicy::new(111, 222); let peer_ipv4 = PeerBuilder::default() - .with_peer_id(&Id(*b"-qB00000000000000001")) + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 0x7070)) .build(); let peer_ipv6 = PeerBuilder::default() - .with_peer_id(&Id(*b"-qB00000000000000002")) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new( IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), 0x7070, )) .build(); - let peers = vec![peer_ipv4, peer_ipv6]; - let stats = SwarmStats::new(333, 333, 444); + let peers = vec![Arc::new(peer_ipv4), Arc::new(peer_ipv6)]; + let stats = SwarmMetadata::new(333, 333, 444); AnnounceData::new(peers, stats, policy) } diff --git a/src/servers/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs index e16827824..11f361028 100644 --- a/src/servers/http/v1/responses/scrape.rs +++ b/src/servers/http/v1/responses/scrape.rs @@ -13,8 +13,8 @@ use crate::core::ScrapeData; /// /// ```rust /// use torrust_tracker::servers::http::v1::responses::scrape::Bencoded; -/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::core::torrent::SwarmMetadata; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; /// use torrust_tracker::core::ScrapeData; /// /// let info_hash = InfoHash([0x69; 20]); @@ -92,10 +92,11 @@ impl IntoResponse for Bencoded { mod tests { mod scrape_response { - use crate::core::torrent::SwarmMetadata; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use crate::core::ScrapeData; use crate::servers::http::v1::responses::scrape::Bencoded; - use crate::shared::bit_torrent::info_hash::InfoHash; fn sample_scrape_data() -> ScrapeData { 
let info_hash = InfoHash([0x69; 20]); diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index b791defd7..b37081045 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -11,9 +11,10 @@ use std::net::IpAddr; use std::sync::Arc; -use crate::core::peer::Peer; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; + use crate::core::{statistics, AnnounceData, Tracker}; -use crate::shared::bit_torrent::info_hash::InfoHash; /// The HTTP tracker `announce` service. /// @@ -25,7 +26,7 @@ use crate::shared::bit_torrent::info_hash::InfoHash; /// > **NOTICE**: as the HTTP tracker does not requires a connection request /// like the UDP tracker, the number of TCP connections is incremented for /// each `announce` request. -pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) -> AnnounceData { +pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut peer::Peer) -> AnnounceData { let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip @@ -47,13 +48,13 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; - use crate::core::{peer, Tracker}; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; + use crate::core::Tracker; fn public_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_public()) @@ -94,11 +95,11 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::core::peer::Peer; - use crate::core::torrent::SwarmStats; use crate::core::{statistics, AnnounceData, Tracker}; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; @@ -113,7 +114,7 @@ mod tests { let expected_announce_data = AnnounceData { peers: vec![], - stats: SwarmStats { + stats: SwarmMetadata { downloaded: 0, complete: 1, incomplete: 0, @@ -150,7 +151,7 @@ mod tests { Tracker::new(&configuration, Some(stats_event_sender), statistics::Repo::new()).unwrap() } - fn peer_with_the_ipv4_loopback_ip() -> Peer { + fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { let loopback_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); let mut peer = sample_peer(); peer.peer_addr = SocketAddr::new(loopback_ip, 8080); diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 82ca15dc8..18b57f479 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -11,8 +11,9 @@ use std::net::IpAddr; use std::sync::Arc; +use torrust_tracker_primitives::info_hash::InfoHash; + use crate::core::{statistics, ScrapeData, Tracker}; -use crate::shared::bit_torrent::info_hash::InfoHash; /// The HTTP tracker `scrape` service. 
/// @@ -60,13 +61,13 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; - use crate::core::{peer, Tracker}; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; + use crate::core::Tracker; fn public_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_public()) @@ -99,9 +100,9 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; - use crate::core::torrent::SwarmMetadata; use crate::core::{statistics, ScrapeData, Tracker}; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index 19e61f14e..49ea6261b 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -70,9 +70,9 @@ use std::net::SocketAddr; use std::panic::Location; use aquatic_udp_protocol::ConnectionId; +use torrust_tracker_clock::time_extent::{Extent, TimeExtent}; use super::error::Error; -use crate::shared::clock::time_extent::{Extent, TimeExtent}; pub type Cookie = [u8; 8]; @@ -133,9 +133,11 @@ mod cookie_builder { use std::hash::{Hash, Hasher}; use std::net::SocketAddr; + use torrust_tracker_clock::time_extent::{Extent, Make, TimeExtent}; + use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; - use crate::shared::clock::time_extent::{DefaultTimeExtentMaker, Extent, Make, TimeExtent}; use crate::shared::crypto::keys::seeds::{Current, Keeper}; + use crate::DefaultTimeExtentMaker; pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { DefaultTimeExtentMaker::now(&COOKIE_LIFETIME.increment) @@ -162,10 +164,12 @@ mod cookie_builder { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self}; + use torrust_tracker_clock::time_extent::{self, Extent}; + use super::cookie_builder::{self}; use crate::servers::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; - use crate::shared::clock::time_extent::{self, Extent}; - use crate::shared::clock::{Stopped, StoppedTime}; // #![feature(const_socketaddr)] // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); @@ -176,6 +180,8 @@ mod tests { const ID_COOKIE_OLD: Cookie = [23, 204, 198, 29, 48, 180, 62, 19]; const ID_COOKIE_NEW: Cookie = [41, 166, 45, 246, 249, 24, 108, 203]; + clock::Stopped::local_set_to_unix_epoch(); + let cookie = make(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); assert!(cookie == ID_COOKIE_OLD || cookie == ID_COOKIE_NEW); @@ -276,7 +282,7 @@ mod tests { let cookie = make(&remote_address); - Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); + clock::Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); let cookie_next = make(&remote_address); @@ -298,7 +304,7 @@ mod tests { let cookie = make(&remote_address); - Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); + 
         clock::Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap();

         check(&remote_address, &cookie).unwrap();
     }
@@ -307,9 +313,11 @@ mod tests {
     fn it_should_be_valid_for_the_last_time_extent() {
         let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0);

+        clock::Stopped::local_set_to_unix_epoch();
+
         let cookie = make(&remote_address);

-        Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap());
+        clock::Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap());

         check(&remote_address, &cookie).unwrap();
     }
@@ -321,7 +329,7 @@ mod tests {
         let cookie = make(&remote_address);

-        Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap());
+        clock::Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap());

         check(&remote_address, &cookie).unwrap();
     }
diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs
index 91a371a7b..2d5038ec3 100644
--- a/src/servers/udp/handlers.rs
+++ b/src/servers/udp/handlers.rs
@@ -12,6 +12,7 @@ use aquatic_udp_protocol::{
 use log::debug;
 use tokio::net::UdpSocket;
 use torrust_tracker_located_error::DynError;
+use torrust_tracker_primitives::info_hash::InfoHash;
 use uuid::Uuid;

 use super::connection_cookie::{check, from_connection_id, into_connection_id, make};
@@ -22,7 +23,6 @@ use crate::servers::udp::logging::{log_bad_request, log_error_response, log_requ
 use crate::servers::udp::peer_builder;
 use crate::servers::udp::request::AnnounceWrapper;
 use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS;
-use crate::shared::bit_torrent::info_hash::InfoHash;

 /// It handles the incoming UDP packets.
 ///
@@ -318,13 +318,14 @@ mod tests {
     use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
     use std::sync::Arc;

-    use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes};
+    use torrust_tracker_clock::clock::Time;
     use torrust_tracker_configuration::Configuration;
+    use torrust_tracker_primitives::{peer, NumberOfBytes};
     use torrust_tracker_test_helpers::configuration;

     use crate::core::services::tracker_factory;
-    use crate::core::{peer, Tracker};
-    use crate::shared::clock::{Current, Time};
+    use crate::core::Tracker;
+    use crate::CurrentClock;

     fn tracker_configuration() -> Configuration {
         default_testing_tracker_configuration()
@@ -366,39 +367,41 @@ mod tests {
         SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080)
     }

-    struct TorrentPeerBuilder {
+    #[derive(Debug, Default)]
+    pub struct TorrentPeerBuilder {
         peer: peer::Peer,
     }

     impl TorrentPeerBuilder {
-        pub fn default() -> TorrentPeerBuilder {
-            let default_peer = peer::Peer {
-                peer_id: peer::Id([255u8; 20]),
-                peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080),
-                updated: Current::now(),
-                uploaded: NumberOfBytes(0),
-                downloaded: NumberOfBytes(0),
-                left: NumberOfBytes(0),
-                event: AnnounceEvent::Started,
-            };
-            TorrentPeerBuilder { peer: default_peer }
+        #[must_use]
+        pub fn new() -> Self {
+            Self {
+                peer: peer::Peer {
+                    updated: CurrentClock::now(),
+                    ..Default::default()
+                },
+            }
         }

-        pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self {
-            self.peer.peer_id = peer_id;
+        #[must_use]
+        pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self {
+            self.peer.peer_addr = peer_addr;
             self
         }

-        pub fn with_peer_addr(mut self, peer_addr: SocketAddr) -> Self {
-            self.peer.peer_addr = peer_addr;
+        #[must_use]
+        pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self {
+            self.peer.peer_id = peer_id;
             self
         }

-        pub fn with_bytes_left(mut self, left: i64) -> Self {
+        #[must_use]
+        pub fn with_number_of_bytes_left(mut self, left: i64) -> Self {
             self.peer.left = NumberOfBytes(left);
             self
         }

+        #[must_use]
         pub fn into(self) -> peer::Peer {
             self.peer
         }
@@ -605,8 +608,9 @@ mod tests {
         Response, ResponsePeer,
     };
     use mockall::predicate::eq;
+    use torrust_tracker_primitives::peer;

-    use crate::core::{self, peer, statistics};
+    use crate::core::{self, statistics};
     use crate::servers::udp::connection_cookie::{into_connection_id, make};
     use crate::servers::udp::handlers::handle_announce;
     use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder;
@@ -635,14 +639,14 @@ mod tests {
         handle_announce(remote_addr, &request, &tracker).await.unwrap();

-        let peers = tracker.get_torrent_peers(&info_hash.0.into()).await;
+        let peers = tracker.get_torrent_peers(&info_hash.0.into());

-        let expected_peer = TorrentPeerBuilder::default()
+        let expected_peer = TorrentPeerBuilder::new()
             .with_peer_id(peer::Id(peer_id.0))
-            .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip), client_port))
+            .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip), client_port))
             .into();

-        assert_eq!(peers[0], expected_peer);
+        assert_eq!(peers[0], Arc::new(expected_peer));
     }

     #[tokio::test]
@@ -696,7 +700,7 @@ mod tests {
         handle_announce(remote_addr, &request, &tracker).await.unwrap();

-        let peers = tracker.get_torrent_peers(&info_hash.0.into()).await;
+        let peers = tracker.get_torrent_peers(&info_hash.0.into());

         assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port));
     }
@@ -709,9 +713,9 @@ mod tests {
         let client_port = 8080;
         let peer_id = AquaticPeerId([255u8; 20]);

-        let peer_using_ipv6 = TorrentPeerBuilder::default()
+        let peer_using_ipv6 = TorrentPeerBuilder::new()
             .with_peer_id(peer::Id(peer_id.0))
-            .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port))
+            .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port))
             .into();

         tracker
@@ -770,10 +774,11 @@ mod tests {
     mod from_a_loopback_ip {
         use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+        use std::sync::Arc;

         use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId};
+        use torrust_tracker_primitives::peer;

-        use crate::core::peer;
         use crate::servers::udp::connection_cookie::{into_connection_id, make};
         use crate::servers::udp::handlers::handle_announce;
         use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder;
@@ -800,16 +805,16 @@ mod tests {
             handle_announce(remote_addr, &request, &tracker).await.unwrap();

-            let peers = tracker.get_torrent_peers(&info_hash.0.into()).await;
+            let peers = tracker.get_torrent_peers(&info_hash.0.into());

             let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap();

-            let expected_peer = TorrentPeerBuilder::default()
+            let expected_peer = TorrentPeerBuilder::new()
                 .with_peer_id(peer::Id(peer_id.0))
-                .with_peer_addr(SocketAddr::new(external_ip_in_tracker_configuration, client_port))
+                .with_peer_address(SocketAddr::new(external_ip_in_tracker_configuration, client_port))
                 .into();

-            assert_eq!(peers[0], expected_peer);
+            assert_eq!(peers[0], Arc::new(expected_peer));
         }
     }
 }
@@ -825,8 +830,9 @@ mod tests {
         Response, ResponsePeer,
     };
     use mockall::predicate::eq;
+    use torrust_tracker_primitives::peer;

-    use crate::core::{self, peer, statistics};
+    use crate::core::{self, statistics};
     use crate::servers::udp::connection_cookie::{into_connection_id, make};
     use crate::servers::udp::handlers::handle_announce;
     use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder;
@@ -856,14 +862,14 @@ mod tests {
         handle_announce(remote_addr, &request, &tracker).await.unwrap();

-        let peers = tracker.get_torrent_peers(&info_hash.0.into()).await;
+        let peers = tracker.get_torrent_peers(&info_hash.0.into());

-        let expected_peer = TorrentPeerBuilder::default()
+        let expected_peer = TorrentPeerBuilder::new()
             .with_peer_id(peer::Id(peer_id.0))
-            .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port))
+            .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port))
             .into();

-        assert_eq!(peers[0], expected_peer);
+        assert_eq!(peers[0], Arc::new(expected_peer));
     }

     #[tokio::test]
@@ -920,7 +926,7 @@ mod tests {
         handle_announce(remote_addr, &request, &tracker).await.unwrap();

-        let peers = tracker.get_torrent_peers(&info_hash.0.into()).await;
+        let peers = tracker.get_torrent_peers(&info_hash.0.into());

         // When using IPv6 the tracker converts the remote client ip into a IPv4 address
         assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port));
@@ -933,9 +939,9 @@ mod tests {
         let client_port = 8080;
         let peer_id = AquaticPeerId([255u8; 20]);

-        let peer_using_ipv4 = TorrentPeerBuilder::default()
+        let peer_using_ipv4 = TorrentPeerBuilder::new()
             .with_peer_id(peer::Id(peer_id.0))
-            .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port))
+            .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port))
             .into();

         tracker
@@ -1037,7 +1043,7 @@ mod tests {
         handle_announce(remote_addr, &request, &tracker).await.unwrap();

-        let peers = tracker.get_torrent_peers(&info_hash.0.into()).await;
+        let peers = tracker.get_torrent_peers(&info_hash.0.into());

         let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap();
@@ -1062,9 +1068,10 @@ mod tests {
         InfoHash, NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId,
     };
+    use torrust_tracker_primitives::peer;

     use super::TorrentPeerBuilder;
-    use crate::core::{self, peer};
+    use crate::core::{self};
     use crate::servers::udp::connection_cookie::{into_connection_id, make};
     use crate::servers::udp::handlers::handle_scrape;
     use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr};
@@ -1106,10 +1113,10 @@ mod tests {
     async fn add_a_seeder(tracker: Arc<Tracker>, remote_addr: &SocketAddr, info_hash: &InfoHash) {
         let peer_id = peer::Id([255u8; 20]);

-        let peer = TorrentPeerBuilder::default()
+        let peer = TorrentPeerBuilder::new()
             .with_peer_id(peer::Id(peer_id.0))
-            .with_peer_addr(*remote_addr)
-            .with_bytes_left(0)
+            .with_peer_address(*remote_addr)
+            .with_number_of_bytes_left(0)
             .into();

         tracker
diff --git a/src/servers/udp/logging.rs b/src/servers/udp/logging.rs
index a32afc6a3..9bbb48f6a 100644
--- a/src/servers/udp/logging.rs
+++ b/src/servers/udp/logging.rs
@@ -4,9 +4,9 @@ use std::net::SocketAddr;
 use std::time::Duration;

 use aquatic_udp_protocol::{Request, Response, TransactionId};
+use torrust_tracker_primitives::info_hash::InfoHash;

 use super::handlers::RequestId;
-use crate::shared::bit_torrent::info_hash::InfoHash;

 pub fn log_request(request: &Request, request_id: &RequestId, server_socket_addr: &SocketAddr) {
     let action = map_action_name(request);
diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs
index 8ef562086..fa4e8e926 100644
--- a/src/servers/udp/mod.rs
+++ b/src/servers/udp/mod.rs
@@ -62,7 +62,7 @@
 //! ```
 //!
 //! For the `Announce` request there is a wrapper struct [`AnnounceWrapper`](crate::servers::udp::request::AnnounceWrapper).
-//! It was added to add an extra field with the internal [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) struct.
+//! It was added to add an extra field with the internal [`InfoHash`](torrust_tracker_primitives::info_hash::InfoHash) struct.
 //!
 //! ### Connect
 //!
@@ -345,7 +345,7 @@
 //! packet.
 //!
 //! We are using a wrapper struct for the aquatic [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest)
-//! struct, because we have our internal [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash)
+//! struct, because we have our internal [`InfoHash`](torrust_tracker_primitives::info_hash::InfoHash)
 //! struct.
 //!
 //! ```text
@@ -467,15 +467,15 @@
 //!
 //! ### Scrape
 //!
-//! The `scrape` request allows a peer to get [swarm metadata](crate::core::torrent::SwarmMetadata)
+//! The `scrape` request allows a peer to get [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata)
 //! for multiple torrents at the same time.
 //!
-//! The response contains the [swarm metadata](crate::core::torrent::SwarmMetadata)
+//! The response contains the [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata)
 //! for that torrent:
 //!
-//! - [complete](crate::core::torrent::SwarmMetadata::complete)
-//! - [downloaded](crate::core::torrent::SwarmMetadata::downloaded)
-//! - [incomplete](crate::core::torrent::SwarmMetadata::incomplete)
+//! - [complete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::complete)
+//! - [downloaded](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::downloaded)
+//! - [incomplete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::incomplete)
 //!
 //! > **NOTICE**: up to about 74 torrents can be scraped at once. A full scrape
 //! can't be done with this protocol. This is a limitation of the UDP protocol.
diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs
index 5168e2578..f7eb935a0 100644
--- a/src/servers/udp/peer_builder.rs
+++ b/src/servers/udp/peer_builder.rs
@@ -1,11 +1,14 @@
 //! Logic to extract the peer info from the announce request.
 use std::net::{IpAddr, SocketAddr};

+use torrust_tracker_clock::clock::Time;
+use torrust_tracker_primitives::announce_event::AnnounceEvent;
+use torrust_tracker_primitives::{peer, NumberOfBytes};
+
 use super::request::AnnounceWrapper;
-use crate::core::peer::{Id, Peer};
-use crate::shared::clock::{Current, Time};
+use crate::CurrentClock;

-/// Extracts the [`Peer`] info from the
+/// Extracts the [`peer::Peer`] info from the
 /// announce request.
 ///
 /// # Arguments
 ///
@@ -14,14 +17,14 @@ use crate::shared::clock::{Current, Time};
 /// * `peer_ip` - The real IP address of the peer, not the one in the announce
 ///   request.
 #[must_use]
-pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> Peer {
-    Peer {
-        peer_id: Id(announce_wrapper.announce_request.peer_id.0),
+pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> peer::Peer {
+    peer::Peer {
+        peer_id: peer::Id(announce_wrapper.announce_request.peer_id.0),
         peer_addr: SocketAddr::new(*peer_ip, announce_wrapper.announce_request.port.0),
-        updated: Current::now(),
-        uploaded: announce_wrapper.announce_request.bytes_uploaded,
-        downloaded: announce_wrapper.announce_request.bytes_downloaded,
-        left: announce_wrapper.announce_request.bytes_left,
-        event: announce_wrapper.announce_request.event,
+        updated: CurrentClock::now(),
+        uploaded: NumberOfBytes(announce_wrapper.announce_request.bytes_uploaded.0),
+        downloaded: NumberOfBytes(announce_wrapper.announce_request.bytes_downloaded.0),
+        left: NumberOfBytes(announce_wrapper.announce_request.bytes_left.0),
+        event: AnnounceEvent::from_i32(announce_wrapper.announce_request.event.to_i32()),
     }
 }
diff --git a/src/servers/udp/request.rs b/src/servers/udp/request.rs
index f655fd36a..e172e03b1 100644
--- a/src/servers/udp/request.rs
+++ b/src/servers/udp/request.rs
@@ -6,8 +6,7 @@
 //! Some of the type in this module are wrappers around the types in the
 //! `aquatic_udp_protocol` crate.
 use aquatic_udp_protocol::AnnounceRequest;
-
-use crate::shared::bit_torrent::info_hash::InfoHash;
+use torrust_tracker_primitives::info_hash::InfoHash;

 /// Wrapper around [`AnnounceRequest`].
 pub struct AnnounceWrapper {
diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs
index 9bf9dfd3c..9625b88e7 100644
--- a/src/shared/bit_torrent/common.rs
+++ b/src/shared/bit_torrent/common.rs
@@ -1,7 +1,6 @@
 //! `BitTorrent` protocol primitive types
 //!
 //! [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html)
-use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes};
 use serde::{Deserialize, Serialize};

 /// The maximum number of torrents that can be returned in an `scrape` response.
@@ -33,23 +32,3 @@ enum Actions {
     Scrape = 2,
     Error = 3,
 }
-
-/// Announce events. Described on the
-/// [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html)
-#[derive(Serialize, Deserialize)]
-#[serde(remote = "AnnounceEvent")]
-pub enum AnnounceEventDef {
-    /// The peer has started downloading the torrent.
-    Started,
-    /// The peer has ceased downloading the torrent.
-    Stopped,
-    /// The peer has completed downloading the torrent.
-    Completed,
-    /// This is one of the announcements done at regular intervals.
-    None,
-}
-
-/// Number of bytes downloaded, uploaded or pending to download (left) by the peer.
-#[derive(Serialize, Deserialize)]
-#[serde(remote = "NumberOfBytes")]
-pub struct NumberOfBytesDef(pub i64);
diff --git a/src/shared/bit_torrent/info_hash.rs b/src/shared/bit_torrent/info_hash.rs
index 20c3cb38b..506c37758 100644
--- a/src/shared/bit_torrent/info_hash.rs
+++ b/src/shared/bit_torrent/info_hash.rs
@@ -129,169 +129,38 @@
 //! You can hash that byte string with
 //! The result is a 20-char string: `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB`
-use std::panic::Location;
-
-use thiserror::Error;
+use torrust_tracker_primitives::info_hash::InfoHash;

-/// `BitTorrent` Info Hash v1
-#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
-pub struct InfoHash(pub [u8; 20]);
+pub mod fixture {
+    use std::hash::{DefaultHasher, Hash, Hasher};

-const INFO_HASH_BYTES_LEN: usize = 20;
+    use super::InfoHash;

-impl InfoHash {
-    /// Create a new `InfoHash` from a byte slice.
+    /// Generate a semi-stable pseudo-random infohash
     ///
-    /// # Panics
+    /// Note: If the [`DefaultHasher`] implementation changes,
+    /// so will the resulting info-hashes.
     ///
-    /// Will panic if byte slice does not contains the exact amount of bytes need for the `InfoHash`.
-    #[must_use]
-    pub fn from_bytes(bytes: &[u8]) -> Self {
-        assert_eq!(bytes.len(), INFO_HASH_BYTES_LEN);
-        let mut ret = Self([0u8; INFO_HASH_BYTES_LEN]);
-        ret.0.clone_from_slice(bytes);
-        ret
-    }
-
-    /// Returns the `InfoHash` internal byte array.
-    #[must_use]
-    pub fn bytes(&self) -> [u8; 20] {
-        self.0
-    }
-
-    /// Returns the `InfoHash` as a hex string.
+    /// The results should not be relied upon between versions.
     #[must_use]
-    pub fn to_hex_string(&self) -> String {
-        self.to_string()
-    }
-}
-
-impl std::fmt::Display for InfoHash {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let mut chars = [0u8; 40];
-        binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify");
-        write!(f, "{}", std::str::from_utf8(&chars).unwrap())
-    }
-}
-
-impl std::str::FromStr for InfoHash {
-    type Err = binascii::ConvertError;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        let mut i = Self([0u8; 20]);
-        if s.len() != 40 {
-            return Err(binascii::ConvertError::InvalidInputLength);
-        }
-        binascii::hex2bin(s.as_bytes(), &mut i.0)?;
-        Ok(i)
-    }
-}
-
-impl Ord for InfoHash {
-    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
-        self.0.cmp(&other.0)
-    }
-}
-
-impl std::cmp::PartialOrd for InfoHash {
-    fn partial_cmp(&self, other: &InfoHash) -> Option<std::cmp::Ordering> {
-        Some(self.cmp(other))
-    }
-}
+    pub fn gen_seeded_infohash(seed: &u64) -> InfoHash {
+        let mut buf_a: [[u8; 8]; 4] = Default::default();
+        let mut buf_b = InfoHash::default();

-impl std::convert::From<&[u8]> for InfoHash {
-    fn from(data: &[u8]) -> InfoHash {
-        assert_eq!(data.len(), 20);
-        let mut ret = InfoHash([0u8; 20]);
-        ret.0.clone_from_slice(data);
-        ret
-    }
-}
-
-impl std::convert::From<[u8; 20]> for InfoHash {
-    fn from(val: [u8; 20]) -> Self {
-        InfoHash(val)
-    }
-}
-
-/// Errors that can occur when converting from a `Vec<u8>` to an `InfoHash`.
-#[derive(Error, Debug)]
-pub enum ConversionError {
-    /// Not enough bytes for infohash. An infohash is 20 bytes.
-    #[error("not enough bytes for infohash: {message} {location}")]
-    NotEnoughBytes {
-        location: &'static Location<'static>,
-        message: String,
-    },
-    /// Too many bytes for infohash. An infohash is 20 bytes.
-    #[error("too many bytes for infohash: {message} {location}")]
-    TooManyBytes {
-        location: &'static Location<'static>,
-        message: String,
-    },
-}
-
-impl TryFrom<Vec<u8>> for InfoHash {
-    type Error = ConversionError;
+        let mut hasher = DefaultHasher::new();
+        seed.hash(&mut hasher);

-    fn try_from(bytes: Vec<u8>) -> Result<Self, Self::Error> {
-        if bytes.len() < INFO_HASH_BYTES_LEN {
-            return Err(ConversionError::NotEnoughBytes {
-                location: Location::caller(),
-                message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN},
-            });
-        }
-        if bytes.len() > INFO_HASH_BYTES_LEN {
-            return Err(ConversionError::TooManyBytes {
-                location: Location::caller(),
-                message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN},
-            });
+        for u in &mut buf_a {
+            seed.hash(&mut hasher);
+            *u = hasher.finish().to_le_bytes();
         }
-        Ok(Self::from_bytes(&bytes))
-    }
-}
-
-impl serde::ser::Serialize for InfoHash {
-    fn serialize<S: serde::ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
-        let mut buffer = [0u8; 40];
-        let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap();
-        let str_out = std::str::from_utf8(bytes_out).unwrap();
-        serializer.serialize_str(str_out)
-    }
-}
-
-impl<'de> serde::de::Deserialize<'de> for InfoHash {
-    fn deserialize<D: serde::de::Deserializer<'de>>(des: D) -> Result<Self, D::Error> {
-        des.deserialize_str(InfoHashVisitor)
-    }
-}
-
-struct InfoHashVisitor;
-
-impl<'v> serde::de::Visitor<'v> for InfoHashVisitor {
-    type Value = InfoHash;
-
-    fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(formatter, "a 40 character long hash")
-    }
-
-    fn visit_str<E: serde::de::Error>(self, v: &str) -> Result<Self::Value, E> {
-        if v.len() != 40 {
-            return Err(serde::de::Error::invalid_value(
-                serde::de::Unexpected::Str(v),
-                &"a 40 character long string",
-            ));
+        for (a, b) in buf_a.iter().flat_map(|a| a.iter()).zip(buf_b.0.iter_mut()) {
+            *b = *a;
         }
-
-        let mut res = InfoHash([0u8; 20]);
-
-        if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() {
-            return Err(serde::de::Error::invalid_value(
-                serde::de::Unexpected::Str(v),
-                &"a hexadecimal string",
-            ));
-        };
-        Ok(res)
+        buf_b
     }
 }
diff --git a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs
index 6cae79888..b872e76e9 100644
--- a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs
+++ b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs
@@ -3,9 +3,9 @@ use std::net::{IpAddr, Ipv4Addr};
 use std::str::FromStr;

 use serde_repr::Serialize_repr;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::peer;

-use crate::core::peer::Id;
-use crate::shared::bit_torrent::info_hash::InfoHash;
 use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20};

 pub struct Query {
@@ -99,7 +99,7 @@ impl QueryBuilder {
                 peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)),
                 downloaded: 0,
                 uploaded: 0,
-                peer_id: Id(*b"-qB00000000000000001").0,
+                peer_id: peer::Id(*b"-qB00000000000000001").0,
                 port: 17548,
                 left: 0,
                 event: Some(Event::Completed),
@@ -117,7 +117,7 @@ impl QueryBuilder {
     }

     #[must_use]
-    pub fn with_peer_id(mut self, peer_id: &Id) -> Self {
+    pub fn with_peer_id(mut self, peer_id: &peer::Id) -> Self {
         self.announce_query.peer_id = peer_id.0;
         self
     }
diff --git a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs
index 4fa49eed6..4d12fc2d2 100644
--- a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs
+++ b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs
@@ -2,7 +2,8 @@ use std::error::Error;
 use std::fmt::{self};
 use std::str::FromStr;

-use crate::shared::bit_torrent::info_hash::InfoHash;
+use torrust_tracker_primitives::info_hash::InfoHash;
+
 use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20};

 pub struct Query {
diff --git a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs
index e75cc6671..15ec446cb 100644
--- a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs
+++ b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs
@@ -1,8 +1,7 @@
 use std::net::{IpAddr, Ipv4Addr, SocketAddr};

 use serde::{Deserialize, Serialize};
-
-use crate::core::peer::Peer;
+use torrust_tracker_primitives::peer;

 #[derive(Serialize, Deserialize, Debug, PartialEq)]
 pub struct Announce {
@@ -23,8 +22,8 @@ pub struct DictionaryPeer {
     pub port: u16,
 }

-impl From<Peer> for DictionaryPeer {
-    fn from(peer: Peer) -> Self {
+impl From<peer::Peer> for DictionaryPeer {
+    fn from(peer: peer::Peer) -> Self {
         DictionaryPeer {
             peer_id: peer.peer_id.to_bytes().to_vec(),
             ip: peer.peer_addr.ip().to_string(),
diff --git a/src/shared/clock/mod.rs b/src/shared/clock/mod.rs
deleted file mode 100644
index 6d9d4112a..000000000
--- a/src/shared/clock/mod.rs
+++ /dev/null
@@ -1,395 +0,0 @@
-//! Time related functions and types.
-//!
-//! It's usually a good idea to control where the time comes from
-//! in an application so that it can be mocked for testing and it can be
-//! controlled in production so we get the intended behavior without
-//! relying on the specific time zone for the underlying system.
-//!
-//! Clocks use the type `DurationSinceUnixEpoch` which is a
-//! `std::time::Duration` since the Unix Epoch (timestamp).
-//!
-//! ```text
-//! Local time: lun 2023-03-27 16:12:00 WEST
-//! Universal time: lun 2023-03-27 15:12:00 UTC
-//! Time zone: Atlantic/Canary (WEST, +0100)
-//! Timestamp: 1679929914
-//! Duration: 1679929914.10167426
-//! ```
-//!
-//! > **NOTICE**: internally the `Duration` is stores it's main unit as seconds in a `u64` and it will
-//! overflow in 584.9 billion years.
-//!
-//! > **NOTICE**: the timestamp does not depend on the time zone. That gives you
-//! the ability to use the clock regardless of the underlying system time zone
-//! configuration. See [Unix time Wikipedia entry](https://en.wikipedia.org/wiki/Unix_time).
-pub mod static_time;
-pub mod time_extent;
-pub mod utils;
-
-use std::num::IntErrorKind;
-use std::str::FromStr;
-use std::time::Duration;
-
-use chrono::{DateTime, Utc};
-
-/// Duration since the Unix Epoch.
-pub type DurationSinceUnixEpoch = Duration;
-
-/// Clock types.
-#[derive(Debug)]
-pub enum Type {
-    /// Clock that returns the current time.
-    WorkingClock,
-    /// Clock that returns always the same fixed time.
-    StoppedClock,
-}
-
-/// A generic structure that represents a clock.
-///
-/// It can be either the working clock (production) or the stopped clock
-/// (testing). It implements the `Time` trait, which gives you the current time.
-#[derive(Debug)]
-pub struct Clock<const T: usize>;
-
-/// The working clock. It returns the current time.
-pub type Working = Clock<{ Type::WorkingClock as usize }>;
-/// The stopped clock. It returns always the same fixed time.
-pub type Stopped = Clock<{ Type::StoppedClock as usize }>;
-
-/// The current clock. Defined at compilation time.
-/// It can be either the working clock (production) or the stopped clock (testing).
-#[cfg(not(test))]
-pub type Current = Working;
-
-/// The current clock. Defined at compilation time.
-/// It can be either the working clock (production) or the stopped clock (testing).
-#[cfg(test)]
-pub type Current = Stopped;
-
-/// Trait for types that can be used as a timestamp clock.
-pub trait Time: Sized {
-    fn now() -> DurationSinceUnixEpoch;
-}
-
-/// Trait for types that can be manipulate the current time in order to
-/// get time in the future or in the past after or before a duration of time.
-pub trait TimeNow: Time {
-    #[must_use]
-    fn add(add_time: &Duration) -> Option<DurationSinceUnixEpoch> {
-        Self::now().checked_add(*add_time)
-    }
-    #[must_use]
-    fn sub(sub_time: &Duration) -> Option<DurationSinceUnixEpoch> {
-        Self::now().checked_sub(*sub_time)
-    }
-}
-
-/// It converts a string in ISO 8601 format to a timestamp.
-/// For example, the string `1970-01-01T00:00:00.000Z` which is the Unix Epoch
-/// will be converted to a timestamp of 0: `DurationSinceUnixEpoch::ZERO`.
-///
-/// # Panics
-///
-/// Will panic if the input time cannot be converted to `DateTime::<Utc>`, internally using the `i64` type.
-/// (this will naturally happen in 292.5 billion years)
-#[must_use]
-pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEpoch {
-    convert_from_datetime_utc_to_timestamp(&DateTime::<Utc>::from_str(iso_8601).unwrap())
-}
-
-/// It converts a `DateTime::<Utc>` to a timestamp.
-/// For example, the `DateTime::<Utc>` of the Unix Epoch will be converted to a
-/// timestamp of 0: `DurationSinceUnixEpoch::ZERO`.
-///
-/// # Panics
-///
-/// Will panic if the input time overflows the `u64` type.
-/// (this will naturally happen in 584.9 billion years)
-#[must_use]
-pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime<Utc>) -> DurationSinceUnixEpoch {
-    DurationSinceUnixEpoch::from_secs(u64::try_from(datetime_utc.timestamp()).expect("Overflow of u64 seconds, very future!"))
-}
-
-/// It converts a timestamp to a `DateTime::<Utc>`.
-/// For example, the timestamp of 0: `DurationSinceUnixEpoch::ZERO` will be
-/// converted to the `DateTime::<Utc>` of the Unix Epoch.
-///
-/// # Panics
-///
-/// Will panic if the input time overflows the `u64` seconds overflows the `i64` type.
-/// (this will naturally happen in 292.5 billion years)
-#[must_use]
-pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime<Utc> {
-    DateTime::from_timestamp(
-        i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"),
-        duration.subsec_nanos(),
-    )
-    .unwrap()
-}
-
-#[cfg(test)]
-mod tests {
-    use std::any::TypeId;
-
-    use crate::shared::clock::{Current, Stopped, Time, Working};
-
-    #[test]
-    fn it_should_be_the_stopped_clock_as_default_when_testing() {
-        // We are testing, so we should default to the fixed time.
-        assert_eq!(TypeId::of::<Stopped>(), TypeId::of::<Current>());
-        assert_eq!(Stopped::now(), Current::now());
-    }
-
-    #[test]
-    fn it_should_have_different_times() {
-        assert_ne!(TypeId::of::<Stopped>(), TypeId::of::<Working>());
-        assert_ne!(Stopped::now(), Working::now());
-    }
-
-    mod timestamp {
-        use chrono::DateTime;
-
-        use crate::shared::clock::{
-            convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc,
-            DurationSinceUnixEpoch,
-        };
-
-        #[test]
-        fn should_be_converted_to_datetime_utc() {
-            let timestamp = DurationSinceUnixEpoch::ZERO;
-            assert_eq!(
-                convert_from_timestamp_to_datetime_utc(timestamp),
-                DateTime::from_timestamp(0, 0).unwrap()
-            );
-        }
-
-        #[test]
-        fn should_be_converted_from_datetime_utc() {
-            let datetime = DateTime::from_timestamp(0, 0).unwrap();
-            assert_eq!(
-                convert_from_datetime_utc_to_timestamp(&datetime),
-                DurationSinceUnixEpoch::ZERO
-            );
-        }
-
-        #[test]
-        fn should_be_converted_from_datetime_utc_in_iso_8601() {
-            let iso_8601 = "1970-01-01T00:00:00.000Z".to_string();
-            assert_eq!(convert_from_iso_8601_to_timestamp(&iso_8601), DurationSinceUnixEpoch::ZERO);
-        }
-    }
-}
-
-mod working_clock {
-    use std::time::SystemTime;
-
-    use super::{DurationSinceUnixEpoch, Time, TimeNow, Working};
-
-    impl Time for Working {
-        fn now() -> DurationSinceUnixEpoch {
-            SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap()
-        }
-    }
-
-    impl TimeNow for Working {}
-}
-
-/// Trait for types that can be used as a timestamp clock stopped
-/// at a given time.
-pub trait StoppedTime: TimeNow {
-    /// It sets the clock to a given time.
-    fn local_set(unix_time: &DurationSinceUnixEpoch);
-
-    /// It sets the clock to the Unix Epoch.
-    fn local_set_to_unix_epoch() {
-        Self::local_set(&DurationSinceUnixEpoch::ZERO);
-    }
-
-    /// It sets the clock to the time the application started.
-    fn local_set_to_app_start_time();
-
-    /// It sets the clock to the current system time.
-    fn local_set_to_system_time_now();
-
-    /// It adds a `Duration` to the clock.
-    ///
-    /// # Errors
-    ///
-    /// Will return `IntErrorKind` if `duration` would overflow the internal `Duration`.
-    fn local_add(duration: &Duration) -> Result<(), IntErrorKind>;
-
-    /// It subtracts a `Duration` from the clock.
-    /// # Errors
-    ///
-    /// Will return `IntErrorKind` if `duration` would underflow the internal `Duration`.
-    fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>;
-
-    /// It resets the clock to default fixed time that is application start time (or the unix epoch when testing).
-    fn local_reset();
-}
-
-mod stopped_clock {
-    use std::num::IntErrorKind;
-    use std::time::Duration;
-
-    use super::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow};
-
-    impl Time for Stopped {
-        fn now() -> DurationSinceUnixEpoch {
-            detail::FIXED_TIME.with(|time| {
-                return *time.borrow();
-            })
-        }
-    }
-
-    impl TimeNow for Stopped {}
-
-    impl StoppedTime for Stopped {
-        fn local_set(unix_time: &DurationSinceUnixEpoch) {
-            detail::FIXED_TIME.with(|time| {
-                *time.borrow_mut() = *unix_time;
-            });
-        }
-
-        fn local_set_to_app_start_time() {
-            Self::local_set(&detail::get_app_start_time());
-        }
-
-        fn local_set_to_system_time_now() {
-            Self::local_set(&detail::get_app_start_time());
-        }
-
-        fn local_add(duration: &Duration) -> Result<(), IntErrorKind> {
-            detail::FIXED_TIME.with(|time| {
-                let time_borrowed = *time.borrow();
-                *time.borrow_mut() = match time_borrowed.checked_add(*duration) {
-                    Some(time) => time,
-                    None => {
-                        return Err(IntErrorKind::PosOverflow);
-                    }
-                };
-                Ok(())
-            })
-        }
-
-        fn local_sub(duration: &Duration) -> Result<(), IntErrorKind> {
-            detail::FIXED_TIME.with(|time| {
-                let time_borrowed = *time.borrow();
-                *time.borrow_mut() = match time_borrowed.checked_sub(*duration) {
-                    Some(time) => time,
-                    None => {
-                        return Err(IntErrorKind::NegOverflow);
-                    }
-                };
-                Ok(())
-            })
-        }
-
-        fn local_reset() {
-            Self::local_set(&detail::get_default_fixed_time());
-        }
-    }
-
-    #[cfg(test)]
-    mod tests {
-        use std::thread;
-        use std::time::Duration;
-
-        use crate::shared::clock::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow, Working};
-
-        #[test]
-        fn it_should_default_to_zero_when_testing() {
-            assert_eq!(Stopped::now(), DurationSinceUnixEpoch::ZERO);
-        }
-
-        #[test]
-        fn it_should_possible_to_set_the_time() {
-            // Check we start with ZERO.
-            assert_eq!(Stopped::now(), Duration::ZERO);
-
-            // Set to Current Time and Check
-            let timestamp = Working::now();
-            Stopped::local_set(&timestamp);
-            assert_eq!(Stopped::now(), timestamp);
-
-            // Elapse the Current Time and Check
-            Stopped::local_add(&timestamp).unwrap();
-            assert_eq!(Stopped::now(), timestamp + timestamp);
-
-            // Reset to ZERO and Check
-            Stopped::local_reset();
-            assert_eq!(Stopped::now(), Duration::ZERO);
-        }
-
-        #[test]
-        fn it_should_default_to_zero_on_thread_exit() {
-            assert_eq!(Stopped::now(), Duration::ZERO);
-            let after5 = Working::add(&Duration::from_secs(5)).unwrap();
-            Stopped::local_set(&after5);
-            assert_eq!(Stopped::now(), after5);
-
-            let t = thread::spawn(move || {
-                // each thread starts out with the initial value of ZERO
-                assert_eq!(Stopped::now(), Duration::ZERO);
-
-                // and gets set to the current time.
-                let timestamp = Working::now();
-                Stopped::local_set(&timestamp);
-                assert_eq!(Stopped::now(), timestamp);
-            });
-
-            // wait for the thread to complete and bail out on panic
-            t.join().unwrap();
-
-            // we retain our original value of current time + 5sec despite the child thread
-            assert_eq!(Stopped::now(), after5);
-
-            // Reset to ZERO and Check
-            Stopped::local_reset();
-            assert_eq!(Stopped::now(), Duration::ZERO);
-        }
-    }
-
-    mod detail {
-        use std::cell::RefCell;
-        use std::time::SystemTime;
-
-        use crate::shared::clock::{static_time, DurationSinceUnixEpoch};
-
-        pub fn get_app_start_time() -> DurationSinceUnixEpoch {
-            (*static_time::TIME_AT_APP_START)
-                .duration_since(SystemTime::UNIX_EPOCH)
-                .unwrap()
-        }
-
-        #[cfg(not(test))]
-        pub fn get_default_fixed_time() -> DurationSinceUnixEpoch {
-            get_app_start_time()
-        }
-
-        #[cfg(test)]
-        pub fn get_default_fixed_time() -> DurationSinceUnixEpoch {
-            DurationSinceUnixEpoch::ZERO
-        }
-
-        thread_local!(pub static FIXED_TIME: RefCell<DurationSinceUnixEpoch> = RefCell::new(get_default_fixed_time()));
-
-        #[cfg(test)]
-        mod tests {
-            use std::time::Duration;
-
-            use crate::shared::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time};
-
-            #[test]
-            fn it_should_get_the_zero_start_time_when_testing() {
-                assert_eq!(get_default_fixed_time(), Duration::ZERO);
-            }
-
-            #[test]
-            fn it_should_get_app_start_time() {
-                const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1_662_983_731, 22312);
-                assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST);
-            }
-        }
-    }
-}
diff --git a/src/shared/clock/utils.rs b/src/shared/clock/utils.rs
deleted file mode 100644
index 94d88d288..000000000
--- a/src/shared/clock/utils.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-//! It contains helper functions related to time.
-use super::DurationSinceUnixEpoch;
-
-/// Serializes a `DurationSinceUnixEpoch` as a Unix timestamp in milliseconds.
-/// # Errors
-///
-/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`.
-pub fn ser_unix_time_value<S: serde::Serializer>(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result<S::Ok, S::Error> {
-    #[allow(clippy::cast_possible_truncation)]
-    ser.serialize_u64(unix_time_value.as_millis() as u64)
-}
diff --git a/src/shared/mod.rs b/src/shared/mod.rs
index f016ba913..8c95effe1 100644
--- a/src/shared/mod.rs
+++ b/src/shared/mod.rs
@@ -1,8 +1,6 @@
 //! Modules with generic logic used by several modules.
 //!
 //! - [`bit_torrent`]: `BitTorrent` protocol related logic.
-//! - [`clock`]: Times services.
 //! - [`crypto`]: Encryption related logic.
 pub mod bit_torrent;
-pub mod clock;
 pub mod crypto;
diff --git a/tests/common/clock.rs b/tests/common/clock.rs
new file mode 100644
index 000000000..5d94bb83d
--- /dev/null
+++ b/tests/common/clock.rs
@@ -0,0 +1,16 @@
+use std::time::Duration;
+
+use torrust_tracker_clock::clock::Time;
+
+use crate::CurrentClock;
+
+#[test]
+fn it_should_use_stopped_time_for_testing() {
+    assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned());
+
+    let time = CurrentClock::now();
+    std::thread::sleep(Duration::from_millis(50));
+    let time_2 = CurrentClock::now();
+
+    assert_eq!(time, time_2);
+}
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index b57996292..281c1fb9c 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -1,3 +1,4 @@
+pub mod clock;
 pub mod fixtures;
 pub mod http;
 pub mod udp;
diff --git a/tests/integration.rs b/tests/integration.rs
index 5d66d9074..8e3d46826 100644
--- a/tests/integration.rs
+++ b/tests/integration.rs
@@ -3,5 +3,18 @@
 //! ```text
 //! cargo test --test integration
 //! ```
+
+use torrust_tracker_clock::clock;

 mod common;
 mod servers;
+
+/// This code needs to be copied into each crate.
+/// Working version, for production.
+#[cfg(not(test))]
+#[allow(dead_code)]
+pub(crate) type CurrentClock = clock::Working;
+
+/// Stopped version, for testing.
+#[cfg(test)]
+#[allow(dead_code)]
+pub(crate) type CurrentClock = clock::Stopped;
diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs
index 186b7ea3b..8d91f3ae8 100644
--- a/tests/servers/api/environment.rs
+++ b/tests/servers/api/environment.rs
@@ -4,12 +4,12 @@ use std::sync::Arc;
 use futures::executor::block_on;
 use torrust_tracker::bootstrap::app::initialize_with_configuration;
 use torrust_tracker::bootstrap::jobs::make_rust_tls;
-use torrust_tracker::core::peer::Peer;
 use torrust_tracker::core::Tracker;
 use torrust_tracker::servers::apis::server::{ApiServer, Launcher, Running, Stopped};
 use torrust_tracker::servers::registar::Registar;
-use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
 use torrust_tracker_configuration::{Configuration, HttpApi};
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::peer;

 use super::connection_info::ConnectionInfo;

@@ -22,7 +22,7 @@ pub struct Environment {
 impl Environment {
     /// Add a torrent to the tracker
-    pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) {
+    pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) {
         self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await;
     }
 }
diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs
index 54263f8b8..af6587673 100644
--- a/tests/servers/api/v1/contract/context/stats.rs
+++ b/tests/servers/api/v1/contract/context/stats.rs
@@ -1,8 +1,8 @@
 use std::str::FromStr;

-use torrust_tracker::core::peer::fixture::PeerBuilder;
 use torrust_tracker::servers::apis::v1::context::stats::resources::Stats;
-use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::peer::fixture::PeerBuilder;
 use torrust_tracker_test_helpers::configuration;

 use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token};
diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs
index ee701ecc4..d54935f80 100644
--- a/tests/servers/api/v1/contract/context/torrent.rs
+++ b/tests/servers/api/v1/contract/context/torrent.rs
@@ -1,9 +1,9 @@
 use std::str::FromStr;

-use torrust_tracker::core::peer::fixture::PeerBuilder;
 use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer;
 use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent};
-use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::peer::fixture::PeerBuilder;
 use torrust_tracker_test_helpers::configuration;

 use crate::common::http::{Query, QueryParam};
diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs
index 358a4a19e..29064ec9e 100644
--- a/tests/servers/api/v1/contract/context/whitelist.rs
+++ b/tests/servers/api/v1/contract/context/whitelist.rs
@@ -1,6 +1,6 @@
 use std::str::FromStr;

-use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
+use torrust_tracker_primitives::info_hash::InfoHash;
 use torrust_tracker_test_helpers::configuration;

 use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token};
diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs
index 37344858d..0856985d5 100644
--- a/tests/servers/health_check_api/environment.rs
+++ b/tests/servers/health_check_api/environment.rs
@@ -12,6 +12,7 @@ use torrust_tracker_configuration::HealthCheckApi;

 #[derive(Debug)]
 pub enum Error {
+    #[allow(dead_code)]
     Error(String),
 }
diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs
index 326f4e534..5638713aa 100644
--- a/tests/servers/http/environment.rs
+++ b/tests/servers/http/environment.rs
@@ -3,12 +3,12 @@ use std::sync::Arc;
 use futures::executor::block_on;
 use torrust_tracker::bootstrap::app::initialize_with_configuration;
 use torrust_tracker::bootstrap::jobs::make_rust_tls;
-use torrust_tracker::core::peer::Peer;
 use torrust_tracker::core::Tracker;
 use torrust_tracker::servers::http::server::{HttpServer, Launcher, Running, Stopped};
 use torrust_tracker::servers::registar::Registar;
-use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
 use torrust_tracker_configuration::{Configuration, HttpTracker};
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::peer;

 pub struct Environment {
     pub config: Arc<Configuration>,
@@ -19,7 +19,7 @@ impl Environment {
     /// Add a torrent to the tracker
-    pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) {
+    pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) {
         self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await;
     }
 }
diff --git a/tests/servers/http/requests/announce.rs b/tests/servers/http/requests/announce.rs
index 2cc615d0f..061990621 100644
--- a/tests/servers/http/requests/announce.rs
+++ b/tests/servers/http/requests/announce.rs
@@ -3,8 +3,8 @@ use std::net::{IpAddr, Ipv4Addr};
 use std::str::FromStr;

 use serde_repr::Serialize_repr;
-use torrust_tracker::core::peer::Id;
-use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::peer;

 use crate::servers::http::{percent_encode_byte_array, ByteArray20};

@@ -93,7 +93,7 @@ impl QueryBuilder {
             peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)),
             downloaded: 0,
             uploaded: 0,
-            peer_id: Id(*b"-qB00000000000000001").0,
+            peer_id: peer::Id(*b"-qB00000000000000001").0,
             port: 17548,
             left: 0,
             event: Some(Event::Completed),
@@ -109,7 +109,7 @@ impl QueryBuilder {
         self
     }

-    pub fn with_peer_id(mut self, peer_id: &Id) -> Self {
+    pub fn with_peer_id(mut self, peer_id: &peer::Id) -> Self {
         self.announce_query.peer_id = peer_id.0;
         self
     }
diff --git a/tests/servers/http/requests/scrape.rs b/tests/servers/http/requests/scrape.rs
index 264c72c33..f66605855 100644
--- a/tests/servers/http/requests/scrape.rs
+++ b/tests/servers/http/requests/scrape.rs
@@ -1,7 +1,7 @@
 use std::fmt;
 use std::str::FromStr;

-use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
+use torrust_tracker_primitives::info_hash::InfoHash;

 use crate::servers::http::{percent_encode_byte_array, ByteArray20};
diff --git a/tests/servers/http/responses/announce.rs b/tests/servers/http/responses/announce.rs
index 968c327eb..2b49b4405 100644
--- a/tests/servers/http/responses/announce.rs
+++ b/tests/servers/http/responses/announce.rs
@@ -1,7 +1,7 @@
 use std::net::{IpAddr, Ipv4Addr, SocketAddr};

 use serde::{Deserialize, Serialize};
-use torrust_tracker::core::peer::Peer;
+use torrust_tracker_primitives::peer;

 #[derive(Serialize, Deserialize, Debug, PartialEq)]
 pub struct Announce {
@@ -22,8 +22,8 @@ pub struct DictionaryPeer {
     pub port: u16,
 }

-impl From<Peer> for DictionaryPeer {
-    fn from(peer: Peer) -> Self {
+impl From<peer::Peer> for DictionaryPeer {
+    fn from(peer: peer::Peer) -> Self {
         DictionaryPeer {
             peer_id: peer.peer_id.to_bytes().to_vec(),
             ip: peer.peer_addr.ip().to_string(),
diff --git a/tests/servers/http/responses/scrape.rs b/tests/servers/http/responses/scrape.rs
index eadecb603..fc741cbf4 100644
--- a/tests/servers/http/responses/scrape.rs
+++ b/tests/servers/http/responses/scrape.rs
@@ -73,9 +73,13 @@ impl ResponseBuilder {

 #[derive(Debug)]
 pub enum BencodeParseError {
+    #[allow(dead_code)]
     InvalidValueExpectedDict { value: Value },
+    #[allow(dead_code)]
     InvalidValueExpectedInt { value: Value },
+    #[allow(dead_code)]
     InvalidFileField { value: Value },
+    #[allow(dead_code)]
     MissingFileField { field_name: String },
 }
diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs
index be285dcd7..a7962db0f 100644
--- a/tests/servers/http/v1/contract.rs
+++ b/tests/servers/http/v1/contract.rs
@@ -89,9 +89,9 @@ mod for_all_config_modes {
     use local_ip_address::local_ip;
     use reqwest::{Response, StatusCode};
     use tokio::net::TcpListener;
-    use torrust_tracker::core::peer;
-    use torrust_tracker::core::peer::fixture::PeerBuilder;
-    use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
+    use torrust_tracker_primitives::info_hash::InfoHash;
+    use torrust_tracker_primitives::peer;
+    use torrust_tracker_primitives::peer::fixture::PeerBuilder;
    use torrust_tracker_test_helpers::configuration;

     use crate::common::fixtures::invalid_info_hashes;
@@ -750,7 +750,7 @@ mod for_all_config_modes {
             assert_eq!(status, StatusCode::OK);
         }

-        let peers = env.tracker.get_torrent_peers(&info_hash).await;
+        let peers = env.tracker.get_torrent_peers(&info_hash);
         let peer_addr = peers[0].peer_addr;

         assert_eq!(peer_addr.ip(), client_ip);
@@ -786,7 +786,7 @@ mod for_all_config_modes {
             assert_eq!(status, StatusCode::OK);
         }

-        let peers = env.tracker.get_torrent_peers(&info_hash).await;
+        let peers = env.tracker.get_torrent_peers(&info_hash);
         let peer_addr = peers[0].peer_addr;

         assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap());
@@ -826,7 +826,7 @@ mod for_all_config_modes {
             assert_eq!(status, StatusCode::OK);
         }

-        let peers = env.tracker.get_torrent_peers(&info_hash).await;
+        let peers = env.tracker.get_torrent_peers(&info_hash);
         let peer_addr = peers[0].peer_addr;

         assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap());
@@ -864,7 +864,7 @@ mod for_all_config_modes {
             assert_eq!(status, StatusCode::OK);
         }

-        let peers = env.tracker.get_torrent_peers(&info_hash).await;
+        let peers = env.tracker.get_torrent_peers(&info_hash);
         let peer_addr = peers[0].peer_addr;

         assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap());
@@ -887,9 +887,9 @@ mod for_all_config_modes {
         use std::str::FromStr;

         use tokio::net::TcpListener;
-        use torrust_tracker::core::peer;
-        use torrust_tracker::core::peer::fixture::PeerBuilder;
-        use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
+        use torrust_tracker_primitives::info_hash::InfoHash;
+        use torrust_tracker_primitives::peer;
+        use torrust_tracker_primitives::peer::fixture::PeerBuilder;
         use torrust_tracker_test_helpers::configuration;

         use crate::common::fixtures::invalid_info_hashes;
@@ -1113,7 +1113,7 @@ mod configured_as_whitelisted {
     mod and_receiving_an_announce_request {
         use std::str::FromStr;

-        use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
+        use torrust_tracker_primitives::info_hash::InfoHash;
         use torrust_tracker_test_helpers::configuration;

         use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response};
@@ -1160,9 +1160,9 @@ mod configured_as_whitelisted {
     mod receiving_an_scrape_request {
         use std::str::FromStr;

-        use torrust_tracker::core::peer;
-        use torrust_tracker::core::peer::fixture::PeerBuilder;
-        use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
+        use torrust_tracker_primitives::info_hash::InfoHash;
+        use torrust_tracker_primitives::peer;
+        use torrust_tracker_primitives::peer::fixture::PeerBuilder;
         use torrust_tracker_test_helpers::configuration;

         use crate::servers::http::asserts::assert_scrape_response;
@@ -1253,7 +1253,7 @@ mod configured_as_private {
         use std::time::Duration;

         use torrust_tracker::core::auth::Key;
-        use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
+        use torrust_tracker_primitives::info_hash::InfoHash;
         use torrust_tracker_test_helpers::configuration;

         use crate::servers::http::asserts::{assert_authentication_error_response, assert_is_announce_response};
@@ -1329,9 +1329,9 @@ mod configured_as_private {
         use std::time::Duration;

         use torrust_tracker::core::auth::Key;
-        use torrust_tracker::core::peer;
-        use torrust_tracker::core::peer::fixture::PeerBuilder;
-        use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
+        use torrust_tracker_primitives::info_hash::InfoHash;
+        use torrust_tracker_primitives::peer;
+        use torrust_tracker_primitives::peer::fixture::PeerBuilder;
         use torrust_tracker_test_helpers::configuration;

         use crate::servers::http::asserts::{assert_authentication_error_response, assert_scrape_response};
diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs
index da7705016..12f4aeb9e 100644
--- a/tests/servers/udp/environment.rs
+++ b/tests/servers/udp/environment.rs
@@ -2,12 +2,12 @@ use std::net::SocketAddr;
 use std::sync::Arc;

 use torrust_tracker::bootstrap::app::initialize_with_configuration;
-use torrust_tracker::core::peer::Peer;
 use torrust_tracker::core::Tracker;
 use torrust_tracker::servers::registar::Registar;
 use torrust_tracker::servers::udp::server::{Launcher, Running, Stopped, UdpServer};
-use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
 use torrust_tracker_configuration::{Configuration, UdpTracker};
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::peer;

 pub struct Environment {
     pub config: Arc<Configuration>,
@@ -19,7 +19,7 @@ impl Environment {
     /// Add a torrent to the tracker
     #[allow(dead_code)]
-    pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) {
+    pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) {
         self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await;
     }
 }
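
Reviewer note on `TorrentPeerBuilder` (src/servers/udp/handlers.rs): the rewritten builder derives `Default` and uses struct-update syntax (`..Default::default()`) so `new()` only has to override the `updated` timestamp. A minimal, self-contained sketch of that shape follows; the `Peer` fields here are simplified stand-ins for the real `peer::Peer`, so treat it as an illustration rather than the crate's code.

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

// Simplified stand-in for `peer::Peer`; the real struct has more fields.
#[derive(Debug, Default, Clone, PartialEq)]
struct Peer {
    peer_id: [u8; 20],
    peer_addr: Option<SocketAddr>,
    left: i64,
}

#[derive(Debug, Default)]
struct TorrentPeerBuilder {
    peer: Peer,
}

impl TorrentPeerBuilder {
    #[must_use]
    fn new() -> Self {
        // Everything starts from the derived `Default`; callers override
        // only the fields they care about via the `with_*` methods.
        Self::default()
    }

    #[must_use]
    fn with_peer_id(mut self, peer_id: [u8; 20]) -> Self {
        self.peer.peer_id = peer_id;
        self
    }

    #[must_use]
    fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self {
        self.peer.peer_addr = Some(peer_addr);
        self
    }

    #[must_use]
    fn with_number_of_bytes_left(mut self, left: i64) -> Self {
        self.peer.left = left;
        self
    }

    // Inherent method shadowing the `Into` trait method, as in the diff.
    #[must_use]
    fn into(self) -> Peer {
        self.peer
    }
}

fn main() {
    let peer = TorrentPeerBuilder::new()
        .with_peer_id([255u8; 20])
        .with_peer_address(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080))
        .with_number_of_bytes_left(0)
        .into();

    assert_eq!(peer.peer_id, [255u8; 20]);
    println!("{peer:?}");
}
```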
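Reviewer note on `peer_builder::from_request` (src/servers/udp/peer_builder.rs): the announce event now crosses the crate boundary as an integer (`AnnounceEvent::from_i32(event.to_i32())`), which avoids coupling the two enum definitions directly. The toy mirror below uses made-up enum names, and the numeric encoding merely assumes the BEP 15 values (none = 0, completed = 1, started = 2, stopped = 3); check the real `to_i32`/`from_i32` implementations before relying on those numbers.

```rust
// Two structurally-identical enums from different crates, bridged through
// a shared integer encoding instead of a direct type dependency.
#[derive(Debug, Clone, Copy, PartialEq)]
enum WireEvent {
    Started,
    Stopped,
    Completed,
    None,
}

#[derive(Debug, Clone, Copy, PartialEq)]
enum DomainEvent {
    Started,
    Stopped,
    Completed,
    None,
}

impl WireEvent {
    // Assumed BEP 15 wire values; illustrative only.
    fn to_i32(self) -> i32 {
        match self {
            WireEvent::None => 0,
            WireEvent::Completed => 1,
            WireEvent::Started => 2,
            WireEvent::Stopped => 3,
        }
    }
}

impl DomainEvent {
    fn from_i32(i: i32) -> Self {
        match i {
            1 => DomainEvent::Completed,
            2 => DomainEvent::Started,
            3 => DomainEvent::Stopped,
            _ => DomainEvent::None,
        }
    }
}

fn main() {
    let wire = WireEvent::Started;
    let domain = DomainEvent::from_i32(wire.to_i32());
    assert_eq!(domain, DomainEvent::Started);
}
```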
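Reviewer note on the deleted `AnnounceEventDef`/`NumberOfBytesDef` (src/shared/bit_torrent/common.rs): those types existed only to drive serde's `remote` derive, which generates `Serialize`/`Deserialize` impls for a foreign type you cannot annotate. The sketch below reconstructs that pattern with a local stand-in for the foreign enum and a hypothetical `AnnounceLogEntry` container, assuming `serde` and `serde_json` as dependencies. Once the enum moves into a workspace crate that can derive serde directly, the mirror becomes dead weight, which is why this diff can drop it.

```rust
use serde::{Deserialize, Serialize};

// Pretend this enum comes from a foreign crate and cannot be annotated.
pub enum AnnounceEvent {
    Started,
    Stopped,
    Completed,
    None,
}

// Local mirror: serde generates impls *for* `AnnounceEvent` through it.
#[derive(Serialize, Deserialize)]
#[serde(remote = "AnnounceEvent")]
pub enum AnnounceEventDef {
    Started,
    Stopped,
    Completed,
    None,
}

#[derive(Serialize, Deserialize)]
pub struct AnnounceLogEntry {
    // `with` points serde at the functions generated on the mirror type.
    #[serde(with = "AnnounceEventDef")]
    pub event: AnnounceEvent,
}

fn main() {
    let entry = AnnounceLogEntry {
        event: AnnounceEvent::Completed,
    };
    println!("{}", serde_json::to_string(&entry).unwrap());
}
```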
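Reviewer note on `fixture::gen_seeded_infohash` (src/shared/bit_torrent/info_hash.rs): it derives 32 bytes from four successive `DefaultHasher` digests and copies only the first 20 into the info-hash, because `zip` stops at the shorter iterator. The standalone sketch below mirrors that logic against a local 20-byte newtype so it can run outside the crate:

```rust
use std::hash::{DefaultHasher, Hash, Hasher};

// Local stand-in for the crate's `InfoHash` newtype.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
struct InfoHash([u8; 20]);

fn gen_seeded_infohash(seed: &u64) -> InfoHash {
    let mut buf_a: [[u8; 8]; 4] = Default::default();
    let mut buf_b = InfoHash::default();

    // The hasher state carries over between iterations, so each of the
    // four 8-byte digests differs even though the seed is constant.
    let mut hasher = DefaultHasher::new();
    seed.hash(&mut hasher);

    for u in &mut buf_a {
        seed.hash(&mut hasher);
        *u = hasher.finish().to_le_bytes();
    }

    // 32 bytes produced, 20 consumed: `zip` ends at the shorter side.
    for (a, b) in buf_a.iter().flat_map(|a| a.iter()).zip(buf_b.0.iter_mut()) {
        *b = *a;
    }

    buf_b
}

fn main() {
    // Deterministic for a given seed and standard-library version.
    assert_eq!(gen_seeded_infohash(&42), gen_seeded_infohash(&42));
    assert_ne!(gen_seeded_infohash(&1), gen_seeded_infohash(&2));
}
```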
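Reviewer note on the `CurrentClock` aliases (tests/integration.rs) and the deleted `src/shared/clock/mod.rs`: the working/stopped clock pattern survives, just relocated into the `torrust_tracker_clock` crate. Below is a minimal std-only re-implementation of the idea; the names echo the diff, but none of this is the actual crate API, and because the fixed time is thread-local, a stopped time set in one test thread is invisible to others.

```rust
use std::cell::Cell;
use std::time::{Duration, SystemTime};

trait Time {
    // Duration since the Unix epoch.
    fn now() -> Duration;
}

// Production clock: reads the real system time.
struct Working;

impl Time for Working {
    fn now() -> Duration {
        SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap()
    }
}

// Test clock: returns a thread-local fixed time until it is changed.
#[allow(dead_code)]
struct Stopped;

thread_local!(static FIXED_TIME: Cell<Duration> = Cell::new(Duration::ZERO));

impl Stopped {
    #[allow(dead_code)]
    fn local_set(unix_time: Duration) {
        FIXED_TIME.with(|t| t.set(unix_time));
    }
}

impl Time for Stopped {
    fn now() -> Duration {
        FIXED_TIME.with(Cell::get)
    }
}

// The per-crate alias from the diff: resolved at compile time, so
// production code can never accidentally read the frozen clock.
#[cfg(not(test))]
type CurrentClock = Working;
#[cfg(test)]
type CurrentClock = Stopped;

fn main() {
    println!("now = {:?}", CurrentClock::now());
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn time_stands_still_in_tests() {
        Stopped::local_set(Duration::from_secs(100));
        assert_eq!(CurrentClock::now(), Duration::from_secs(100));
        // Unlike the system clock, repeated reads cannot drift.
        assert_eq!(CurrentClock::now(), CurrentClock::now());
    }
}
```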