diff --git a/Cargo.lock b/Cargo.lock index 61720c7..8204658 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -28,6 +28,18 @@ dependencies = [ "version_check", ] +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.1.3" @@ -37,6 +49,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + [[package]] name = "android-tzdata" version = "0.1.1" @@ -146,6 +164,15 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -273,6 +300,7 @@ dependencies = [ "serde", "serde_json", "sqlformat", + "sqlx", "testcontainers-modules", "tokio", "tokio-postgres", @@ -291,6 +319,12 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + [[package]] name = "bitflags" version = "1.3.2" @@ -302,6 +336,9 @@ name = "bitflags" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +dependencies = [ + "serde", +] [[package]] name = "bitvec" @@ -517,6 +554,21 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + [[package]] name = "core-foundation" version = "0.9.4" @@ -552,6 +604,36 @@ dependencies = [ "libc", ] +[[package]] +name = "crc" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + +[[package]] +name = "crossbeam-queue" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + [[package]] name = "crypto-common" version = "0.1.6" @@ -603,6 +685,17 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +[[package]] +name = "der" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + [[package]] name = "deranged" version = "0.3.11" @@ -632,6 +725,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", + "const-oid", "crypto-common", "subtle", ] @@ -674,11 +768,20 @@ version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f" +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + [[package]] name = "either" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +dependencies = [ + "serde", +] [[package]] name = "encoding_rs" @@ -730,6 +833,38 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +[[package]] +name = "errno" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "etcetera" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +dependencies = [ + "cfg-if", + "home", + "windows-sys 0.48.0", +] + +[[package]] +name = "event-listener" +version = "5.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + [[package]] name = "fake" version = "3.0.1" @@ -746,6 +881,23 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "flume" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" +dependencies = [ + "futures-core", + "futures-sink", + "spin", +] + [[package]] name = "fnv" version = "1.0.7" @@ -809,6 +961,17 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-intrusive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" +dependencies = [ + "futures-core", + "lock_api", + "parking_lot", +] + [[package]] name = 
"futures-io" version = "0.3.30" @@ -908,7 +1071,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.8", ] [[package]] @@ -916,6 +1079,19 @@ name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash 0.8.11", + "allocator-api2", +] + +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] [[package]] name = "hdrhistogram" @@ -996,6 +1172,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "hkdf" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" +dependencies = [ + "hmac", +] + [[package]] name = "hmac" version = "0.12.1" @@ -1300,12 +1485,21 @@ name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin", +] [[package]] name = "libc" -version = "0.2.155" +version = "0.2.168" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" + +[[package]] +name = "libm" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libredox" @@ -1317,12 +1511,29 @@ dependencies = [ "libc", ] +[[package]] +name = "libsqlite3-sys" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linked-hash-map" version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + [[package]] name = "lock_api" version = "0.4.12" @@ -1429,12 +1640,49 @@ dependencies = [ "winapi", ] +[[package]] +name = "num-bigint-dig" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand", + "smallvec", + "zeroize", +] + [[package]] name = "num-conv" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.19" @@ -1442,6 +1690,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", + "libm", ] [[package]] @@ -1477,6 +1726,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + [[package]] name = "parking_lot" version = "0.12.3" @@ -1525,6 +1780,21 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -1581,6 +1851,33 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der", + "pkcs8", + "spki", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "pkg-config" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" + [[package]] name = "postgres-protocol" version = "0.6.7" @@ -1960,6 +2257,26 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "rsa" +version = "0.9.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47c75d7c5c6b673e58bf54d8544a9f432e3a925b0e80f7cd3602ab5c50c55519" +dependencies = [ + "const-oid", + "digest", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core", + "signature", + "spki", + "subtle", + "zeroize", +] + [[package]] name = "rust_decimal" version = "1.35.0" @@ -1989,6 +2306,19 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustix" +version = "0.38.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" +dependencies = [ + "bitflags 2.6.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.59.0", +] + [[package]] name = "rustls" version = "0.22.4" @@ -2206,9 +2536,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.9.0" +version = "3.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" +checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" dependencies = [ "base64 0.22.1", "chrono", @@ -2224,9 +2554,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.9.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" +checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ "darling", "proc-macro2", @@ -2234,6 +2564,17 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + [[package]] name = "sha2" version = "0.10.8" @@ -2263,6 +2604,16 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest", + "rand_core", +] + [[package]] name = "simdutf8" version = "0.1.4" @@ -2289,6 +2640,9 @@ name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +dependencies = [ + "serde", +] [[package]] name = "socket2" @@ -2305,6 +2659,19 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] [[package]] name = "sqlformat" @@ -2316,6 +2683,200 @@ dependencies = [ "unicode_categories", ] +[[package]] +name = "sqlx" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93334716a037193fac19df402f8571269c84a00852f6a7066b5d2616dcd64d3e" +dependencies = [ + "sqlx-core", + "sqlx-macros", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", +] + +[[package]] +name = "sqlx-core" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4d8060b456358185f7d50c55d9b5066ad956956fddec42ee2e8567134a8936e" +dependencies = [ + "atoi", + "byteorder", + "bytes", + "crc", + "crossbeam-queue", + "either", + "event-listener", + "futures-channel", + "futures-core", + "futures-intrusive", + "futures-io", + "futures-util", + "hashbrown 0.14.5", + "hashlink", + "hex", + "indexmap 2.2.6", + "log", + "memchr", + "once_cell", + "paste", + "percent-encoding", + "serde", + "serde_json", + "sha2", + "smallvec", + "sqlformat", + "thiserror", + "tokio", + "tokio-stream", + "tracing", + "url", +] + +[[package]] +name = "sqlx-macros" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657" +dependencies = [ + "proc-macro2", + "quote", + "sqlx-core", + "sqlx-macros-core", + "syn 2.0.87", +] + +[[package]] +name = "sqlx-macros-core" +version = "0.8.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1804e8a7c7865599c9c79be146dc8a9fd8cc86935fa641d3ea58e5f0688abaa5" +dependencies = [ + "dotenvy", + "either", + "heck 0.5.0", + "hex", + "once_cell", + "proc-macro2", + "quote", + "serde", + "serde_json", + "sha2", + "sqlx-core", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", + "syn 2.0.87", + "tempfile", + "tokio", + "url", +] + +[[package]] +name = "sqlx-mysql" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64bb4714269afa44aef2755150a0fc19d756fb580a67db8885608cf02f47d06a" +dependencies = [ + "atoi", + "base64 0.22.1", + "bitflags 2.6.0", + "byteorder", + "bytes", + "crc", + "digest", + "dotenvy", + "either", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "generic-array", + "hex", + "hkdf", + "hmac", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "percent-encoding", + "rand", + "rsa", + "serde", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "whoami", +] + +[[package]] +name = "sqlx-postgres" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fa91a732d854c5d7726349bb4bb879bb9478993ceb764247660aee25f67c2f8" +dependencies = [ + "atoi", + "base64 0.22.1", + "bitflags 2.6.0", + "byteorder", + "crc", + "dotenvy", + "etcetera", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "hex", + "hkdf", + "hmac", + "home", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "rand", + "serde", + "serde_json", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "whoami", +] + +[[package]] +name = "sqlx-sqlite" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5b2cf34a45953bfd3daaf3db0f7a7878ab9b7a6b91b422d24a7a9e4c857b680" +dependencies = [ + "atoi", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + "libsqlite3-sys", + "log", + "percent-encoding", + "serde", + "serde_urlencoded", + "sqlx-core", + "tracing", + "url", +] + [[package]] name = "stringprep" version = "0.1.5" @@ -2417,6 +2978,19 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +[[package]] +name = "tempfile" +version = "3.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +dependencies = [ + "cfg-if", + "fastrand", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + [[package]] name = "testcontainers" version = "0.20.1" @@ -2853,9 +3427,12 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" +dependencies = [ + "serde", +] [[package]] name = "valuable" @@ -2869,16 +3446,28 @@ version = "0.1.1" dependencies = [ "anyhow", "chrono", + "env_logger", "futures", "indexmap 2.2.6", + "log", "pretty_assertions", "rust_decimal", "serde", "serde_json", + "serde_with", + "sqlx", "tokio", "tokio-postgres", + "tracing", + "uuid", ] +[[package]] +name = "vcpkg" +version = "0.2.15" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "version_check" version = "0.9.4" @@ -3251,6 +3840,26 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "zeroize" version = "1.8.1" diff --git a/TODO.md b/TODO.md index 59c9d6c..3c2044b 100644 --- a/TODO.md +++ b/TODO.md @@ -1,83 +1,47 @@ -MVP: +# v0.2 (eta January 2025) + +- [x] Swap to sqlx +- [ ] Allow use of custom columns +- [ ] "returning `id` should properly choose ID column" +- [ ] Add thread safety (currently tests in bakery_api fail) +- [ ] Implement transaction support +- [ ] Add MySQL support +- [ ] Add a proper database integration test-suite +- [ ] Implement all basic SQL types +- [ ] Implement more operations +- [ ] Fully implement joins +- [ ] Implement and Document Disjoint Subtypes pattern +- [ ] Add and document more hooks +- [ ] Comprehensive documentation for mock data testing +- [ ] Implement "Realworld" example application in a separate repository +- [ ] Implement Uuid support +- [ ] with_id() shouldn't need into() + +# v0.3 + +- [ ] Implement associated records (update and save back) +- [ ] Implement table aggregations (group by) +- [ ] Implement NoSQL support +- [ ] Implement RestAPI support +- [ ] Implement Queue support +- [ ] Add expression as a field value (e.g. when inserting) +- [ ] Add delayed method evaluation as a field value (e.g. when inserting) +- [ ] Add tests for cross-database queries +- [ ] Explore replayability for idempotent operations and workflow retries +- [ ] Provide example for scalable worker pattern + +# Someday maybe: -0.0.1: Query Building - -- [x] create a basic query type -- [x] have query ability to render into a SQL query -- [x] add ability to have expressions -- [x] add ability to have where conditions -- [x] add support for datasource -- [x] add support for dataset -- [x] add integration with sqlite -- [x] add integration with postgres -- [x] implement insert query -- [x] implement delete query -- [x] implement operations: (field.eq(otherfield)) -- [x] implement parametric queries -- [x] reimplement "with_condition" into "with_where_condition" and "with_having_condition" - - 0.0.2: Nested Query Building - -- [x] properly handle nested queries -- [x] table should own DataSource, which should be cloneable and use Arc for client -- [x] implemented condition chaining -- [x] implemented and/or conditions -- [x] implemented expression query -- [x] implemented table::sum() -- [x] implemented TableDelegate trait -- [x] implemented Query::add_join() - - 0.0.3: Table Structure - -- [x] add uniq id vendor -- [x] implemented Table::join_table() for merging tables -- [x] field prefixing with table alias/name (optional) -- [x] Table::join_table can be used to reference fields. 
Also add Table::with_join() -- [x] Table::join_table should preserve joins on other_table -- [x] When joining table, combine their UniqueIdVendors into one -- [x] Implement has_one and has_many in a lazy way -- [x] Implement expressions in a lazy way -- [x] Implemented bakery example - - 0.0.4: Improve Entity tracking and add target documentation - -- [x] Add documentation for target vision of the library -- [x] Add "Entity" concept into Table -- [x] Add example on how to use traits for augmenting Table of specific Entity -- [x] Implement rendering of QueryType::Update so that we could update records -- [x] Refine "AnyTable" concept, so that we can use table as dyn without (if we want) -- [x] Check on "Join", they should allow for Entity mutation (joined table associated with a different entity) -- [x] Implement has_one and has_many in a correct way, moving functionality to Related Reference -- [x] Implement Unrelated Reference (when ref leads to a table with different Data Source) -- [x] Implement a better data fetching mechanism, using default entity -- [x] Restore functionality of bakery example -- [x] Implement ability to include sub-queries based on related tables - - 0.0.5: Refactor internal crates - -- [x] Move ReadableDataSet and WritableDataSet to separate crate and document -- [x] Implement WritableDataSet for Table (almost) - [ ] Implement todo in update() in WritableDataSet for Table - [ ] Continue through the docs - align crates with documentation -Create integration test-suite for SQL testing +# Create integration test-suite for SQL testing -- [x] Quality of life improvements - nextest and improved assert_eq -- [x] Implement testcontainers postgres connectivity -- [x] Get rid of testcontainers (they don't work anyway), use regular Postgres - [ ] Create separate test-suite, connect DB etc -- [x] Populate Bakery tables for tests -- [x] Seed some data into Bakery tests - [ ] Make use of Postgres snapshots in the tests - [ ] Add integration tests for update() and delete() for Table -Control field queries - -- [x] Convert Field and &Field into Arc everywhere -- [x] Implement a way to create a query with custom field references -- [x] Implement a way to query with a serialized structure -- [x] Separate fields from active fields structure -- [x] Implement ability to specify which fields to query for +# Control field queries - [ ] add tests for all CRUD operations (ID-less table) - [ ] implemented `each` functionality for DataSet @@ -86,23 +50,12 @@ Control field queries - [ ] add tests for table conditions (add_condition(field1.eq(field2)) - [ ] implement sub-library for datasource, supporting serde - [ ] add second data-source (csv) as an example -- [x] datasource should convert query into result (traited) -- [x] select where a field is a sub-query -- [x] insert where a field value is an expression -- [x] insert where a field is imported from related table -- [x] select from a subquery - [ ] add sql table as a dataset at a query level (+ clean up method naming) - [ ] postgres expressions should add type annotation into query ("$1::text") -Pratcitacl tests: - -- [x] Populate bakery tests -- [ ] Make bakery model more usable -- [ ] table.itsert_query should quote field names (bug) - -Lazy features: +Implement extensions: -- [ ] Implement join_table_lazy() +- [ ] Lazy table joins (read-only) - [ ] Implement add_field_lazy() Minor Cases: @@ -112,135 +65,6 @@ Minor Cases: - [ ] Condition::or() shouldn't be limited to only two arguments - [ ] It should not be possible to change table 
alias, after ownership of Fields is given -Implementing examples: - -- [x] Add query filters -- [x] Add sum() function - -```rust -let vip_client = Table::new('client', db) - .add_title('name') - .add_field('is_vip') - .add_condition('is_vip', true); - -let sum = vip_client.sum('total_spent'); -``` - -- [ ] Implement relations between tables - -```rust -let mut clients = Table::new('client', db) - .add_title('name') - .add_field('is_vip'); -let mut orders = Table::new('orders', db) - .add_field('total'); - -users.has_many('orders', orders, 'order_id', 'id'); - -let vip_total = clients.clone() - .add_condition('is_vip', true) - .ref('orders') - .sum('total'); -``` - -- [ ] Implement syntax sugar for models -- [ ] Implement support for types - -```rust - -#[vantage::table] -struct Client { - name: String, - is_vip: bool, -} - -#[vantage::table] -struct Order { - #[vantage::has_one(Client, "id"))] - user_id: i32, - total: f64, -} - -let vip_total = Client::new(db) - .add_condition(is_vip.eq(true)) - .ref_orders() - .sum(total); -``` - -# Future features - -## Implement persistence-aware model - -By a model we call a struct implementing ser/de traits that can be used with -DataSet to load, store and iterate over data. We do not need a basic implementation -to be persistence-aware. However with persistence-aware model we can implement -id-tracked conditioning. The model will know where it was loaded from and -will be able to update itself if changed, which can even be done on drop. - -```rust -#[vantage::persistence(id = "my_id")] -struct Client { - my_id: i32, - name: String, - is_vip: bool, - - _dsp: DataSourcePersistence, // required for persistence-aware model -} - -let client = ClientSet::new(db) - .load(1); - -db.transaction(|_| { - - client.orders.each(|order: Order| { - order.price-= 10; - }); - - client.is_vip = true; - client.save(); -}); -``` - -## Implement non-table SQL data source - -Basic implementation allows to use Table as an ORM data source. We can implement -a read-only source that have a query as a source. - -TODO: query-based model can be a curious feature, but this example should be rewritten -to use a different table-like construct, returned by table.group() method. - -```rust -struct GraphData { - date: Date, - value: f64, -} - -struct DailyDeployments { - table_deployment: Deployments, - query: Query, -} - -impl DailyDeployments { - // like Deployments, but with date grouping and date-range - pub fn new(ds: DataSource, date_from: Date, date_to: Date) -> Self { - let td = Deployments::new(ds); - let query = td - .query_fields(vec![td.date(), td.value()]) - .add_condition(td.date().gte(date_from)) - .add_condition(td.date().lte(date_to)) - .group_by(td.date()); - - Self { ds, table } - } - pub fn date(&self) -> Field { - self.query.field(0) - } -} - -let dd = DailyDeployments::new(db, Date::new(2020, 1, 1), Date::new(2020, 1, 31)); -let data = dd.query().fetch::(); -``` - ## Implement cross-datasource operations Developers who operate with the models do not have to be aware of the data source. 
diff --git a/bakery_model/Cargo.toml b/bakery_model/Cargo.toml index 82fa26d..ab4939a 100644 --- a/bakery_model/Cargo.toml +++ b/bakery_model/Cargo.toml @@ -16,6 +16,7 @@ testcontainers-modules = { version = "0.8.0", features = [ tokio = "1.38.1" tokio-postgres = "0.7.10" sqlformat = "0.2.3" +sqlx = { version = "0.8.2", default-features = false, features = ["json", "postgres", "runtime-tokio"] } [[example]] name = "0-intro" diff --git a/bakery_model/examples/0-intro.rs b/bakery_model/examples/0-intro.rs index d982c24..c7331b0 100644 --- a/bakery_model/examples/0-intro.rs +++ b/bakery_model/examples/0-intro.rs @@ -11,8 +11,8 @@ async fn create_bootstrap_db() -> Result<()> { bakery_model::connect_postgres().await?; let vantage_client = bakery_model::postgres(); let client = vantage_client.client(); - let schema = tokio::fs::read_to_string("bakery_model/schema-pg.sql").await?; - client.batch_execute(&schema).await?; + let schema = tokio::fs::read_to_string("schema-pg.sql").await?; + sqlx::raw_sql(&schema).execute(client).await?; Ok(()) } diff --git a/bakery_model/examples/1-soft-delete.rs b/bakery_model/examples/1-soft-delete.rs index a411071..3201d0b 100644 --- a/bakery_model/examples/1-soft-delete.rs +++ b/bakery_model/examples/1-soft-delete.rs @@ -10,10 +10,8 @@ async fn create_bootstrap_db() -> Result<()> { // Get the postgres client for batch execution let vantage_client = bakery_model::postgres(); let client = vantage_client.client(); - - // Read the schema from the file and execute it let schema = tokio::fs::read_to_string("bakery_model/schema-pg.sql").await?; - client.batch_execute(&schema).await?; + sqlx::raw_sql(&schema).execute(client).await?; Ok(()) } diff --git a/bakery_model/examples/2-joined-tables.rs b/bakery_model/examples/2-joined-tables.rs index 4a95d90..9962be7 100644 --- a/bakery_model/examples/2-joined-tables.rs +++ b/bakery_model/examples/2-joined-tables.rs @@ -11,10 +11,8 @@ async fn create_bootstrap_db() -> Result<()> { // Get the postgres client for batch execution let vantage_client = bakery_model::postgres(); let client = vantage_client.client(); - - // Read the schema from the file and execute it let schema = tokio::fs::read_to_string("bakery_model/schema-pg.sql").await?; - client.batch_execute(&schema).await?; + sqlx::raw_sql(&schema).execute(client).await?; Ok(()) } diff --git a/bakery_model/schema-pg.sql b/bakery_model/schema-pg.sql index 44465c7..0315cd7 100644 --- a/bakery_model/schema-pg.sql +++ b/bakery_model/schema-pg.sql @@ -66,14 +66,27 @@ VALUES INSERT INTO client ( name, + email, contact_details, is_paying_client, bakery_id ) VALUES - ('Marty McFly', '555-1955', true, 1), - ('Doc Brown', '555-1885', true, 1), - ('Biff Tannen', '555-1955', false, 1); + ( + 'Marty McFly', + 'marty@gmail.com', + '555-1955', + true, + 1 + ), + ('Doc Brown', 'doc@brown.com', '555-1885', true, 1), + ( + 'Biff Tannen', + 'biff-3293@hotmail.com', + '555-1955', + false, + 1 + ); INSERT INTO product (name, calories, bakery_id, price) diff --git a/bakery_model/src/lib.rs b/bakery_model/src/lib.rs index e906cf6..a8f3f1e 100644 --- a/bakery_model/src/lib.rs +++ b/bakery_model/src/lib.rs @@ -44,32 +44,6 @@ pub async fn connect_postgres() -> Result<()> { let connection_string = std::env::var("DATABASE_URL") .unwrap_or_else(|_| "postgres://postgres@localhost:5432/postgres".to_string()); - let timeout = Duration::from_secs(3); // Max time to wait - let start_time = Instant::now(); - let mut last_error: Result<()> = Ok(()); - - while Instant::now().duration_since(start_time) < timeout { 
- match tokio_postgres::connect(&connection_string, NoTls).await { - Ok((client, connection)) => { - tokio::spawn(async move { - if let Err(e) = connection.await { - eprintln!("connection error: {}", e); - } - }); - - set_postgres(Postgres::new(Arc::new(Box::new(client))))?; - - println!("Successfully connected to the database."); - return Ok(()); - } - Err(e) => { - println!("Error connecting to database: {}, retrying...", &e); - last_error = Err(anyhow::Error::new(e)); - // sleep(Duration::from_secs(2)).await; // Wait before retrying - thread::sleep(Duration::from_millis(100)); - } - } - } - - last_error + let postgres = Postgres::new(&connection_string).await; + set_postgres(postgres) } diff --git a/bakery_model/tests/bakery_integration_test.rs b/bakery_model/tests/bakery_integration_test.rs index b1acd24..ad4d451 100644 --- a/bakery_model/tests/bakery_integration_test.rs +++ b/bakery_model/tests/bakery_integration_test.rs @@ -62,11 +62,11 @@ async fn create_bootstrap_db() -> Result<()> { // Get the postgres client for batch execution let vantage_client = bakery_model::postgres(); - let client = vantage_client.client(); + let client = vantage_client.pool; // Read the schema from the file and execute it let schema = tokio::fs::read_to_string("schema-pg.sql").await?; - client.batch_execute(&schema).await?; + sqlx::raw_sql(&schema).execute(&*client).await?; Ok(()) } diff --git a/vantage/Cargo.toml b/vantage/Cargo.toml index 28ca196..d476f5a 100644 --- a/vantage/Cargo.toml +++ b/vantage/Cargo.toml @@ -14,6 +14,7 @@ readme = "../README.md" doctest = false [dependencies] +chrono = { version = "0.4.38", features = ["serde"] } rust_decimal = { version = "1", features = ["db-postgres"] } tokio-postgres = { version = "0.7.12", features = ["with-serde_json-1"] } indexmap = { version = "2.2.6", features = ["serde"] } @@ -24,9 +25,19 @@ serde_json = { version = "1", features = [ "arbitrary_precision", ] } serde = { version = "1", features = ["derive"] } -chrono = "0.4.38" anyhow = "1.0.82" futures = "0.3.30" +sqlx = { version = "0.8.2", features = [ "json", "postgres", "sqlite", "runtime-tokio", ], default-features = false } +tracing = "0.1.41" +env_logger = "0.11.5" +log = "0.4.22" +uuid = { version = "1.11.0", features = ["serde"] } +serde_with = { version = "3.11.0", features = ["chrono"] } [dev-dependencies] pretty_assertions = "1.4.0" diff --git a/vantage/src/datasource/associated_query.rs b/vantage/src/datasource/associated_query.rs new file mode 100644 index 0000000..24fef51 --- /dev/null +++ b/vantage/src/datasource/associated_query.rs @@ -0,0 +1,176 @@ +use anyhow::Result; +use serde_json::{Map, Value}; +use std::ops::{Deref, DerefMut}; + +use crate::{ + dataset::ReadableDataSet, + prelude::Entity, + sql::{query::SqlQuery, Chunk, Expression, Query}, + traits::DataSource, +}; + +/// While [`Query`] does not generally associate with the [`DataSource`], it may be inconvenient +/// to execute it. AssociatedQuery combines query with the datasource, allowing you to easily +/// pass it around and execute it.
+/// +/// ``` +/// let clients = Client::table(); +/// let client_count = clients.count(); // returns AssociatedQuery +/// +/// let cnt: Value = client_count.get_one_untyped().await?; // actually executes the query +/// ``` +/// +/// AssociatedQuery can be used to make a link between DataSources: +/// +/// ``` +/// let clients = Client::table(); +/// let client_code_query = clients.field_query(clients.code())?; +/// // returns field query (SELECT code FROM client) +/// +/// let orders = Order::table(); +/// let orders = orders.with_condition( +/// orders.client_code().in(orders.glue(client_code_query).await?) +/// ); +/// ``` +/// If Order and Client tables share the same [`DataSource`], the condition would be set as +/// `WHERE (client_code IN (SELECT code FROM client))`, ultimately saving you from +/// a redundant query. +/// +/// When datasources are different, [`glue()`] would execute `SELECT code FROM client`, fetch +/// the results and use those as a vector of values in a condition clause: +/// `WHERE (client_code IN [12, 13, 14])` +/// +/// [`DataSource`]: crate::traits::datasource::DataSource +/// [`glue()`]: Table::glue +/// +#[derive(Clone)] +pub struct AssociatedQuery { + pub query: Query, + pub ds: T, + pub _phantom: std::marker::PhantomData, +} +impl Deref for AssociatedQuery { + type Target = Query; + + fn deref(&self) -> &Self::Target { + &self.query + } +} +impl DerefMut for AssociatedQuery { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.query + } +} + +impl AssociatedQuery { + pub fn new(query: Query, ds: T) -> Self { + Self { + query, + ds, + _phantom: std::marker::PhantomData, + } + } + + pub fn with_skip(mut self, skip: i64) -> Self { + self.query.add_skip(Some(skip)); + self + } + + pub fn with_limit(mut self, limit: i64) -> Self { + self.query.add_limit(Some(limit)); + self + } + + pub fn with_skip_and_limit(mut self, skip: i64, limit: i64) -> Self { + self.query.add_limit(Some(limit)); + self.query.add_skip(Some(skip)); + self + } + + /// Presented with another AssociatedQuery - calculate if queries + /// are linked with the same or different [`DataSource`]s. + /// + /// The same - return expression as-is. + /// Different - execute the query and return the result as a vector of values.
+ async fn glue(&self, other: AssociatedQuery) -> Result { + if self.ds.eq(&other.ds) { + Ok(other.query.render_chunk()) + } else { + let vals = other.get_col_untyped().await?; + let tpl = vec!["{}"; vals.len()].join(", "); + Ok(Expression::new(tpl, vals)) + } + } +} +impl Chunk for AssociatedQuery { + fn render_chunk(&self) -> Expression { + self.query.render_chunk() + } +} +impl std::fmt::Debug for AssociatedQuery { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("AssociatedQuery") + .field("query", &self.query) + .field("ds", &self.ds) + .finish() + } +} +impl ReadableDataSet for AssociatedQuery { + async fn get_all_untyped(&self) -> Result>> { + self.ds.query_fetch(&self.query).await + } + + async fn get_row_untyped(&self) -> Result> { + self.ds.query_row(&self.query).await + } + + async fn get_one_untyped(&self) -> Result { + self.ds.query_one(&self.query).await + } + + async fn get_col_untyped(&self) -> Result> { + self.ds.query_col(&self.query).await + } + + async fn get(&self) -> Result> { + let data = self.get_all_untyped().await?; + Ok(data + .into_iter() + .map(|row| serde_json::from_value(Value::Object(row)).unwrap()) + .collect()) + } + + async fn get_as(&self) -> Result> { + let data = self.get_all_untyped().await?; + Ok(data + .into_iter() + .map(|row| serde_json::from_value(Value::Object(row)).unwrap()) + .collect()) + } + + async fn get_some(&self) -> Result> { + let data = self.ds.query_fetch(&self.query).await?; + if data.len() > 0 { + let row = data[0].clone(); + let row = serde_json::from_value(Value::Object(row)).unwrap(); + Ok(Some(row)) + } else { + Ok(None) + } + } + + async fn get_some_as(&self) -> Result> { + let data = self.ds.query_fetch(&self.query).await?; + if data.len() > 0 { + let row = data[0].clone(); + let row = serde_json::from_value(Value::Object(row)).unwrap(); + Ok(Some(row)) + } else { + Ok(None) + } + } + + fn select_query(&self) -> Query { + self.query.clone() + } +} diff --git a/vantage/src/datasource/mod.rs b/vantage/src/datasource/mod.rs index 26e9103..0c240de 100644 --- a/vantage/src/datasource/mod.rs +++ b/vantage/src/datasource/mod.rs @@ -1 +1,3 @@ +pub mod associated_query; pub mod postgres; +pub mod sqlx; diff --git a/vantage/src/datasource/postgres.rs b/vantage/src/datasource/postgres.rs index 1e6a5db..3f35626 100644 --- a/vantage/src/datasource/postgres.rs +++ b/vantage/src/datasource/postgres.rs @@ -304,172 +304,6 @@ impl AssociatedExpressionArc { } } -/// While [`Query`] does not generally associate with the [`DataSource`], it may be inconvenient -/// to execute it. AssociatedQuery combines query with the datasource, allowing you to ealily -/// pass it around and execute it. -/// -/// ``` -/// let clients = Client::table(); -/// let client_count = clients.count(); // returns AssociatedQuery -/// -/// let cnt: Value = client_count.get_one_untuped().await?; // actually executes the query -/// ``` -/// -/// AssociatedQuery can be used to make a link between DataSources: -/// -/// ``` -/// let clients = Client::table(); -/// let client_code_query = clients.field_query(clients.code())?; -/// // returns field query (SELECT code FROM client) -/// -/// let orders = Order::table(); -/// let orders = orders.with_condition( -/// orders.client_code().in(orders.glue(client_code_query).await?) -/// ); -/// ``` -/// If Order and Client tables do share same [`DataSource`], the conditioun would be set as -/// `WHERE (client_code IN (SELECT code FROM client))`, ultimatelly saving you from -/// redundant query. 
-/// -/// When datasources are different, [`glue()`] would execute `SELECT code FROM client`, fetch -/// the results and use those as a vector of values in a condition clause: -/// `WHERE (client_code IN [12, 13, 14])` -/// -/// [`DataSource`]: crate::traits::datasource::DataSource -/// [`glue()`]: Table::glue -/// -#[derive(Clone)] -pub struct AssociatedQuery { - pub query: Query, - pub ds: T, - pub _phantom: std::marker::PhantomData, -} -impl Deref for AssociatedQuery { - type Target = Query; - - fn deref(&self) -> &Self::Target { - &self.query - } -} -impl DerefMut for AssociatedQuery { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.query - } -} - -impl AssociatedQuery { - pub fn new(query: Query, ds: T) -> Self { - Self { - query, - ds, - _phantom: std::marker::PhantomData, - } - } - - pub fn with_skip(mut self, skip: i64) -> Self { - self.query.add_skip(Some(skip)); - self - } - - pub fn with_limit(mut self, limit: i64) -> Self { - self.query.add_limit(Some(limit)); - self - } - - pub fn with_skip_and_limit(mut self, skip: i64, limit: i64) -> Self { - self.query.add_limit(Some(limit)); - self.query.add_skip(Some(skip)); - self - } - - /// Presented with another AssociatedQuery - calculate if queries - /// are linked with the same or different [`DataSource`]s. - /// - /// The same - return expression as-is. - /// Different - execute the query and return the result as a vector of values. - async fn glue(&self, other: AssociatedQuery) -> Result { - if self.ds.eq(&other.ds) { - Ok(other.query.render_chunk()) - } else { - let vals = other.get_col_untyped().await?; - let tpl = vec!["{}"; vals.len()].join(", "); - Ok(Expression::new(tpl, vals)) - } - } -} -impl Chunk for AssociatedQuery { - fn render_chunk(&self) -> Expression { - self.query.render_chunk() - } -} -impl std::fmt::Debug for AssociatedQuery { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("AssociatedQuery") - .field("query", &self.query) - .field("ds", &self.ds) - .finish() - } -} -impl ReadableDataSet for AssociatedQuery { - async fn get_all_untyped(&self) -> Result>> { - self.ds.query_fetch(&self.query).await - } - - async fn get_row_untyped(&self) -> Result> { - self.ds.query_row(&self.query).await - } - - async fn get_one_untyped(&self) -> Result { - self.ds.query_one(&self.query).await - } - - async fn get_col_untyped(&self) -> Result> { - self.ds.query_col(&self.query).await - } - - async fn get(&self) -> Result> { - let data = self.get_all_untyped().await?; - Ok(data - .into_iter() - .map(|row| serde_json::from_value(Value::Object(row)).unwrap()) - .collect()) - } - - async fn get_as(&self) -> Result> { - let data = self.get_all_untyped().await?; - Ok(data - .into_iter() - .map(|row| serde_json::from_value(Value::Object(row)).unwrap()) - .collect()) - } - - async fn get_some(&self) -> Result> { - let data = self.ds.query_fetch(&self.query).await?; - if data.len() > 0 { - let row = data[0].clone(); - let row = serde_json::from_value(Value::Object(row)).unwrap(); - Ok(Some(row)) - } else { - Ok(None) - } - } - - async fn get_some_as(&self) -> Result> { - let data = self.ds.query_fetch(&self.query).await?; - if data.len() > 0 { - let row = data[0].clone(); - let row = serde_json::from_value(Value::Object(row)).unwrap(); - Ok(Some(row)) - } else { - Ok(None) - } - } - - fn select_query(&self) -> Query { - self.query.clone() - } -} - #[cfg(test)] mod tests { diff --git a/vantage/src/datasource/sqlx/mod.rs b/vantage/src/datasource/sqlx/mod.rs new file mode 100644 index 
0000000..551574c --- /dev/null +++ b/vantage/src/datasource/sqlx/mod.rs @@ -0,0 +1,2 @@ +pub mod postgres; +pub mod sql_to_json; diff --git a/vantage/src/datasource/sqlx/postgres.rs b/vantage/src/datasource/sqlx/postgres.rs new file mode 100644 index 0000000..4593c62 --- /dev/null +++ b/vantage/src/datasource/sqlx/postgres.rs @@ -0,0 +1,137 @@ +use std::sync::Arc; + +use anyhow::{anyhow, Context, Result}; +use serde_json::{Map, Value}; +use sqlx::{postgres::PgArguments, Execute}; + +use crate::{ + prelude::Query, + sql::{Chunk, Expression}, + traits::DataSource, +}; + +use super::sql_to_json::row_to_json; + +#[derive(Debug, Clone)] +pub struct Postgres { + pub pool: Arc, +} + +impl Postgres { + pub async fn new(url: &str) -> Self { + let pool = sqlx::PgPool::connect(url).await.unwrap(); + Self { + pool: Arc::new(pool), + } + } + + // Will be possible extended with some advanced types, that can potentially come out of expression + pub fn bind<'a>( + &self, + mut query: sqlx::query::Query<'a, sqlx::Postgres, PgArguments>, + expression: &'a Expression, + ) -> sqlx::query::Query<'a, sqlx::Postgres, PgArguments> { + for param in expression.params() { + query = match param { + Value::String(v) => query.bind(v), + Value::Null => query.bind(Option::::None), + Value::Bool(v) => query.bind(v), + Value::Number(v) => { + if let Some(v) = v.as_i64() { + query.bind(v) + } else if let Some(v) = v.as_f64() { + query.bind(v) + } else { + query.bind(param) + } + } + Value::Object(_) => query.bind(param), + _ => todo!("Not implemented for {:?}", param), + // Value::Array(v) => query.bind(v), + }; + } + query + } + + pub fn client(&self) -> &sqlx::PgPool { + &*self.pool + } +} + +impl PartialEq for Postgres { + fn eq(&self, other: &Postgres) -> bool { + Arc::ptr_eq(&self.pool, &other.pool) + } +} + +impl DataSource for Postgres { + async fn query_fetch(&self, query: &Query) -> Result>> { + let expression = query.render_chunk(); + let sql_final = expression.sql_final(); + + let query = sqlx::query(&sql_final); + let query = self.bind(query, &expression); + + let rows = query + .fetch_all(&*self.pool) + .await + .with_context(|| anyhow!("Error in query {:?}", expression))?; + + Ok(rows.iter().map(row_to_json).collect()) + } + + async fn query_exec(&self, query: &Query) -> Result> { + let expression = query.render_chunk(); + let sql_final = expression.sql_final(); + + let query = sqlx::query(&sql_final); + let query = self.bind(query, &expression); + + let row = query + .fetch_one(&*self.pool) + .await + .with_context(|| anyhow!("Error in query {:?}", expression))?; + + let row = row_to_json(&row); + if row.is_empty() { + Ok(None) + } else { + Ok(row.values().next().cloned()) + } + } + + async fn query_insert(&self, query: &Query, rows: Vec>) -> Result<()> { + todo!() + } + + async fn query_one(&self, query: &Query) -> Result { + let expression = query.render_chunk(); + let sql_final = expression.sql_final(); + + let query = sqlx::query(&sql_final); + let query = self.bind(query, &expression); + + let row = query + .fetch_one(&*self.pool) + .await + .with_context(|| anyhow!("Error in query {:?}", expression))?; + + let row = row_to_json(&row); + if row.is_empty() { + Ok(Value::Null) + } else { + row.values() + .next() + .ok_or(anyhow::anyhow!("Bad value")) + .cloned() + } + } + + async fn query_row(&self, query: &Query) -> Result> { + todo!() + } + + async fn query_col(&self, query: &Query) -> Result> { + todo!() + } +} diff --git a/vantage/src/datasource/sqlx/sql_to_json.rs 
b/vantage/src/datasource/sqlx/sql_to_json.rs new file mode 100644 index 0000000..0fb6dd4 --- /dev/null +++ b/vantage/src/datasource/sqlx/sql_to_json.rs @@ -0,0 +1,449 @@ +// source from https://github.com/sqlpage/SQLPage/blob/a8f6d338bb3807f2bf23c75752c0be20bbec233c/src/utils.rs#L4 + +// use chrono::{DateTime, FixedOffset, NaiveDateTime}; +use serde_json::{self, Map, Value}; +use sqlx::postgres::PgRow; +use sqlx::Decode; +use sqlx::{Column, Row, TypeInfo, ValueRef}; + +pub fn add_value_to_map( + mut map: Map, + (key, value): (String, Value), +) -> Map { + use serde_json::map::Entry::{Occupied, Vacant}; + use Value::Array; + match map.entry(key) { + Vacant(vacant) => { + vacant.insert(value); + } + Occupied(mut old_entry) => { + let mut new_array = if let Array(v) = value { v } else { vec![value] }; + match old_entry.get_mut() { + Array(old_array) => old_array.append(&mut new_array), + old_scalar => { + new_array.insert(0, old_scalar.take()); + *old_scalar = Array(new_array); + } + } + } + } + map +} + +pub fn row_to_json(row: &PgRow) -> Map { + let columns = row.columns(); + let mut map = Map::new(); + for col in columns { + let key = col.name().to_string(); + let value: Value = sql_to_json(row, col); + map = add_value_to_map(map, (key, value)); + } + map +} + +pub fn sql_to_json(row: &PgRow, col: &sqlx::postgres::PgColumn) -> Value { + let raw_value_result = row.try_get_raw(col.ordinal()); + match raw_value_result { + Ok(raw_value) if !raw_value.is_null() => { + let mut raw_value = Some(raw_value); + let decoded = sql_nonnull_to_json(|| { + raw_value + .take() + .unwrap_or_else(|| row.try_get_raw(col.ordinal()).unwrap()) + }); + decoded + } + Ok(_null) => Value::Null, + Err(_e) => Value::Null, + } +} + +fn decode_raw<'a, T: Decode<'a, sqlx::Postgres> + Default>( + raw_value: sqlx::postgres::PgValueRef<'a>, +) -> T { + match T::decode(raw_value) { + Ok(v) => v, + Err(_e) => { + let _type_name = std::any::type_name::(); + T::default() + } + } +} + +pub fn sql_nonnull_to_json<'r>( + mut get_ref: impl FnMut() -> sqlx::postgres::PgValueRef<'r>, +) -> Value { + let raw_value = get_ref(); + let type_info = raw_value.type_info(); + let type_name = type_info.name(); + match type_name { + "REAL" | "FLOAT" | "FLOAT4" | "FLOAT8" | "DOUBLE" | "NUMERIC" | "DECIMAL" => { + decode_raw::(raw_value).into() + } + "INT8" | "BIGINT" | "SERIAL8" | "BIGSERIAL" | "IDENTITY" | "INT64" | "INTEGER8" + | "BIGINT SIGNED" => decode_raw::(raw_value).into(), + "INT" | "INT4" | "INTEGER" | "MEDIUMINT" | "YEAR" => decode_raw::(raw_value).into(), + "INT2" | "SMALLINT" | "TINYINT" => decode_raw::(raw_value).into(), + // "BIGINT UNSIGNED" => decode_raw::(raw_value).into(), + // "INT UNSIGNED" | "MEDIUMINT UNSIGNED" | "SMALLINT UNSIGNED" | "TINYINT UNSIGNED" => { + // decode_raw::(raw_value).into() + // } + "BOOL" | "BOOLEAN" => decode_raw::(raw_value).into(), + // "BIT" if matches!(*type_info, AnyTypeInfo(AnyTypeInfoKind::Mssql(_))) => { + // decode_raw::(raw_value).into() + // } + // "BIT" if matches!(*type_info, AnyTypeInfo(AnyTypeInfoKind::MySql(ref mysql_type)) if mysql_type.max_size() == Some(1)) => { + // decode_raw::(raw_value).into() + // } + // "BIT" if matches!(*type_info, AnyTypeInfo(AnyTypeInfoKind::MySql(_))) => { + // decode_raw::(raw_value).into() + // } + // "DATE" => decode_raw::(raw_value) + // .to_string() + // .into(), + // "TIME" | "TIMETZ" => decode_raw::(raw_value) + // .to_string() + // .into(), + // "DATETIMEOFFSET" | "TIMESTAMP" | "TIMESTAMPTZ" => { + // decode_raw::>(raw_value) + // .to_rfc3339() + 
// .into()
+            // }
+            // "DATETIME" | "DATETIME2" => decode_raw::<chrono::NaiveDateTime>(raw_value)
+            //     .format("%FT%T%.f")
+            //     .to_string()
+            //     .into(),
+            "JSON" | "JSON[]" | "JSONB" | "JSONB[]" => decode_raw::<Value>(raw_value),
+            // Deserialize as a string by default
+            _ => decode_raw::<String>(raw_value).into(),
+    }
+}
+
+/// Takes the first column of a row and converts it to a string.
+pub fn row_to_string(row: &PgRow) -> Option<String> {
+    let col = row.columns().first()?;
+    match sql_to_json(row, col) {
+        serde_json::Value::String(s) => Some(s),
+        serde_json::Value::Null => None,
+        other => Some(other.to_string()),
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    // use crate::app_config::tests::test_database_url;
+
+    use super::*;
+    use sqlx::Connection;
+
+    fn test_database_url() -> String {
+        "TODO".to_string()
+    }
+
+    fn setup_logging() {
+        let _ = env_logger::builder()
+            .filter_level(log::LevelFilter::Trace)
+            .is_test(true)
+            .try_init();
+    }
+
+    fn db_specific_test(db_type: &str) -> Option<String> {
+        setup_logging();
+        let db_url = test_database_url();
+        if db_url.starts_with(db_type) {
+            Some(db_url)
+        } else {
+            log::warn!("Skipping test because DATABASE_URL is not set to a {db_type} database");
+            None
+        }
+    }
+
+    #[ignore = "will check later"]
+    #[tokio::test]
+    async fn test_row_to_json() -> anyhow::Result<()> {
+        use sqlx::Connection;
+        let db_url = test_database_url();
+        let mut c = sqlx::PgConnection::connect(&db_url).await?;
+        let row = sqlx::query(
+            "SELECT \
+                123.456 as one_value, \
+                1 as two_values, \
+                2 as two_values, \
+                'x' as three_values, \
+                'y' as three_values, \
+                'z' as three_values \
+            ",
+        )
+        .fetch_one(&mut c)
+        .await?;
+        assert_eq!(
+            Value::Object(row_to_json(&row)),
+            serde_json::json!({
+                "one_value": 123.456,
+                "two_values": [1,2],
+                "three_values": ["x","y","z"],
+            })
+        );
+        Ok(())
+    }
+
+    #[ignore = "will check later"]
+    #[tokio::test]
+    async fn test_postgres_types() -> anyhow::Result<()> {
+        let Some(db_url) = db_specific_test("postgres") else {
+            return Ok(());
+        };
+        let mut c = sqlx::PgConnection::connect(&db_url).await?;
+        let row = sqlx::query(
+            "SELECT
+                42::INT2 as small_int,
+                42::INT4 as integer,
+                42::INT8 as big_int,
+                42.25::FLOAT4 as float4,
+                42.25::FLOAT8 as float8,
+                TRUE as boolean,
+                '2024-03-14'::DATE as date,
+                '13:14:15'::TIME as time,
+                '2024-03-14 13:14:15'::TIMESTAMP as timestamp,
+                '2024-03-14 13:14:15+02:00'::TIMESTAMPTZ as timestamptz,
+                INTERVAL '1 year 2 months 3 days' as complex_interval,
+                INTERVAL '4 hours' as hour_interval,
+                INTERVAL '1.5 days' as fractional_interval,
+                '{\"key\": \"value\"}'::JSON as json,
+                '{\"key\": \"value\"}'::JSONB as jsonb",
+        )
+        .fetch_one(&mut c)
+        .await?;
+
+        expect_json_object_equal(
+            &Value::Object(row_to_json(&row)),
+            &serde_json::json!({
+                "small_int": 42,
+                "integer": 42,
+                "big_int": 42,
+                "float4": 42.25,
+                "float8": 42.25,
+                "boolean": true,
+                "date": "2024-03-14",
+                "time": "13:14:15",
+                "timestamp": "2024-03-14T13:14:15+00:00",
+                "timestamptz": "2024-03-14T11:14:15+00:00",
+                "complex_interval": "1 year 2 mons 3 days",
+                "hour_interval": "04:00:00",
+                "fractional_interval": "1 day 12:00:00",
+                "json": {"key": "value"},
+                "jsonb": {"key": "value"},
+            }),
+        );
+        Ok(())
+    }
+
+    // #[actix_web::test]
+    // async fn test_mysql_types() -> anyhow::Result<()> {
+    //     let db_url = db_specific_test("mysql").or_else(|| db_specific_test("mariadb"));
+    //     let Some(db_url) = db_url else {
+    //         return Ok(());
+    //     };
+    //     let mut c = sqlx::AnyConnection::connect(&db_url).await?;
+
+    //     sqlx::query(
+    //         "CREATE TEMPORARY TABLE _sqlp_t (
+    //             tiny_int TINYINT,
+    //             small_int SMALLINT,
+    //             medium_int MEDIUMINT,
+    //             signed_int INTEGER,
+    //             big_int BIGINT,
+    //             unsigned_int INTEGER UNSIGNED,
+    //             tiny_int_unsigned TINYINT UNSIGNED,
+    //             small_int_unsigned SMALLINT UNSIGNED,
+    //             medium_int_unsigned MEDIUMINT UNSIGNED,
+    //             big_int_unsigned BIGINT UNSIGNED,
+    //             decimal_num DECIMAL(10,2),
+    //             float_num FLOAT,
+    //             double_num DOUBLE,
+    //             bit_val BIT(1),
+    //             date_val DATE,
+    //             time_val TIME,
+    //             datetime_val DATETIME,
+    //             timestamp_val TIMESTAMP,
+    //             year_val YEAR,
+    //             char_val CHAR(10),
+    //             varchar_val VARCHAR(50),
+    //             text_val TEXT
+    //         ) AS
+    //         SELECT
+    //             127 as tiny_int,
+    //             32767 as small_int,
+    //             8388607 as medium_int,
+    //             -1000000 as signed_int,
+    //             9223372036854775807 as big_int,
+    //             1000000 as unsigned_int,
+    //             255 as tiny_int_unsigned,
+    //             65535 as small_int_unsigned,
+    //             16777215 as medium_int_unsigned,
+    //             18446744073709551615 as big_int_unsigned,
+    //             123.45 as decimal_num,
+    //             42.25 as float_num,
+    //             42.25 as double_num,
+    //             1 as bit_val,
+    //             '2024-03-14' as date_val,
+    //             '13:14:15' as time_val,
+    //             '2024-03-14 13:14:15' as datetime_val,
+    //             '2024-03-14 13:14:15' as timestamp_val,
+    //             2024 as year_val,
+    //             'CHAR' as char_val,
+    //             'VARCHAR' as varchar_val,
+    //             'TEXT' as text_val",
+    //     )
+    //     .execute(&mut c)
+    //     .await?;
+
+    //     let row = sqlx::query("SELECT * FROM _sqlp_t")
+    //         .fetch_one(&mut c)
+    //         .await?;
+
+    //     expect_json_object_equal(
+    //         &row_to_json(&row),
+    //         &serde_json::json!({
+    //             "tiny_int": 127,
+    //             "small_int": 32767,
+    //             "medium_int": 8_388_607,
+    //             "signed_int": -1_000_000,
+    //             "big_int": 9_223_372_036_854_775_807_u64,
+    //             "unsigned_int": 1_000_000,
+    //             "tiny_int_unsigned": 255,
+    //             "small_int_unsigned": 65_535,
+    //             "medium_int_unsigned": 16_777_215,
+    //             "big_int_unsigned": 18_446_744_073_709_551_615_u64,
+    //             "decimal_num": 123.45,
+    //             "float_num": 42.25,
+    //             "double_num": 42.25,
+    //             "bit_val": true,
+    //             "date_val": "2024-03-14",
+    //             "time_val": "13:14:15",
+    //             "datetime_val": "2024-03-14T13:14:15",
+    //             "timestamp_val": "2024-03-14T13:14:15+00:00",
+    //             "year_val": 2024,
+    //             "char_val": "CHAR",
+    //             "varchar_val": "VARCHAR",
+    //             "text_val": "TEXT"
+    //         }),
+    //     );
+
+    //     sqlx::query("DROP TABLE _sqlp_t").execute(&mut c).await?;
+
+    //     Ok(())
+    // }
+
+    // #[actix_web::test]
+    // async fn test_sqlite_types() -> anyhow::Result<()> {
+    //     let Some(db_url) = db_specific_test("sqlite") else {
+    //         return Ok(());
+    //     };
+    //     let mut c = sqlx::AnyConnection::connect(&db_url).await?;
+    //     let row = sqlx::query(
+    //         "SELECT
+    //             42 as integer,
+    //             42.25 as real,
+    //             'xxx' as string,
+    //             x'68656c6c6f20776f726c64' as blob",
+    //     )
+    //     .fetch_one(&mut c)
+    //     .await?;
+
+    //     expect_json_object_equal(
+    //         &row_to_json(&row),
+    //         &serde_json::json!({
+    //             "integer": 42,
+    //             "real": 42.25,
+    //             "string": "xxx",
+    //             "blob": "hello world",
+    //         }),
+    //     );
+    //     Ok(())
+    // }
+
+    // #[actix_web::test]
+    // async fn test_mssql_types() -> anyhow::Result<()> {
+    //     let Some(db_url) = db_specific_test("mssql") else {
+    //         return Ok(());
+    //     };
+    //     let mut c = sqlx::AnyConnection::connect(&db_url).await?;
+    //     let row = sqlx::query(
+    //         "SELECT
+    //             CAST(1 AS BIT) as true_bit,
+    //             CAST(0 AS BIT) as false_bit,
+    //             CAST(NULL AS BIT) as null_bit,
+    //             CAST(42 AS SMALLINT) as small_int,
+    //             CAST(42 AS INT) as integer,
+    //             CAST(42 AS BIGINT) as big_int,
+    //             CAST(42.25 AS REAL) as real,
+    //             CAST(42.25 AS FLOAT) as float,
+    //             CAST(42.25 AS DECIMAL(10,2)) as decimal,
+    //             CAST('2024-03-14' AS DATE) as date,
+    //             CAST('13:14:15' AS TIME) as time,
+    //             CAST('2024-03-14 13:14:15' AS DATETIME) as datetime,
+    //             CAST('2024-03-14 13:14:15' AS DATETIME2) as datetime2,
+    //             CAST('2024-03-14 13:14:15 +02:00' AS DATETIMEOFFSET) as datetimeoffset,
+    //             N'Unicode String' as nvarchar,
+    //             'ASCII String' as varchar",
+    //     )
+    //     .fetch_one(&mut c)
+    //     .await?;
+
+    //     expect_json_object_equal(
+    //         &row_to_json(&row),
+    //         &serde_json::json!({
+    //             "true_bit": true,
+    //             "false_bit": false,
+    //             "null_bit": null,
+    //             "small_int": 42,
+    //             "integer": 42,
+    //             "big_int": 42,
+    //             "real": 42.25,
+    //             "float": 42.25,
+    //             "decimal": 42.25,
+    //             "date": "2024-03-14",
+    //             "time": "13:14:15",
+    //             "datetime": "2024-03-14T13:14:15",
+    //             "datetime2": "2024-03-14T13:14:15",
+    //             "datetimeoffset": "2024-03-14T13:14:15+02:00",
+    //             "nvarchar": "Unicode String",
+    //             "varchar": "ASCII String",
+    //         }),
+    //     );
+    //     Ok(())
+    // }
+
+    fn expect_json_object_equal(actual: &Value, expected: &Value) {
+        use std::fmt::Write;
+
+        if actual == expected {
+            return;
+        }
+        let actual = actual.as_object().unwrap();
+        let expected = expected.as_object().unwrap();
+
+        let all_keys: std::collections::BTreeSet<_> =
+            actual.keys().chain(expected.keys()).collect();
+        let max_key_len = all_keys.iter().map(|k| k.len()).max().unwrap_or(0);
+
+        let mut comparison_string = String::new();
+        for key in all_keys {
+            let actual_value = actual.get(key).unwrap_or(&Value::Null);
+            let expected_value = expected.get(key).unwrap_or(&Value::Null);
+            if actual_value == expected_value {
+                continue;
+            }
+            writeln!(
+                &mut comparison_string,
+                "{key:
diff --git a/vantage/src/sql/table.rs b/vantage/src/sql/table.rs
index a583a34..aa2b43c 100644
--- a/vantage/src/sql/table.rs
+++ b/vantage/src/sql/table.rs
@@ -27,10 +27,11 @@ use std::fmt::{Debug, Display};
 use std::ops::Deref;
 use std::sync::{Arc, Mutex};
 
-mod column;
+pub mod column;
 mod join;
 
 pub use column::Column;
+use column::SqlColumn;
 pub use extensions::{Hooks, SoftDelete, TableExtension};
 pub use join::Join;
diff --git a/vantage/src/sql/table/column.rs b/vantage/src/sql/table/column.rs
index 5b284a1..b34c4d5 100644
--- a/vantage/src/sql/table/column.rs
+++ b/vantage/src/sql/table/column.rs
@@ -8,6 +8,11 @@ use crate::sql::Operations;
 use crate::sql::WrapArc;
 use crate::traits::column::SqlField;
 
+mod chrono;
+mod sqlcolumn;
+
+pub use sqlcolumn::SqlColumn;
+
 #[derive(Debug, Clone)]
 pub struct Column {
     name: String,
@@ -23,7 +28,10 @@ impl Column {
             column_alias: None,
         }
     }
-    pub fn name(&self) -> String {
+}
+
+impl SqlColumn for Column {
+    fn name(&self) -> String {
         self.name.clone()
     }
     fn name_with_table(&self) -> String {
@@ -32,14 +40,17 @@ impl Column {
             None => self.name.clone(),
         }
     }
-    pub fn set_table_alias(&mut self, alias: String) {
+    fn set_name(&mut self, name: String) {
+        self.name = name;
+    }
+    fn set_table_alias(&mut self, alias: String) {
         self.table_alias = Some(alias);
     }
-    pub fn set_column_alias(&mut self, alias: String) {
+    fn set_column_alias(&mut self, alias: String) {
         self.column_alias = Some(alias);
     }
-    pub fn get_column_alias(&self) -> Option<String> {
+    fn get_column_alias(&self) -> Option<String> {
         self.column_alias.clone()
     }
 }
@@ -89,6 +100,22 @@ impl SqlField for Arc<Column> {
     }
 }
 
+impl From<String> for Column {
+    fn from(name: String) -> Self {
+        Column {
+            name,
+            table_alias: None,
+            column_alias: None,
+        }
+    }
+}
+
+impl From<&str> for Column {
+    fn from(name: &str) -> Self {
+        name.to_string().into()
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/vantage/src/sql/table/column/chrono.rs b/vantage/src/sql/table/column/chrono.rs
new file mode 100644
index 0000000..e69de29
diff --git a/vantage/src/sql/table/column/sqlcolumn.rs b/vantage/src/sql/table/column/sqlcolumn.rs
new file mode 100644
index 0000000..c4648fa
--- /dev/null
+++ b/vantage/src/sql/table/column/sqlcolumn.rs
@@ -0,0 +1,10 @@
+use crate::sql::Chunk;
+
+pub trait SqlColumn: Chunk {
+    fn name(&self) -> String;
+    fn name_with_table(&self) -> String;
+    fn set_name(&mut self, name: String);
+    fn set_table_alias(&mut self, alias: String);
+    fn set_column_alias(&mut self, alias: String);
+    fn get_column_alias(&self) -> Option<String>;
+}
diff --git a/vantage/src/sql/table/with_columns.rs b/vantage/src/sql/table/with_columns.rs
index 588ae8a..2d8f20a 100644
--- a/vantage/src/sql/table/with_columns.rs
+++ b/vantage/src/sql/table/with_columns.rs
@@ -4,6 +4,7 @@ use serde_json::Value;
 use std::ops::Deref;
 use std::sync::Arc;
 
+use super::column::SqlColumn;
 use super::{Column, RelatedTable};
 use crate::lazy_expression::LazyExpression;
 use crate::prelude::Operations;
@@ -206,11 +207,12 @@ impl TableWithColumns for Table {
 
 impl Table {
     /// When building a table - a way to chain column declarations.
-    pub fn with_column(mut self, column: &str) -> Self {
-        self.add_column(
-            column.to_string(),
-            Column::new(column.to_string(), self.table_alias.clone()),
-        );
+    pub fn with_column(mut self, column: impl Into<Column>) -> Self {
+        let mut column: Column = column.into();
+        if self.table_alias.is_some() {
+            column.set_table_alias(self.table_alias.clone().unwrap());
+        }
+        self.add_column(column.name(), column);
         self
     }
 
@@ -255,6 +257,18 @@ mod tests {
         assert!(roles.get_column("name").is_some());
     }
 
+    #[test]
+    fn test_add_column() {
+        let data = json!([]);
+        let db = MockDataSource::new(&data);
+
+        let roles = Table::new("roles", db.clone())
+            .with_column(Column::new("name".to_string(), Some("id".to_string())));
+
+        assert!(roles.get_column("qq").is_none());
+        assert!(roles.get_column("name").is_some());
+    }
+
     #[test]
     fn test_search_for_field() {
         let data = json!([]);
diff --git a/vantage/src/sql/table/with_queries.rs b/vantage/src/sql/table/with_queries.rs
index 17874ba..76dc918 100644
--- a/vantage/src/sql/table/with_queries.rs
+++ b/vantage/src/sql/table/with_queries.rs
@@ -3,6 +3,7 @@ use serde::Serialize;
 use serde_json::{to_value, Value};
 use std::sync::Arc;
 
+use super::column::SqlColumn;
 use super::{AnyTable, Column, TableWithColumns};
 use crate::prelude::AssociatedQuery;
 use crate::sql::query::{QueryType, SqlQuery};
diff --git a/vantage/tests/types_test.rs b/vantage/tests/types_test.rs
new file mode 100644
index 0000000..58d2e4c
--- /dev/null
+++ b/vantage/tests/types_test.rs
@@ -0,0 +1,124 @@
+use std::sync::OnceLock;
+
+use anyhow::Ok;
+use chrono::{DateTime, Duration, NaiveDate, NaiveDateTime, NaiveTime, TimeZone, Utc};
+use serde::{Deserialize, Serialize};
+use serde_json::{json, Value};
+use serde_with::serde_as;
+use uuid::Uuid;
+use vantage::{
+    prelude::{Entity, Postgres, WritableDataSet},
+    sql::Table,
+};
+
+use anyhow::Result;
+
+static POSTGRESS: OnceLock<Postgres> = OnceLock::new();
+
+pub async fn postgres() -> Postgres {
+    if let Some(p) = POSTGRESS.get() {
+        return p.clone();
+    }
+
+    let connection_string = std::env::var("DATABASE_URL")
+        .unwrap_or_else(|_| "postgres://postgres@localhost:5432/postgres".to_string());
+
+    let postgres = Postgres::new(&connection_string).await;
+
+    POSTGRESS.set(postgres.clone()).unwrap();
+
+    postgres
+}
+
+#[serde_as]
+#[derive(Debug, Deserialize, Serialize, Clone, Default)]
+struct TestStruct {
+    small_int: i16,
+    integer: i32,
+    big_int: i64,
+    float4: f32,
+    float8: f64,
+    boolean: bool,
+    date: NaiveDate,            // For "2024-03-14"
+    time: NaiveTime,            // For "13:14:15"
+    timestamp: NaiveDateTime,   // For "2024-03-14T13:14:15"
+    timestamptz: DateTime<Utc>, // For "2024-03-14T11:14:15+00:00"
+    complex_interval: String, // Keep as string since no existing interval type handles this directly
+    #[serde_as(as = "serde_with::DurationSeconds<i64>")]
+    hour_interval: Duration, // Use chrono::Duration
+    #[serde_as(as = "serde_with::DurationSeconds<i64>")]
+    fractional_interval: Duration, // Use chrono::Duration
+    #[serde(with = "uuid::serde::simple")]
+    uuid: Uuid, // For UUIDs
+    json: Value,  // Generic JSON
+    jsonb: Value, // Generic JSON
+}
+impl Entity for TestStruct {}
+
+#[tokio::test]
+async fn my_test() -> Result<()> {
+    let p = postgres();
+
+    let t = Table::new_with_entity("test_types", p.await)
+        .with_column("small_int")
+        .with_id_column("integer")
+        .with_column("big_int")
+        .with_column("float4")
+        .with_column("float8")
+        .with_column("boolean")
+        .with_column("date")
+        .with_column("time")
+        .with_column("timestamp")
+        .with_column("timestamptz")
+        .with_column("complex_interval")
+        .with_column("hour_interval")
+        .with_column("fractional_interval")
+        .with_column("uuid")
+        .with_column("json")
+        .with_column("jsonb");
+
+    let max_values = TestStruct {
+        small_int: i16::MAX,
+        integer: i32::MAX,
+        big_int: i64::MAX,
+        float4: f32::MAX,
+        float8: f64::MAX,
+        boolean: true,
+        date: NaiveDate::from_ymd_opt(9999, 12, 31).unwrap(),
+        time: NaiveTime::from_hms_opt(23, 59, 59).unwrap(),
+        timestamp: Utc
+            .timestamp_opt(253402300799, 999_999_999)
+            .unwrap()
+            .naive_utc(),
+        timestamptz: Utc.timestamp_opt(253402300799, 999_999_999).unwrap(),
+        complex_interval: "999 years 12 mons 31 days".to_string(),
+        hour_interval: Duration::hours(3000),
+        fractional_interval: Duration::days(3000),
+        uuid: Uuid::nil(), // Replace with meaningful maximum UUID if needed
+        json: json!({"max_key": "max_value"}),
+        jsonb: json!({"max_key": "max_value"}),
+    };
+
+    let min_values = TestStruct {
+        small_int: i16::MIN,
+        integer: i32::MIN,
+        big_int: i64::MIN,
+        float4: f32::MIN,
+        float8: f64::MIN,
+        boolean: false,
+        date: NaiveDate::from_ymd_opt(1, 1, 1).unwrap(),
+        time: NaiveTime::from_hms_opt(0, 0, 0).unwrap(),
+        timestamp: Utc.timestamp_opt(-62135596800, 0).unwrap().naive_utc(),
+        timestamptz: Utc.timestamp_opt(-62135596800, 0).unwrap(),
+        complex_interval: "0 years 0 mons 0 days".to_string(),
+        hour_interval: Duration::zero(),
+        fractional_interval: Duration::zero(),
+        uuid: Uuid::nil(),
+        json: json!({"min_key": "min_value"}),
+        jsonb: json!({"min_key": "min_value"}),
+    };
+
+    t.insert(min_values).await?;
+
+    Ok(())
+}
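A quick usage sketch of the new `with_column(impl Into<Column>)` signature together with the `From<&str>` / `From<String>` conversions introduced above; it assumes a `vantage::prelude` that re-exports `Table`, `Column`, and `MockDataSource`, which may not match the crate's real module paths:

use serde_json::json;
use vantage::prelude::{Column, MockDataSource, Table}; // assumed re-exports

fn with_column_sketch() {
    let data = json!([]);
    let db = MockDataSource::new(&data);

    // A plain &str still works: From<&str> builds a Column with no aliases,
    // and with_column() applies the table alias if the table has one set.
    let users = Table::new("users", db.clone())
        .with_column("name")
        // A pre-built Column can now be passed directly as well.
        .with_column(Column::new("email".to_string(), None));

    // Columns added either way are reachable by name.
    assert!(users.get_column("name").is_some());
    assert!(users.get_column("email").is_some());
}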