Compare commits

...

56 Commits

Author SHA1 Message Date
filipriec d1ebe4732f steel with decimal math, saving before separating steel to a separate crate 2025-07-02 14:44:37 +02:00
filipriec 7b7f3ca05a more tests for the frontend 2025-06-26 20:25:59 +02:00
filipriec 234613f831 more frontend tests 2025-06-26 20:03:47 +02:00
filipriec f6d84e70cc testing frontend to connect to the backend in the form page 2025-06-26 19:19:08 +02:00
filipriec 5cd324b6ae client tests now have a proper structure 2025-06-26 11:56:18 +02:00
filipriec a7457f5749 frontend tui tests 2025-06-25 23:00:51 +02:00
filipriec a5afc75099 crit bug fixed 2025-06-25 17:33:37 +02:00
filipriec 625c9b3e09 adresar and uctovnictvo are now wiped out of existence 2025-06-25 16:14:43 +02:00
filipriec e20623ed53 removing the adresar and uctovnictvo hardcoded way of doing things from the project entirely 2025-06-25 13:52:00 +02:00
filipriec aa9adf7348 removed unused tests 2025-06-25 13:50:08 +02:00
filipriec 2e82aba0d1 full passer on the tables data now 2025-06-25 13:46:35 +02:00
filipriec b7a3f0f8d9 count is now fixed and working properly 2025-06-25 12:40:27 +02:00
filipriec 38c82389f7 count gets a full passer in tests 2025-06-25 12:37:37 +02:00
filipriec cb0a2bee17 get by count well tested 2025-06-25 11:47:25 +02:00
filipriec dc99131794 ordering of the tests for tables data 2025-06-25 10:34:58 +02:00
filipriec 5c23f61a10 get method passing without any problem 2025-06-25 09:44:38 +02:00
filipriec f87e3c03cb get test updated, working now 2025-06-25 09:16:32 +02:00
filipriec d346670839 tests for the delete endpoint are all passing 2025-06-25 09:04:58 +02:00
filipriec 560d8b7234 delete tests robustness not yet fully working 2025-06-25 08:44:36 +02:00
filipriec b297c2b311 working full passer on put request 2025-06-24 20:06:39 +02:00
filipriec d390c567d5 more tests 2025-06-24 00:46:51 +02:00
filipriec 029e614b9c more put tests 2025-06-24 00:45:37 +02:00
filipriec f9a78e4eec the tests for the put endpoint are now passing, but it's not what i would love 2025-06-23 23:25:45 +02:00
filipriec d8758f7531 we are now passing all the tests properly with the table definition and the post tables data 2025-06-23 13:52:29 +02:00
filipriec 4e86ecff84 it's now passing all the tests 2025-06-22 23:05:38 +02:00
filipriec 070d091e07 robustness, one test still failing, will fix it 2025-06-22 23:02:41 +02:00
filipriec 7403b3c3f8 4 tests are failing 2025-06-22 22:15:08 +02:00
filipriec 1b1e7b7205 robust decimal solution to push tables data to the backend 2025-06-22 22:08:22 +02:00
filipriec 1b8f19f1ce tables data tests are now generalized, need a bit more fixes, 6/6 are passing 2025-06-22 16:10:24 +02:00
filipriec 2a14eadf34 fixed compatibility layer to old tests git status REMOVE IN THE FUTURE 2025-06-22 14:00:49 +02:00
filipriec fd36cd5795 tests are now passing fully 2025-06-22 13:13:20 +02:00
filipriec f4286ac3c9 more changes and more fixes, 3 more tests to go 2025-06-22 12:48:36 +02:00
filipriec 92d5eb4844 needs the last one to be fixed, otherwise it's getting perfect 2025-06-21 23:57:52 +02:00
filipriec 87b9f6ab87 more fixes 2025-06-21 21:43:39 +02:00
filipriec 06d98aab5c 5 more tests to go 2025-06-21 21:01:49 +02:00
filipriec 298f56a53c tests are passing better than ever before, it's looking decent actually now 2025-06-21 16:18:32 +02:00
filipriec 714a5f2f1c tests compiled 2025-06-21 15:11:27 +02:00
filipriec 4e29d0084f compiled with the profile to be schemas 2025-06-21 10:37:37 +02:00
filipriec 63f1b4da2e changing profile id to schema in the whole project 2025-06-21 09:57:14 +02:00
filipriec 9477f53432 big change in the schema, it's profile names now and not gen 2025-06-20 22:31:49 +02:00
filipriec ed786f087c changing tests for a huge change in the project 2025-06-20 20:07:07 +02:00
filipriec 8e22ea05ff improvements and fixes to the tests 2025-06-20 19:59:42 +02:00
filipriec 8414657224 gen isolated tables 2025-06-18 23:19:19 +02:00
filipriec e25213ed1b tests are robust, running in parallel 2025-06-18 22:38:00 +02:00
filipriec 4843b0778c robust testing of the table definitions 2025-06-18 21:37:30 +02:00
filipriec f5fae98c69 tests now working via makefile 2025-06-18 14:44:38 +02:00
filipriec 6faf0a4a31 tests for table definitions 2025-06-17 22:46:04 +02:00
filipriec 011fafc0ff proper types now working 2025-06-17 17:31:11 +02:00
filipriec 8ebe74484c now not creating tables with the year_ prefix and living in the gen schema by default 2025-06-17 11:45:55 +02:00
filipriec 3eb9523103 you are going to kill me but please don't, i just cleaned up the migration file and it's 100% valid. do not use any version before this one; after this version so many things need to be changed, so haha... i'm ashamed but i love it at the same time 2025-06-17 11:21:33 +02:00
filipriec 3dfa922b9e unimportant change 2025-06-17 10:27:22 +02:00
filipriec 248d54a30f now accepting null in the post table data as nothing 2025-06-16 22:51:05 +02:00
filipriec b30fef4ccd post doesn't work, but the refactored code displays the autocomplete at least, needs a fix 2025-06-16 16:42:25 +02:00
filipriec a9c4527318 complete redesign of how the client is displaying data 2025-06-16 16:10:24 +02:00
filipriec c31f08d5b8 fixing post with links 2025-06-16 14:42:49 +02:00
filipriec 9e0fa9ddb1 autocomplete now autocompleting data, not just the id 2025-06-16 11:54:54 +02:00
132 changed files with 17109 additions and 4554 deletions

283
Cargo.lock generated
View File

@@ -65,6 +65,17 @@ version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
[[package]]
name = "ahash"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9"
dependencies = [
"getrandom 0.2.15",
"once_cell",
"version_check",
]
[[package]]
name = "ahash"
version = "0.8.11"
@@ -125,6 +136,12 @@ version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
[[package]]
name = "arrayvec"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "as_derive_utils"
version = "0.11.0"
@@ -146,6 +163,28 @@ dependencies = [
"abi_stable",
]
[[package]]
name = "async-stream"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
dependencies = [
"async-stream-impl",
"futures-core",
"pin-project-lite",
]
[[package]]
name = "async-stream-impl"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.100",
]
[[package]]
name = "async-trait"
version = "0.1.88"
@@ -312,6 +351,18 @@ dependencies = [
"crunchy",
]
[[package]]
name = "bitvec"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
dependencies = [
"funty",
"radium",
"tap",
"wyz",
]
[[package]]
name = "block-buffer"
version = "0.10.4"
@@ -356,12 +407,57 @@ dependencies = [
"syn 2.0.100",
]
[[package]]
name = "borsh"
version = "1.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce"
dependencies = [
"borsh-derive",
"cfg_aliases",
]
[[package]]
name = "borsh-derive"
version = "1.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3"
dependencies = [
"once_cell",
"proc-macro-crate",
"proc-macro2",
"quote",
"syn 2.0.100",
]
[[package]]
name = "bumpalo"
version = "3.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf"
[[package]]
name = "bytecheck"
version = "0.6.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2"
dependencies = [
"bytecheck_derive",
"ptr_meta",
"simdutf8",
]
[[package]]
name = "bytecheck_derive"
version = "0.6.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "byteorder"
version = "1.5.0"
@@ -412,6 +508,12 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "cfg_aliases"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
[[package]]
name = "chrono"
version = "0.4.40"
@@ -447,13 +549,17 @@ dependencies = [
"crossterm",
"dirs 6.0.0",
"dotenvy",
"futures",
"lazy_static",
"prost",
"prost-types",
"ratatui",
"rstest",
"serde",
"serde_json",
"time",
"tokio",
"tokio-test",
"toml",
"tonic",
"tracing",
@@ -461,6 +567,7 @@ dependencies = [
"tui-textarea",
"unicode-segmentation",
"unicode-width 0.2.0",
"uuid",
]
[[package]]
@@ -487,6 +594,7 @@ name = "common"
version = "0.3.13"
dependencies = [
"prost",
"prost-types",
"serde",
"tantivy",
"tonic",
@@ -965,6 +1073,27 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "funty"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
[[package]]
name = "futures"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
dependencies = [
"futures-channel",
"futures-core",
"futures-executor",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.31"
@@ -1044,6 +1173,7 @@ version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-macro",
@@ -1146,6 +1276,9 @@ name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
dependencies = [
"ahash 0.7.8",
]
[[package]]
name = "hashbrown"
@@ -1153,7 +1286,7 @@ version = "0.14.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
dependencies = [
"ahash",
"ahash 0.8.11",
"allocator-api2",
"serde",
]
@@ -1657,7 +1790,7 @@ version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e14eda50a3494b3bf7b9ce51c52434a761e383d7238ce1dd5dcec2fbc13e9fb"
dependencies = [
"ahash",
"ahash 0.8.11",
"dashmap",
"hashbrown 0.14.5",
"serde",
@@ -2251,7 +2384,7 @@ version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac98773b7109bc75f475ab5a134c9b64b87e59d776d31098d8f346922396a477"
dependencies = [
"arrayvec",
"arrayvec 0.5.2",
"typed-arena",
"unicode-width 0.1.14",
]
@@ -2358,6 +2491,26 @@ dependencies = [
"prost",
]
[[package]]
name = "ptr_meta"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1"
dependencies = [
"ptr_meta_derive",
]
[[package]]
name = "ptr_meta_derive"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "quickscope"
version = "0.2.0"
@@ -2383,6 +2536,12 @@ version = "5.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
[[package]]
name = "radium"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
[[package]]
name = "radix_fmt"
version = "1.0.0"
@@ -2574,6 +2733,15 @@ version = "1.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2"
[[package]]
name = "rend"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c"
dependencies = [
"bytecheck",
]
[[package]]
name = "repr_offset"
version = "0.2.2"
@@ -2597,6 +2765,35 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "rkyv"
version = "0.7.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b"
dependencies = [
"bitvec",
"bytecheck",
"bytes",
"hashbrown 0.12.3",
"ptr_meta",
"rend",
"rkyv_derive",
"seahash",
"tinyvec",
"uuid",
]
[[package]]
name = "rkyv_derive"
version = "0.7.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "rsa"
version = "0.9.8"
@@ -2657,6 +2854,32 @@ dependencies = [
"serde_derive",
]
[[package]]
name = "rust_decimal"
version = "1.37.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b203a6425500a03e0919c42d3c47caca51e79f1132046626d2c8871c5092035d"
dependencies = [
"arrayvec 0.7.6",
"borsh",
"bytes",
"num-traits",
"rand 0.8.5",
"rkyv",
"serde",
"serde_json",
]
[[package]]
name = "rust_decimal_macros"
version = "1.37.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6268b74858287e1a062271b988a0c534bf85bbeb567fe09331bf40ed78113d5"
dependencies = [
"quote",
"syn 2.0.100",
]
[[package]]
name = "rustc-demangle"
version = "0.1.24"
@@ -2731,6 +2954,12 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "seahash"
version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b"
[[package]]
name = "search"
version = "0.3.13"
@@ -2840,12 +3069,17 @@ dependencies = [
"common",
"dashmap",
"dotenvy",
"futures",
"jsonwebtoken",
"lazy_static",
"prost",
"prost-types",
"rand 0.9.1",
"regex",
"rstest",
"rust-stemmers",
"rust_decimal",
"rust_decimal_macros",
"search",
"serde",
"serde_json",
@@ -2940,6 +3174,12 @@ dependencies = [
"rand_core 0.6.4",
]
[[package]]
name = "simdutf8"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e"
[[package]]
name = "simple_asn1"
version = "0.6.3"
@@ -3056,6 +3296,7 @@ dependencies = [
"native-tls",
"once_cell",
"percent-encoding",
"rust_decimal",
"serde",
"serde_json",
"sha2",
@@ -3139,6 +3380,7 @@ dependencies = [
"percent-encoding",
"rand 0.8.5",
"rsa",
"rust_decimal",
"serde",
"sha1",
"sha2",
@@ -3179,6 +3421,7 @@ dependencies = [
"memchr",
"once_cell",
"rand 0.8.5",
"rust_decimal",
"serde",
"serde_json",
"sha2",
@@ -3556,6 +3799,12 @@ dependencies = [
"serde",
]
[[package]]
name = "tap"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "tempfile"
version = "3.19.1"
@@ -3726,6 +3975,19 @@ dependencies = [
"tokio",
]
[[package]]
name = "tokio-test"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7"
dependencies = [
"async-stream",
"bytes",
"futures-core",
"tokio",
"tokio-stream",
]
[[package]]
name = "tokio-util"
version = "0.7.14"
@@ -4056,12 +4318,14 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
[[package]]
name = "uuid"
version = "1.16.0"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9"
checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d"
dependencies = [
"getrandom 0.3.2",
"js-sys",
"serde",
"wasm-bindgen",
]
[[package]]
@@ -4502,6 +4766,15 @@ version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51"
[[package]]
name = "wyz"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed"
dependencies = [
"tap",
]
[[package]]
name = "yoke"
version = "0.7.5"

View File

@@ -24,6 +24,7 @@ tokio = { version = "1.44.2", features = ["full"] }
tonic = "0.13.0"
prost = "0.13.5"
async-trait = "0.1.88"
prost-types = "0.13.0"
# Data Handling & Serialization
serde = { version = "1.0.219", features = ["derive"] }

View File

@@ -9,6 +9,7 @@ anyhow = "1.0.98"
async-trait = "0.1.88"
common = { path = "../common" }
prost-types = { workspace = true }
crossterm = "0.28.1"
dirs = "6.0.0"
dotenvy = "0.15.7"
@@ -30,3 +31,9 @@ unicode-width = "0.2.0"
[features]
default = []
ui-debug = []
[dev-dependencies]
rstest = "0.25.0"
tokio-test = "0.4.4"
uuid = { version = "1.17.0", features = ["v4"] }
futures = "0.3.31"

View File

@@ -1,30 +1,18 @@
// src/components/common/autocomplete.rs
use common::proto::multieko2::search::search_response::Hit;
use crate::config::colors::themes::Theme;
use crate::state::pages::form::FormState;
use common::proto::multieko2::search::search_response::Hit;
use ratatui::{
layout::Rect,
style::{Color, Modifier, Style},
widgets::{Block, List, ListItem, ListState},
Frame,
};
use std::collections::HashMap;
use unicode_width::UnicodeWidthStr;
/// Converts a serde_json::Value into a displayable String.
/// Handles String, Number, and Bool variants. Returns an empty string for Null and others.
fn json_value_to_string(value: &serde_json::Value) -> String {
match value {
serde_json::Value::String(s) => s.clone(),
serde_json::Value::Number(n) => n.to_string(),
serde_json::Value::Bool(b) => b.to_string(),
// Return an empty string for Null, Array, or Object so we can filter them out.
_ => String::new(),
}
}
/// Renders an opaque dropdown list for simple string-based suggestions.
/// This function remains unchanged.
/// THIS IS THE RESTORED FUNCTION.
pub fn render_autocomplete_dropdown(
f: &mut Frame,
input_rect: Rect,
@@ -84,22 +72,22 @@ pub fn render_autocomplete_dropdown(
.collect();
let list = List::new(items);
let mut profile_list_state = ListState::default();
profile_list_state.select(selected_index);
let mut list_state = ListState::default();
list_state.select(selected_index);
f.render_stateful_widget(list, dropdown_area, &mut profile_list_state);
f.render_stateful_widget(list, dropdown_area, &mut list_state);
}
// --- MODIFIED FUNCTION FOR RICH SUGGESTIONS ---
/// Renders an opaque dropdown list for rich `Hit`-based suggestions.
/// Displays the value of the first meaningful column, followed by the Hit ID.
pub fn render_rich_autocomplete_dropdown(
/// RENAMED from render_rich_autocomplete_dropdown
pub fn render_hit_autocomplete_dropdown(
f: &mut Frame,
input_rect: Rect,
frame_area: Rect,
theme: &Theme,
suggestions: &[Hit],
selected_index: Option<usize>,
form_state: &FormState,
) {
if suggestions.is_empty() {
return;
@@ -107,50 +95,9 @@ pub fn render_rich_autocomplete_dropdown(
let display_names: Vec<String> = suggestions
.iter()
.map(|hit| {
// Use serde_json::Value to handle mixed types (string, null, etc.)
if let Ok(content_map) =
serde_json::from_str::<HashMap<String, serde_json::Value>>(
&hit.content_json,
)
{
// Define keys to ignore for a cleaner display
const IGNORED_KEYS: &[&str] = &["id", "deleted", "created_at"];
// Get keys, filter out ignored ones, and sort for consistency
let mut keys: Vec<_> = content_map
.keys()
.filter(|k| !IGNORED_KEYS.contains(&k.as_str()))
.cloned()
.collect();
keys.sort();
// Get only the first non-empty value from the sorted keys
let values: Vec<_> = keys
.iter()
.map(|key| {
content_map
.get(key)
.map(json_value_to_string)
.unwrap_or_default()
})
.filter(|s| !s.is_empty()) // Filter out null/empty values
.take(1) // Changed from take(2) to take(1)
.collect();
let display_part = values.first().cloned().unwrap_or_default(); // Get the first value
if display_part.is_empty() {
format!("ID: {}", hit.id)
} else {
format!("{} | ID: {}", display_part, hit.id) // ID at the end
}
} else {
format!("ID: {} (parse error)", hit.id)
}
})
.map(|hit| form_state.get_display_name_for_hit(hit))
.collect();
// --- Calculate Dropdown Size & Position ---
let max_suggestion_width =
display_names.iter().map(|s| s.width()).max().unwrap_or(0) as u16;
let horizontal_padding: u16 = 2;
@@ -164,7 +111,6 @@ pub fn render_rich_autocomplete_dropdown(
height: dropdown_height,
};
// --- Clamping Logic ---
if dropdown_area.bottom() > frame_area.height {
dropdown_area.y = input_rect.y.saturating_sub(dropdown_height);
}
@@ -174,7 +120,6 @@ pub fn render_rich_autocomplete_dropdown(
dropdown_area.x = dropdown_area.x.max(0);
dropdown_area.y = dropdown_area.y.max(0);
// --- Rendering Logic ---
let background_block =
Block::default().style(Style::default().bg(Color::DarkGray));
f.render_widget(background_block, dropdown_area);

View File

@@ -1,10 +1,10 @@
// src/components/form/form.rs
use crate::components::common::autocomplete; // <--- ADD THIS IMPORT
use crate::components::common::autocomplete;
use crate::components::handlers::canvas::render_canvas;
use crate::config::colors::themes::Theme;
use crate::state::app::highlight::HighlightState;
use crate::state::pages::canvas_state::CanvasState;
use crate::state::pages::form::FormState; // <--- CHANGE THIS IMPORT
use crate::state::pages::form::FormState;
use ratatui::{
layout::{Alignment, Constraint, Direction, Layout, Margin, Rect},
style::Style,
@@ -78,25 +78,25 @@ pub fn render_form(
// --- NEW: RENDER AUTOCOMPLETE ---
if form_state.autocomplete_active {
// Use the Rect of the active field that render_canvas found for us.
if let Some(active_rect) = active_field_rect {
let selected_index = form_state.get_selected_suggestion_index();
// THE DECIDER LOGIC:
// 1. Check for rich suggestions first.
if let Some(rich_suggestions) = form_state.get_rich_suggestions() {
if !rich_suggestions.is_empty() {
autocomplete::render_rich_autocomplete_dropdown(
// CHANGE THIS to call the renamed function
autocomplete::render_hit_autocomplete_dropdown(
f,
active_rect,
f.area(), // Use f.area() for clamping, not f.size()
f.area(),
theme,
rich_suggestions,
selected_index,
form_state,
);
}
}
// 2. Fallback to simple suggestions if rich ones aren't available.
// The fallback to simple suggestions is now correctly handled
// because the original render_autocomplete_dropdown exists again.
else if let Some(simple_suggestions) = form_state.get_suggestions() {
if !simple_suggestions.is_empty() {
autocomplete::render_autocomplete_dropdown(
@@ -112,3 +112,4 @@ pub fn render_form(
}
}
}

View File

@@ -1,16 +1,16 @@
// src/components/handlers/canvas.rs
use ratatui::{
widgets::{Paragraph, Block, Borders},
layout::{Layout, Constraint, Direction, Rect},
style::{Style, Modifier},
layout::{Alignment, Constraint, Direction, Layout, Rect},
style::{Modifier, Style},
text::{Line, Span},
widgets::{Block, Borders, Paragraph},
Frame,
prelude::Alignment,
};
use crate::config::colors::themes::Theme;
use crate::state::app::highlight::HighlightState;
use crate::state::pages::canvas_state::CanvasState;
use crate::state::app::highlight::HighlightState; // Ensure correct import path
use std::cmp::{min, max};
use std::cmp::{max, min};
pub fn render_canvas(
f: &mut Frame,
@@ -21,9 +21,8 @@ pub fn render_canvas(
inputs: &[&String],
theme: &Theme,
is_edit_mode: bool,
highlight_state: &HighlightState, // Using the enum state
highlight_state: &HighlightState,
) -> Option<Rect> {
// ... (setup code remains the same) ...
let columns = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(30), Constraint::Percentage(70)])
@@ -58,46 +57,47 @@ pub fn render_canvas(
let mut active_field_input_rect = None;
// Render labels
for (i, field) in fields.iter().enumerate() {
let label = Paragraph::new(Line::from(Span::styled(
format!("{}:", field),
Style::default().fg(theme.fg)),
));
f.render_widget(label, Rect {
x: columns[0].x,
y: input_block.y + 1 + i as u16,
width: columns[0].width,
height: 1,
});
Style::default().fg(theme.fg),
)));
f.render_widget(
label,
Rect {
x: columns[0].x,
y: input_block.y + 1 + i as u16,
width: columns[0].width,
height: 1,
},
);
}
// Render inputs and cursor
for (i, input) in inputs.iter().enumerate() {
for (i, _input) in inputs.iter().enumerate() {
let is_active = i == *current_field_idx;
let current_cursor_pos = form_state.current_cursor_pos();
let text = input.as_str();
let text_len = text.chars().count();
// Use the trait method to get display value
let text = form_state.get_display_value_for_field(i);
let text_len = text.chars().count();
let line: Line;
// --- Use match on the highlight_state enum ---
match highlight_state {
HighlightState::Off => {
// Not in highlight mode, render normally
line = Line::from(Span::styled(
text,
if is_active { Style::default().fg(theme.highlight) } else { Style::default().fg(theme.fg) }
if is_active {
Style::default().fg(theme.highlight)
} else {
Style::default().fg(theme.fg)
},
));
}
HighlightState::Characterwise { anchor } => {
// --- Character-wise Highlight Logic ---
let (anchor_field, anchor_char) = *anchor;
let start_field = min(anchor_field, *current_field_idx);
let end_field = max(anchor_field, *current_field_idx);
// Use start_char and end_char consistently
let (start_char, end_char) = if anchor_field == *current_field_idx {
(min(anchor_char, current_cursor_pos), max(anchor_char, current_cursor_pos))
} else if anchor_field < *current_field_idx {
@@ -111,24 +111,20 @@ pub fn render_canvas(
let normal_style_outside = Style::default().fg(theme.fg);
if i >= start_field && i <= end_field {
// This line is within the character-wise highlight range
if start_field == end_field { // Case 1: Single Line Highlight
// Use start_char and end_char here
if start_field == end_field {
let clamped_start = start_char.min(text_len);
let clamped_end = end_char.min(text_len); // Use text_len for slicing logic
let clamped_end = end_char.min(text_len);
let before: String = text.chars().take(clamped_start).collect();
let highlighted: String = text.chars().skip(clamped_start).take(clamped_end.saturating_sub(clamped_start) + 1).collect();
// Define 'after' here
let after: String = text.chars().skip(clamped_end + 1).collect();
line = Line::from(vec![
Span::styled(before, normal_style_in_highlight),
Span::styled(highlighted, highlight_style),
Span::styled(after, normal_style_in_highlight), // Use defined 'after'
Span::styled(after, normal_style_in_highlight),
]);
} else if i == start_field { // Case 2: Multi-Line Highlight - Start Line
// Use start_char here
} else if i == start_field {
let safe_start = start_char.min(text_len);
let before: String = text.chars().take(safe_start).collect();
let highlighted: String = text.chars().skip(safe_start).collect();
@@ -136,8 +132,7 @@ pub fn render_canvas(
Span::styled(before, normal_style_in_highlight),
Span::styled(highlighted, highlight_style),
]);
} else if i == end_field { // Case 3: Multi-Line Highlight - End Line (Corrected index)
// Use end_char here
} else if i == end_field {
let safe_end_inclusive = if text_len > 0 { end_char.min(text_len - 1) } else { 0 };
let highlighted: String = text.chars().take(safe_end_inclusive + 1).collect();
let after: String = text.chars().skip(safe_end_inclusive + 1).collect();
@@ -145,19 +140,17 @@ pub fn render_canvas(
Span::styled(highlighted, highlight_style),
Span::styled(after, normal_style_in_highlight),
]);
} else { // Case 4: Multi-Line Highlight - Middle Line (Corrected index)
line = Line::from(Span::styled(text, highlight_style)); // Highlight whole line
} else {
line = Line::from(Span::styled(text, highlight_style));
}
} else { // Case 5: Line Outside Character-wise Highlight Range
} else {
line = Line::from(Span::styled(
text,
// Use normal styling (active or inactive)
if is_active { normal_style_in_highlight } else { normal_style_outside }
));
}
}
HighlightState::Linewise { anchor_line } => {
// --- Linewise Highlight Logic ---
let start_field = min(*anchor_line, *current_field_idx);
let end_field = max(*anchor_line, *current_field_idx);
let highlight_style = Style::default().fg(theme.highlight).bg(theme.highlight_bg).add_modifier(Modifier::BOLD);
@@ -165,25 +158,31 @@ pub fn render_canvas(
let normal_style_outside = Style::default().fg(theme.fg);
if i >= start_field && i <= end_field {
// Highlight the entire line
line = Line::from(Span::styled(text, highlight_style));
} else {
// Line outside linewise highlight range
line = Line::from(Span::styled(
text,
// Use normal styling (active or inactive)
if is_active { normal_style_in_highlight } else { normal_style_outside }
));
}
}
} // End match highlight_state
}
let input_display = Paragraph::new(line).alignment(Alignment::Left);
f.render_widget(input_display, input_rows[i]);
if is_active {
active_field_input_rect = Some(input_rows[i]);
let cursor_x = input_rows[i].x + form_state.current_cursor_pos() as u16;
// --- CORRECTED CURSOR POSITIONING LOGIC ---
// Use the new generic trait method to check for an override.
let cursor_x = if form_state.has_display_override(i) {
// If an override exists, place the cursor at the end.
input_rows[i].x + text.chars().count() as u16
} else {
// Otherwise, use the real cursor position.
input_rows[i].x + form_state.current_cursor_pos() as u16
};
let cursor_y = input_rows[i].y;
f.set_cursor_position((cursor_x, cursor_y));
}
@@ -191,4 +190,3 @@ pub fn render_canvas(
active_field_input_rect
}

View File

@@ -4,6 +4,7 @@ use crate::services::grpc_client::GrpcClient;
use crate::state::pages::canvas_state::CanvasState;
use crate::state::pages::form::FormState;
use crate::state::pages::auth::RegisterState;
use crate::state::app::state::AppState;
use crate::tui::functions::common::form::{revert, save};
use crossterm::event::{KeyCode, KeyEvent};
use std::any::Any;
@@ -13,6 +14,7 @@ pub async fn execute_common_action<S: CanvasState + Any>(
action: &str,
state: &mut S,
grpc_client: &mut GrpcClient,
app_state: &AppState,
current_position: &mut u64,
total_count: u64,
) -> Result<String> {
@@ -27,6 +29,7 @@ pub async fn execute_common_action<S: CanvasState + Any>(
match action {
"save" => {
let outcome = save(
app_state,
form_state,
grpc_client,
)

View File

@@ -3,6 +3,7 @@
use crate::services::grpc_client::GrpcClient;
use crate::state::pages::canvas_state::CanvasState;
use crate::state::pages::form::FormState;
use crate::state::app::state::AppState;
use crate::tui::functions::common::form::{revert, save};
use crate::tui::functions::common::form::SaveOutcome;
use crate::modes::handlers::event::EventOutcome;
@@ -14,6 +15,7 @@ pub async fn execute_common_action<S: CanvasState + Any>(
action: &str,
state: &mut S,
grpc_client: &mut GrpcClient,
app_state: &AppState,
) -> Result<EventOutcome> {
match action {
"save" | "revert" => {
@@ -26,6 +28,7 @@ pub async fn execute_common_action<S: CanvasState + Any>(
match action {
"save" => {
let save_result = save(
app_state,
form_state,
grpc_client,
).await;

View File

@@ -32,6 +32,7 @@ pub async fn handle_core_action(
Ok(EventOutcome::Ok(message))
} else {
let save_outcome = form_save(
app_state,
form_state,
grpc_client,
).await.context("Register save action failed")?;
@@ -52,6 +53,7 @@ pub async fn handle_core_action(
login_save(auth_state, login_state, auth_client, app_state).await.context("Login save n quit action failed")?
} else {
let save_outcome = form_save(
app_state,
form_state,
grpc_client,
).await?;

View File

@@ -132,13 +132,24 @@ pub async fn handle_edit_event(
.get(selected_idx)
.cloned()
{
// --- THIS IS THE CORE LOGIC CHANGE ---
// 1. Get the friendly display name for the UI
let display_name =
form_state.get_display_name_for_hit(&selection);
// 2. Store the REAL ID in the form's values
let current_input =
form_state.get_current_input_mut();
*current_input = selection.id.to_string();
let new_cursor_pos = current_input.len();
form_state.set_current_cursor_pos(new_cursor_pos);
// FIX: Access ideal_cursor_column through event_handler
event_handler.ideal_cursor_column = new_cursor_pos;
// 3. Set the persistent display override in the map
form_state.link_display_map.insert(
form_state.current_field,
display_name,
);
// 4. Finalize state
form_state.deactivate_autocomplete();
form_state.set_has_unsaved_changes(true);
return Ok(EditEventOutcome::Message(

View File

@@ -15,7 +15,7 @@ use anyhow::Result;
pub async fn handle_command_event(
key: KeyEvent,
config: &Config,
app_state: &AppState,
app_state: &mut AppState,
login_state: &LoginState,
register_state: &RegisterState,
form_state: &mut FormState,
@@ -74,7 +74,7 @@ pub async fn handle_command_event(
async fn process_command(
config: &Config,
form_state: &mut FormState,
app_state: &AppState,
app_state: &mut AppState,
login_state: &LoginState,
register_state: &RegisterState,
command_input: &mut String,
@@ -117,6 +117,7 @@ async fn process_command(
},
"save" => {
let outcome = save(
app_state,
form_state,
grpc_client,
).await?;

View File

@@ -1,7 +1,6 @@
// src/services/grpc_client.rs
use tonic::transport::Channel;
use common::proto::multieko2::common::{CountResponse, Empty};
use common::proto::multieko2::common::Empty;
use common::proto::multieko2::table_structure::table_structure_service_client::TableStructureServiceClient;
use common::proto::multieko2::table_structure::{GetTableStructureRequest, TableStructureResponse};
use common::proto::multieko2::table_definition::{
@@ -15,7 +14,10 @@ use common::proto::multieko2::table_script::{
use common::proto::multieko2::tables_data::{
tables_data_client::TablesDataClient,
GetTableDataByPositionRequest,
GetTableDataRequest, // ADD THIS
GetTableDataResponse,
DeleteTableDataRequest, // ADD THIS
DeleteTableDataResponse, // ADD THIS
GetTableDataCountRequest,
PostTableDataRequest, PostTableDataResponse, PutTableDataRequest,
PutTableDataResponse,
@@ -23,8 +25,10 @@ use common::proto::multieko2::tables_data::{
use common::proto::multieko2::search::{
searcher_client::SearcherClient, SearchRequest, SearchResponse,
};
use anyhow::{Context, Result}; // Added Context
use std::collections::HashMap; // NEW
use anyhow::{Context, Result};
use std::collections::HashMap;
use tonic::transport::Channel;
use prost_types::Value;
#[derive(Clone)]
pub struct GrpcClient {
@@ -48,7 +52,6 @@ impl GrpcClient {
TableDefinitionClient::new(channel.clone());
let table_script_client = TableScriptClient::new(channel.clone());
let tables_data_client = TablesDataClient::new(channel.clone());
// NEW: Instantiate the search client
let search_client = SearcherClient::new(channel.clone());
Ok(Self {
@@ -56,7 +59,7 @@ impl GrpcClient {
table_definition_client,
table_script_client,
tables_data_client,
search_client, // NEW
search_client,
})
}
@@ -116,7 +119,7 @@ impl GrpcClient {
Ok(response.into_inner())
}
// NEW Methods for TablesData service
// Existing TablesData methods
pub async fn get_table_data_count(
&mut self,
profile_name: String,
@@ -155,11 +158,53 @@ impl GrpcClient {
Ok(response.into_inner())
}
// ADD THIS: Missing get_table_data method
pub async fn get_table_data(
&mut self,
profile_name: String,
table_name: String,
id: i64,
) -> Result<GetTableDataResponse> {
let grpc_request = GetTableDataRequest {
profile_name,
table_name,
id,
};
let request = tonic::Request::new(grpc_request);
let response = self
.tables_data_client
.get_table_data(request)
.await
.context("gRPC GetTableData call failed")?;
Ok(response.into_inner())
}
// ADD THIS: Missing delete_table_data method
pub async fn delete_table_data(
&mut self,
profile_name: String,
table_name: String,
record_id: i64,
) -> Result<DeleteTableDataResponse> {
let grpc_request = DeleteTableDataRequest {
profile_name,
table_name,
record_id,
};
let request = tonic::Request::new(grpc_request);
let response = self
.tables_data_client
.delete_table_data(request)
.await
.context("gRPC DeleteTableData call failed")?;
Ok(response.into_inner())
}
pub async fn post_table_data(
&mut self,
profile_name: String,
table_name: String,
data: HashMap<String, String>,
data: HashMap<String, Value>,
) -> Result<PostTableDataResponse> {
let grpc_request = PostTableDataRequest {
profile_name,
@@ -180,7 +225,7 @@ impl GrpcClient {
profile_name: String,
table_name: String,
id: i64,
data: HashMap<String, String>,
data: HashMap<String, Value>,
) -> Result<PutTableDataResponse> {
let grpc_request = PutTableDataRequest {
profile_name,
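
A hypothetical call site for the two new methods; the profile name, table name, and record ID below are illustrative, and a connected client is assumed:

async fn demo(grpc_client: &mut GrpcClient) -> anyhow::Result<()> {
    // prost-generated response types derive Debug, so they can be logged directly.
    let fetched = grpc_client
        .get_table_data("default".to_string(), "2025_company_data1".to_string(), 42)
        .await?;
    tracing::info!("fetched record 42: {:?}", fetched);

    let deleted = grpc_client
        .delete_table_data("default".to_string(), "2025_company_data1".to_string(), 42)
        .await?;
    tracing::info!("delete response: {:?}", deleted);
    Ok(())
}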

View File

@@ -1,16 +1,100 @@
// src/services/ui_service.rs
use crate::services::grpc_client::GrpcClient;
use crate::state::pages::form::FormState;
use crate::tui::functions::common::form::SaveOutcome;
use crate::state::pages::add_logic::AddLogicState;
use crate::state::app::state::AppState;
use crate::state::pages::add_logic::AddLogicState;
use crate::state::pages::form::{FieldDefinition, FormState};
use crate::tui::functions::common::form::SaveOutcome;
use crate::utils::columns::filter_user_columns;
use anyhow::{Context, Result};
use anyhow::{anyhow, Context, Result};
use std::sync::Arc;
pub struct UiService;
impl UiService {
pub async fn load_table_view(
grpc_client: &mut GrpcClient,
app_state: &mut AppState,
profile_name: &str,
table_name: &str,
) -> Result<FormState> {
// 1. & 2. Fetch and Cache Schema - UNCHANGED
let table_structure = grpc_client
.get_table_structure(profile_name.to_string(), table_name.to_string())
.await
.context(format!(
"Failed to get table structure for {}.{}",
profile_name, table_name
))?;
let cache_key = format!("{}.{}", profile_name, table_name);
app_state
.schema_cache
.insert(cache_key, Arc::new(table_structure.clone()));
tracing::info!("Schema for '{}.{}' cached.", profile_name, table_name);
// --- START: FINAL, SIMPLIFIED, CORRECT LOGIC ---
// 3a. Create definitions for REGULAR fields first.
let mut fields: Vec<FieldDefinition> = table_structure
.columns
.iter()
.filter(|col| {
!col.is_primary_key
&& col.name != "deleted"
&& col.name != "created_at"
&& !col.name.ends_with("_id") // Filter out ALL potential links
})
.map(|col| FieldDefinition {
display_name: col.name.clone(),
data_key: col.name.clone(),
is_link: false,
link_target_table: None,
})
.collect();
// 3b. Now, find and APPEND definitions for LINK fields based on the `_id` convention.
let link_fields: Vec<FieldDefinition> = table_structure
.columns
.iter()
.filter(|col| col.name.ends_with("_id")) // Find all foreign key columns
.map(|col| {
// The table we link to is derived from the column name.
// e.g., "test_diacritics_id" -> "test_diacritics"
let target_table_base = col
.name
.strip_suffix("_id")
.unwrap_or(&col.name);
// Find the full table name from the profile tree for display.
// e.g., "test_diacritics" -> "2025_test_diacritics"
let full_target_table_name = app_state
.profile_tree
.profiles
.iter()
.find(|p| p.name == profile_name)
.and_then(|p| p.tables.iter().find(|t| t.name.ends_with(target_table_base)))
.map_or(target_table_base.to_string(), |t| t.name.clone());
FieldDefinition {
display_name: full_target_table_name.clone(),
data_key: col.name.clone(), // The actual FK column name
is_link: true,
link_target_table: Some(full_target_table_name),
}
})
.collect();
fields.extend(link_fields); // Append the link fields to the end
// --- END: FINAL, SIMPLIFIED, CORRECT LOGIC ---
Ok(FormState::new(
profile_name.to_string(),
table_name.to_string(),
fields,
))
}
pub async fn initialize_add_logic_table_data(
grpc_client: &mut GrpcClient,
add_logic_state: &mut AddLogicState,
@@ -92,6 +176,7 @@ impl UiService {
}
}
// REFACTOR THIS FUNCTION
pub async fn initialize_app_state_and_form(
grpc_client: &mut GrpcClient,
app_state: &mut AppState,
@@ -102,7 +187,6 @@ impl UiService {
.context("Failed to get profile tree")?;
app_state.profile_tree = profile_tree;
// Determine initial table to load (e.g., first table of first profile, or a default)
let initial_profile_name = app_state
.profile_tree
.profiles
@@ -115,33 +199,26 @@ impl UiService {
.profiles
.first()
.and_then(|p| p.tables.first().map(|t| t.name.clone()))
.unwrap_or_else(|| "2025_company_data1".to_string()); // Fallback if no tables
.unwrap_or_else(|| "2025_company_data1".to_string());
app_state.set_current_view_table(
initial_profile_name.clone(),
initial_table_name.clone(),
);
let table_structure = grpc_client
.get_table_structure(
initial_profile_name.clone(),
initial_table_name.clone(),
)
.await
.context(format!(
"Failed to get initial table structure for {}.{}",
initial_profile_name, initial_table_name
))?;
// NOW, just call our new central function. This avoids code duplication.
let form_state = Self::load_table_view(
grpc_client,
app_state,
&initial_profile_name,
&initial_table_name,
)
.await?;
let column_names: Vec<String> = table_structure
.columns
.iter()
.map(|col| col.name.clone())
.collect();
// The field names for the UI are derived from the new form_state
let field_names = form_state.fields.iter().map(|f| f.display_name.clone()).collect();
let filtered_columns = filter_user_columns(column_names);
Ok((initial_profile_name, initial_table_name, filtered_columns))
Ok((initial_profile_name, initial_table_name, field_names))
}
pub async fn fetch_and_set_table_count(

View File

@@ -1,15 +1,19 @@
// src/state/app/state.rs
use std::env;
use common::proto::multieko2::table_definition::ProfileTreeResponse;
use crate::modes::handlers::mode_manager::AppMode;
use crate::ui::handlers::context::DialogPurpose;
use crate::state::app::search::SearchState; // ADDED
use anyhow::Result;
use common::proto::multieko2::table_definition::ProfileTreeResponse;
// NEW: Import the types we need for the cache
use common::proto::multieko2::table_structure::TableStructureResponse;
use crate::modes::handlers::mode_manager::AppMode;
use crate::state::app::search::SearchState;
use crate::ui::handlers::context::DialogPurpose;
use std::collections::HashMap;
use std::env;
use std::sync::Arc;
#[cfg(feature = "ui-debug")]
use std::time::Instant;
// --- YOUR EXISTING DIALOGSTATE IS UNTOUCHED ---
// --- DialogState and UiState are unchanged ---
pub struct DialogState {
pub dialog_show: bool,
pub dialog_title: String,
@@ -30,7 +34,7 @@ pub struct UiState {
pub show_form: bool,
pub show_login: bool,
pub show_register: bool,
pub show_search_palette: bool, // ADDED
pub show_search_palette: bool,
pub focus_outside_canvas: bool,
pub dialog: DialogState,
}
@@ -52,10 +56,12 @@ pub struct AppState {
pub current_view_profile_name: Option<String>,
pub current_view_table_name: Option<String>,
// NEW: The "Rulebook" cache. We use Arc for efficient sharing.
pub schema_cache: HashMap<String, Arc<TableStructureResponse>>,
pub focused_button_index: usize,
pub pending_table_structure_fetch: Option<(String, String)>,
// ADDED: State for the search palette
pub search_state: Option<SearchState>,
// UI preferences
@@ -67,9 +73,7 @@ pub struct AppState {
impl AppState {
pub fn new() -> Result<Self> {
let current_dir = env::current_dir()?
.to_string_lossy()
.to_string();
let current_dir = env::current_dir()?.to_string_lossy().to_string();
Ok(AppState {
current_dir,
profile_tree: ProfileTreeResponse::default(),
@@ -77,9 +81,10 @@ impl AppState {
current_view_profile_name: None,
current_view_table_name: None,
current_mode: AppMode::General,
schema_cache: HashMap::new(), // NEW: Initialize the cache
focused_button_index: 0,
pending_table_structure_fetch: None,
search_state: None, // ADDED
search_state: None,
ui: UiState::default(),
#[cfg(feature = "ui-debug")]

View File

@@ -3,6 +3,7 @@
use common::proto::multieko2::search::search_response::Hit;
pub trait CanvasState {
// --- Existing methods (unchanged) ---
fn current_field(&self) -> usize;
fn current_cursor_pos(&self) -> usize;
fn has_unsaved_changes(&self) -> bool;
@@ -10,15 +11,22 @@ pub trait CanvasState {
fn get_current_input(&self) -> &str;
fn get_current_input_mut(&mut self) -> &mut String;
fn fields(&self) -> Vec<&str>;
fn set_current_field(&mut self, index: usize);
fn set_current_cursor_pos(&mut self, pos: usize);
fn set_has_unsaved_changes(&mut self, changed: bool);
// --- Autocomplete Support ---
fn get_suggestions(&self) -> Option<&[String]>;
fn get_selected_suggestion_index(&self) -> Option<usize>;
fn get_rich_suggestions(&self) -> Option<&[Hit]> {
None
}
fn get_display_value_for_field(&self, index: usize) -> &str {
self.inputs()
.get(index)
.map(|s| s.as_str())
.unwrap_or("")
}
fn has_display_override(&self, _index: usize) -> bool {
false
}
}
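
A minimal sketch of a state type that supplies only the required methods and inherits the three new defaults; the struct layout is illustrative:

struct PlainState {
    labels: Vec<String>,
    values: Vec<String>,
    field: usize,
    cursor: usize,
    dirty: bool,
}

impl CanvasState for PlainState {
    fn current_field(&self) -> usize { self.field }
    fn current_cursor_pos(&self) -> usize { self.cursor }
    fn has_unsaved_changes(&self) -> bool { self.dirty }
    fn inputs(&self) -> Vec<&String> { self.values.iter().collect() }
    fn get_current_input(&self) -> &str { &self.values[self.field] }
    fn get_current_input_mut(&mut self) -> &mut String { &mut self.values[self.field] }
    fn fields(&self) -> Vec<&str> { self.labels.iter().map(|s| s.as_str()).collect() }
    fn set_current_field(&mut self, index: usize) { self.field = index; }
    fn set_current_cursor_pos(&mut self, pos: usize) { self.cursor = pos; }
    fn set_has_unsaved_changes(&mut self, changed: bool) { self.dirty = changed; }
    fn get_suggestions(&self) -> Option<&[String]> { None }
    fn get_selected_suggestion_index(&self) -> Option<usize> { None }
    // get_rich_suggestions, get_display_value_for_field, and has_display_override
    // all fall back to the defaults added above.
}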

View File

@@ -3,18 +3,26 @@
use crate::config::colors::themes::Theme;
use crate::state::app::highlight::HighlightState;
use crate::state::pages::canvas_state::CanvasState;
use common::proto::multieko2::search::search_response::Hit; // Import Hit
use common::proto::multieko2::search::search_response::Hit;
use ratatui::layout::Rect;
use ratatui::Frame;
use std::collections::HashMap;
// A struct to bridge the display name (label) to the data key from the server.
fn json_value_to_string(value: &serde_json::Value) -> String {
match value {
serde_json::Value::String(s) => s.clone(),
serde_json::Value::Number(n) => n.to_string(),
serde_json::Value::Bool(b) => b.to_string(),
_ => String::new(),
}
}
#[derive(Debug, Clone)]
pub struct FieldDefinition {
pub display_name: String,
pub data_key: String,
pub is_link: bool,
pub link_target_table: Option<String>,
pub link_target_table: Option<String>,
}
#[derive(Clone)]
@@ -29,12 +37,11 @@ pub struct FormState {
pub current_field: usize,
pub has_unsaved_changes: bool,
pub current_cursor_pos: usize,
// --- MODIFIED AUTOCOMPLETE STATE ---
pub autocomplete_active: bool,
pub autocomplete_suggestions: Vec<Hit>, // Changed to use the Hit struct
pub autocomplete_suggestions: Vec<Hit>,
pub selected_suggestion_index: Option<usize>,
pub autocomplete_loading: bool, // To show a loading indicator
pub autocomplete_loading: bool,
pub link_display_map: HashMap<usize, String>,
}
impl FormState {
@@ -55,11 +62,48 @@ impl FormState {
current_field: 0,
has_unsaved_changes: false,
current_cursor_pos: 0,
// --- INITIALIZE NEW STATE ---
autocomplete_active: false,
autocomplete_suggestions: Vec::new(),
selected_suggestion_index: None,
autocomplete_loading: false, // Initialize loading state
autocomplete_loading: false,
link_display_map: HashMap::new(),
}
}
pub fn get_display_name_for_hit(&self, hit: &Hit) -> String {
if let Ok(content_map) =
serde_json::from_str::<HashMap<String, serde_json::Value>>(
&hit.content_json,
)
{
const IGNORED_KEYS: &[&str] = &["id", "deleted", "created_at"];
let mut keys: Vec<_> = content_map
.keys()
.filter(|k| !IGNORED_KEYS.contains(&k.as_str()))
.cloned()
.collect();
keys.sort();
let values: Vec<_> = keys
.iter()
.map(|key| {
content_map
.get(key)
.map(json_value_to_string)
.unwrap_or_default()
})
.filter(|s| !s.is_empty())
.take(1)
.collect();
let display_part = values.first().cloned().unwrap_or_default();
if display_part.is_empty() {
format!("ID: {}", hit.id)
} else {
format!("{} | ID: {}", display_part, hit.id)
}
} else {
format!("ID: {} (parse error)", hit.id)
}
}
@@ -78,7 +122,7 @@ impl FormState {
crate::components::form::form::render_form(
f,
area,
self, // <--- This now correctly passes the concrete &FormState
self,
&fields_str_slice,
&self.current_field,
&values_str_slice,
@@ -102,7 +146,8 @@ impl FormState {
} else {
self.current_position = 1;
}
self.deactivate_autocomplete(); // Deactivate on reset
self.deactivate_autocomplete();
self.link_display_map.clear();
}
pub fn get_current_input(&self) -> &str {
@@ -113,6 +158,7 @@ impl FormState {
}
pub fn get_current_input_mut(&mut self) -> &mut String {
self.link_display_map.remove(&self.current_field);
self.values
.get_mut(self.current_field)
.expect("Invalid current_field index")
@@ -159,11 +205,10 @@ impl FormState {
self.has_unsaved_changes = false;
self.current_field = 0;
self.current_cursor_pos = 0;
self.deactivate_autocomplete(); // Deactivate on update
self.deactivate_autocomplete();
self.link_display_map.clear();
}
// --- NEW HELPER METHOD ---
/// Deactivates autocomplete and clears its state.
pub fn deactivate_autocomplete(&mut self) {
self.autocomplete_active = false;
self.autocomplete_suggestions.clear();
@@ -176,58 +221,42 @@ impl CanvasState for FormState {
fn current_field(&self) -> usize {
self.current_field
}
fn current_cursor_pos(&self) -> usize {
self.current_cursor_pos
}
fn has_unsaved_changes(&self) -> bool {
self.has_unsaved_changes
}
fn inputs(&self) -> Vec<&String> {
self.values.iter().collect()
}
fn get_current_input(&self) -> &str {
FormState::get_current_input(self)
}
fn get_current_input_mut(&mut self) -> &mut String {
FormState::get_current_input_mut(self)
}
fn fields(&self) -> Vec<&str> {
self.fields
.iter()
.map(|f| f.display_name.as_str())
.collect()
}
fn set_current_field(&mut self, index: usize) {
if index < self.fields.len() {
self.current_field = index;
}
// Deactivate autocomplete when changing fields
self.deactivate_autocomplete();
}
fn set_current_cursor_pos(&mut self, pos: usize) {
self.current_cursor_pos = pos;
}
fn set_has_unsaved_changes(&mut self, changed: bool) {
self.has_unsaved_changes = changed;
}
// --- MODIFIED: Implement autocomplete trait methods ---
/// Returns None because this state uses rich suggestions.
fn get_suggestions(&self) -> Option<&[String]> {
None
}
/// Returns rich suggestions.
fn get_rich_suggestions(&self) -> Option<&[Hit]> {
if self.autocomplete_active {
Some(&self.autocomplete_suggestions)
@@ -235,7 +264,6 @@ impl CanvasState for FormState {
None
}
}
fn get_selected_suggestion_index(&self) -> Option<usize> {
if self.autocomplete_active {
self.selected_suggestion_index
@@ -243,4 +271,19 @@ impl CanvasState for FormState {
None
}
}
fn get_display_value_for_field(&self, index: usize) -> &str {
if let Some(display_text) = self.link_display_map.get(&index) {
return display_text.as_str();
}
self.inputs()
.get(index)
.map(|s| s.as_str())
.unwrap_or("")
}
// --- IMPLEMENT THE NEW TRAIT METHOD ---
fn has_display_override(&self, index: usize) -> bool {
self.link_display_map.contains_key(&index)
}
}
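
To make the hit-to-label mapping concrete, a hypothetical example (the content_json field names are made up, and a numeric id field on Hit is assumed):

let hit = Hit {
    id: 7,
    content_json: r#"{"id":7,"name":"Acme","created_at":"2025-06-16"}"#.to_string(),
    ..Default::default()
};
// "id" and "created_at" are filtered out, "name" sorts first among the rest,
// so the dropdown label is the value plus the real ID:
assert_eq!(form_state.get_display_name_for_hit(&hit), "Acme | ID: 7");
// Selecting that suggestion stores "7" in values[current_field] and puts the
// friendly label into link_display_map, which get_display_value_for_field
// then prefers when rendering.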

View File

@@ -1,19 +1,22 @@
// src/tui/functions/common/form.rs
use crate::services::grpc_client::GrpcClient;
use crate::state::app::state::AppState; // NEW: Import AppState
use crate::state::pages::form::FormState;
use anyhow::{Context, Result}; // Added Context
use std::collections::HashMap; // NEW
use crate::utils::data_converter; // NEW: Import our translator
use anyhow::{anyhow, Context, Result};
use std::collections::HashMap;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SaveOutcome {
NoChange,
UpdatedExisting,
CreatedNew(i64), // Keep the ID
CreatedNew(i64),
}
// MODIFIED save function
// MODIFIED save function signature and logic
pub async fn save(
app_state: &AppState, // NEW: Pass in AppState
form_state: &mut FormState,
grpc_client: &mut GrpcClient,
) -> Result<SaveOutcome> {
@@ -21,42 +24,64 @@ pub async fn save(
return Ok(SaveOutcome::NoChange);
}
let data_map: HashMap<String, String> = form_state.fields.iter()
// --- NEW: VALIDATION & CONVERSION STEP ---
let cache_key =
format!("{}.{}", form_state.profile_name, form_state.table_name);
let schema = match app_state.schema_cache.get(&cache_key) {
Some(s) => s,
None => {
return Err(anyhow!(
"Schema for table '{}' not found in cache. Cannot save.",
form_state.table_name
));
}
};
let data_map: HashMap<String, String> = form_state
.fields
.iter()
.zip(form_state.values.iter())
.map(|(field_def, value)| (field_def.data_key.clone(), value.clone()))
.collect();
// Use our new translator. It returns a user-friendly error on failure.
let converted_data =
match data_converter::convert_and_validate_data(&data_map, schema) {
Ok(data) => data,
Err(user_error) => return Err(anyhow!(user_error)),
};
// --- END OF NEW STEP ---
let outcome: SaveOutcome;
let is_new_entry = form_state.id == 0 || (form_state.total_count > 0 && form_state.current_position > form_state.total_count) || (form_state.total_count == 0 && form_state.current_position == 1) ;
let is_new_entry = form_state.id == 0
|| (form_state.total_count > 0
&& form_state.current_position > form_state.total_count)
|| (form_state.total_count == 0 && form_state.current_position == 1);
if is_new_entry {
let response = grpc_client
.post_table_data(
form_state.profile_name.clone(),
form_state.table_name.clone(),
data_map,
converted_data, // Use the validated & converted data
)
.await
.context("Failed to post new table data")?;
if response.success {
form_state.id = response.inserted_id;
// After creating a new entry, total_count increases, and current_position becomes this new total_count
form_state.total_count += 1;
form_state.current_position = form_state.total_count;
outcome = SaveOutcome::CreatedNew(response.inserted_id);
} else {
return Err(anyhow::anyhow!(
return Err(anyhow!(
"Server failed to insert data: {}",
response.message
));
}
} else {
// This assumes form_state.id is valid for an existing record
if form_state.id == 0 {
return Err(anyhow::anyhow!(
return Err(anyhow!(
"Cannot update record: ID is 0, but not classified as new entry."
));
}
@@ -65,7 +90,7 @@ pub async fn save(
form_state.profile_name.clone(),
form_state.table_name.clone(),
form_state.id,
data_map,
converted_data, // Use the validated & converted data
)
.await
.context("Failed to put (update) table data")?;
@@ -73,7 +98,7 @@ pub async fn save(
if response.success {
outcome = SaveOutcome::UpdatedExisting;
} else {
return Err(anyhow::anyhow!(
return Err(anyhow!(
"Server failed to update data: {}",
response.message
));
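
A hypothetical caller, matching on the outcome variants defined at the top of this file:

async fn save_and_report(
    app_state: &AppState,
    form_state: &mut FormState,
    grpc_client: &mut GrpcClient,
) -> anyhow::Result<String> {
    Ok(match save(app_state, form_state, grpc_client).await? {
        SaveOutcome::NoChange => "nothing to save".to_string(),
        SaveOutcome::UpdatedExisting => format!("updated record {}", form_state.id),
        SaveOutcome::CreatedNew(id) => format!("created record {}", id),
    })
}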

View File

@@ -350,123 +350,91 @@ pub async fn run_ui() -> Result<()> {
let current_view_profile = app_state.current_view_profile_name.clone();
let current_view_table = app_state.current_view_table_name.clone();
// This condition correctly detects a table switch.
if prev_view_profile_name != current_view_profile
|| prev_view_table_name != current_view_table
{
if let (Some(prof_name), Some(tbl_name)) =
(current_view_profile.as_ref(), current_view_table.as_ref())
{
+// --- START OF REFACTORED LOGIC ---
+app_state.show_loading_dialog(
+"Loading Table",
+&format!("Fetching data for {}.{}...", prof_name, tbl_name),
+);
+needs_redraw = true;
-match grpc_client
-.get_table_structure(prof_name.clone(), tbl_name.clone())
-.await
+// 1. Call our new, central function. It handles fetching AND caching.
+match UiService::load_table_view(
+&mut grpc_client,
+&mut app_state,
+prof_name,
+tbl_name,
+)
+.await
{
-Ok(structure_response) => {
-// --- START OF MODIFIED LOGIC ---
-let all_columns: Vec<String> = structure_response
-.columns
-.iter()
-.map(|c| c.name.clone())
-.collect();
-let mut field_definitions: Vec<FieldDefinition> =
-filter_user_columns(all_columns)
-.into_iter()
-.filter(|col_name| !col_name.ends_with("_id"))
-.map(|col_name| FieldDefinition {
-display_name: col_name.clone(),
-data_key: col_name,
-is_link: false,
-link_target_table: None, // Regular fields have no target
-})
-.collect();
-let linked_tables: Vec<String> = app_state
-.profile_tree
-.profiles
-.iter()
-.find(|p| p.name == *prof_name)
-.and_then(|profile| {
-profile.tables.iter().find(|t| t.name == *tbl_name)
-})
-.map_or(vec![], |table| table.depends_on.clone());
-for linked_table_name in linked_tables {
-let base_name = linked_table_name
-.split_once('_')
-.map_or(linked_table_name.as_str(), |(_, rest)| rest);
-let data_key = format!("{}_id", base_name);
-let display_name = linked_table_name.clone(); // Clone for use below
-field_definitions.push(FieldDefinition {
-display_name,
-data_key,
-is_link: true,
-// --- POPULATE THE NEW FIELD ---
-link_target_table: Some(linked_table_name),
-});
-}
-// --- END OF MODIFIED LOGIC ---
-form_state = FormState::new(
-prof_name.clone(),
-tbl_name.clone(),
-field_definitions, // This now contains the complete definitions
-);
+Ok(mut new_form_state) => {
+// 2. The function succeeded, we have a new FormState.
+// Now, fetch its data.
if let Err(e) = UiService::fetch_and_set_table_count(
&mut grpc_client,
-&mut form_state,
+&mut new_form_state,
)
.await
{
// Handle count fetching error
app_state.update_dialog_content(
&format!("Error fetching count: {}", e),
vec!["OK".to_string()],
-DialogPurpose::LoginFailed,
+DialogPurpose::LoginFailed, // Or a more appropriate purpose
);
-} else if form_state.total_count > 0 {
+} else if new_form_state.total_count > 0 {
// If there are records, load the first/last one
if let Err(e) = UiService::load_table_data_by_position(
&mut grpc_client,
-&mut form_state,
+&mut new_form_state,
)
.await
{
// Handle data loading error
app_state.update_dialog_content(
&format!("Error loading data: {}", e),
vec!["OK".to_string()],
-DialogPurpose::LoginFailed,
+DialogPurpose::LoginFailed, // Or a more appropriate purpose
);
} else {
// Success! Hide the loading dialog.
app_state.hide_dialog();
}
} else {
-form_state.reset_to_empty();
+// No records, so just reset to an empty form.
+new_form_state.reset_to_empty();
app_state.hide_dialog();
}
+// 3. CRITICAL: Replace the old form_state with the new one.
+form_state = new_form_state;
+// 4. Update our tracking variables.
prev_view_profile_name = current_view_profile;
prev_view_table_name = current_view_table;
table_just_switched = true;
}
Err(e) => {
// This handles errors from load_table_view (e.g., schema fetch failed)
app_state.update_dialog_content(
-&format!("Error fetching table structure: {}", e),
+&format!("Error loading table: {}", e),
vec!["OK".to_string()],
-DialogPurpose::LoginFailed,
+DialogPurpose::LoginFailed, // Or a more appropriate purpose
);
// Revert the view change in app_state to avoid a loop
app_state.current_view_profile_name =
prev_view_profile_name.clone();
app_state.current_view_table_name =
prev_view_table_name.clone();
}
}
+// --- END OF REFACTORED LOGIC ---
}
needs_redraw = true;
}


@@ -0,0 +1,50 @@
// src/utils/data_converter.rs
use common::proto::multieko2::table_structure::TableStructureResponse;
use prost_types::{value::Kind, NullValue, Value};
use std::collections::HashMap;
pub fn convert_and_validate_data(
data: &HashMap<String, String>,
schema: &TableStructureResponse,
) -> Result<HashMap<String, Value>, String> {
let type_map: HashMap<_, _> = schema
.columns
.iter()
.map(|col| (col.name.as_str(), col.data_type.as_str()))
.collect();
data.iter()
.map(|(key, str_value)| {
let expected_type = type_map.get(key.as_str()).unwrap_or(&"TEXT");
let kind = if str_value.is_empty() {
// Empty input is sent as an explicit protobuf NULL
Kind::NullValue(NullValue::NullValue.into())
} else {
// Attempt to parse the string based on the expected type
match *expected_type {
"BOOL" => match str_value.to_lowercase().parse::<bool>() {
Ok(v) => Kind::BoolValue(v),
Err(_) => return Err(format!("Invalid boolean for '{}': must be 'true' or 'false'", key)),
},
"INT8" | "INT4" | "INT2" | "SERIAL" | "BIGSERIAL" => {
match str_value.parse::<f64>() {
Ok(v) => Kind::NumberValue(v),
Err(_) => return Err(format!("Invalid number for '{}': must be a whole number", key)),
}
}
"NUMERIC" | "FLOAT4" | "FLOAT8" => match str_value.parse::<f64>() {
Ok(v) => Kind::NumberValue(v),
Err(_) => return Err(format!("Invalid decimal for '{}': must be a number", key)),
},
"TIMESTAMPTZ" | "DATE" | "TIME" | "TEXT" | "VARCHAR" | "UUID" => {
Kind::StringValue(str_value.clone())
}
_ => Kind::StringValue(str_value.clone()),
}
};
Ok((key.clone(), Value { kind: Some(kind) }))
})
.collect()
}
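A minimal usage sketch for this converter, as it might be called from the save path (the schema would come from a get_table_structure call; the field names are illustrative):

use std::collections::HashMap;

// Hypothetical call site: turn raw form strings into typed protobuf Values.
fn build_payload(
    schema: &TableStructureResponse,
) -> Result<HashMap<String, prost_types::Value>, String> {
    let mut raw: HashMap<String, String> = HashMap::new();
    raw.insert("firma".to_string(), "Test Company".to_string());
    raw.insert("amount".to_string(), "123.45".to_string());
    raw.insert("telefon".to_string(), String::new()); // empty -> NULL

    // On success, each value is typed according to its column's data_type.
    convert_and_validate_data(&raw, schema)
}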


@@ -2,5 +2,8 @@
pub mod columns;
pub mod debug_logger;
+pub mod data_converter;
pub use columns::*;
pub use debug_logger::*;
+pub use data_converter::*;


@@ -0,0 +1,262 @@
// client/tests/form_tests.rs
use rstest::{fixture, rstest};
use std::collections::HashMap;
use client::state::pages::form::{FormState, FieldDefinition};
use client::state::pages::canvas_state::CanvasState;
#[fixture]
fn test_form_state() -> FormState {
let fields = vec![
FieldDefinition {
display_name: "Company".to_string(),
data_key: "firma".to_string(),
is_link: false,
link_target_table: None,
},
FieldDefinition {
display_name: "Phone".to_string(),
data_key: "telefon".to_string(),
is_link: false,
link_target_table: None,
},
FieldDefinition {
display_name: "Email".to_string(),
data_key: "email".to_string(),
is_link: false,
link_target_table: None,
},
];
FormState::new("test_profile".to_string(), "test_table".to_string(), fields)
}
#[fixture]
fn test_form_data() -> HashMap<String, String> {
let mut data = HashMap::new();
data.insert("firma".to_string(), "Test Company".to_string());
data.insert("telefon".to_string(), "+421123456789".to_string());
data.insert("email".to_string(), "test@example.com".to_string());
data
}
#[rstest]
fn test_form_state_creation(test_form_state: FormState) {
assert_eq!(test_form_state.profile_name, "test_profile");
assert_eq!(test_form_state.table_name, "test_table");
assert_eq!(test_form_state.fields.len(), 3);
assert_eq!(test_form_state.current_field(), 0);
assert!(!test_form_state.has_unsaved_changes());
}
#[rstest]
fn test_form_field_navigation(mut test_form_state: FormState) {
// Test initial field
assert_eq!(test_form_state.current_field(), 0);
// Test navigation to next field
test_form_state.set_current_field(1);
assert_eq!(test_form_state.current_field(), 1);
// Test navigation to last field
test_form_state.set_current_field(2);
assert_eq!(test_form_state.current_field(), 2);
// Test invalid field (should not crash)
test_form_state.set_current_field(999);
assert_eq!(test_form_state.current_field(), 2); // Should stay at valid field
}
#[rstest]
fn test_form_data_entry(mut test_form_state: FormState) {
// Test entering data in first field
*test_form_state.get_current_input_mut() = "Test Company".to_string();
test_form_state.set_has_unsaved_changes(true);
assert_eq!(test_form_state.get_current_input(), "Test Company");
assert!(test_form_state.has_unsaved_changes());
}
#[rstest]
fn test_form_field_switching_with_data(mut test_form_state: FormState) {
// Enter data in first field
*test_form_state.get_current_input_mut() = "Company Name".to_string();
// Switch to second field
test_form_state.set_current_field(1);
*test_form_state.get_current_input_mut() = "+421123456789".to_string();
// Switch back to first field
test_form_state.set_current_field(0);
assert_eq!(test_form_state.get_current_input(), "Company Name");
// Switch to second field again
test_form_state.set_current_field(1);
assert_eq!(test_form_state.get_current_input(), "+421123456789");
}
#[rstest]
fn test_form_reset_functionality(mut test_form_state: FormState) {
// Add some data
test_form_state.set_current_field(0);
*test_form_state.get_current_input_mut() = "Test Company".to_string();
test_form_state.set_current_field(1);
*test_form_state.get_current_input_mut() = "+421123456789".to_string();
test_form_state.set_has_unsaved_changes(true);
test_form_state.id = 123;
test_form_state.current_position = 5;
// Reset the form
test_form_state.reset_to_empty();
// Verify reset
assert_eq!(test_form_state.id, 0);
assert!(!test_form_state.has_unsaved_changes());
assert_eq!(test_form_state.current_field(), 0);
// Check all fields are empty
for i in 0..test_form_state.fields.len() {
test_form_state.set_current_field(i);
assert!(test_form_state.get_current_input().is_empty());
}
}
#[rstest]
fn test_form_update_from_response(mut test_form_state: FormState, test_form_data: HashMap<String, String>) {
let position = 3;
// Update form with response data
test_form_state.update_from_response(&test_form_data, position);
// Verify data was loaded
assert_eq!(test_form_state.current_position, position);
assert!(!test_form_state.has_unsaved_changes());
assert_eq!(test_form_state.current_field(), 0);
// Check field values
test_form_state.set_current_field(0);
assert_eq!(test_form_state.get_current_input(), "Test Company");
test_form_state.set_current_field(1);
assert_eq!(test_form_state.get_current_input(), "+421123456789");
test_form_state.set_current_field(2);
assert_eq!(test_form_state.get_current_input(), "test@example.com");
}
#[rstest]
fn test_form_cursor_position(mut test_form_state: FormState) {
// Test initial cursor position
assert_eq!(test_form_state.current_cursor_pos(), 0);
// Add some text
*test_form_state.get_current_input_mut() = "Test Company".to_string();
// Test cursor positioning
test_form_state.set_current_cursor_pos(5);
assert_eq!(test_form_state.current_cursor_pos(), 5);
// Test cursor bounds
test_form_state.set_current_cursor_pos(999);
// Should be clamped to text length
assert!(test_form_state.current_cursor_pos() <= "Test Company".len());
}
#[rstest]
fn test_form_field_display_names(test_form_state: FormState) {
let field_names = test_form_state.fields();
assert_eq!(field_names.len(), 3);
assert_eq!(field_names[0], "Company");
assert_eq!(field_names[1], "Phone");
assert_eq!(field_names[2], "Email");
}
#[rstest]
fn test_form_inputs_vector(mut test_form_state: FormState) {
// Add data to fields
test_form_state.set_current_field(0);
*test_form_state.get_current_input_mut() = "Company A".to_string();
test_form_state.set_current_field(1);
*test_form_state.get_current_input_mut() = "123456789".to_string();
test_form_state.set_current_field(2);
*test_form_state.get_current_input_mut() = "test@test.com".to_string();
// Get inputs vector
let inputs = test_form_state.inputs();
assert_eq!(inputs.len(), 3);
assert_eq!(inputs[0], "Company A");
assert_eq!(inputs[1], "123456789");
assert_eq!(inputs[2], "test@test.com");
}
#[rstest]
fn test_form_position_management(mut test_form_state: FormState) {
// Test initial position
assert_eq!(test_form_state.current_position, 1);
assert_eq!(test_form_state.total_count, 0);
// Set some values
test_form_state.total_count = 10;
test_form_state.current_position = 5;
assert_eq!(test_form_state.current_position, 5);
assert_eq!(test_form_state.total_count, 10);
// Test reset affects position
test_form_state.reset_to_empty();
assert_eq!(test_form_state.current_position, 11); // total_count + 1
}
#[rstest]
fn test_form_autocomplete_state(mut test_form_state: FormState) {
// Test initial autocomplete state
assert!(!test_form_state.autocomplete_active);
assert!(test_form_state.autocomplete_suggestions.is_empty());
assert!(test_form_state.selected_suggestion_index.is_none());
// Test deactivating autocomplete
test_form_state.autocomplete_active = true;
test_form_state.deactivate_autocomplete();
assert!(!test_form_state.autocomplete_active);
assert!(test_form_state.autocomplete_suggestions.is_empty());
assert!(test_form_state.selected_suggestion_index.is_none());
assert!(!test_form_state.autocomplete_loading);
}
#[rstest]
fn test_form_empty_data_handling(mut test_form_state: FormState) {
let empty_data = HashMap::new();
// Update with empty data
test_form_state.update_from_response(&empty_data, 1);
// All fields should be empty
for i in 0..test_form_state.fields.len() {
test_form_state.set_current_field(i);
assert!(test_form_state.get_current_input().is_empty());
}
}
#[rstest]
fn test_form_partial_data_handling(mut test_form_state: FormState) {
let mut partial_data = HashMap::new();
partial_data.insert("firma".to_string(), "Partial Company".to_string());
// Intentionally missing telefon and email
test_form_state.update_from_response(&partial_data, 1);
// First field should have data
test_form_state.set_current_field(0);
assert_eq!(test_form_state.get_current_input(), "Partial Company");
// Other fields should be empty
test_form_state.set_current_field(1);
assert!(test_form_state.get_current_input().is_empty());
test_form_state.set_current_field(2);
assert!(test_form_state.get_current_input().is_empty());
}
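These are pure state-machine tests, so no running backend is needed; assuming the client/tests/mod.rs layout shown further below, something like this runs them:

cargo test -p client --test mod form_tests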


@@ -0,0 +1 @@
pub mod form_tests;

client/tests/form/mod.rs

@@ -0,0 +1,2 @@
pub mod gui;
pub mod requests;

File diff suppressed because it is too large.


@@ -0,0 +1,267 @@
// ========================================================================
// ROBUST WORKFLOW AND INTEGRATION TESTS
// ========================================================================
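The create_*_value helpers, skip_if_backend_unavailable!, and FormTestContext used throughout come from the shared fixtures (whose diff is suppressed above); a plausible sketch of the value helpers, assuming they wrap prost_types::Value:

use prost_types::{value::Kind, NullValue, Value};

// Assumed shape of the shared fixture helpers; the real definitions live
// in the suppressed fixtures file.
fn create_string_value(s: &str) -> Value {
    Value { kind: Some(Kind::StringValue(s.to_string())) }
}

fn create_number_value(n: f64) -> Value {
    Value { kind: Some(Kind::NumberValue(n)) }
}

fn create_bool_value(b: bool) -> Value {
    Value { kind: Some(Kind::BoolValue(b)) }
}

fn create_null_value() -> Value {
    Value { kind: Some(Kind::NullValue(NullValue::NullValue.into())) }
}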
#[rstest]
#[tokio::test]
async fn test_partial_update_preserves_other_fields(
#[future] populated_test_context: FormTestContext,
) {
let mut context = populated_test_context.await;
skip_if_backend_unavailable!();
// 1. Create a record with multiple fields
let mut initial_data = context.create_test_form_data();
let original_email = "preserve.this@email.com";
initial_data.insert(
"email".to_string(),
create_string_value(original_email),
);
let post_res = context
.client
.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
initial_data,
)
.await
.expect("Setup: Failed to create record for partial update test");
let created_id = post_res.inserted_id;
println!("Partial Update Test: Created record ID {}", created_id);
// 2. Update only ONE field
let mut partial_update = HashMap::new();
let updated_firma = "Partially Updated Inc.";
partial_update.insert(
"firma".to_string(),
create_string_value(updated_firma),
);
context
.client
.put_table_data(
context.profile_name.clone(),
context.table_name.clone(),
created_id,
partial_update,
)
.await
.expect("Partial update failed");
println!("Partial Update Test: Updated only 'firma' field");
// 3. Get the record back and verify ALL fields
let get_res = context
.client
.get_table_data(
context.profile_name.clone(),
context.table_name.clone(),
created_id,
)
.await
.expect("Failed to get record after partial update");
let final_data = get_res.data;
assert_eq!(
final_data.get("firma").unwrap(),
updated_firma,
"The 'firma' field should be updated"
);
assert_eq!(
final_data.get("email").unwrap(),
original_email,
"The 'email' field should have been preserved"
);
println!("Partial Update Test: Verified other fields were preserved. OK.");
}
#[rstest]
#[tokio::test]
async fn test_data_edge_cases_and_unicode(
#[future] form_test_context: FormTestContext,
) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
let edge_case_strings = vec![
("Unicode", "José María González, Москва, 北京市"),
("Emoji", "🚀 Tech Company 🌟"),
("Quotes", "Quote\"Test'Apostrophe"),
("Symbols", "Price: $1,000.50 (50% off!)"),
("Empty", ""),
("Whitespace", " "),
];
for (case_name, test_string) in edge_case_strings {
let mut data = HashMap::new();
data.insert("firma".to_string(), create_string_value(test_string));
data.insert(
"kz".to_string(),
create_string_value(&format!("EDGE-{}", case_name)),
);
let post_res = context
.client
.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
data,
)
.await
.expect(&format!("POST should succeed for case: {}", case_name));
let created_id = post_res.inserted_id;
let get_res = context
.client
.get_table_data(
context.profile_name.clone(),
context.table_name.clone(),
created_id,
)
.await
.expect(&format!(
"GET should succeed for case: {}",
case_name
));
assert_eq!(
get_res.data.get("firma").unwrap(),
test_string,
"Data should be identical after round-trip for case: {}",
case_name
);
println!("Edge Case Test: '{}' passed.", case_name);
}
}
#[rstest]
#[tokio::test]
async fn test_numeric_and_null_edge_cases(
#[future] form_test_context: FormTestContext,
) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
// 1. Test NULL value
let mut null_data = HashMap::new();
null_data.insert(
"firma".to_string(),
create_string_value("Company With Null Phone"),
);
null_data.insert("telefon".to_string(), create_null_value());
let post_res_null = context
.client
.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
null_data,
)
.await
.expect("POST with NULL value should succeed");
let get_res_null = context
.client
.get_table_data(
context.profile_name.clone(),
context.table_name.clone(),
post_res_null.inserted_id,
)
.await
.unwrap();
// Depending on DB, NULL may come back as empty string or be absent.
// The important part is that the operation doesn't fail.
assert!(
get_res_null.data.get("telefon").unwrap_or(&"".to_string()).is_empty(),
"NULL value should result in an empty or absent field"
);
println!("Edge Case Test: NULL value handled correctly. OK.");
// 2. Test Zero value for a numeric field (assuming 'age' is numeric)
let mut zero_data = HashMap::new();
zero_data.insert(
"firma".to_string(),
create_string_value("Newborn Company"),
);
// Assuming 'age' is a field in your actual table definition
// zero_data.insert("age".to_string(), create_number_value(0.0));
// let post_res_zero = context.client.post_table_data(...).await.expect("POST with zero should succeed");
// ... then get and verify it's "0"
println!("Edge Case Test: Zero value test skipped (uncomment if 'age' field exists).");
}
#[rstest]
#[tokio::test]
async fn test_concurrent_updates_on_same_record(
#[future] populated_test_context: FormTestContext,
) {
let mut context = populated_test_context.await;
skip_if_backend_unavailable!();
// 1. Create a single record to be updated by all tasks
let initial_data = context.create_minimal_form_data();
let post_res = context
.client
.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
initial_data,
)
.await
.expect("Setup: Failed to create record for concurrency test");
let record_id = post_res.inserted_id;
println!("Concurrency Test: Target record ID is {}", record_id);
// 2. Spawn multiple concurrent UPDATE operations
let mut handles = Vec::new();
let num_concurrent_tasks = 5;
let mut final_values = Vec::new();
for i in 0..num_concurrent_tasks {
let mut client_clone = context.client.clone();
let profile_name = context.profile_name.clone();
let table_name = context.table_name.clone();
let final_value = format!("Concurrent Update {}", i);
final_values.push(final_value.clone());
let handle = tokio::spawn(async move {
let mut update_data = HashMap::new();
update_data.insert(
"firma".to_string(),
create_string_value(&final_value),
);
client_clone
.put_table_data(profile_name, table_name, record_id, update_data)
.await
});
handles.push(handle);
}
// 3. Wait for all tasks to complete and check for panics
let results = futures::future::join_all(handles).await;
assert!(
results.iter().all(|r| r.is_ok()),
"No concurrent task should panic"
);
println!("Concurrency Test: All update tasks completed without panicking.");
// 4. Get the final state of the record
let final_get_res = context
.client
.get_table_data(
context.profile_name.clone(),
context.table_name.clone(),
record_id,
)
.await
.expect("Should be able to get the record after concurrent updates");
let final_firma = final_get_res.data.get("firma").unwrap();
assert!(
final_values.contains(final_firma),
"The final state '{}' must be one of the states set by the tasks",
final_firma
);
println!(
"Concurrency Test: Final state is '{}', which is a valid outcome. OK.",
final_firma
);
}


@@ -0,0 +1,727 @@
// form_request_tests3.rs - Comprehensive and Robust Testing
// ========================================================================
// STEEL SCRIPT VALIDATION TESTS (HIGHEST PRIORITY)
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_steel_script_validation_success(#[future] form_test_context: FormTestContext) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
// Test with data that should pass script validation
// Assuming there's a script that validates 'kz' field to start with "KZ" and be 5 chars
let mut valid_data = HashMap::new();
valid_data.insert("firma".to_string(), create_string_value("Script Test Company"));
valid_data.insert("kz".to_string(), create_string_value("KZ123"));
valid_data.insert("telefon".to_string(), create_string_value("+421123456789"));
let result = context.client.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
valid_data,
).await;
match result {
Ok(response) => {
assert!(response.success, "Valid data should pass script validation");
println!("Script Validation Test: Valid data passed - ID {}", response.inserted_id);
}
Err(e) => {
if let Some(status) = e.downcast_ref::<Status>() {
if status.code() == tonic::Code::Unavailable {
println!("Script validation test skipped - backend not available");
return;
}
// If there are no scripts configured, this might still work
println!("Script validation test: {}", status.message());
}
}
}
}
#[rstest]
#[tokio::test]
async fn test_steel_script_validation_failure(#[future] form_test_context: FormTestContext) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
// Test with data that should fail script validation
let invalid_script_data = vec![
("TooShort", "KZ12"), // Too short
("TooLong", "KZ12345"), // Too long
("WrongPrefix", "AB123"), // Wrong prefix
("NoPrefix", "12345"), // No prefix
("Empty", ""), // Empty
];
for (test_case, invalid_kz) in invalid_script_data {
let mut invalid_data = HashMap::new();
invalid_data.insert("firma".to_string(), create_string_value("Script Fail Company"));
invalid_data.insert("kz".to_string(), create_string_value(invalid_kz));
let result = context.client.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
invalid_data,
).await;
match result {
Ok(_) => {
println!("Script Validation Test: {} passed (no validation script configured)", test_case);
}
Err(e) => {
if let Some(status) = e.downcast_ref::<Status>() {
assert_eq!(status.code(), tonic::Code::InvalidArgument,
"Script validation failure should return InvalidArgument for case: {}", test_case);
println!("Script Validation Test: {} correctly failed - {}", test_case, status.message());
}
}
}
}
}
#[rstest]
#[tokio::test]
async fn test_steel_script_validation_on_update(#[future] form_test_context: FormTestContext) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
// 1. Create a valid record first
let mut initial_data = HashMap::new();
initial_data.insert("firma".to_string(), create_string_value("Update Script Test"));
initial_data.insert("kz".to_string(), create_string_value("KZ123"));
let post_result = context.client.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
initial_data,
).await;
if let Ok(post_response) = post_result {
let record_id = post_response.inserted_id;
// 2. Try to update with invalid data
let mut invalid_update = HashMap::new();
invalid_update.insert("kz".to_string(), create_string_value("INVALID"));
let update_result = context.client.put_table_data(
context.profile_name.clone(),
context.table_name.clone(),
record_id,
invalid_update,
).await;
match update_result {
Ok(_) => {
println!("Script Validation on Update: No validation script configured for updates");
}
Err(e) => {
if let Some(status) = e.downcast_ref::<Status>() {
assert_eq!(status.code(), tonic::Code::InvalidArgument,
"Update with invalid data should fail script validation");
println!("Script Validation on Update: Correctly rejected invalid update");
}
}
}
}
}
// ========================================================================
// COMPREHENSIVE DATA TYPE TESTS
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_boolean_data_type(#[future] form_test_context: FormTestContext) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
// Test valid boolean values
let boolean_test_cases = vec![
("true", true),
("false", false),
];
for (case_name, bool_value) in boolean_test_cases {
let mut data = HashMap::new();
data.insert("firma".to_string(), create_string_value("Boolean Test Company"));
// Assuming there's a boolean field called 'active'
data.insert("active".to_string(), create_bool_value(bool_value));
let result = context.client.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
data,
).await;
match result {
Ok(response) => {
println!("Boolean Test: {} value succeeded", case_name);
// Verify the value round-trip
if let Ok(get_response) = context.client.get_table_data(
context.profile_name.clone(),
context.table_name.clone(),
response.inserted_id,
).await {
if let Some(retrieved_value) = get_response.data.get("active") {
println!("Boolean Test: {} round-trip value: {}", case_name, retrieved_value);
}
}
}
Err(e) => {
println!("Boolean Test: {} failed (field may not exist): {}", case_name, e);
}
}
}
}
#[rstest]
#[tokio::test]
async fn test_numeric_data_types(#[future] form_test_context: FormTestContext) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
// Test various numeric values
let numeric_test_cases = vec![
("Zero", 0.0),
("Positive", 123.45),
("Negative", -67.89),
("Large", 999999.99),
("SmallDecimal", 0.01),
];
for (case_name, numeric_value) in numeric_test_cases {
let mut data = HashMap::new();
data.insert("firma".to_string(), create_string_value("Numeric Test Company"));
// Assuming there's a numeric field called 'price' or 'amount'
data.insert("amount".to_string(), create_number_value(numeric_value));
let result = context.client.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
data,
).await;
match result {
Ok(response) => {
println!("Numeric Test: {} ({}) succeeded", case_name, numeric_value);
// Verify round-trip
if let Ok(get_response) = context.client.get_table_data(
context.profile_name.clone(),
context.table_name.clone(),
response.inserted_id,
).await {
if let Some(retrieved_value) = get_response.data.get("amount") {
println!("Numeric Test: {} round-trip value: {}", case_name, retrieved_value);
}
}
}
Err(e) => {
println!("Numeric Test: {} failed (field may not exist): {}", case_name, e);
}
}
}
}
#[rstest]
#[tokio::test]
async fn test_timestamp_data_type(#[future] form_test_context: FormTestContext) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
// Test various timestamp formats
let timestamp_test_cases = vec![
("ISO8601", "2024-01-15T10:30:00Z"),
("WithTimezone", "2024-01-15T10:30:00+01:00"),
("WithMilliseconds", "2024-01-15T10:30:00.123Z"),
];
for (case_name, timestamp_str) in timestamp_test_cases {
let mut data = HashMap::new();
data.insert("firma".to_string(), create_string_value("Timestamp Test Company"));
// Assuming there's a timestamp field called 'created_at'
data.insert("created_at".to_string(), create_string_value(timestamp_str));
let result = context.client.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
data,
).await;
match result {
Ok(response) => {
println!("Timestamp Test: {} succeeded", case_name);
// Verify round-trip
if let Ok(get_response) = context.client.get_table_data(
context.profile_name.clone(),
context.table_name.clone(),
response.inserted_id,
).await {
if let Some(retrieved_value) = get_response.data.get("created_at") {
println!("Timestamp Test: {} round-trip value: {}", case_name, retrieved_value);
}
}
}
Err(e) => {
println!("Timestamp Test: {} failed (field may not exist): {}", case_name, e);
}
}
}
}
#[rstest]
#[tokio::test]
async fn test_invalid_data_types(#[future] form_test_context: FormTestContext) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
// Test invalid data type combinations
let invalid_type_cases = vec![
("StringForNumber", "amount", create_string_value("not-a-number")),
("NumberForBoolean", "active", create_number_value(123.0)),
("StringForBoolean", "active", create_string_value("maybe")),
("InvalidTimestamp", "created_at", create_string_value("not-a-date")),
];
for (case_name, field_name, invalid_value) in invalid_type_cases {
let mut data = HashMap::new();
data.insert("firma".to_string(), create_string_value("Invalid Type Test"));
data.insert(field_name.to_string(), invalid_value);
let result = context.client.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
data,
).await;
match result {
Ok(_) => {
println!("Invalid Type Test: {} passed (no type validation or field doesn't exist)", case_name);
}
Err(e) => {
if let Some(status) = e.downcast_ref::<Status>() {
assert_eq!(status.code(), tonic::Code::InvalidArgument,
"Invalid data type should return InvalidArgument for case: {}", case_name);
println!("Invalid Type Test: {} correctly rejected - {}", case_name, status.message());
}
}
}
}
}
// ========================================================================
// FOREIGN KEY RELATIONSHIP TESTS
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_foreign_key_valid_relationship(#[future] form_test_context: FormTestContext) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
// 1. Create a parent record first (e.g., company)
let mut parent_data = HashMap::new();
parent_data.insert("firma".to_string(), create_string_value("Parent Company"));
let parent_result = context.client.post_table_data(
context.profile_name.clone(),
"companies".to_string(), // Assuming companies table exists
parent_data,
).await;
if let Ok(parent_response) = parent_result {
let parent_id = parent_response.inserted_id;
// 2. Create a child record that references the parent
let mut child_data = HashMap::new();
child_data.insert("name".to_string(), create_string_value("Child Record"));
child_data.insert("company_id".to_string(), create_number_value(parent_id as f64));
let child_result = context.client.post_table_data(
context.profile_name.clone(),
"contacts".to_string(), // Assuming contacts table exists
child_data,
).await;
match child_result {
Ok(child_response) => {
assert!(child_response.success, "Valid foreign key relationship should succeed");
println!("Foreign Key Test: Valid relationship created - Parent ID: {}, Child ID: {}",
parent_id, child_response.inserted_id);
}
Err(e) => {
println!("Foreign Key Test: Failed (tables may not exist or no FK constraint): {}", e);
}
}
} else {
println!("Foreign Key Test: Could not create parent record");
}
}
#[rstest]
#[tokio::test]
async fn test_foreign_key_invalid_relationship(#[future] form_test_context: FormTestContext) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
// Try to create a child record with non-existent parent ID
let mut invalid_child_data = HashMap::new();
invalid_child_data.insert("name".to_string(), create_string_value("Orphan Record"));
invalid_child_data.insert("company_id".to_string(), create_number_value(99999.0)); // Non-existent ID
let result = context.client.post_table_data(
context.profile_name.clone(),
"contacts".to_string(),
invalid_child_data,
).await;
match result {
Ok(_) => {
println!("Foreign Key Test: Invalid relationship passed (no FK constraint configured)");
}
Err(e) => {
if let Some(status) = e.downcast_ref::<Status>() {
// Could be InvalidArgument or NotFound depending on implementation
assert!(matches!(status.code(), tonic::Code::InvalidArgument | tonic::Code::NotFound),
"Invalid foreign key should return InvalidArgument or NotFound");
println!("Foreign Key Test: Invalid relationship correctly rejected - {}", status.message());
}
}
}
}
// ========================================================================
// DELETED RECORD INTERACTION TESTS
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_update_deleted_record_behavior(#[future] form_test_context: FormTestContext) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
// 1. Create a record
let initial_data = context.create_test_form_data();
let post_result = context.client.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
initial_data,
).await;
if let Ok(post_response) = post_result {
let record_id = post_response.inserted_id;
println!("Deleted Record Test: Created record ID {}", record_id);
// 2. Delete the record (soft delete)
let delete_result = context.client.delete_table_data(
context.profile_name.clone(),
context.table_name.clone(),
record_id,
).await;
assert!(delete_result.is_ok(), "Delete should succeed");
println!("Deleted Record Test: Soft-deleted record {}", record_id);
// 3. Try to UPDATE the deleted record
let mut update_data = HashMap::new();
update_data.insert("firma".to_string(), create_string_value("Updated Deleted Record"));
let update_result = context.client.put_table_data(
context.profile_name.clone(),
context.table_name.clone(),
record_id,
update_data,
).await;
match update_result {
Ok(_) => {
// This might be a bug - updating deleted records should probably fail
println!("Deleted Record Test: UPDATE on deleted record succeeded (potential bug?)");
// Check if the record is still considered deleted
let get_result = context.client.get_table_data(
context.profile_name.clone(),
context.table_name.clone(),
record_id,
).await;
if get_result.is_err() {
println!("Deleted Record Test: Record still appears deleted after update");
} else {
println!("Deleted Record Test: Record appears to be undeleted after update");
}
}
Err(e) => {
if let Some(status) = e.downcast_ref::<Status>() {
assert_eq!(status.code(), tonic::Code::NotFound,
"UPDATE on deleted record should return NotFound");
println!("Deleted Record Test: UPDATE correctly rejected on deleted record");
}
}
}
}
}
#[rstest]
#[tokio::test]
async fn test_delete_already_deleted_record(#[future] form_test_context: FormTestContext) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
// 1. Create and delete a record
let initial_data = context.create_test_form_data();
let post_result = context.client.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
initial_data,
).await;
if let Ok(post_response) = post_result {
let record_id = post_response.inserted_id;
// First deletion
let delete_result1 = context.client.delete_table_data(
context.profile_name.clone(),
context.table_name.clone(),
record_id,
).await;
assert!(delete_result1.is_ok(), "First delete should succeed");
// Second deletion (idempotent)
let delete_result2 = context.client.delete_table_data(
context.profile_name.clone(),
context.table_name.clone(),
record_id,
).await;
assert!(delete_result2.is_ok(), "Second delete should succeed (idempotent)");
if let Ok(response) = delete_result2 {
assert!(response.success, "Delete should report success even for already-deleted record");
}
println!("Double Delete Test: Both deletions succeeded (idempotent behavior)");
}
}
// ========================================================================
// VALIDATION AND BOUNDARY TESTS
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_large_data_handling(#[future] form_test_context: FormTestContext) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
// Test with very large string values
let large_string = "A".repeat(10000); // 10KB string
let very_large_string = "B".repeat(100000); // 100KB string
let test_cases = vec![
("Large", large_string),
("VeryLarge", very_large_string),
];
for (case_name, large_value) in test_cases {
let mut data = HashMap::new();
data.insert("firma".to_string(), create_string_value(&large_value));
let result = context.client.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
data,
).await;
match result {
Ok(response) => {
println!("Large Data Test: {} string handled successfully", case_name);
// Verify round-trip
if let Ok(get_response) = context.client.get_table_data(
context.profile_name.clone(),
context.table_name.clone(),
response.inserted_id,
).await {
if let Some(retrieved_value) = get_response.data.get("firma") {
assert_eq!(retrieved_value.len(), large_value.len(),
"Large string should survive round-trip for case: {}", case_name);
}
}
}
Err(e) => {
println!("Large Data Test: {} failed (may hit size limits): {}", case_name, e);
}
}
}
}
#[rstest]
#[tokio::test]
async fn test_sql_injection_attempts(#[future] form_test_context: FormTestContext) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
// Test potential SQL injection strings
let injection_attempts = vec![
("SingleQuote", "'; DROP TABLE users; --"),
("DoubleQuote", "\"; DROP TABLE users; --"),
("Union", "' UNION SELECT * FROM users --"),
("Comment", "/* malicious comment */"),
("Semicolon", "; DELETE FROM users;"),
];
for (case_name, injection_string) in injection_attempts {
let mut data = HashMap::new();
data.insert("firma".to_string(), create_string_value(injection_string));
data.insert("kz".to_string(), create_string_value("KZ123"));
let result = context.client.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
data,
).await;
match result {
Ok(response) => {
println!("SQL Injection Test: {} handled safely (parameterized queries)", case_name);
// Verify the malicious string was stored as-is (not executed)
if let Ok(get_response) = context.client.get_table_data(
context.profile_name.clone(),
context.table_name.clone(),
response.inserted_id,
).await {
if let Some(retrieved_value) = get_response.data.get("firma") {
assert_eq!(retrieved_value, injection_string,
"Injection string should be stored literally for case: {}", case_name);
}
}
}
Err(e) => {
println!("SQL Injection Test: {} rejected: {}", case_name, e);
}
}
}
}
#[rstest]
#[tokio::test]
async fn test_concurrent_operations_with_same_data(#[future] form_test_context: FormTestContext) {
let context = form_test_context.await;
skip_if_backend_unavailable!();
// Test multiple concurrent operations with identical data
let mut handles = Vec::new();
let num_tasks = 10;
for i in 0..num_tasks {
let mut context_clone = context.clone();
let handle = tokio::spawn(async move {
let mut data = HashMap::new();
data.insert("firma".to_string(), create_string_value("Concurrent Identical"));
data.insert("kz".to_string(), create_string_value(&format!("SAME{:02}", i)));
context_clone.client.post_table_data(
context_clone.profile_name,
context_clone.table_name,
data,
).await
});
handles.push(handle);
}
// Wait for all to complete
let mut success_count = 0;
let mut inserted_ids = Vec::new();
for (i, handle) in handles.into_iter().enumerate() {
match handle.await {
Ok(Ok(response)) => {
success_count += 1;
inserted_ids.push(response.inserted_id);
println!("Concurrent Identical Data: Task {} succeeded with ID {}", i, response.inserted_id);
}
Ok(Err(e)) => {
println!("Concurrent Identical Data: Task {} failed: {}", i, e);
}
Err(e) => {
println!("Concurrent Identical Data: Task {} panicked: {}", i, e);
}
}
}
assert!(success_count > 0, "At least some concurrent operations should succeed");
// Verify all IDs are unique
let unique_ids: std::collections::HashSet<_> = inserted_ids.iter().collect();
assert_eq!(unique_ids.len(), inserted_ids.len(), "All inserted IDs should be unique");
println!("Concurrent Identical Data: {}/{} operations succeeded with unique IDs",
success_count, num_tasks);
}
// ========================================================================
// PERFORMANCE AND STRESS TESTS
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_bulk_operations_performance(#[future] form_test_context: FormTestContext) {
let mut context = form_test_context.await;
skip_if_backend_unavailable!();
let operation_count = 50;
let start_time = std::time::Instant::now();
let mut successful_operations = 0;
let mut created_ids = Vec::new();
// Bulk create
for i in 0..operation_count {
let mut data = HashMap::new();
data.insert("firma".to_string(), create_string_value(&format!("Bulk Company {}", i)));
data.insert("kz".to_string(), create_string_value(&format!("BLK{:02}", i)));
if let Ok(response) = context.client.post_table_data(
context.profile_name.clone(),
context.table_name.clone(),
data,
).await {
successful_operations += 1;
created_ids.push(response.inserted_id);
}
}
let create_duration = start_time.elapsed();
println!("Bulk Performance: Created {} records in {:?}", successful_operations, create_duration);
// Bulk read
let read_start = std::time::Instant::now();
let mut successful_reads = 0;
for &record_id in &created_ids {
if context.client.get_table_data(
context.profile_name.clone(),
context.table_name.clone(),
record_id,
).await.is_ok() {
successful_reads += 1;
}
}
let read_duration = read_start.elapsed();
println!("Bulk Performance: Read {} records in {:?}", successful_reads, read_duration);
// Performance assertions
assert!(successful_operations > operation_count * 8 / 10,
"At least 80% of operations should succeed");
assert!(create_duration.as_secs() < 60,
"Bulk operations should complete in reasonable time");
println!("Bulk Performance Test: {}/{} creates, {}/{} reads successful",
successful_operations, operation_count, successful_reads, created_ids.len());
}


@@ -0,0 +1 @@
pub mod form_request_tests;

client/tests/mod.rs

@@ -0,0 +1,3 @@
// tests/mod.rs
pub mod form;


@@ -5,6 +5,8 @@ edition.workspace = true
license.workspace = true
[dependencies]
prost-types = { workspace = true }
tonic = "0.13.0"
prost = "0.13.5"
serde = { version = "1.0.219", features = ["derive"] }


@@ -3,6 +3,7 @@ syntax = "proto3";
package multieko2.tables_data;
import "common.proto";
import "google/protobuf/struct.proto";
service TablesData {
rpc PostTableData (PostTableDataRequest) returns (PostTableDataResponse);
@@ -16,7 +17,7 @@ service TablesData {
message PostTableDataRequest {
string profile_name = 1;
string table_name = 2;
-map<string, string> data = 3;
+map<string, google.protobuf.Value> data = 3;
}
message PostTableDataResponse {
@@ -29,7 +30,7 @@ message PutTableDataRequest {
string profile_name = 1;
string table_name = 2;
int64 id = 3;
-map<string, string> data = 4;
+map<string, google.protobuf.Value> data = 4;
}
message PutTableDataResponse {
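Because google.protobuf.Value uses the proto3 JSON mapping, clients can now send typed fields directly; a hypothetical grpcurl call against the new schema (profile, table, and field names are illustrative):

grpcurl -plaintext -d '{
  "profile_name": "default",
  "table_name": "test_table",
  "data": {"firma": "Test Company", "amount": 123.45, "active": true, "telefon": null}
}' localhost:50051 multieko2.tables_data.TablesData/PostTableData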

Binary file not shown.


@@ -5,10 +5,10 @@ pub struct PostTableDataRequest {
pub profile_name: ::prost::alloc::string::String,
#[prost(string, tag = "2")]
pub table_name: ::prost::alloc::string::String,
#[prost(map = "string, string", tag = "3")]
#[prost(map = "string, message", tag = "3")]
pub data: ::std::collections::HashMap<
::prost::alloc::string::String,
-::prost::alloc::string::String,
+::prost_types::Value,
>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
@@ -28,10 +28,10 @@ pub struct PutTableDataRequest {
pub table_name: ::prost::alloc::string::String,
#[prost(int64, tag = "3")]
pub id: i64,
#[prost(map = "string, string", tag = "4")]
#[prost(map = "string, message", tag = "4")]
pub data: ::std::collections::HashMap<
::prost::alloc::string::String,
-::prost::alloc::string::String,
+::prost_types::Value,
>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
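On the Rust side, building a request with the regenerated types might look like this (a sketch; the request is assumed to be sent through the usual tonic client):

use std::collections::HashMap;
use prost_types::{value::Kind, Value};

// Hypothetical construction of the regenerated PostTableDataRequest.
let mut data: HashMap<String, Value> = HashMap::new();
data.insert(
    "firma".to_string(),
    Value { kind: Some(Kind::StringValue("Test Company".to_string())) },
);
let request = PostTableDataRequest {
    profile_name: "default".to_string(),
    table_name: "test_table".to_string(),
    data,
};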


@@ -10,12 +10,13 @@ search = { path = "../search" }
anyhow = { workspace = true }
tantivy = { workspace = true }
prost-types = { workspace = true }
chrono = { version = "0.4.40", features = ["serde"] }
dotenvy = "0.15.7"
prost = "0.13.5"
serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0.140"
-sqlx = { version = "0.8.5", features = ["chrono", "postgres", "runtime-tokio", "runtime-tokio-native-tls", "time", "uuid"] }
+sqlx = { version = "0.8.5", features = ["chrono", "postgres", "runtime-tokio", "runtime-tokio-native-tls", "rust_decimal", "time", "uuid"] }
tokio = { version = "1.44.2", features = ["full", "macros"] }
tonic = "0.13.0"
tonic-reflection = "0.13.0"
@@ -32,6 +33,8 @@ validator = { version = "0.20.0", features = ["derive"] }
uuid = { version = "1.16.0", features = ["serde", "v4"] }
jsonwebtoken = "9.3.1"
rust-stemmers = "1.2.0"
+rust_decimal = { version = "1.37.2", features = ["maths", "serde"] }
+rust_decimal_macros = "1.37.1"
[lib]
name = "server"
@@ -41,3 +44,5 @@ path = "src/lib.rs"
tokio = { version = "1.44", features = ["full", "test-util"] }
rstest = "0.25.0"
lazy_static = "1.5.0"
rand = "0.9.1"
futures = "0.3.31"

server/Makefile

@@ -0,0 +1,13 @@
# Makefile
test: reset_db run_tests
reset_db:
@echo "Resetting test database..."
@./scripts/reset_test_db.sh
run_tests:
@echo "Running tests..."
@cargo test --test mod -- --test-threads=1
.PHONY: test


@@ -1,24 +0,0 @@
-- Add migration script here
CREATE TABLE adresar (
id BIGSERIAL PRIMARY KEY,
deleted BOOLEAN NOT NULL DEFAULT FALSE,
firma TEXT NOT NULL,
kz TEXT,
drc TEXT,
ulica TEXT,
psc TEXT,
mesto TEXT,
stat TEXT,
banka TEXT,
ucet TEXT,
skladm TEXT,
ico TEXT,
kontakt TEXT,
telefon TEXT,
skladu TEXT,
fax TEXT,
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_adresar_firma ON adresar (firma);
CREATE INDEX idx_adresar_mesto ON adresar (mesto);


@@ -1,22 +0,0 @@
-- Add migration script here
CREATE TABLE uctovnictvo (
id BIGSERIAL PRIMARY KEY,
deleted BOOLEAN NOT NULL DEFAULT FALSE,
adresar_id BIGINT NOT NULL REFERENCES adresar(id), -- Link to adresar table
c_dokladu TEXT NOT NULL,
datum DATE NOT NULL,
c_faktury TEXT NOT NULL,
obsah TEXT,
stredisko TEXT,
c_uctu TEXT,
md TEXT,
identif TEXT,
poznanka TEXT,
firma TEXT NOT NULL,
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_uctovnictvo_adresar_id ON uctovnictvo (adresar_id);
CREATE INDEX idx_uctovnictvo_firma ON uctovnictvo (firma);
CREATE INDEX idx_uctovnictvo_c_dokladu ON uctovnictvo (c_dokladu);
CREATE INDEX idx_uctovnictvo_poznanka ON uctovnictvo (poznanka);


@@ -1,9 +1,12 @@
-- Add migration script here
-CREATE TABLE profiles (
+CREATE TABLE schemas (
id BIGSERIAL PRIMARY KEY,
name TEXT NOT NULL UNIQUE,
-created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
+created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
+description TEXT,
+is_active BOOLEAN DEFAULT TRUE
);
-- Create default profile for existing data
-INSERT INTO profiles (name) VALUES ('default');
+INSERT INTO schemas (name) VALUES ('default');
+CREATE SCHEMA IF NOT EXISTS "default";


@@ -1,4 +1,5 @@
-- Main table definitions
CREATE TABLE table_definitions (
id BIGSERIAL PRIMARY KEY,
deleted BOOLEAN NOT NULL DEFAULT FALSE,
@@ -6,7 +7,7 @@ CREATE TABLE table_definitions (
columns JSONB NOT NULL,
indexes JSONB NOT NULL,
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
-profile_id BIGINT NOT NULL REFERENCES profiles(id) DEFAULT 1
+schema_id BIGINT NOT NULL REFERENCES schemas(id)
);
-- Relationship table for multiple links
@@ -18,9 +19,10 @@ CREATE TABLE table_definition_links (
PRIMARY KEY (source_table_id, linked_table_id)
);
--- Create composite unique index for profile+table combination
-CREATE UNIQUE INDEX idx_table_definitions_profile_table
-ON table_definitions (profile_id, table_name);
+-- Create composite unique index for schema+table combination
+CREATE UNIQUE INDEX idx_table_definitions_schema_table
+ON table_definitions (schema_id, table_name);
CREATE INDEX idx_links_source ON table_definition_links (source_table_id);
CREATE INDEX idx_links_target ON table_definition_links (linked_table_id);


@@ -8,7 +8,7 @@ CREATE TABLE table_scripts (
script TEXT NOT NULL,
description TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
-profile_id BIGINT NOT NULL REFERENCES profiles(id) DEFAULT 1,
+schema_id BIGINT NOT NULL REFERENCES schemas(id),
UNIQUE(table_definitions_id, target_column)
);


@@ -1,3 +0,0 @@
-- Add migration script here
CREATE SCHEMA IF NOT EXISTS gen;


@@ -0,0 +1,9 @@
#!/bin/bash
# scripts/reset_test_db.sh
DATABASE_URL=${TEST_DATABASE_URL:-"postgres://multi_psql_dev:3@localhost:5432/multi_rust_test"}
echo "Reset db script"
yes | sqlx database drop --database-url "$DATABASE_URL"
sqlx database create --database-url "$DATABASE_URL"
echo "Test database reset complete."


@@ -1,156 +0,0 @@
grpcurl -plaintext -d '{"id": 1}' localhost:50051 multieko2.adresar.Adresar/GetAdresar
{
"id": "1",
"firma": "Updated Firma",
"kz": "Updated KZ",
"drc": "Updated DRC",
"ulica": "Updated Ulica",
"psc": "Updated PSC",
"mesto": "Updated Mesto",
"stat": "Updated Stat",
"banka": "Updated Banka",
"ucet": "Updated Ucet",
"skladm": "Updated Skladm",
"ico": "Updated ICO",
"kontakt": "Updated Kontakt",
"telefon": "Updated Telefon",
"skladu": "Updated Skladu",
"fax": "Updated Fax"
}
grpcurl -plaintext -d '{"id": 2}' localhost:50051 multieko2.adresar.Adresar/GetAdresar
{
"id": "2",
"firma": "asdfasf",
"kz": " ",
"drc": " ",
"ulica": " ",
"psc": "sdfasdf",
"mesto": "asf",
"stat": "as",
"banka": "df",
"ucet": "asf",
"skladm": "f",
"ico": "f",
"kontakt": "f",
"telefon": "f",
"skladu": "f",
"fax": " "
}
grpcurl -plaintext -d '{"id": 1}' localhost:50051 multieko2.adresar.Adresar/DeleteAdresar
{
"success": true
}
grpcurl -plaintext -d '{"id": 1}' localhost:50051 multieko2.adresar.Adresar/GetAdresar
ERROR:
Code: NotFound
Message: no rows returned by a query that expected to return at least one row
grpcurl -plaintext -d '{"id": 2}' localhost:50051 multieko2.adresar.Adresar/GetAdresar
{
"id": "2",
"firma": "asdfasf",
"kz": " ",
"drc": " ",
"ulica": " ",
"psc": "sdfasdf",
"mesto": "asf",
"stat": "as",
"banka": "df",
"ucet": "asf",
"skladm": "f",
"ico": "f",
"kontakt": "f",
"telefon": "f",
"skladu": "f",
"fax": " "
}
grpcurl -plaintext -d '{
"firma": "New Firma",
"kz": "New KZ",
"drc": "New DRC",
"ulica": "New Ulica",
"psc": "New PSC",
"mesto": "New Mesto",
"stat": "New Stat",
"banka": "New Banka",
"ucet": "New Ucet",
"skladm": "New Skladm",
"ico": "New ICO",
"kontakt": "New Kontakt",
"telefon": "New Telefon",
"skladu": "New Skladu",
"fax": "New Fax"
}' localhost:50051 multieko2.adresar.Adresar/PostAdresar
{
"id": "43",
"firma": "New Firma",
"kz": "New KZ",
"drc": "New DRC",
"ulica": "New Ulica",
"psc": "New PSC",
"mesto": "New Mesto",
"stat": "New Stat",
"banka": "New Banka",
"ucet": "New Ucet",
"skladm": "New Skladm",
"ico": "New ICO",
"kontakt": "New Kontakt",
"telefon": "New Telefon",
"skladu": "New Skladu",
"fax": "New Fax"
}
grpcurl -plaintext -d '{
"id": 43,
"firma": "Updated Firma",
"kz": "Updated KZ",
"drc": "Updated DRC",
"ulica": "Updated Ulica",
"psc": "Updated PSC",
"mesto": "Updated Mesto",
"stat": "Updated Stat",
"banka": "Updated Banka",
"ucet": "Updated Ucet",
"skladm": "Updated Skladm",
"ico": "Updated ICO",
"kontakt": "Updated Kontakt",
"telefon": "Updated Telefon",
"skladu": "Updated Skladu",
"fax": "Updated Fax"
}' localhost:50051 multieko2.adresar.Adresar/PutAdresar
{
"id": "43",
"firma": "Updated Firma",
"kz": "Updated KZ",
"drc": "Updated DRC",
"ulica": "Updated Ulica",
"psc": "Updated PSC",
"mesto": "Updated Mesto",
"stat": "Updated Stat",
"banka": "Updated Banka",
"ucet": "Updated Ucet",
"skladm": "Updated Skladm",
"ico": "Updated ICO",
"kontakt": "Updated Kontakt",
"telefon": "Updated Telefon",
"skladu": "Updated Skladu",
"fax": "Updated Fax"
}
grpcurl -plaintext -d '{"id": 43}' localhost:50051 multieko2.adresar.Adresar/GetAdresar
{
"id": "43",
"firma": "Updated Firma",
"kz": "Updated KZ",
"drc": "Updated DRC",
"ulica": "Updated Ulica",
"psc": "Updated PSC",
"mesto": "Updated Mesto",
"stat": "Updated Stat",
"banka": "Updated Banka",
"ucet": "Updated Ucet",
"skladm": "Updated Skladm",
"ico": "Updated ICO",
"kontakt": "Updated Kontakt",
"telefon": "Updated Telefon",
"skladu": "Updated Skladu",
"fax": "Updated Fax"
}


@@ -1,29 +0,0 @@
# TOTAL items in the adresar
grpcurl -plaintext localhost:50051 multieko2.adresar.Adresar/GetAdresarCount
{
"count": "5"
}
# Item at this count. If there are 43 items, number 1 is the first item
grpcurl -plaintext -d '{"position": 1}' localhost:50051 multieko2.adresar.Adresar/GetAdresarByPosition
{
"id": "1",
"firma": "ks555",
"kz": "f",
"drc": "asdf",
"ulica": "as",
"psc": "f",
"mesto": "asf",
"stat": "as",
"banka": "fa",
"telefon": "a",
"skladu": "fd",
"fax": "asf"
}
# Item fetched by id. The first item was created and marked as deleted, therefore number 1 in ids shouldnt be fetched.
grpcurl -plaintext -d '{"id": 1}' localhost:50051 multieko2.adresar.Adresar/GetAdresar
ERROR:
Code: NotFound
Message: no rows returned by a query that expected to return at least one row


@@ -1,15 +0,0 @@
// src/adresar/handlers.rs
pub mod post_adresar;
pub mod get_adresar;
pub mod put_adresar;
pub mod delete_adresar;
pub mod get_adresar_count;
pub mod get_adresar_by_position;
pub use post_adresar::post_adresar;
pub use get_adresar::get_adresar;
pub use put_adresar::put_adresar;
pub use delete_adresar::delete_adresar;
pub use get_adresar_count::get_adresar_count;
pub use get_adresar_by_position::get_adresar_by_position;


@@ -1,27 +0,0 @@
// src/adresar/handlers/delete_adresar.rs
use tonic::Status;
use sqlx::PgPool;
use common::proto::multieko2::adresar::{DeleteAdresarRequest, DeleteAdresarResponse};
pub async fn delete_adresar(
db_pool: &PgPool,
request: DeleteAdresarRequest,
) -> Result<DeleteAdresarResponse, Status> {
let rows_affected = sqlx::query!(
r#"
UPDATE adresar
SET deleted = true
WHERE id = $1 AND deleted = false
"#,
request.id
)
.execute(db_pool)
.await
.map_err(|e| Status::internal(e.to_string()))?
.rows_affected();
Ok(DeleteAdresarResponse {
success: rows_affected > 0,
})
}


@@ -1,63 +0,0 @@
// src/adresar/handlers/get_adresar.rs
use tonic::Status;
use sqlx::PgPool;
use crate::adresar::models::Adresar;
use common::proto::multieko2::adresar::{GetAdresarRequest, AdresarResponse};
pub async fn get_adresar(
db_pool: &PgPool,
request: GetAdresarRequest,
) -> Result<AdresarResponse, Status> {
let adresar = sqlx::query_as!(
Adresar,
r#"
SELECT
id,
deleted,
firma,
kz,
drc,
ulica,
psc,
mesto,
stat,
banka,
ucet,
skladm,
ico,
kontakt,
telefon,
skladu,
fax
FROM adresar
WHERE id = $1 AND deleted = false
"#,
request.id
)
.fetch_one(db_pool)
.await
.map_err(|e| match e {
sqlx::Error::RowNotFound => Status::not_found("Record not found"),
_ => Status::internal(format!("Database error: {}", e)),
})?;
Ok(AdresarResponse {
id: adresar.id,
firma: adresar.firma,
kz: adresar.kz.unwrap_or_default(),
drc: adresar.drc.unwrap_or_default(),
ulica: adresar.ulica.unwrap_or_default(),
psc: adresar.psc.unwrap_or_default(),
mesto: adresar.mesto.unwrap_or_default(),
stat: adresar.stat.unwrap_or_default(),
banka: adresar.banka.unwrap_or_default(),
ucet: adresar.ucet.unwrap_or_default(),
skladm: adresar.skladm.unwrap_or_default(),
ico: adresar.ico.unwrap_or_default(),
kontakt: adresar.kontakt.unwrap_or_default(),
telefon: adresar.telefon.unwrap_or_default(),
skladu: adresar.skladu.unwrap_or_default(),
fax: adresar.fax.unwrap_or_default(),
})
}


@@ -1,35 +0,0 @@
// src/adresar/handlers/get_adresar_by_position.rs
use tonic::{Status};
use sqlx::PgPool;
use common::proto::multieko2::adresar::{AdresarResponse, GetAdresarRequest};
use common::proto::multieko2::common::PositionRequest;
use super::get_adresar;
pub async fn get_adresar_by_position(
db_pool: &PgPool,
request: PositionRequest,
) -> Result<AdresarResponse, Status> {
if request.position < 1 {
return Err(Status::invalid_argument("Position must be at least 1"));
}
// Find the ID of the Nth non-deleted record
let id: i64 = sqlx::query_scalar!(
r#"
SELECT id
FROM adresar
WHERE deleted = FALSE
ORDER BY id ASC
OFFSET $1
LIMIT 1
"#,
request.position - 1
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(e.to_string()))?
.ok_or_else(|| Status::not_found("Position out of bounds"))?;
// Now fetch the complete record using the existing get_adresar function
get_adresar(db_pool, GetAdresarRequest { id }).await
}


@@ -1,23 +0,0 @@
// src/adresar/handlers/get_adresar_count.rs
use tonic::Status;
use sqlx::PgPool;
use common::proto::multieko2::common::{CountResponse, Empty};
pub async fn get_adresar_count(
db_pool: &PgPool,
_request: Empty,
) -> Result<CountResponse, Status> {
let count: i64 = sqlx::query_scalar!(
r#"
SELECT COUNT(*) AS count
FROM adresar
WHERE deleted = FALSE
"#
)
.fetch_one(db_pool)
.await
.map_err(|e| Status::internal(e.to_string()))?
.unwrap_or(0);
Ok(CountResponse { count })
}


@@ -1,99 +0,0 @@
// src/adresar/handlers/post_adresar.rs
use tonic::Status;
use sqlx::PgPool;
use crate::adresar::models::Adresar;
use common::proto::multieko2::adresar::{PostAdresarRequest, AdresarResponse};
// Helper function to sanitize inputs
fn sanitize_input(input: &str) -> Option<String> {
let trimmed = input.trim().to_string();
if trimmed.is_empty() {
None
} else {
Some(trimmed)
}
}
pub async fn post_adresar(
db_pool: &PgPool,
mut request: PostAdresarRequest,
) -> Result<AdresarResponse, Status> {
request.firma = request.firma.trim().to_string();
if request.firma.is_empty() {
return Err(Status::invalid_argument("Firma je povinne pole"));
}
// Sanitize optional fields
let kz = sanitize_input(&request.kz);
let drc = sanitize_input(&request.drc);
let ulica = sanitize_input(&request.ulica);
let psc = sanitize_input(&request.psc);
let mesto = sanitize_input(&request.mesto);
let stat = sanitize_input(&request.stat);
let banka = sanitize_input(&request.banka);
let ucet = sanitize_input(&request.ucet);
let skladm = sanitize_input(&request.skladm);
let ico = sanitize_input(&request.ico);
let kontakt = sanitize_input(&request.kontakt);
let telefon = sanitize_input(&request.telefon);
let skladu = sanitize_input(&request.skladu);
let fax = sanitize_input(&request.fax);
let adresar = sqlx::query_as!(
Adresar,
r#"
INSERT INTO adresar (
firma, kz, drc, ulica, psc, mesto, stat, banka, ucet,
skladm, ico, kontakt, telefon, skladu, fax, deleted
)
VALUES (
$1, $2, $3, $4, $5, $6, $7, $8, $9,
$10, $11, $12, $13, $14, $15, $16
)
RETURNING
id, deleted, firma, kz, drc, ulica, psc, mesto, stat,
banka, ucet, skladm, ico, kontakt, telefon, skladu, fax
"#,
request.firma,
kz,
drc,
ulica,
psc,
mesto,
stat,
banka,
ucet,
skladm,
ico,
kontakt,
telefon,
skladu,
fax,
false
)
.fetch_one(db_pool)
.await
.map_err(|e| Status::internal(e.to_string()))?;
Ok(AdresarResponse {
id: adresar.id,
// Do not include `deleted` in the response since it's not
// defined in the proto message.
firma: adresar.firma,
kz: adresar.kz.unwrap_or_default(),
drc: adresar.drc.unwrap_or_default(),
ulica: adresar.ulica.unwrap_or_default(),
psc: adresar.psc.unwrap_or_default(),
mesto: adresar.mesto.unwrap_or_default(),
stat: adresar.stat.unwrap_or_default(),
banka: adresar.banka.unwrap_or_default(),
ucet: adresar.ucet.unwrap_or_default(),
skladm: adresar.skladm.unwrap_or_default(),
ico: adresar.ico.unwrap_or_default(),
kontakt: adresar.kontakt.unwrap_or_default(),
telefon: adresar.telefon.unwrap_or_default(),
skladu: adresar.skladu.unwrap_or_default(),
fax: adresar.fax.unwrap_or_default(),
})
}

View File

@@ -1,122 +0,0 @@
// src/adresar/handlers/put_adresar.rs
use tonic::Status;
use sqlx::PgPool;
use crate::adresar::models::Adresar;
use common::proto::multieko2::adresar::{PutAdresarRequest, AdresarResponse};
// Add the same sanitize_input helper as in POST handler
fn sanitize_input(input: &str) -> Option<String> {
let trimmed = input.trim().to_string();
if trimmed.is_empty() {
None
} else {
Some(trimmed)
}
}
pub async fn put_adresar(
db_pool: &PgPool,
mut request: PutAdresarRequest,
) -> Result<AdresarResponse, Status> {
// Add validation for required fields like in POST
request.firma = request.firma.trim().to_string();
if request.firma.is_empty() {
return Err(Status::invalid_argument("Firma je povinne pole"));
}
// Sanitize optional fields like in POST
let kz = sanitize_input(&request.kz);
let drc = sanitize_input(&request.drc);
let ulica = sanitize_input(&request.ulica);
let psc = sanitize_input(&request.psc);
let mesto = sanitize_input(&request.mesto);
let stat = sanitize_input(&request.stat);
let banka = sanitize_input(&request.banka);
let ucet = sanitize_input(&request.ucet);
let skladm = sanitize_input(&request.skladm);
let ico = sanitize_input(&request.ico);
let kontakt = sanitize_input(&request.kontakt);
let telefon = sanitize_input(&request.telefon);
let skladu = sanitize_input(&request.skladu);
let fax = sanitize_input(&request.fax);
let adresar = sqlx::query_as!(
Adresar,
r#"
UPDATE adresar
SET
firma = $2,
kz = $3,
drc = $4,
ulica = $5,
psc = $6,
mesto = $7,
stat = $8,
banka = $9,
ucet = $10,
skladm = $11,
ico = $12,
kontakt = $13,
telefon = $14,
skladu = $15,
fax = $16
WHERE id = $1 AND deleted = FALSE
RETURNING
id,
deleted,
firma,
kz,
drc,
ulica,
psc,
mesto,
stat,
banka,
ucet,
skladm,
ico,
kontakt,
telefon,
skladu,
fax
"#,
request.id,
request.firma,
kz,
drc,
ulica,
psc,
mesto,
stat,
banka,
ucet,
skladm,
ico,
kontakt,
telefon,
skladu,
fax
)
.fetch_one(db_pool)
.await
.map_err(|e| Status::internal(e.to_string()))?;
Ok(AdresarResponse {
id: adresar.id,
firma: adresar.firma,
kz: adresar.kz.unwrap_or_default(),
drc: adresar.drc.unwrap_or_default(),
ulica: adresar.ulica.unwrap_or_default(),
psc: adresar.psc.unwrap_or_default(),
mesto: adresar.mesto.unwrap_or_default(),
stat: adresar.stat.unwrap_or_default(),
banka: adresar.banka.unwrap_or_default(),
ucet: adresar.ucet.unwrap_or_default(),
skladm: adresar.skladm.unwrap_or_default(),
ico: adresar.ico.unwrap_or_default(),
kontakt: adresar.kontakt.unwrap_or_default(),
telefon: adresar.telefon.unwrap_or_default(),
skladu: adresar.skladu.unwrap_or_default(),
fax: adresar.fax.unwrap_or_default(),
})
}

View File

@@ -1,7 +0,0 @@
// src/adresar/mod.rs
pub mod models;
pub mod handlers;
// #[cfg(test)]
// pub mod tests;

View File

@@ -1,23 +0,0 @@
// src/adresar/models.rs
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize)]
pub struct Adresar {
pub id: i64,
pub deleted: bool,
pub firma: String,
pub kz: Option<String>,
pub drc: Option<String>,
pub ulica: Option<String>,
pub psc: Option<String>,
pub mesto: Option<String>,
pub stat: Option<String>,
pub banka: Option<String>,
pub ucet: Option<String>,
pub skladm: Option<String>,
pub ico: Option<String>,
pub kontakt: Option<String>,
pub telefon: Option<String>,
pub skladu: Option<String>,
pub fax: Option<String>,
}

View File

@@ -3,6 +3,8 @@
use tower::ServiceBuilder;
use crate::auth::logic::rbac;
// TODO: redesign this. The adresar and uctovnictvo services no longer exist; this code is kept
// only for reference. Adjust the RBAC rules accordingly in the future.
pub async fn run_server(db_pool: sqlx::PgPool) -> Result<(), Box<dyn std::error::Error>> {
// ... existing setup code ...

View File

@@ -8,8 +8,6 @@ use tracing::{error, info, warn};
use tantivy::schema::Schema;
use crate::search_schema;
const INDEX_DIR: &str = "./tantivy_indexes";
/// Defines the commands that can be sent to the indexer task.
#[derive(Debug)]
pub enum IndexCommand {

View File

@@ -4,8 +4,6 @@ pub mod auth;
pub mod indexer;
pub mod search_schema;
pub mod server;
pub mod adresar;
pub mod uctovnictvo;
pub mod shared;
pub mod table_structure;
pub mod table_definition;

View File

@@ -1,4 +1,2 @@
// src/server/handlers.rs
pub use crate::server::services::adresar_service::AdresarService;
pub use crate::server::services::uctovnictvo_service::UctovnictvoService;
pub use crate::server::services::table_structure_service::TableStructureHandler;

View File

@@ -6,8 +6,6 @@ use crate::indexer::{indexer_task, IndexCommand};
use common::proto::multieko2::FILE_DESCRIPTOR_SET;
use crate::server::services::{
AdresarService,
UctovnictvoService,
TableStructureHandler,
TableDefinitionService,
TablesDataService,
@@ -15,8 +13,6 @@ use crate::server::services::{
AuthServiceImpl
};
use common::proto::multieko2::{
adresar::adresar_server::AdresarServer,
uctovnictvo::uctovnictvo_server::UctovnictvoServer,
table_structure::table_structure_service_server::TableStructureServiceServer,
table_definition::table_definition_server::TableDefinitionServer,
tables_data::tables_data_server::TablesDataServer,
@@ -47,7 +43,7 @@ pub async fn run_server(db_pool: sqlx::PgPool) -> Result<(), Box<dyn std::error:
let table_definition_service = TableDefinitionService { db_pool: db_pool.clone() };
let tables_data_service = TablesDataService {
db_pool: db_pool.clone(),
indexer_tx: indexer_tx.clone(), // Pass the sender
indexer_tx: indexer_tx.clone(),
};
let table_script_service = TableScriptService { db_pool: db_pool.clone() };
let auth_service = AuthServiceImpl { db_pool: db_pool.clone() };
@@ -56,14 +52,12 @@ pub async fn run_server(db_pool: sqlx::PgPool) -> Result<(), Box<dyn std::error:
let search_service = SearcherService { pool: db_pool.clone() };
Server::builder()
.add_service(AdresarServer::new(AdresarService { db_pool: db_pool.clone() }))
.add_service(UctovnictvoServer::new(UctovnictvoService { db_pool: db_pool.clone() }))
.add_service(TableStructureServiceServer::new(TableStructureHandler { db_pool: db_pool.clone() }))
.add_service(TableDefinitionServer::new(table_definition_service))
.add_service(TablesDataServer::new(tables_data_service))
.add_service(TableScriptServer::new(table_script_service))
.add_service(AuthServiceServer::new(auth_service))
.add_service(SearcherServer::new(search_service)) // This now works correctly
.add_service(SearcherServer::new(search_service))
.add_service(reflection_service)
.serve(addr)
.await?;

View File

@@ -1,69 +0,0 @@
// src/server/services/adresar_service.rs
use tonic::{Request, Response, Status};
use common::proto::multieko2::adresar::{
adresar_server::Adresar,
PostAdresarRequest, AdresarResponse, GetAdresarRequest, PutAdresarRequest,
DeleteAdresarRequest, DeleteAdresarResponse,
};
use common::proto::multieko2::common::{Empty, CountResponse, PositionRequest};
use crate::adresar::handlers::{
post_adresar, get_adresar, put_adresar, delete_adresar,
get_adresar_count, get_adresar_by_position,
};
use sqlx::PgPool;
#[derive(Debug)]
pub struct AdresarService {
pub db_pool: PgPool,
}
#[tonic::async_trait]
impl Adresar for AdresarService {
async fn post_adresar(
&self,
request: Request<PostAdresarRequest>,
) -> Result<Response<AdresarResponse>, Status> {
let response = post_adresar(&self.db_pool, request.into_inner()).await?;
Ok(Response::new(response))
}
async fn get_adresar(
&self,
request: Request<GetAdresarRequest>,
) -> Result<Response<AdresarResponse>, Status> {
let response = get_adresar(&self.db_pool, request.into_inner()).await?;
Ok(Response::new(response))
}
async fn put_adresar(
&self,
request: Request<PutAdresarRequest>,
) -> Result<Response<AdresarResponse>, Status> {
let response = put_adresar(&self.db_pool, request.into_inner()).await?;
Ok(Response::new(response))
}
async fn delete_adresar(
&self,
request: Request<DeleteAdresarRequest>,
) -> Result<Response<DeleteAdresarResponse>, Status> {
let response = delete_adresar(&self.db_pool, request.into_inner()).await?;
Ok(Response::new(response))
}
async fn get_adresar_count(
&self,
request: Request<Empty>,
) -> Result<Response<CountResponse>, Status> {
let response = get_adresar_count(&self.db_pool, request.into_inner()).await?;
Ok(Response::new(response))
}
async fn get_adresar_by_position(
&self,
request: Request<PositionRequest>,
) -> Result<Response<AdresarResponse>, Status> {
let response = get_adresar_by_position(&self.db_pool, request.into_inner()).await?;
Ok(Response::new(response))
}
}

View File

@@ -1,16 +1,12 @@
// src/server/services/mod.rs
pub mod adresar_service;
pub mod table_structure_service;
pub mod uctovnictvo_service;
pub mod table_definition_service;
pub mod tables_data_service;
pub mod table_script_service;
pub mod auth_service;
pub use adresar_service::AdresarService;
pub use table_structure_service::TableStructureHandler;
pub use uctovnictvo_service::UctovnictvoService;
pub use table_definition_service::TableDefinitionService;
pub use tables_data_service::TablesDataService;
pub use table_script_service::TableScriptService;

View File

@@ -41,14 +41,17 @@ impl TablesData for TablesDataService {
Ok(Response::new(response))
}
// You will later apply the same pattern to put_table_data...
async fn put_table_data(
&self,
request: Request<PutTableDataRequest>,
) -> Result<Response<PutTableDataResponse>, Status> {
let request = request.into_inner();
// TODO: Update put_table_data handler to accept and use indexer_tx
let response = put_table_data(&self.db_pool, request).await?;
let response = put_table_data(
&self.db_pool,
request,
&self.indexer_tx,
)
.await?;
Ok(Response::new(response))
}

View File

@@ -1,60 +0,0 @@
// src/server/services/uctovnictvo_service.rs
use tonic::{Request, Response, Status};
use common::proto::multieko2::uctovnictvo::{
uctovnictvo_server::Uctovnictvo,
PostUctovnictvoRequest, UctovnictvoResponse, GetUctovnictvoRequest, PutUctovnictvoRequest,
};
use crate::uctovnictvo::handlers::{
post_uctovnictvo, get_uctovnictvo, get_uctovnictvo_count,
get_uctovnictvo_by_position, put_uctovnictvo,
};
use common::proto::multieko2::common::{Empty, CountResponse, PositionRequest};
use sqlx::PgPool;
#[derive(Debug)]
pub struct UctovnictvoService {
pub db_pool: PgPool,
}
#[tonic::async_trait]
impl Uctovnictvo for UctovnictvoService {
async fn post_uctovnictvo(
&self,
request: Request<PostUctovnictvoRequest>,
) -> Result<Response<UctovnictvoResponse>, Status> {
let response = post_uctovnictvo(&self.db_pool, request.into_inner()).await?;
Ok(Response::new(response))
}
async fn get_uctovnictvo(
&self,
request: Request<GetUctovnictvoRequest>,
) -> Result<Response<UctovnictvoResponse>, Status> {
let response = get_uctovnictvo(&self.db_pool, request.into_inner()).await?;
Ok(Response::new(response))
}
async fn get_uctovnictvo_count(
&self,
request: Request<Empty>,
) -> Result<Response<CountResponse>, Status> {
let response = get_uctovnictvo_count(&self.db_pool, request.into_inner()).await?;
Ok(Response::new(response))
}
async fn get_uctovnictvo_by_position(
&self,
request: Request<PositionRequest>,
) -> Result<Response<UctovnictvoResponse>, Status> {
let response = get_uctovnictvo_by_position(&self.db_pool, request.into_inner()).await?;
Ok(Response::new(response))
}
async fn put_uctovnictvo(
&self,
request: Request<PutUctovnictvoRequest>,
) -> Result<Response<UctovnictvoResponse>, Status> {
let response = put_uctovnictvo(&self.db_pool, request.into_inner()).await?;
Ok(Response::new(response))
}
}

View File

@@ -1,34 +1,50 @@
// src/shared/schema_qualifier.rs
use sqlx::PgPool;
use tonic::Status;
/// Qualifies table names with the appropriate schema
///
// TODO: in the future, remove the per-request database query and implement caching so this
// scales to large datasets and high request volumes
/// Qualifies a table name by checking for its existence in the table_definitions table.
/// This is the robust, "source of truth" approach.
///
/// Rules:
/// - Tables created via PostTableDefinition (dynamically created tables) are in 'gen' schema
/// - System tables (like users, profiles) remain in 'public' schema
pub fn qualify_table_name(table_name: &str) -> String {
// Check if table matches the pattern of dynamically created tables (e.g., 2025_something)
if table_name.starts_with(|c: char| c.is_ascii_digit()) && table_name.contains('_') {
format!("gen.\"{}\"", table_name)
/// - If a table is found in `table_definitions`, it is qualified with the 'gen' schema.
/// - Otherwise, it is assumed to be a system table in the 'public' schema.
pub async fn qualify_table_name(
db_pool: &PgPool,
profile_name: &str,
table_name: &str,
) -> Result<String, Status> {
// Check if a definition exists for this table in the given profile.
let definition_exists = sqlx::query!(
r#"SELECT EXISTS (
SELECT 1 FROM table_definitions td
JOIN schemas s ON td.schema_id = s.id
WHERE s.name = $1 AND td.table_name = $2
)"#,
profile_name,
table_name
)
.fetch_one(db_pool)
.await
.map_err(|e| Status::internal(format!("Schema lookup failed: {}", e)))?
.exists
.unwrap_or(false);
if definition_exists {
Ok(format!("\"{}\".\"{}\"", profile_name, table_name))
} else {
format!("\"{}\"", table_name)
// It's not a user-defined table, so it must be a system table in 'public'.
Ok(format!("\"{}\"", table_name))
}
}
/// Qualifies table names for data operations
pub fn qualify_table_name_for_data(table_name: &str) -> Result<String, Status> {
Ok(qualify_table_name(table_name))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_qualify_table_name() {
assert_eq!(qualify_table_name("2025_test_schema3"), "gen.\"2025_test_schema3\"");
assert_eq!(qualify_table_name("users"), "\"users\"");
assert_eq!(qualify_table_name("profiles"), "\"profiles\"");
assert_eq!(qualify_table_name("adresar"), "\"adresar\"");
}
pub async fn qualify_table_name_for_data(
db_pool: &PgPool,
profile_name: &str,
table_name: &str,
) -> Result<String, Status> {
qualify_table_name(db_pool, profile_name, table_name).await
}
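A hypothetical call site for the new async qualifier (the pool handle, profile, and table names below are invented for illustration):

// Sketch only: assumes a connected PgPool `pool` and a profile "acme" whose
// "invoices" table is registered in table_definitions.
async fn example(pool: &sqlx::PgPool) -> Result<(), tonic::Status> {
let user_table = qualify_table_name(pool, "acme", "invoices").await?;
assert_eq!(user_table, r#""acme"."invoices""#);
let system_table = qualify_table_name(pool, "acme", "users").await?;
assert_eq!(system_table, r#""users""#); // falls back to 'public'
Ok(())
}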

View File

@@ -0,0 +1,190 @@
// src/steel/server/decimal_math.rs
use rust_decimal::prelude::*;
use rust_decimal::MathematicalOps;
use steel::rvals::SteelVal;
use std::str::FromStr;
use thiserror::Error;
#[derive(Debug, Error)]
pub enum DecimalMathError {
#[error("Invalid decimal format: {0}")]
InvalidDecimal(String),
#[error("Math operation failed: {0}")]
MathError(String),
#[error("Division by zero")]
DivisionByZero,
}
/// Converts a SteelVal to a Decimal
fn steel_val_to_decimal(val: &SteelVal) -> Result<Decimal, DecimalMathError> {
match val {
SteelVal::StringV(s) => {
Decimal::from_str(&s.to_string())
.map_err(|e| DecimalMathError::InvalidDecimal(format!("{}: {}", s, e)))
}
SteelVal::NumV(n) => {
Decimal::try_from(*n)
.map_err(|e| DecimalMathError::InvalidDecimal(format!("{}: {}", n, e)))
}
SteelVal::IntV(i) => {
Ok(Decimal::from(*i))
}
_ => Err(DecimalMathError::InvalidDecimal(format!("Unsupported type: {:?}", val)))
}
}
/// Converts a Decimal back to a SteelVal string
fn decimal_to_steel_val(decimal: Decimal) -> SteelVal {
SteelVal::StringV(decimal.to_string().into())
}
// Basic arithmetic operations
pub fn decimal_add(a: String, b: String) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
let b_dec = Decimal::from_str(&b).map_err(|e| format!("Invalid decimal '{}': {}", b, e))?;
Ok((a_dec + b_dec).to_string())
}
pub fn decimal_sub(a: String, b: String) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
let b_dec = Decimal::from_str(&b).map_err(|e| format!("Invalid decimal '{}': {}", b, e))?;
Ok((a_dec - b_dec).to_string())
}
pub fn decimal_mul(a: String, b: String) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
let b_dec = Decimal::from_str(&b).map_err(|e| format!("Invalid decimal '{}': {}", b, e))?;
Ok((a_dec * b_dec).to_string())
}
pub fn decimal_div(a: String, b: String) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
let b_dec = Decimal::from_str(&b).map_err(|e| format!("Invalid decimal '{}': {}", b, e))?;
if b_dec.is_zero() {
return Err("Division by zero".to_string());
}
Ok((a_dec / b_dec).to_string())
}
// Advanced mathematical functions (requires maths feature)
pub fn decimal_pow(base: String, exp: String) -> Result<String, String> {
let base_dec = Decimal::from_str(&base).map_err(|e| format!("Invalid decimal '{}': {}", base, e))?;
let exp_dec = Decimal::from_str(&exp).map_err(|e| format!("Invalid decimal '{}': {}", exp, e))?;
base_dec.checked_powd(exp_dec)
.map(|result| result.to_string())
.ok_or_else(|| "Power operation failed or overflowed".to_string())
}
pub fn decimal_sqrt(a: String) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
a_dec.sqrt()
.map(|result| result.to_string())
.ok_or_else(|| "Square root failed (negative number?)".to_string())
}
pub fn decimal_ln(a: String) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
a_dec.checked_ln()
.map(|result| result.to_string())
.ok_or_else(|| "Natural log failed (non-positive number?)".to_string())
}
pub fn decimal_log10(a: String) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
a_dec.checked_log10()
.map(|result| result.to_string())
.ok_or_else(|| "Log10 failed (non-positive number?)".to_string())
}
pub fn decimal_exp(a: String) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
a_dec.checked_exp()
.map(|result| result.to_string())
.ok_or_else(|| "Exponential failed or overflowed".to_string())
}
// Trigonometric functions (input in radians)
pub fn decimal_sin(a: String) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
a_dec.checked_sin()
.map(|result| result.to_string())
.ok_or_else(|| "Sine calculation failed or overflowed".to_string())
}
pub fn decimal_cos(a: String) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
a_dec.checked_cos()
.map(|result| result.to_string())
.ok_or_else(|| "Cosine calculation failed or overflowed".to_string())
}
pub fn decimal_tan(a: String) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
a_dec.checked_tan()
.map(|result| result.to_string())
.ok_or_else(|| "Tangent calculation failed or overflowed".to_string())
}
// Comparison functions
pub fn decimal_gt(a: String, b: String) -> Result<bool, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
let b_dec = Decimal::from_str(&b).map_err(|e| format!("Invalid decimal '{}': {}", b, e))?;
Ok(a_dec > b_dec)
}
pub fn decimal_lt(a: String, b: String) -> Result<bool, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
let b_dec = Decimal::from_str(&b).map_err(|e| format!("Invalid decimal '{}': {}", b, e))?;
Ok(a_dec < b_dec)
}
pub fn decimal_eq(a: String, b: String) -> Result<bool, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
let b_dec = Decimal::from_str(&b).map_err(|e| format!("Invalid decimal '{}': {}", b, e))?;
Ok(a_dec == b_dec)
}
// Utility functions
pub fn decimal_abs(a: String) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
Ok(a_dec.abs().to_string())
}
pub fn decimal_round(a: String, places: i32) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
Ok(a_dec.round_dp(places as u32).to_string())
}
pub fn decimal_min(a: String, b: String) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
let b_dec = Decimal::from_str(&b).map_err(|e| format!("Invalid decimal '{}': {}", b, e))?;
Ok(a_dec.min(b_dec).to_string())
}
pub fn decimal_max(a: String, b: String) -> Result<String, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
let b_dec = Decimal::from_str(&b).map_err(|e| format!("Invalid decimal '{}': {}", b, e))?;
Ok(a_dec.max(b_dec).to_string())
}
pub fn decimal_gte(a: String, b: String) -> Result<bool, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
let b_dec = Decimal::from_str(&b).map_err(|e| format!("Invalid decimal '{}': {}", b, e))?;
Ok(a_dec >= b_dec)
}
pub fn decimal_lte(a: String, b: String) -> Result<bool, String> {
let a_dec = Decimal::from_str(&a).map_err(|e| format!("Invalid decimal '{}': {}", a, e))?;
let b_dec = Decimal::from_str(&b).map_err(|e| format!("Invalid decimal '{}': {}", b, e))?;
Ok(a_dec <= b_dec)
}
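A quick smoke test of the string-in/string-out contract above (a sketch with invented values; rust_decimal preserves scale exactly, which f64 arithmetic would not):

// Sketch: exercises the helpers above.
fn main() {
assert_eq!(decimal_add("0.1".into(), "0.2".into()).unwrap(), "0.3");
assert_eq!(decimal_mul("1.5".into(), "2".into()).unwrap(), "3.0");
assert_eq!(decimal_round("2.345".into(), 2).unwrap(), "2.34"); // banker's rounding
assert!(decimal_div("1".into(), "0".into()).is_err()); // division by zero
}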

View File

@@ -1,8 +1,9 @@
// src/steel/server/execution.rs
// Updated src/steel/server/execution.rs
use steel::steel_vm::engine::Engine;
use steel::steel_vm::register_fn::RegisterFn;
use steel::rvals::SteelVal;
use super::functions::SteelContext;
use super::decimal_math::*;
use sqlx::PgPool;
use std::sync::Arc;
use thiserror::Error;
@@ -33,6 +34,24 @@ pub fn execute_script(
let mut vm = Engine::new();
let context = Arc::new(context);
// Register existing Steel functions
register_steel_functions(&mut vm, context.clone());
// Register all decimal math functions
register_decimal_math_functions(&mut vm);
// Execute script and process results
let results = vm.compile_and_run_raw_program(script)
.map_err(|e| ExecutionError::RuntimeError(e.to_string()))?;
// Convert results to target type
match target_type {
"STRINGS" => process_string_results(results),
_ => Err(ExecutionError::UnsupportedType(target_type.into()))
}
}
fn register_steel_functions(vm: &mut Engine, context: Arc<SteelContext>) {
// Register steel_get_column with row context
vm.register_fn("steel_get_column", {
let ctx = context.clone();
@@ -59,27 +78,101 @@ pub fn execute_script(
.map_err(|e| e.to_string())
}
});
}
// Execute script and process results
let results = vm.compile_and_run_raw_program(script)
.map_err(|e| ExecutionError::RuntimeError(e.to_string()))?;
fn register_decimal_math_functions(vm: &mut Engine) {
// Basic arithmetic operations
vm.register_fn("decimal-add", decimal_add);
vm.register_fn("decimal-sub", decimal_sub);
vm.register_fn("decimal-mul", decimal_mul);
vm.register_fn("decimal-div", decimal_div);
// Convert results to target type
match target_type {
"STRINGS" => process_string_results(results),
_ => Err(ExecutionError::UnsupportedType(target_type.into()))
}
// Advanced mathematical functions
vm.register_fn("decimal-pow", decimal_pow);
vm.register_fn("decimal-sqrt", decimal_sqrt);
vm.register_fn("decimal-ln", decimal_ln);
vm.register_fn("decimal-log10", decimal_log10);
vm.register_fn("decimal-exp", decimal_exp);
// Trigonometric functions
vm.register_fn("decimal-sin", decimal_sin);
vm.register_fn("decimal-cos", decimal_cos);
vm.register_fn("decimal-tan", decimal_tan);
// Comparison functions
vm.register_fn("decimal-gt", decimal_gt);
vm.register_fn("decimal-lt", decimal_lt);
vm.register_fn("decimal-eq", decimal_eq);
// Utility functions
vm.register_fn("decimal-abs", decimal_abs);
vm.register_fn("decimal-round", decimal_round);
vm.register_fn("decimal-min", decimal_min);
vm.register_fn("decimal-max", decimal_max);
// Additional convenience functions
vm.register_fn("decimal-zero", || "0".to_string());
vm.register_fn("decimal-one", || "1".to_string());
vm.register_fn("decimal-pi", || "3.1415926535897932384626433833".to_string());
vm.register_fn("decimal-e", || "2.7182818284590452353602874714".to_string());
// Type conversion helpers
vm.register_fn("to-decimal", |s: String| -> Result<String, String> {
use rust_decimal::prelude::*;
use std::str::FromStr;
Decimal::from_str(&s)
.map(|d| d.to_string())
.map_err(|e| format!("Invalid decimal: {}", e))
});
// Financial functions
vm.register_fn("decimal-percentage", |amount: String, percentage: String| -> Result<String, String> {
use rust_decimal::prelude::*;
use std::str::FromStr;
let amount_dec = Decimal::from_str(&amount)
.map_err(|e| format!("Invalid amount: {}", e))?;
let percentage_dec = Decimal::from_str(&percentage)
.map_err(|e| format!("Invalid percentage: {}", e))?;
let hundred = Decimal::from(100);
Ok((amount_dec * percentage_dec / hundred).to_string())
});
vm.register_fn("decimal-compound", |principal: String, rate: String, time: String| -> Result<String, String> {
use rust_decimal::prelude::*;
use rust_decimal::MathematicalOps;
use std::str::FromStr;
let principal_dec = Decimal::from_str(&principal)
.map_err(|e| format!("Invalid principal: {}", e))?;
let rate_dec = Decimal::from_str(&rate)
.map_err(|e| format!("Invalid rate: {}", e))?;
let time_dec = Decimal::from_str(&time)
.map_err(|e| format!("Invalid time: {}", e))?;
let one = Decimal::ONE;
let compound_factor = (one + rate_dec).checked_powd(time_dec)
.ok_or("Compound calculation overflow")?;
Ok((principal_dec * compound_factor).to_string())
});
}
fn process_string_results(results: Vec<SteelVal>) -> Result<Value, ExecutionError> {
let mut strings = Vec::new();
for result in results {
if let SteelVal::StringV(s) = result {
strings.push(s.to_string());
} else {
return Err(ExecutionError::TypeConversionError(
format!("Expected string, got {:?}", result)
));
match result {
SteelVal::StringV(s) => strings.push(s.to_string()),
SteelVal::NumV(n) => strings.push(n.to_string()),
SteelVal::IntV(i) => strings.push(i.to_string()),
SteelVal::BoolV(b) => strings.push(b.to_string()),
_ => {
return Err(ExecutionError::TypeConversionError(
format!("Expected string-convertible type, got {:?}", result)
));
}
}
}
Ok(Value::Strings(strings))
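Once the shims are registered, a Steel script can chain them; a hypothetical invocation follows (the execute_script signature is only partially visible in this hunk, so the call shape is assumed):

// Sketch: assumes execute_script(script, target_type, context) with a
// prepared SteelContext `ctx`.
let script = r#"(decimal-round (decimal-mul "19.99" "1.23") 2)"#;
let result = execute_script(script, "STRINGS", ctx)?;
// 19.99 * 1.23 = 24.5877, rounded to 2 places -> "24.59"
// result == Value::Strings(vec!["24.59".to_string()])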

View File

@@ -21,7 +21,8 @@ pub enum FunctionError {
#[derive(Clone)]
pub struct SteelContext {
pub current_table: String,
pub profile_id: i64,
pub schema_id: i64,
pub schema_name: String,
pub row_data: HashMap<String, String>,
pub db_pool: Arc<PgPool>,
}
@@ -30,8 +31,8 @@ impl SteelContext {
pub async fn get_related_table_name(&self, base_name: &str) -> Result<String, FunctionError> {
let table_def = sqlx::query!(
r#"SELECT table_name FROM table_definitions
WHERE profile_id = $1 AND table_name LIKE $2"#,
self.profile_id,
WHERE schema_id = $1 AND table_name LIKE $2"#,
self.schema_id,
format!("%_{}", base_name)
)
.fetch_optional(&*self.db_pool)
@@ -66,7 +67,7 @@ impl SteelContext {
// Add quotes around the table name
sqlx::query_scalar::<_, String>(
&format!("SELECT {} FROM \"{}\" WHERE id = $1", column, actual_table)
&format!("SELECT {} FROM \"{}\".\"{}\" WHERE id = $1", column, self.schema_name, actual_table)
)
.bind(fk_value.parse::<i64>().map_err(|_|
SteelVal::StringV("Invalid foreign key format".into()))?)

View File

@@ -2,7 +2,9 @@
pub mod execution;
pub mod syntax_parser;
pub mod functions;
pub mod decimal_math;
pub use execution::*;
pub use syntax_parser::*;
pub use functions::*;
pub use decimal_math::*;

View File

@@ -1,27 +1,111 @@
// src/steel/server/syntax_parser.rs
use regex::Regex;
use std::collections::HashSet;
pub struct SyntaxParser {
// Existing patterns for column/SQL integration
current_table_column_re: Regex,
different_table_column_re: Regex,
one_to_many_indexed_re: Regex,
sql_integration_re: Regex,
// Simple math operation replacement patterns
math_operators: Vec<(Regex, &'static str)>,
number_literal_re: Regex,
}
impl SyntaxParser {
pub fn new() -> Self {
// Define math operator replacements
let math_operators = vec![
// Basic arithmetic
(Regex::new(r"\(\s*\+\s+").unwrap(), "(decimal-add "),
(Regex::new(r"\(\s*-\s+").unwrap(), "(decimal-sub "),
(Regex::new(r"\(\s*\*\s+").unwrap(), "(decimal-mul "),
(Regex::new(r"\(\s*/\s+").unwrap(), "(decimal-div "),
// Power and advanced operations
(Regex::new(r"\(\s*\^\s+").unwrap(), "(decimal-pow "),
(Regex::new(r"\(\s*\*\*\s+").unwrap(), "(decimal-pow "),
(Regex::new(r"\(\s*pow\s+").unwrap(), "(decimal-pow "),
(Regex::new(r"\(\s*sqrt\s+").unwrap(), "(decimal-sqrt "),
// Logarithmic functions
(Regex::new(r"\(\s*ln\s+").unwrap(), "(decimal-ln "),
(Regex::new(r"\(\s*log\s+").unwrap(), "(decimal-ln "),
(Regex::new(r"\(\s*log10\s+").unwrap(), "(decimal-log10 "),
(Regex::new(r"\(\s*exp\s+").unwrap(), "(decimal-exp "),
// Trigonometric functions
(Regex::new(r"\(\s*sin\s+").unwrap(), "(decimal-sin "),
(Regex::new(r"\(\s*cos\s+").unwrap(), "(decimal-cos "),
(Regex::new(r"\(\s*tan\s+").unwrap(), "(decimal-tan "),
// Comparison operators
(Regex::new(r"\(\s*>\s+").unwrap(), "(decimal-gt "),
(Regex::new(r"\(\s*<\s+").unwrap(), "(decimal-lt "),
(Regex::new(r"\(\s*=\s+").unwrap(), "(decimal-eq "),
(Regex::new(r"\(\s*>=\s+").unwrap(), "(decimal-gte "),
(Regex::new(r"\(\s*<=\s+").unwrap(), "(decimal-lte "),
// Utility functions
(Regex::new(r"\(\s*abs\s+").unwrap(), "(decimal-abs "),
(Regex::new(r"\(\s*min\s+").unwrap(), "(decimal-min "),
(Regex::new(r"\(\s*max\s+").unwrap(), "(decimal-max "),
(Regex::new(r"\(\s*round\s+").unwrap(), "(decimal-round "),
];
SyntaxParser {
current_table_column_re: Regex::new(r"@(\w+)").unwrap(),
different_table_column_re: Regex::new(r"@(\w+)\.(\w+)").unwrap(),
one_to_many_indexed_re: Regex::new(r"@(\w+)\[(\d+)\]\.(\w+)").unwrap(),
sql_integration_re: Regex::new(r#"@sql\((['"])(.*?)['"]\)"#).unwrap(),
// FIXED: match negative numbers and skip already-quoted strings. The
// `regex` crate does not support look-around, so quoted strings are
// matched as a first alternative and left untouched in the replacement
// callback below.
number_literal_re: Regex::new(r#"("[^"]*")|(-?\d+\.?\d*(?:[eE][+-]?\d+)?)"#).unwrap(),
math_operators,
}
}
pub fn parse(&self, script: &str, current_table: &str) -> String {
let mut transformed = script.to_string();
// Step 1: Convert all numeric literals to strings (FIXED to handle negative numbers)
transformed = self.convert_numbers_to_strings(&transformed);
// Step 2: Replace math function calls with decimal equivalents (SIMPLIFIED)
transformed = self.replace_math_functions(&transformed);
// Step 3: Handle existing column and SQL integrations (unchanged)
transformed = self.process_column_integrations(&transformed, current_table);
transformed
}
/// Convert all unquoted numeric literals to quoted strings, leaving
/// literals that are already inside quotes untouched
fn convert_numbers_to_strings(&self, script: &str) -> String {
// Group 1 is an already-quoted string (kept verbatim); group 2 is a bare
// numeric literal (wrapped in quotes).
self.number_literal_re.replace_all(script, |caps: &regex::Captures| {
if let Some(quoted) = caps.get(1) {
quoted.as_str().to_string()
} else {
format!("\"{}\"", &caps[2])
}
}).to_string()
}
/// Replace math function calls with decimal equivalents (SIMPLIFIED)
fn replace_math_functions(&self, script: &str) -> String {
let mut result = script.to_string();
// Apply all math operator replacements
for (pattern, replacement) in &self.math_operators {
result = pattern.replace_all(&result, *replacement).to_string();
}
result
}
/// Process existing column and SQL integrations (unchanged logic)
fn process_column_integrations(&self, script: &str, current_table: &str) -> String {
let mut transformed = script.to_string();
// Process indexed access first to avoid overlap with relationship matches
transformed = self.one_to_many_indexed_re.replace_all(&transformed, |caps: &regex::Captures| {
format!("(steel_get_column_with_index \"{}\" {} \"{}\")",

View File

@@ -1,4 +1,4 @@
// server/src/table_definition/handlers/delete_table.rs
// src/table_definition/handlers/delete_table.rs
use tonic::Status;
use sqlx::PgPool;
use common::proto::multieko2::table_definition::{DeleteTableRequest, DeleteTableResponse};
@@ -10,25 +10,25 @@ pub async fn delete_table(
let mut transaction = db_pool.begin().await
.map_err(|e| Status::internal(format!("Failed to start transaction: {}", e)))?;
// Step 1: Get profile and validate existence
let profile = sqlx::query!(
"SELECT id FROM profiles WHERE name = $1",
// Step 1: Get schema and validate existence
let schema = sqlx::query!(
"SELECT id, name FROM schemas WHERE name = $1",
request.profile_name
)
.fetch_optional(&mut *transaction)
.await
.map_err(|e| Status::internal(format!("Profile lookup failed: {}", e)))?;
.map_err(|e| Status::internal(format!("Schema lookup failed: {}", e)))?;
let profile_id = match profile {
Some(p) => p.id,
let (schema_id, schema_name) = match schema {
Some(s) => (s.id, s.name),
None => return Err(Status::not_found("Profile not found")),
};
// Step 2: Get table definition and validate existence
let table_def = sqlx::query!(
"SELECT id FROM table_definitions
WHERE profile_id = $1 AND table_name = $2",
profile_id,
"SELECT id FROM table_definitions
WHERE schema_id = $1 AND table_name = $2",
schema_id,
request.table_name
)
.fetch_optional(&mut *transaction)
@@ -40,8 +40,9 @@ pub async fn delete_table(
None => return Err(Status::not_found("Table not found in profile")),
};
// Step 3: Drop the actual PostgreSQL table with CASCADE
sqlx::query(&format!(r#"DROP TABLE IF EXISTS "{}" CASCADE"#, request.table_name))
// Step 3: Drop the actual PostgreSQL table with CASCADE (schema-qualified)
let drop_table_sql = format!(r#"DROP TABLE IF EXISTS "{}"."{}" CASCADE"#, schema_name, request.table_name);
sqlx::query(&drop_table_sql)
.execute(&mut *transaction)
.await
.map_err(|e| Status::internal(format!("Table drop failed: {}", e)))?;
@@ -55,23 +56,31 @@ pub async fn delete_table(
.await
.map_err(|e| Status::internal(format!("Definition deletion failed: {}", e)))?;
// Step 5: Check and clean up profile if empty
// Step 5: Check and clean up schema if empty
let remaining = sqlx::query!(
"SELECT COUNT(*) as count FROM table_definitions WHERE profile_id = $1",
profile_id
"SELECT COUNT(*) as count FROM table_definitions WHERE schema_id = $1",
schema_id
)
.fetch_one(&mut *transaction)
.await
.map_err(|e| Status::internal(format!("Count query failed: {}", e)))?;
if remaining.count.unwrap_or(1) == 0 {
// Drop the PostgreSQL schema if empty
let drop_schema_sql = format!(r#"DROP SCHEMA IF EXISTS "{}" CASCADE"#, schema_name);
sqlx::query(&drop_schema_sql)
.execute(&mut *transaction)
.await
.map_err(|e| Status::internal(format!("Schema drop failed: {}", e)))?;
// Delete the schema record
sqlx::query!(
"DELETE FROM profiles WHERE id = $1",
profile_id
"DELETE FROM schemas WHERE id = $1",
schema_id
)
.execute(&mut *transaction)
.await
.map_err(|e| Status::internal(format!("Profile cleanup failed: {}", e)))?;
.map_err(|e| Status::internal(format!("Schema cleanup failed: {}", e)))?;
}
transaction.commit().await

View File

@@ -15,13 +15,15 @@ pub async fn get_profile_tree(
) -> Result<Response<ProfileTreeResponse>, Status> {
let mut profiles = Vec::new();
// Get all profiles
let profile_records = sqlx::query!("SELECT id, name FROM profiles")
.fetch_all(db_pool)
.await
.map_err(|e| Status::internal(format!("Failed to fetch profiles: {}", e)))?;
// Get all schemas (internally changed from profiles to schemas)
let schema_records = sqlx::query!(
"SELECT id, name FROM schemas ORDER BY name"
)
.fetch_all(db_pool)
.await
.map_err(|e| Status::internal(format!("Failed to fetch schemas: {}", e)))?;
for profile in profile_records {
for schema in schema_records {
// Get all tables with their dependencies from the links table
let tables = sqlx::query!(
r#"
@@ -35,15 +37,16 @@ pub async fn get_profile_tree(
'required', tdl.is_required
)
) FILTER (WHERE ltd.id IS NOT NULL),
'[]'
'[]'::json
) as dependencies
FROM table_definitions td
LEFT JOIN table_definition_links tdl ON td.id = tdl.source_table_id
LEFT JOIN table_definitions ltd ON tdl.linked_table_id = ltd.id
WHERE td.profile_id = $1
WHERE td.schema_id = $1
GROUP BY td.id, td.table_name
ORDER BY td.table_name
"#,
profile.id
schema.id
)
.fetch_all(db_pool)
.await
@@ -70,8 +73,9 @@ pub async fn get_profile_tree(
})
.collect();
// External API still returns "profiles" for compatibility
profiles.push(Profile {
name: profile.name,
name: schema.name,
tables: proto_tables
});
}

View File

@@ -1,48 +1,170 @@
// src/table_definition/handlers/post_table_definition.rs
use tonic::Status;
use sqlx::{PgPool, Transaction, Postgres};
use serde_json::json;
use time::OffsetDateTime;
use common::proto::multieko2::table_definition::{PostTableDefinitionRequest, TableDefinitionResponse};
const GENERATED_SCHEMA_NAME: &str = "gen";
const PREDEFINED_FIELD_TYPES: &[(&str, &str)] = &[
("text", "TEXT"),
("psc", "TEXT"),
("phone", "VARCHAR(15)"),
("address", "TEXT"),
("email", "VARCHAR(255)"),
("string", "TEXT"),
("boolean", "BOOLEAN"),
("timestamp", "TIMESTAMPTZ"),
("timestamptz", "TIMESTAMPTZ"),
("time", "TIMESTAMPTZ"),
("money", "NUMERIC(14, 4)"),
("integer", "INTEGER"),
("int", "INTEGER"),
("biginteger", "BIGINT"),
("bigint", "BIGINT"),
("date", "DATE"),
];
fn is_valid_identifier(s: &str) -> bool {
!s.is_empty() &&
s.chars().all(|c| c.is_ascii_alphanumeric() || c == '_') &&
!s.starts_with('_') &&
!s.chars().next().unwrap().is_ascii_digit()
// NEW: Helper function to provide detailed error messages
fn validate_identifier_format(s: &str, identifier_type: &str) -> Result<(), Status> {
if s.is_empty() {
return Err(Status::invalid_argument(format!("{} cannot be empty", identifier_type)));
}
if s.starts_with('_') {
return Err(Status::invalid_argument(format!("{} cannot start with underscore", identifier_type)));
}
if s.chars().next().unwrap().is_ascii_digit() {
return Err(Status::invalid_argument(format!("{} cannot start with a number", identifier_type)));
}
// Check for uppercase letters first so the error message is specific;
// otherwise the generic invalid-character check below would catch them
// and this branch would be unreachable
if s.chars().any(|c| c.is_ascii_uppercase()) {
return Err(Status::invalid_argument(format!(
"{} contains uppercase letters. Only lowercase letters are allowed",
identifier_type
)));
}
// Check for any remaining invalid characters
let invalid_chars: Vec<char> = s.chars()
.filter(|c| !c.is_ascii_lowercase() && !c.is_ascii_digit() && *c != '_')
.collect();
if !invalid_chars.is_empty() {
return Err(Status::invalid_argument(format!(
"{} contains invalid characters: {:?}. Only lowercase letters, numbers, and underscores are allowed",
identifier_type, invalid_chars
)));
}
Ok(())
}
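Expected accept/reject behavior of the validator, sketched with invented inputs:

// Sketch of expected outcomes for validate_identifier_format.
assert!(validate_identifier_format("orders_2025", "Table name").is_ok());
assert!(validate_identifier_format("_orders", "Table name").is_err()); // leading underscore
assert!(validate_identifier_format("2025orders", "Table name").is_err()); // leading digit
assert!(validate_identifier_format("Orders", "Table name").is_err()); // uppercase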
fn sanitize_table_name(s: &str) -> String {
let year = OffsetDateTime::now_utc().year();
let cleaned = s.replace(|c: char| !c.is_ascii_alphanumeric() && c != '_', "")
.trim()
.to_lowercase();
format!("{}_{}", year, cleaned)
fn validate_decimal_number_format(num_str: &str, param_name: &str) -> Result<(), Status> {
if num_str.is_empty() {
return Err(Status::invalid_argument(format!(
"{} cannot be empty",
param_name
)));
}
// Check for explicit signs
if num_str.starts_with('+') || num_str.starts_with('-') {
return Err(Status::invalid_argument(format!(
"{} cannot have explicit positive or negative signs",
param_name
)));
}
// Check for decimal points
if num_str.contains('.') {
return Err(Status::invalid_argument(format!(
"{} must be a whole number (no decimal points)",
param_name
)));
}
// Check for leading zeros (but allow "0" itself)
if num_str.len() > 1 && num_str.starts_with('0') {
let trimmed = num_str.trim_start_matches('0');
let suggestion = if trimmed.is_empty() { "0" } else { trimmed };
return Err(Status::invalid_argument(format!(
"{} cannot have leading zeros (use '{}' instead of '{}')",
param_name,
suggestion,
num_str
)));
}
// Check that all characters are digits
if !num_str.chars().all(|c| c.is_ascii_digit()) {
return Err(Status::invalid_argument(format!(
"{} contains invalid characters. Only digits 0-9 are allowed",
param_name
)));
}
Ok(())
}
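The same style of sketch for the decimal-argument validator:

// Sketch of expected outcomes for validate_decimal_number_format.
assert!(validate_decimal_number_format("10", "precision").is_ok());
assert!(validate_decimal_number_format("0", "scale").is_ok());
assert!(validate_decimal_number_format("+5", "precision").is_err()); // explicit sign
assert!(validate_decimal_number_format("07", "scale").is_err()); // leading zeros
assert!(validate_decimal_number_format("1.5", "scale").is_err()); // not a whole number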
fn sanitize_identifier(s: &str) -> String {
s.replace(|c: char| !c.is_ascii_alphanumeric() && c != '_', "")
.trim()
.to_lowercase()
}
fn map_field_type(field_type: &str) -> Result<String, Status> {
let lower_field_type = field_type.to_lowercase();
fn map_field_type(field_type: &str) -> Result<&str, Status> {
// Special handling for "decimal(precision, scale)"
if lower_field_type.starts_with("decimal(") && lower_field_type.ends_with(')') {
// Extract the part inside the parentheses, e.g., "10, 2"
let args = lower_field_type
.strip_prefix("decimal(")
.and_then(|s| s.strip_suffix(')'))
.unwrap_or(""); // Should always succeed due to the checks above
// Split into precision and scale parts
if let Some((p_str, s_str)) = args.split_once(',') {
let precision_str = p_str.trim();
let scale_str = s_str.trim();
// NEW: Validate format BEFORE parsing
validate_decimal_number_format(precision_str, "precision")?;
validate_decimal_number_format(scale_str, "scale")?;
// Parse precision, returning an error if it's not a valid number
let precision = precision_str.parse::<u32>().map_err(|_| {
Status::invalid_argument("Invalid precision in decimal type")
})?;
// Parse scale, returning an error if it's not a valid number
let scale = scale_str.parse::<u32>().map_err(|_| {
Status::invalid_argument("Invalid scale in decimal type")
})?;
// Add validation based on PostgreSQL rules
if precision < 1 {
return Err(Status::invalid_argument("Precision must be at least 1"));
}
if scale > precision {
return Err(Status::invalid_argument(
"Scale cannot be greater than precision",
));
}
// If everything is valid, build and return the NUMERIC type string
return Ok(format!("NUMERIC({}, {})", precision, scale));
} else {
// The format was wrong, e.g., "decimal(10)" or "decimal()"
return Err(Status::invalid_argument(
"Invalid decimal format. Expected: decimal(precision, scale)",
));
}
}
// If not a decimal, fall back to the predefined list
PREDEFINED_FIELD_TYPES
.iter()
.find(|(key, _)| *key == field_type.to_lowercase().as_str())
.map(|(_, sql_type)| *sql_type)
.ok_or_else(|| Status::invalid_argument(format!("Invalid field type: {}", field_type)))
.find(|(key, _)| *key == lower_field_type.as_str())
.map(|(_, sql_type)| sql_type.to_string()) // Convert to an owned String
.ok_or_else(|| {
Status::invalid_argument(format!(
"Invalid field type: {}",
field_type
))
})
}
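Taken together, the rules above imply mappings like these (a sketch; "flooble" is an invented type name):

// Sketch of expected outcomes for map_field_type.
assert_eq!(map_field_type("decimal(10, 2)").unwrap(), "NUMERIC(10, 2)");
assert_eq!(map_field_type("money").unwrap(), "NUMERIC(14, 4)");
assert!(map_field_type("decimal(2, 5)").is_err()); // scale > precision
assert!(map_field_type("decimal(10)").is_err()); // missing scale
assert!(map_field_type("flooble").is_err()); // unknown type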
fn is_invalid_table_name(table_name: &str) -> bool {
@@ -52,33 +174,65 @@ fn is_invalid_table_name(table_name: &str) -> bool {
table_name == "created_at"
}
fn is_reserved_schema(schema_name: &str) -> bool {
let lower = schema_name.to_lowercase();
lower == "public" ||
lower == "information_schema" ||
lower.starts_with("pg_")
}
pub async fn post_table_definition(
db_pool: &PgPool,
request: PostTableDefinitionRequest,
) -> Result<TableDefinitionResponse, Status> {
let base_name = sanitize_table_name(&request.table_name);
let user_part_cleaned = request.table_name
.replace(|c: char| !c.is_ascii_alphanumeric() && c != '_', "")
.trim_matches('_')
.to_lowercase();
// Create owned copies of the strings after validation
let profile_name = {
let trimmed = request.profile_name.trim();
validate_identifier_format(trimmed, "Profile name")?;
trimmed.to_string()
};
// New validation check
if is_invalid_table_name(&user_part_cleaned) {
return Err(Status::invalid_argument(
"Table name cannot be 'id', 'deleted', 'created_at' or end with '_id'"
));
// Add validation to prevent reserved schemas
if is_reserved_schema(&profile_name) {
return Err(Status::invalid_argument("Profile name is reserved and cannot be used"));
}
if !user_part_cleaned.is_empty() && !is_valid_identifier(&user_part_cleaned) {
return Err(Status::invalid_argument("Invalid table name"));
} else if user_part_cleaned.is_empty() {
return Err(Status::invalid_argument("Table name cannot be empty"));
const MAX_IDENTIFIER_LENGTH: usize = 63;
if profile_name.len() > MAX_IDENTIFIER_LENGTH {
return Err(Status::invalid_argument(format!(
"Profile name '{}' exceeds the {} character limit.",
profile_name,
MAX_IDENTIFIER_LENGTH
)));
}
let table_name = {
let trimmed = request.table_name.trim();
validate_identifier_format(trimmed, "Table name")?;
if trimmed.len() > MAX_IDENTIFIER_LENGTH {
return Err(Status::invalid_argument(format!(
"Table name '{}' exceeds the {} character limit.",
trimmed,
MAX_IDENTIFIER_LENGTH
)));
}
// Check invalid table names on the original input
if is_invalid_table_name(trimmed) {
return Err(Status::invalid_argument(
"Table name cannot be 'id', 'deleted', 'created_at' or end with '_id'"
));
}
trimmed.to_string()
};
let mut tx = db_pool.begin().await
.map_err(|e| Status::internal(format!("Failed to start transaction: {}", e)))?;
match execute_table_definition(&mut tx, request, base_name).await {
match execute_table_definition(&mut tx, request, table_name, profile_name).await {
Ok(response) => {
tx.commit().await
.map_err(|e| Status::internal(format!("Failed to commit transaction: {}", e)))?;
@@ -95,23 +249,42 @@ async fn execute_table_definition(
tx: &mut Transaction<'_, Postgres>,
mut request: PostTableDefinitionRequest,
table_name: String,
profile_name: String,
) -> Result<TableDefinitionResponse, Status> {
let profile = sqlx::query!(
"INSERT INTO profiles (name) VALUES ($1)
// Use the validated profile_name for schema insertion
let schema = sqlx::query!(
"INSERT INTO schemas (name) VALUES ($1)
ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name
RETURNING id",
request.profile_name
profile_name // Use the validated profile name
)
.fetch_one(&mut **tx)
.await
.map_err(|e| Status::internal(format!("Profile error: {}", e)))?;
.map_err(|e| Status::internal(format!("Schema error: {}", e)))?;
// Create PostgreSQL schema if it doesn't exist
let create_schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", profile_name);
sqlx::query(&create_schema_sql)
.execute(&mut **tx)
.await
.map_err(|e| Status::internal(format!("Schema creation failed: {}", e)))?;
let mut links = Vec::new();
let mut seen_tables = std::collections::HashSet::new();
for link in request.links.drain(..) {
// Check for duplicate link
if !seen_tables.insert(link.linked_table_name.clone()) {
return Err(Status::invalid_argument(format!(
"Duplicate link to table '{}'",
link.linked_table_name
)));
}
let linked_table = sqlx::query!(
"SELECT id FROM table_definitions
WHERE profile_id = $1 AND table_name = $2",
profile.id,
WHERE schema_id = $1 AND table_name = $2",
schema.id,
link.linked_table_name
)
.fetch_optional(&mut **tx)
@@ -127,34 +300,40 @@ async fn execute_table_definition(
let mut columns = Vec::new();
for col_def in request.columns.drain(..) {
let col_name = sanitize_identifier(&col_def.name);
if !is_valid_identifier(&col_def.name) {
return Err(Status::invalid_argument("Invalid column name"));
let col_name = col_def.name.trim().to_string();
validate_identifier_format(&col_name, "Column name")?;
if col_name.ends_with("_id") || col_name == "id" || col_name == "deleted" || col_name == "created_at" {
return Err(Status::invalid_argument(format!(
"Column name '{}' cannot be 'id', 'deleted', 'created_at' or end with '_id'",
col_name
)));
}
let sql_type = map_field_type(&col_def.field_type)?;
columns.push(format!("\"{}\" {}", col_name, sql_type));
}
let mut indexes = Vec::new();
for idx in request.indexes.drain(..) {
let idx_name = sanitize_identifier(&idx);
if !is_valid_identifier(&idx) {
return Err(Status::invalid_argument(format!("Invalid index name: {}", idx)));
}
let idx_name = idx.trim().to_string();
validate_identifier_format(&idx_name, "Index name")?;
if !columns.iter().any(|c| c.starts_with(&format!("\"{}\"", idx_name))) {
return Err(Status::invalid_argument(format!("Index column {} not found", idx_name)));
return Err(Status::invalid_argument(format!("Index column '{}' not found", idx_name)));
}
indexes.push(idx_name);
}
let (create_sql, index_sql) = generate_table_sql(tx, &table_name, &columns, &indexes, &links).await?;
let (create_sql, index_sql) = generate_table_sql(tx, &profile_name, &table_name, &columns, &indexes, &links).await?;
// Use schema_id instead of profile_id
let table_def = sqlx::query!(
r#"INSERT INTO table_definitions
(profile_id, table_name, columns, indexes)
(schema_id, table_name, columns, indexes)
VALUES ($1, $2, $3, $4)
RETURNING id"#,
profile.id,
schema.id,
&table_name,
json!(columns),
json!(indexes)
@@ -163,7 +342,8 @@ async fn execute_table_definition(
.await
.map_err(|e| {
if let Some(db_err) = e.as_database_error() {
if db_err.constraint() == Some("idx_table_definitions_profile_table") {
// Update constraint name to match new schema
if db_err.constraint() == Some("idx_table_definitions_schema_table") {
return Status::already_exists("Table already exists in this profile");
}
}
@@ -204,13 +384,13 @@ async fn execute_table_definition(
async fn generate_table_sql(
tx: &mut Transaction<'_, Postgres>,
profile_name: &str,
table_name: &str,
columns: &[String],
indexes: &[String],
links: &[(i64, bool)],
) -> Result<(String, Vec<String>), Status> {
let qualified_table = format!("{}.\"{}\"", GENERATED_SCHEMA_NAME, table_name);
let qualified_table = format!("\"{}\".\"{}\"", profile_name, table_name);
let mut system_columns = vec![
"id BIGSERIAL PRIMARY KEY".to_string(),
"deleted BOOLEAN NOT NULL DEFAULT FALSE".to_string(),
@@ -218,16 +398,13 @@ async fn generate_table_sql(
for (linked_id, required) in links {
let linked_table = get_table_name_by_id(tx, *linked_id).await?;
let qualified_linked_table = format!("{}.\"{}\"", GENERATED_SCHEMA_NAME, linked_table);
let base_name = linked_table.split_once('_')
.map(|(_, rest)| rest)
.unwrap_or(&linked_table)
.to_string();
let null_clause = if *required { "NOT NULL" } else { "" };
let qualified_linked_table = format!("\"{}\".\"{}\"", profile_name, linked_table);
// Simply use the full table name - no truncation!
let null_clause = if *required { "NOT NULL" } else { "" };
system_columns.push(
format!("\"{0}_id\" BIGINT {1} REFERENCES {2}(id)",
base_name, null_clause, qualified_linked_table
format!("\"{}_id\" BIGINT {} REFERENCES {}(id)",
linked_table, null_clause, qualified_linked_table
)
);
}
@@ -247,13 +424,9 @@ async fn generate_table_sql(
let mut all_indexes = Vec::new();
for (linked_id, _) in links {
let linked_table = get_table_name_by_id(tx, *linked_id).await?;
let base_name = linked_table.split_once('_')
.map(|(_, rest)| rest)
.unwrap_or(&linked_table)
.to_string();
all_indexes.push(format!(
"CREATE INDEX \"idx_{}_{}_fk\" ON {} (\"{}_id\")",
table_name, base_name, qualified_table, base_name
table_name, linked_table, qualified_table, linked_table
));
}
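With the base-name truncation removed, a required link from an invented table "invoices" to "customers" in profile "acme" would produce fragments like:

// Sketch: FK column and index now carry the full linked table name.
let fk_column = r#""customers_id" BIGINT NOT NULL REFERENCES "acme"."customers"(id)"#;
let fk_index = r#"CREATE INDEX "idx_invoices_customers_fk" ON "acme"."invoices" ("customers_id")"#;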

View File

@@ -49,7 +49,7 @@ pub async fn post_table_script(
) -> Result<TableScriptResponse, Status> {
// Fetch the table definition
let table_def = sqlx::query!(
r#"SELECT id, table_name, columns, profile_id
r#"SELECT id, table_name, columns, schema_id
FROM table_definitions WHERE id = $1"#,
request.table_definition_id
)
@@ -76,7 +76,7 @@ pub async fn post_table_script(
let script_record = sqlx::query!(
r#"INSERT INTO table_scripts
(table_definitions_id, target_table, target_column,
target_column_type, script, description, profile_id)
target_column_type, script, description, schema_id)
VALUES ($1, $2, $3, $4, $5, $6, $7)
RETURNING id"#,
request.table_definition_id,
@@ -85,7 +85,7 @@ pub async fn post_table_script(
column_type,
parsed_script,
request.description,
table_def.profile_id
table_def.schema_id
)
.fetch_one(db_pool)
.await

View File

@@ -20,11 +20,11 @@ pub async fn get_table_structure(
) -> Result<TableStructureResponse, Status> {
let profile_name = request.profile_name;
let table_name = request.table_name;
let table_schema = "gen";
let table_schema = &profile_name;
// 1. Validate Profile
let profile = sqlx::query!(
"SELECT id FROM profiles WHERE name = $1",
let schema = sqlx::query!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_optional(db_pool)
@@ -36,8 +36,8 @@ pub async fn get_table_structure(
))
})?;
let profile_id = match profile {
Some(p) => p.id,
let schema_id = match schema {
Some(s) => s.id,
None => {
return Err(Status::not_found(format!(
"Profile '{}' not found",
@@ -48,8 +48,8 @@ pub async fn get_table_structure(
// 2. Validate Table within Profile
sqlx::query!(
"SELECT id FROM table_definitions WHERE profile_id = $1 AND table_name = $2",
profile_id,
"SELECT id FROM table_definitions WHERE schema_id = $1 AND table_name = $2",
schema_id,
table_name
)
.fetch_optional(db_pool)

View File

@@ -9,24 +9,24 @@ pub async fn delete_table_data(
request: DeleteTableDataRequest,
) -> Result<DeleteTableDataResponse, Status> {
// Lookup profile
let profile = sqlx::query!(
"SELECT id FROM profiles WHERE name = $1",
let schema = sqlx::query!(
"SELECT id FROM schemas WHERE name = $1",
request.profile_name
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(format!("Profile lookup error: {}", e)))?;
let profile_id = match profile {
Some(p) => p.id,
let schema_id = match schema {
Some(s) => s.id,
None => return Err(Status::not_found("Profile not found")),
};
// Verify table exists in profile
let table_exists = sqlx::query!(
"SELECT 1 AS exists FROM table_definitions
WHERE profile_id = $1 AND table_name = $2",
profile_id,
WHERE schema_id = $1 AND table_name = $2",
schema_id,
request.table_name
)
.fetch_optional(db_pool)
@@ -38,7 +38,12 @@ pub async fn delete_table_data(
}
// Qualify table name with schema
let qualified_table = qualify_table_name_for_data(&request.table_name)?;
let qualified_table = qualify_table_name_for_data(
db_pool,
&request.profile_name,
&request.table_name,
)
.await?;
// Perform soft delete using qualified table name
let query = format!(

View File

@@ -15,21 +15,21 @@ pub async fn get_table_data(
let record_id = request.id;
// Lookup profile
let profile = sqlx::query!(
"SELECT id FROM profiles WHERE name = $1",
let schema = sqlx::query!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(format!("Profile lookup error: {}", e)))?;
let profile_id = profile.ok_or_else(|| Status::not_found("Profile not found"))?.id;
let schema_id = schema.ok_or_else(|| Status::not_found("Profile not found"))?.id;
// Lookup table_definition
let table_def = sqlx::query!(
r#"SELECT id, columns FROM table_definitions
WHERE profile_id = $1 AND table_name = $2"#,
profile_id,
WHERE schema_id = $1 AND table_name = $2"#,
schema_id,
table_name
)
.fetch_optional(db_pool)
@@ -66,11 +66,11 @@ pub async fn get_table_data(
.await
.map_err(|e| Status::internal(format!("Foreign key lookup error: {}", e)))?;
// 2. Build the list of foreign key column names
// 2. Build the list of foreign key column names using full table names
let mut foreign_key_columns = Vec::new();
for fk in fk_columns_query {
let base_name = fk.table_name.split_once('_').map_or(fk.table_name.as_str(), |(_, rest)| rest);
foreign_key_columns.push(format!("{}_id", base_name));
// Use the full table name, not a stripped version
foreign_key_columns.push(format!("{}_id", fk.table_name));
}
// 3. Prepare a complete list of all columns to select
@@ -88,7 +88,12 @@ pub async fn get_table_data(
// --- END OF FIX ---
// Qualify table name with schema
let qualified_table = qualify_table_name_for_data(&table_name)?;
let qualified_table = qualify_table_name_for_data(
db_pool,
&profile_name,
&table_name,
)
.await?;
let sql = format!(
"SELECT {} FROM {} WHERE id = $1 AND deleted = false",

View File

@@ -18,22 +18,22 @@ pub async fn get_table_data_by_position(
return Err(Status::invalid_argument("Position must be at least 1"));
}
let profile = sqlx::query!(
"SELECT id FROM profiles WHERE name = $1",
let schema = sqlx::query!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(format!("Profile lookup error: {}", e)))?;
let profile_id = profile.ok_or_else(|| Status::not_found("Profile not found"))?.id;
let schema_id = schema.ok_or_else(|| Status::not_found("Profile not found"))?.id;
let table_exists = sqlx::query_scalar!(
r#"SELECT EXISTS(
SELECT 1 FROM table_definitions
WHERE profile_id = $1 AND table_name = $2
WHERE schema_id = $1 AND table_name = $2
) AS "exists!""#,
profile_id,
schema_id,
table_name
)
.fetch_one(db_pool)
@@ -45,7 +45,12 @@ pub async fn get_table_data_by_position(
}
// Qualify table name with schema
let qualified_table = qualify_table_name_for_data(&table_name)?;
let qualified_table = qualify_table_name_for_data(
db_pool,
&profile_name,
&table_name,
)
.await?;
let id_result = sqlx::query_scalar(
&format!(

View File

@@ -12,15 +12,15 @@ pub async fn get_table_data_count(
// We still need to verify that the table is logically defined for the profile.
// The schema qualifier handles *how* to access it physically, but this check
// ensures the request is valid in the context of the application's definitions.
- let profile = sqlx::query!(
-     "SELECT id FROM profiles WHERE name = $1",
+ let schema = sqlx::query!(
+     "SELECT id FROM schemas WHERE name = $1",
request.profile_name
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(format!("Profile lookup error for '{}': {}", request.profile_name, e)))?;
- let profile_id = match profile {
+ let schema_id = match schema {
Some(p) => p.id,
None => return Err(Status::not_found(format!("Profile '{}' not found", request.profile_name))),
};
@@ -28,9 +28,9 @@ pub async fn get_table_data_count(
let table_defined_for_profile = sqlx::query_scalar!(
r#"SELECT EXISTS(
SELECT 1 FROM table_definitions
-     WHERE profile_id = $1 AND table_name = $2
- ) AS "exists!" "#, // Added AS "exists!" for clarity with sqlx macro
- profile_id,
+     WHERE schema_id = $1 AND table_name = $2
+ ) AS "exists!" "#,
+ schema_id,
request.table_name
)
.fetch_one(db_pool)
@@ -47,7 +47,12 @@ pub async fn get_table_data_count(
}
// 2. QUALIFY THE TABLE NAME using the imported function
- let qualified_table_name = qualify_table_name_for_data(&request.table_name)?;
+ let qualified_table = qualify_table_name_for_data(
+     db_pool,
+     &request.profile_name,
+     &request.table_name,
+ )
+ .await?;
// 3. USE THE QUALIFIED NAME in the SQL query
let query_sql = format!(
@@ -56,7 +61,7 @@ pub async fn get_table_data_count(
FROM {}
WHERE deleted = FALSE
"#,
- qualified_table_name // Use the schema-qualified name here
+ qualified_table
);
// The rest of the logic remains largely the same, but error messages can be more specific.
@@ -81,14 +86,14 @@ pub async fn get_table_data_count(
// even though it was defined in table_definitions. This is an inconsistency.
return Err(Status::internal(format!(
"Table '{}' is defined but does not physically exist in the database as {}.",
- request.table_name, qualified_table_name
+ request.table_name, qualified_table
)));
}
}
// For other errors, provide a general message.
Err(Status::internal(format!(
"Count query failed for table {}: {}",
- qualified_table_name, e
+ qualified_table, e
)))
}
}

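Distilled, the hunk's control flow is: qualify the name, COUNT over it, and translate Postgres error 42P01 (undefined_table) into a targeted "defined but missing physically" message. A minimal sketch of that pattern, assuming a pre-validated qualified name:

// Minimal sketch of the count-and-classify-errors pattern in this hunk.
async fn count_rows(
    db_pool: &sqlx::PgPool,
    qualified_table: &str,
) -> Result<i64, tonic::Status> {
    let sql = format!("SELECT COUNT(*) FROM {} WHERE deleted = FALSE", qualified_table);
    match sqlx::query_scalar::<_, i64>(&sql).fetch_one(db_pool).await {
        Ok(count) => Ok(count),
        Err(e) => {
            // 42P01 = undefined_table: defined logically, missing physically.
            if let Some(db_err) = e.as_database_error() {
                if db_err.code().as_deref() == Some("42P01") {
                    return Err(tonic::Status::internal(format!(
                        "Table is defined but does not physically exist as {}",
                        qualified_table
                    )));
                }
            }
            Err(tonic::Status::internal(format!("Count query failed: {}", e)))
        }
    }
}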
View File

@@ -7,17 +7,17 @@ use chrono::{DateTime, Utc};
use common::proto::multieko2::tables_data::{PostTableDataRequest, PostTableDataResponse};
use std::collections::HashMap;
use std::sync::Arc;
- use crate::shared::schema_qualifier::qualify_table_name_for_data;
use prost_types::value::Kind;
use rust_decimal::Decimal;
use std::str::FromStr;
use crate::steel::server::execution::{self, Value};
use crate::steel::server::functions::SteelContext;
// Add these imports
use crate::indexer::{IndexCommand, IndexCommandData};
use tokio::sync::mpsc;
use tracing::error;
// MODIFIED: Function signature now accepts the indexer sender
pub async fn post_table_data(
db_pool: &PgPool,
request: PostTableDataRequest,
@@ -25,28 +25,21 @@ pub async fn post_table_data(
) -> Result<PostTableDataResponse, Status> {
let profile_name = request.profile_name;
let table_name = request.table_name;
- let mut data = HashMap::new();
- for (key, value) in request.data {
-     data.insert(key, value.trim().to_string());
- }
// Lookup profile
- let profile = sqlx::query!(
-     "SELECT id FROM profiles WHERE name = $1",
+ let schema = sqlx::query!(
+     "SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(format!("Profile lookup error: {}", e)))?;
- let profile_id = profile.ok_or_else(|| Status::not_found("Profile not found"))?.id;
+ let schema_id = schema.ok_or_else(|| Status::not_found("Profile not found"))?.id;
// Lookup table_definition
let table_def = sqlx::query!(
r#"SELECT id, columns FROM table_definitions
WHERE profile_id = $1 AND table_name = $2"#,
profile_id,
WHERE schema_id = $1 AND table_name = $2"#,
schema_id,
table_name
)
.fetch_optional(db_pool)
@@ -55,7 +48,6 @@ pub async fn post_table_data(
let table_def = table_def.ok_or_else(|| Status::not_found("Table not found"))?;
// Parse columns from JSON
let columns_json: Vec<String> = serde_json::from_value(table_def.columns.clone())
.map_err(|e| Status::internal(format!("Column parsing error: {}", e)))?;
@@ -70,7 +62,6 @@ pub async fn post_table_data(
columns.push((name, sql_type));
}
// Get all foreign key columns for this table
let fk_columns = sqlx::query!(
r#"SELECT ltd.table_name
FROM table_definition_links tdl
@@ -82,26 +73,41 @@ pub async fn post_table_data(
.await
.map_err(|e| Status::internal(format!("Foreign key lookup error: {}", e)))?;
// Build system columns with foreign keys
let mut system_columns = vec!["deleted".to_string()];
for fk in fk_columns {
-     let base_name = fk.table_name.split_once('_').map_or(fk.table_name.as_str(), |(_, rest)| rest);
-     system_columns.push(format!("{}_id", base_name));
+     system_columns.push(format!("{}_id", fk.table_name));
}
// Convert to HashSet for faster lookups
let system_columns_set: std::collections::HashSet<_> = system_columns.iter().map(|s| s.as_str()).collect();
// Validate all data columns
let user_columns: Vec<&String> = columns.iter().map(|(name, _)| name).collect();
- for key in data.keys() {
+ for key in request.data.keys() {
if !system_columns_set.contains(key.as_str()) &&
!user_columns.contains(&&key.to_string()) {
return Err(Status::invalid_argument(format!("Invalid column: {}", key)));
}
}
// Validate Steel scripts
let mut string_data_for_scripts = HashMap::new();
for (key, proto_value) in &request.data {
let str_val = match &proto_value.kind {
Some(Kind::StringValue(s)) => {
let trimmed = s.trim();
if trimmed.is_empty() {
continue;
}
trimmed.to_string()
},
Some(Kind::NumberValue(n)) => n.to_string(),
Some(Kind::BoolValue(b)) => b.to_string(),
Some(Kind::NullValue(_)) | None => continue,
Some(Kind::StructValue(_)) | Some(Kind::ListValue(_)) => {
return Err(Status::invalid_argument(format!("Unsupported type for script validation in column '{}'", key)));
}
};
string_data_for_scripts.insert(key.clone(), str_val);
}
let scripts = sqlx::query!(
"SELECT target_column, script FROM table_scripts WHERE table_definitions_id = $1",
table_def.id
@@ -113,21 +119,19 @@ pub async fn post_table_data(
for script_record in scripts {
let target_column = script_record.target_column;
// Ensure target column exists in submitted data
- let user_value = data.get(&target_column)
+ let user_value = string_data_for_scripts.get(&target_column)
.ok_or_else(|| Status::invalid_argument(
format!("Script target column '{}' is required", target_column)
))?;
// Create execution context
let context = SteelContext {
- current_table: table_name.clone(), // Keep base name for scripts
- profile_id,
- row_data: data.clone(),
+ current_table: table_name.clone(),
+ schema_id,
+ schema_name: profile_name.clone(),
+ row_data: string_data_for_scripts.clone(),
db_pool: Arc::new(db_pool.clone()),
};
// Execute validation script
let script_result = execution::execute_script(
script_record.script,
"STRINGS",
@@ -138,7 +142,6 @@ pub async fn post_table_data(
format!("Script execution failed for '{}': {}", target_column, e)
))?;
// Validate script output
let Value::Strings(mut script_output) = script_result else {
return Err(Status::internal("Script must return string values"));
};
@@ -154,17 +157,16 @@ pub async fn post_table_data(
}
}
// Prepare SQL parameters
let mut params = PgArguments::default();
let mut columns_list = Vec::new();
let mut placeholders = Vec::new();
let mut param_idx = 1;
- for (col, value) in data {
+ for (col, proto_value) in request.data {
let sql_type = if system_columns_set.contains(col.as_str()) {
match col.as_str() {
"deleted" => "BOOLEAN",
_ if col.ends_with("_id") => "BIGINT", // Handle foreign keys
_ if col.ends_with("_id") => "BIGINT",
_ => return Err(Status::invalid_argument("Invalid system column")),
}
} else {
@@ -174,38 +176,122 @@ pub async fn post_table_data(
.ok_or_else(|| Status::invalid_argument(format!("Column not found: {}", col)))?
};
- match sql_type {
-     "TEXT" | "VARCHAR(15)" | "VARCHAR(255)" => {
-         if let Some(max_len) = sql_type.strip_prefix("VARCHAR(")
-             .and_then(|s| s.strip_suffix(')'))
-             .and_then(|s| s.parse::<usize>().ok())
-         {
-             if value.len() > max_len {
-                 return Err(Status::internal(format!("Value too long for {}", col)));
-             }
-         }
-         params.add(value)
-             .map_err(|e| Status::invalid_argument(format!("Failed to add text parameter for {}: {}", col, e)))?;
-     },
-     "BOOLEAN" => {
-         let val = value.parse::<bool>()
-             .map_err(|_| Status::invalid_argument(format!("Invalid boolean for {}", col)))?;
-         params.add(val)
-             .map_err(|e| Status::invalid_argument(format!("Failed to add boolean parameter for {}: {}", col, e)))?;
-     },
-     "TIMESTAMPTZ" => {
-         let dt = DateTime::parse_from_rfc3339(&value)
-             .map_err(|_| Status::invalid_argument(format!("Invalid timestamp for {}", col)))?;
-         params.add(dt.with_timezone(&Utc))
-             .map_err(|e| Status::invalid_argument(format!("Failed to add timestamp parameter for {}: {}", col, e)))?;
-     },
-     "BIGINT" => {
-         let val = value.parse::<i64>()
-             .map_err(|_| Status::invalid_argument(format!("Invalid integer for {}", col)))?;
-         params.add(val)
-             .map_err(|e| Status::invalid_argument(format!("Failed to add integer parameter for {}: {}", col, e)))?;
-     },
-     _ => return Err(Status::invalid_argument(format!("Unsupported type {}", sql_type))),
- }
+ let kind = match &proto_value.kind {
+     None | Some(Kind::NullValue(_)) => {
+         match sql_type {
+             "BOOLEAN" => params.add(None::<bool>),
+             "TEXT" => params.add(None::<String>),
+             "TIMESTAMPTZ" => params.add(None::<DateTime<Utc>>),
+             "BIGINT" => params.add(None::<i64>),
+             "INTEGER" => params.add(None::<i32>),
+             s if s.starts_with("NUMERIC") => params.add(None::<Decimal>),
+             _ => return Err(Status::invalid_argument(format!("Unsupported type for null value: {}", sql_type))),
+         }.map_err(|e| Status::internal(format!("Failed to add null parameter for {}: {}", col, e)))?;
+         columns_list.push(format!("\"{}\"", col));
+         placeholders.push(format!("${}", param_idx));
+         param_idx += 1;
+         continue;
+     }
+     Some(k) => k,
+ };
+ if sql_type == "TEXT" {
+     if let Kind::StringValue(value) = kind {
+         let trimmed_value = value.trim();
+         if trimmed_value.is_empty() {
+             params.add(None::<String>).map_err(|e| Status::internal(format!("Failed to add null parameter for {}: {}", col, e)))?;
+         } else {
+             if col == "telefon" && trimmed_value.len() > 15 {
+                 return Err(Status::internal(format!("Value too long for {}", col)));
+             }
+             params.add(trimmed_value).map_err(|e| Status::invalid_argument(format!("Failed to add text parameter for {}: {}", col, e)))?;
+         }
+     } else {
+         return Err(Status::invalid_argument(format!("Expected string for column '{}'", col)));
+     }
} else if sql_type == "BOOLEAN" {
if let Kind::BoolValue(val) = kind {
params.add(val).map_err(|e| Status::invalid_argument(format!("Failed to add boolean parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected boolean for column '{}'", col)));
}
} else if sql_type == "TIMESTAMPTZ" {
if let Kind::StringValue(value) = kind {
let dt = DateTime::parse_from_rfc3339(value).map_err(|_| Status::invalid_argument(format!("Invalid timestamp for {}", col)))?;
params.add(dt.with_timezone(&Utc)).map_err(|e| Status::invalid_argument(format!("Failed to add timestamp parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected ISO 8601 string for column '{}'", col)));
}
} else if sql_type == "BIGINT" {
if let Kind::NumberValue(val) = kind {
if val.fract() != 0.0 {
return Err(Status::invalid_argument(format!("Expected integer for column '{}', but got a float", col)));
}
// Simple universal check: try the conversion and verify it's reversible
// This handles ALL edge cases: infinity, NaN, overflow, underflow, precision loss
let as_i64 = *val as i64;
if (as_i64 as f64) != *val {
return Err(Status::invalid_argument(format!("Integer value out of range for BIGINT column '{}'", col)));
}
params.add(as_i64).map_err(|e| Status::invalid_argument(format!("Failed to add bigint parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected number for column '{}'", col)));
}
} else if sql_type == "INTEGER" {
if let Kind::NumberValue(val) = kind {
if val.fract() != 0.0 {
return Err(Status::invalid_argument(format!("Expected integer for column '{}', but got a float", col)));
}
// Simple universal check: try the conversion and verify it's reversible
// This handles ALL edge cases: infinity, NaN, overflow, underflow, precision loss
let as_i32 = *val as i32;
if (as_i32 as f64) != *val {
return Err(Status::invalid_argument(format!("Integer value out of range for INTEGER column '{}'", col)));
}
params.add(as_i32).map_err(|e| Status::invalid_argument(format!("Failed to add integer parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected number for column '{}'", col)));
}
} else if sql_type.starts_with("NUMERIC") {
// MODIFIED: This block is now stricter.
let decimal_val = match kind {
Kind::StringValue(s) => {
let trimmed = s.trim();
if trimmed.is_empty() {
None // Treat empty string as NULL
} else {
// This is the only valid path: parse from a string.
Some(Decimal::from_str(trimmed).map_err(|_| {
Status::invalid_argument(format!(
"Invalid decimal string format for column '{}': {}",
col, s
))
})?)
}
}
// CATCH-ALL: Reject NumberValue, BoolValue, etc. for NUMERIC fields.
_ => {
return Err(Status::invalid_argument(format!(
"Expected a string representation for decimal column '{}', but received a different type.",
col
)));
}
};
params.add(decimal_val).map_err(|e| {
Status::invalid_argument(format!(
"Failed to add decimal parameter for {}: {}",
col, e
))
})?;
} else {
return Err(Status::invalid_argument(format!("Unsupported type {}", sql_type)));
}
columns_list.push(format!("\"{}\"", col));
@@ -217,8 +303,12 @@ pub async fn post_table_data(
return Err(Status::invalid_argument("No valid columns to insert"));
}
// Qualify table name with schema
- let qualified_table = qualify_table_name_for_data(&table_name)?;
+ let qualified_table = crate::shared::schema_qualifier::qualify_table_name_for_data(
+     db_pool,
+     &profile_name,
+     &table_name,
+ )
+ .await?;
let sql = format!(
"INSERT INTO {} ({}) VALUES ({}) RETURNING id",
@@ -227,7 +317,6 @@ pub async fn post_table_data(
placeholders.join(", ")
);
// Execute query with enhanced error handling
let result = sqlx::query_scalar_with::<_, i64, _>(&sql, params)
.fetch_one(db_pool)
.await;
@@ -235,8 +324,13 @@ pub async fn post_table_data(
let inserted_id = match result {
Ok(id) => id,
Err(e) => {
// Handle "relation does not exist" error specifically
if let Some(db_err) = e.as_database_error() {
if db_err.code() == Some(std::borrow::Cow::Borrowed("22P02")) ||
db_err.code() == Some(std::borrow::Cow::Borrowed("22003")) {
return Err(Status::invalid_argument(format!(
"Numeric field overflow or invalid format. Check precision and scale. Details: {}", db_err.message()
)));
}
if db_err.code() == Some(std::borrow::Cow::Borrowed("42P01")) {
return Err(Status::internal(format!(
"Table '{}' is defined but does not physically exist in the database as {}",
@@ -248,15 +342,12 @@ pub async fn post_table_data(
}
};
- // After a successful insert, send a command to the indexer.
let command = IndexCommand::AddOrUpdate(IndexCommandData {
table_name: table_name.clone(),
row_id: inserted_id,
});
if let Err(e) = indexer_tx.send(command).await {
- // If sending fails, the DB is updated but the index will be stale.
- // This is a critical situation to log and monitor.
error!(
"CRITICAL: DB insert for table '{}' (id: {}) succeeded but failed to queue for indexing: {}. Search index is now inconsistent.",
table_name, inserted_id, e

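The BIGINT and INTEGER branches above rely on a single reversible-cast comparison instead of explicit bound checks. A standalone illustration of why that one comparison covers the NaN, infinity and overflow cases:

// Standalone illustration of the reversible-cast check used above.
fn f64_to_i64_exact(val: f64) -> Option<i64> {
    if val.fract() != 0.0 {
        return None; // not an integer (also catches NaN and infinity)
    }
    let as_i64 = val as i64; // saturating cast: overflow clamps, NaN becomes 0
    // If the cast lost information, casting back cannot reproduce the input.
    if (as_i64 as f64) == val { Some(as_i64) } else { None }
}

fn main() {
    assert_eq!(f64_to_i64_exact(42.0), Some(42));
    assert_eq!(f64_to_i64_exact(1.5), None);
    assert_eq!(f64_to_i64_exact(f64::NAN), None);
    assert_eq!(f64_to_i64_exact(1e300), None); // clamps to i64::MAX, round-trip fails
}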
View File

@@ -1,52 +1,56 @@
// src/tables_data/handlers/put_table_data.rs
use tonic::Status;
- use sqlx::{PgPool, Arguments, Postgres};
+ use sqlx::{PgPool, Arguments};
use sqlx::postgres::PgArguments;
use chrono::{DateTime, Utc};
use common::proto::multieko2::tables_data::{PutTableDataRequest, PutTableDataResponse};
use std::collections::HashMap;
- use crate::shared::schema_qualifier::qualify_table_name_for_data; // Import schema qualifier
+ use std::sync::Arc;
+ use prost_types::value::Kind;
+ use rust_decimal::Decimal;
+ use std::str::FromStr;
+ use crate::steel::server::execution::{self, Value};
+ use crate::steel::server::functions::SteelContext;
+ use crate::indexer::{IndexCommand, IndexCommandData};
+ use tokio::sync::mpsc;
+ use tracing::error;
pub async fn put_table_data(
db_pool: &PgPool,
request: PutTableDataRequest,
indexer_tx: &mpsc::Sender<IndexCommand>,
) -> Result<PutTableDataResponse, Status> {
let profile_name = request.profile_name;
let table_name = request.table_name;
let record_id = request.id;
- // Preprocess and validate data
- let mut processed_data = HashMap::new();
- let mut null_fields = Vec::new();
- // CORRECTED: Generic handling for all fields.
- // Any field with an empty string will be added to the null_fields list.
- // The special, hardcoded logic for "firma" has been removed.
- for (key, value) in request.data {
-     let trimmed = value.trim().to_string();
-     if trimmed.is_empty() {
-         null_fields.push(key);
-     } else {
-         processed_data.insert(key, trimmed);
-     }
- }
+ // An update with no fields is a no-op; we can return success early.
+ if request.data.is_empty() {
+     return Ok(PutTableDataResponse {
+         success: true,
+         message: "No fields to update.".into(),
+         updated_id: record_id,
+     });
+ }
- // Lookup profile
- let profile = sqlx::query!(
-     "SELECT id FROM profiles WHERE name = $1",
+ // --- Start of logic copied and adapted from post_table_data ---
+ let schema = sqlx::query!(
+     "SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(format!("Profile lookup error: {}", e)))?;
- let profile_id = profile.ok_or_else(|| Status::not_found("Profile not found"))?.id;
+ let schema_id = schema.ok_or_else(|| Status::not_found("Profile not found"))?.id;
// Lookup table_definition
let table_def = sqlx::query!(
r#"SELECT id, columns FROM table_definitions
WHERE profile_id = $1 AND table_name = $2"#,
profile_id,
WHERE schema_id = $1 AND table_name = $2"#,
schema_id,
table_name
)
.fetch_optional(db_pool)
@@ -55,7 +59,6 @@ pub async fn put_table_data(
let table_def = table_def.ok_or_else(|| Status::not_found("Table not found"))?;
// Parse columns from JSON
let columns_json: Vec<String> = serde_json::from_value(table_def.columns.clone())
.map_err(|e| Status::internal(format!("Column parsing error: {}", e)))?;
@@ -70,130 +73,287 @@ pub async fn put_table_data(
columns.push((name, sql_type));
}
// CORRECTED: "firma" is not a system column.
// It should be treated as a user-defined column.
let system_columns = ["deleted"];
let user_columns: Vec<&String> = columns.iter().map(|(name, _)| name).collect();
let fk_columns = sqlx::query!(
r#"SELECT ltd.table_name
FROM table_definition_links tdl
JOIN table_definitions ltd ON tdl.linked_table_id = ltd.id
WHERE tdl.source_table_id = $1"#,
table_def.id
)
.fetch_all(db_pool)
.await
.map_err(|e| Status::internal(format!("Foreign key lookup error: {}", e)))?;
// Validate input columns
for key in processed_data.keys() {
if !system_columns.contains(&key.as_str()) && !user_columns.contains(&key) {
let mut system_columns = vec!["deleted".to_string()];
for fk in fk_columns {
system_columns.push(format!("{}_id", fk.table_name));
}
let system_columns_set: std::collections::HashSet<_> = system_columns.iter().map(|s| s.as_str()).collect();
let user_columns: Vec<&String> = columns.iter().map(|(name, _)| name).collect();
for key in request.data.keys() {
if !system_columns_set.contains(key.as_str()) &&
!user_columns.contains(&&key.to_string()) {
return Err(Status::invalid_argument(format!("Invalid column: {}", key)));
}
}
// Prepare SQL parameters
let mut string_data_for_scripts = HashMap::new();
for (key, proto_value) in &request.data {
let str_val = match &proto_value.kind {
Some(Kind::StringValue(s)) => {
let trimmed = s.trim();
if trimmed.is_empty() {
continue;
}
trimmed.to_string()
},
Some(Kind::NumberValue(n)) => n.to_string(),
Some(Kind::BoolValue(b)) => b.to_string(),
Some(Kind::NullValue(_)) | None => continue,
Some(Kind::StructValue(_)) | Some(Kind::ListValue(_)) => {
return Err(Status::invalid_argument(format!("Unsupported type for script validation in column '{}'", key)));
}
};
string_data_for_scripts.insert(key.clone(), str_val);
}
let scripts = sqlx::query!(
"SELECT target_column, script FROM table_scripts WHERE table_definitions_id = $1",
table_def.id
)
.fetch_all(db_pool)
.await
.map_err(|e| Status::internal(format!("Failed to fetch scripts: {}", e)))?;
for script_record in scripts {
let target_column = script_record.target_column;
if let Some(user_value) = string_data_for_scripts.get(&target_column) {
let context = SteelContext {
current_table: table_name.clone(),
schema_id,
schema_name: profile_name.clone(),
row_data: string_data_for_scripts.clone(),
db_pool: Arc::new(db_pool.clone()),
};
let script_result = execution::execute_script(
script_record.script,
"STRINGS",
Arc::new(db_pool.clone()),
context,
)
.map_err(|e| Status::invalid_argument(
format!("Script execution failed for '{}': {}", target_column, e)
))?;
let Value::Strings(mut script_output) = script_result else {
return Err(Status::internal("Script must return string values"));
};
let expected_value = script_output.pop()
.ok_or_else(|| Status::internal("Script returned no values"))?;
if user_value != &expected_value {
return Err(Status::invalid_argument(format!(
"Validation failed for column '{}': Expected '{}', Got '{}'",
target_column, expected_value, user_value
)));
}
}
}
let mut params = PgArguments::default();
let mut set_clauses = Vec::new();
let mut param_idx = 1;
- // Add data parameters for non-empty fields
- for (col, value) in &processed_data {
-     // CORRECTED: The logic for "firma" is removed from this match.
-     // It will now fall through to the `else` block and have its type
-     // correctly looked up from the `columns` vector.
-     let sql_type = if system_columns.contains(&col.as_str()) {
+ for (col, proto_value) in request.data {
+     let sql_type = if system_columns_set.contains(col.as_str()) {
match col.as_str() {
"deleted" => "BOOLEAN",
_ if col.ends_with("_id") => "BIGINT",
_ => return Err(Status::invalid_argument("Invalid system column")),
}
} else {
columns.iter()
-     .find(|(name, _)| name == col)
+     .find(|(name, _)| name == &col)
.map(|(_, sql_type)| sql_type.as_str())
.ok_or_else(|| Status::invalid_argument(format!("Column not found: {}", col)))?
};
- match sql_type {
-     "TEXT" | "VARCHAR(15)" | "VARCHAR(255)" => {
-         if let Some(max_len) = sql_type.strip_prefix("VARCHAR(")
-             .and_then(|s| s.strip_suffix(')'))
-             .and_then(|s| s.parse::<usize>().ok())
-         {
-             if value.len() > max_len {
-                 return Err(Status::internal(format!("Value too long for {}", col)));
-             }
-         }
-         params.add(value)
-             .map_err(|e| Status::internal(format!("Failed to add text parameter for {}: {}", col, e)))?;
-     },
-     "BOOLEAN" => {
-         let val = value.parse::<bool>()
-             .map_err(|_| Status::invalid_argument(format!("Invalid boolean for {}", col)))?;
-         params.add(val)
-             .map_err(|e| Status::internal(format!("Failed to add boolean parameter for {}: {}", col, e)))?;
-     },
-     "TIMESTAMPTZ" => {
-         let dt = DateTime::parse_from_rfc3339(value)
-             .map_err(|_| Status::invalid_argument(format!("Invalid timestamp for {}", col)))?;
-         params.add(dt.with_timezone(&Utc))
-             .map_err(|e| Status::internal(format!("Failed to add timestamp parameter for {}: {}", col, e)))?;
-     },
-     // ADDED: BIGINT handling for completeness, if needed for other columns.
-     "BIGINT" => {
-         let val = value.parse::<i64>()
-             .map_err(|_| Status::invalid_argument(format!("Invalid integer for {}", col)))?;
-         params.add(val)
-             .map_err(|e| Status::internal(format!("Failed to add integer parameter for {}: {}", col, e)))?;
-     },
-     _ => return Err(Status::invalid_argument(format!("Unsupported type {}", sql_type))),
- }
+ let kind = match &proto_value.kind {
+     None | Some(Kind::NullValue(_)) => {
+         match sql_type {
+             "BOOLEAN" => params.add(None::<bool>),
+             "TEXT" => params.add(None::<String>),
+             "TIMESTAMPTZ" => params.add(None::<DateTime<Utc>>),
+             "BIGINT" => params.add(None::<i64>),
+             "INTEGER" => params.add(None::<i32>),
+             s if s.starts_with("NUMERIC") => params.add(None::<Decimal>),
+             _ => return Err(Status::invalid_argument(format!("Unsupported type for null value: {}", sql_type))),
+         }.map_err(|e| Status::internal(format!("Failed to add null parameter for {}: {}", col, e)))?;
+         set_clauses.push(format!("\"{}\" = ${}", col, param_idx));
+         param_idx += 1;
+         continue;
+     }
+     Some(k) => k,
+ };
+ if sql_type == "TEXT" {
+     if let Kind::StringValue(value) = kind {
+         let trimmed_value = value.trim();
+         if trimmed_value.is_empty() {
+             params.add(None::<String>).map_err(|e| Status::internal(format!("Failed to add null parameter for {}: {}", col, e)))?;
+         } else {
+             if col == "telefon" && trimmed_value.len() > 15 {
+                 return Err(Status::internal(format!("Value too long for {}", col)));
+             }
+             params.add(trimmed_value).map_err(|e| Status::invalid_argument(format!("Failed to add text parameter for {}: {}", col, e)))?;
+         }
+     } else {
+         return Err(Status::invalid_argument(format!("Expected string for column '{}'", col)));
+     }
} else if sql_type == "BOOLEAN" {
if let Kind::BoolValue(val) = kind {
params.add(val).map_err(|e| Status::invalid_argument(format!("Failed to add boolean parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected boolean for column '{}'", col)));
}
} else if sql_type == "TIMESTAMPTZ" {
if let Kind::StringValue(value) = kind {
let dt = DateTime::parse_from_rfc3339(value).map_err(|_| Status::invalid_argument(format!("Invalid timestamp for {}", col)))?;
params.add(dt.with_timezone(&Utc)).map_err(|e| Status::invalid_argument(format!("Failed to add timestamp parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected ISO 8601 string for column '{}'", col)));
}
} else if sql_type == "BIGINT" {
if let Kind::NumberValue(val) = kind {
if val.fract() != 0.0 {
return Err(Status::invalid_argument(format!("Expected integer for column '{}', but got a float", col)));
}
let as_i64 = *val as i64;
if (as_i64 as f64) != *val {
return Err(Status::invalid_argument(format!("Integer value out of range for BIGINT column '{}'", col)));
}
params.add(as_i64).map_err(|e| Status::invalid_argument(format!("Failed to add bigint parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected number for column '{}'", col)));
}
} else if sql_type == "INTEGER" {
if let Kind::NumberValue(val) = kind {
if val.fract() != 0.0 {
return Err(Status::invalid_argument(format!("Expected integer for column '{}', but got a float", col)));
}
let as_i32 = *val as i32;
if (as_i32 as f64) != *val {
return Err(Status::invalid_argument(format!("Integer value out of range for INTEGER column '{}'", col)));
}
params.add(as_i32).map_err(|e| Status::invalid_argument(format!("Failed to add integer parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected number for column '{}'", col)));
}
} else if sql_type.starts_with("NUMERIC") {
let decimal_val = match kind {
Kind::StringValue(s) => {
let trimmed = s.trim();
if trimmed.is_empty() {
None
} else {
Some(Decimal::from_str(trimmed).map_err(|_| {
Status::invalid_argument(format!(
"Invalid decimal string format for column '{}': {}",
col, s
))
})?)
}
}
_ => {
return Err(Status::invalid_argument(format!(
"Expected a string representation for decimal column '{}', but received a different type.",
col
)));
}
};
params.add(decimal_val).map_err(|e| {
Status::invalid_argument(format!(
"Failed to add decimal parameter for {}: {}",
col, e
))
})?;
} else {
return Err(Status::invalid_argument(format!("Unsupported type {}", sql_type)));
}
set_clauses.push(format!("\"{}\" = ${}", col, param_idx));
param_idx += 1;
}
- // Add NULL clauses for empty fields
- for field in null_fields {
-     // Make sure the field is valid
-     if !system_columns.contains(&field.as_str()) && !user_columns.contains(&&field) {
-         return Err(Status::invalid_argument(format!("Invalid column to set NULL: {}", field)));
-     }
-     set_clauses.push(format!("\"{}\" = NULL", field));
- }
+ // --- End of copied logic ---
// Ensure we have at least one field to update
if set_clauses.is_empty() {
return Err(Status::invalid_argument("No valid fields to update"));
return Ok(PutTableDataResponse {
success: true,
message: "No valid fields to update after processing.".into(),
updated_id: record_id,
});
}
- // Add ID parameter at the end
- params.add(record_id)
-     .map_err(|e| Status::internal(format!("Failed to add record_id parameter: {}", e)))?;
// Qualify table name with schema
- let qualified_table = qualify_table_name_for_data(&table_name)?;
+ let qualified_table = crate::shared::schema_qualifier::qualify_table_name_for_data(
+     db_pool,
+     &profile_name,
+     &table_name,
+ )
+ .await?;
let set_clause = set_clauses.join(", ");
let sql = format!(
"UPDATE {} SET {} WHERE id = ${} AND deleted = FALSE RETURNING id",
"UPDATE {} SET {} WHERE id = ${} RETURNING id",
qualified_table,
set_clause,
param_idx
);
- let result = sqlx::query_scalar_with::<Postgres, i64, _>(&sql, params)
+ params.add(record_id).map_err(|e| Status::internal(format!("Failed to add record_id parameter: {}", e)))?;
+ let result = sqlx::query_scalar_with::<_, i64, _>(&sql, params)
.fetch_optional(db_pool)
.await;
- match result {
-     Ok(Some(updated_id)) => Ok(PutTableDataResponse {
-         success: true,
-         message: "Data updated successfully".into(),
-         updated_id,
-     }),
-     Ok(None) => Err(Status::not_found("Record not found or already deleted")),
+ let updated_id = match result {
+     Ok(Some(id)) => id,
+     Ok(None) => return Err(Status::not_found("Record not found")),
Err(e) => {
// Handle "relation does not exist" error specifically
if let Some(db_err) = e.as_database_error() {
if db_err.code() == Some(std::borrow::Cow::Borrowed("42P01")) {
return Err(Status::internal(format!(
"Table '{}' is defined but does not physically exist in the database as {}",
table_name, qualified_table
if db_err.code() == Some(std::borrow::Cow::Borrowed("22P02")) ||
db_err.code() == Some(std::borrow::Cow::Borrowed("22003")) {
return Err(Status::invalid_argument(format!(
"Numeric field overflow or invalid format. Check precision and scale. Details: {}", db_err.message()
)));
}
}
Err(Status::internal(format!("Update failed: {}", e)))
return Err(Status::internal(format!("Update failed: {}", e)));
}
};
let command = IndexCommand::AddOrUpdate(IndexCommandData {
table_name: table_name.clone(),
row_id: updated_id,
});
if let Err(e) = indexer_tx.send(command).await {
error!(
"CRITICAL: DB update for table '{}' (id: {}) succeeded but failed to queue for indexing: {}. Search index is now inconsistent.",
table_name, updated_id, e
);
}
Ok(PutTableDataResponse {
success: true,
message: "Data updated successfully".into(),
updated_id,
})
}

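Both write paths now insist that NUMERIC values arrive as strings and go through rust_decimal, so no binary-float rounding can leak into stored amounts. A tiny self-contained illustration of the rule:

// Why NUMERIC columns only accept strings: rust_decimal parses the text exactly.
use rust_decimal::Decimal;
use std::str::FromStr;

fn main() {
    // The string round-trips without drift...
    assert_eq!(Decimal::from_str("0.1").unwrap().to_string(), "0.1");
    // ...while printing the closest f64 to 0.1 at full precision exposes the
    // drift that motivates rejecting Kind::NumberValue for NUMERIC fields.
    assert_eq!(format!("{:.20}", 0.1f64), "0.10000000000000000555");
    assert!(Decimal::from_str("not-a-number").is_err());
}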
View File

@@ -1,58 +0,0 @@
POST
grpcurl -plaintext -d '{
"adresar_id": 1,
"c_dokladu": "DOC123",
"datum": "01:10:2023",
"c_faktury": "INV123",
"obsah": "Sample content",
"stredisko": "Center A",
"c_uctu": "ACC123",
"md": "MD123",
"identif": "ID123",
"poznanka": "Sample note",
"firma": "AAA"
}' localhost:50051 multieko2.uctovnictvo.Uctovnictvo/PostUctovnictvo
{
"id": "3",
"adresarId": "1",
"cDokladu": "DOC123",
"datum": "2023-10-01",
"cFaktury": "INV123",
"obsah": "Sample content",
"stredisko": "Center A",
"cUctu": "ACC123",
"md": "MD123",
"identif": "ID123",
"poznanka": "Sample note",
"firma": "AAA"
}
PUT
grpcurl -plaintext -d '{
"id": '1',
"adresar_id": 1,
"c_dokladu": "UPDATED-DOC",
"datum": "15.11.2023",
"c_faktury": "UPDATED-INV",
"obsah": "Updated content",
"stredisko": "Updated Center",
"c_uctu": "UPD-ACC",
"md": "UPD-MD",
"identif": "UPD-ID",
"poznanka": "Updated note",
"firma": "UPD"
}' localhost:50051 multieko2.uctovnictvo.Uctovnictvo/PutUctovnictvo
{
"id": "1",
"adresarId": "1",
"cDokladu": "UPDATED-DOC",
"datum": "15.11.2023",
"cFaktury": "UPDATED-INV",
"obsah": "Updated content",
"stredisko": "Updated Center",
"cUctu": "UPD-ACC",
"md": "UPD-MD",
"identif": "UPD-ID",
"poznanka": "Updated note",
"firma": "UPD"
}

View File

@@ -1,41 +0,0 @@
grpcurl -plaintext -d '{}' localhost:50051 multieko2.uctovnictvo.Uctovnictvo/GetUctovnictvoCount
{
"count": "4"
}
grpcurl -plaintext -d '{
"position": 2
}' localhost:50051 multieko2.uctovnictvo.Uctovnictvo/GetUctovnictvoByPosition
{
"id": "2",
"adresarId": "1",
"cDokladu": "DOC123",
"datum": "01.10.2023",
"cFaktury": "INV123",
"obsah": "Sample content",
"stredisko": "Center A",
"cUctu": "ACC123",
"md": "MD123",
"identif": "ID123",
"poznanka": "Sample note",
"firma": "AAA"
}
grpcurl -plaintext -d '{
"id": 1
}' localhost:50051 multieko2.uctovnictvo.Uctovnictvo/GetUctovnictvo
{
"id": "1",
"adresarId": "1",
"cDokladu": "DOC123",
"datum": "01.10.2023",
"cFaktury": "INV123",
"obsah": "Sample content",
"stredisko": "Center A",
"cUctu": "ACC123",
"md": "MD123",
"identif": "ID123",
"poznanka": "Sample note",
"firma": "AAA"
}

View File

@@ -1,12 +0,0 @@
// src/uctovnictvo/handlers.rs
pub mod post_uctovnictvo;
pub mod get_uctovnictvo;
pub mod get_uctovnictvo_count;
pub mod get_uctovnictvo_by_position;
pub mod put_uctovnictvo;
pub use post_uctovnictvo::post_uctovnictvo;
pub use get_uctovnictvo::get_uctovnictvo;
pub use get_uctovnictvo_count::get_uctovnictvo_count;
pub use get_uctovnictvo_by_position::get_uctovnictvo_by_position;
pub use put_uctovnictvo::put_uctovnictvo;

View File

@@ -1,51 +0,0 @@
// src/uctovnictvo/handlers/get_uctovnictvo.rs
use tonic::Status;
use sqlx::PgPool;
use crate::uctovnictvo::models::Uctovnictvo;
use common::proto::multieko2::uctovnictvo::{GetUctovnictvoRequest, UctovnictvoResponse};
pub async fn get_uctovnictvo(
db_pool: &PgPool,
request: GetUctovnictvoRequest,
) -> Result<UctovnictvoResponse, Status> {
let uctovnictvo = sqlx::query_as!(
Uctovnictvo,
r#"
SELECT
id,
deleted,
adresar_id,
c_dokladu,
datum as "datum: chrono::NaiveDate",
c_faktury,
obsah,
stredisko,
c_uctu,
md,
identif,
poznanka,
firma
FROM uctovnictvo
WHERE id = $1
"#,
request.id
)
.fetch_one(db_pool)
.await
.map_err(|e| Status::not_found(e.to_string()))?;
Ok(UctovnictvoResponse {
id: uctovnictvo.id,
adresar_id: uctovnictvo.adresar_id,
c_dokladu: uctovnictvo.c_dokladu,
datum: uctovnictvo.datum.format("%d.%m.%Y").to_string(),
c_faktury: uctovnictvo.c_faktury,
obsah: uctovnictvo.obsah.unwrap_or_default(),
stredisko: uctovnictvo.stredisko.unwrap_or_default(),
c_uctu: uctovnictvo.c_uctu.unwrap_or_default(),
md: uctovnictvo.md.unwrap_or_default(),
identif: uctovnictvo.identif.unwrap_or_default(),
poznanka: uctovnictvo.poznanka.unwrap_or_default(),
firma: uctovnictvo.firma,
})
}

View File

@@ -1,34 +0,0 @@
// src/uctovnictvo/handlers/get_uctovnictvo_by_position.rs
use tonic::Status;
use sqlx::PgPool;
use common::proto::multieko2::common::PositionRequest;
use super::get_uctovnictvo;
pub async fn get_uctovnictvo_by_position(
db_pool: &PgPool,
request: PositionRequest,
) -> Result<common::proto::multieko2::uctovnictvo::UctovnictvoResponse, Status> {
if request.position < 1 {
return Err(Status::invalid_argument("Position must be at least 1"));
}
// Find the ID of the Nth non-deleted record
let id: i64 = sqlx::query_scalar!(
r#"
SELECT id
FROM uctovnictvo
WHERE deleted = FALSE
ORDER BY id ASC
OFFSET $1
LIMIT 1
"#,
request.position - 1
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(e.to_string()))?
.ok_or_else(|| Status::not_found("Position out of bounds"))?;
// Now fetch the complete record using the existing get_uctovnictvo function
get_uctovnictvo(db_pool, common::proto::multieko2::uctovnictvo::GetUctovnictvoRequest { id }).await
}

View File

@@ -1,23 +0,0 @@
// src/uctovnictvo/handlers/get_uctovnictvo_count.rs
use tonic::Status;
use sqlx::PgPool;
use common::proto::multieko2::common::{CountResponse, Empty};
pub async fn get_uctovnictvo_count(
db_pool: &PgPool,
_request: Empty,
) -> Result<CountResponse, Status> {
let count: i64 = sqlx::query_scalar!(
r#"
SELECT COUNT(*) AS count
FROM uctovnictvo
WHERE deleted = FALSE
"#
)
.fetch_one(db_pool)
.await
.map_err(|e| Status::internal(e.to_string()))?
.unwrap_or(0);
Ok(CountResponse { count })
}

View File

@@ -1,73 +0,0 @@
// src/uctovnictvo/handlers/post_uctovnictvo.rs
use tonic::Status;
use sqlx::PgPool;
use crate::uctovnictvo::models::Uctovnictvo;
use common::proto::multieko2::uctovnictvo::{PostUctovnictvoRequest, UctovnictvoResponse};
use crate::shared::date_utils::parse_date_with_multiple_formats; // Import from shared module
pub async fn post_uctovnictvo(
db_pool: &PgPool,
request: PostUctovnictvoRequest,
) -> Result<UctovnictvoResponse, Status> {
let datum = parse_date_with_multiple_formats(&request.datum)
.ok_or_else(|| Status::invalid_argument(format!("Invalid date format: {}", request.datum)))?;
// Pass the NaiveDate value directly.
let uctovnictvo = sqlx::query_as!(
Uctovnictvo,
r#"
INSERT INTO uctovnictvo (
adresar_id, c_dokladu, datum, c_faktury, obsah, stredisko,
c_uctu, md, identif, poznanka, firma, deleted
)
VALUES (
$1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12
)
RETURNING
id,
deleted,
adresar_id,
c_dokladu,
datum as "datum: chrono::NaiveDate",
c_faktury,
obsah,
stredisko,
c_uctu,
md,
identif,
poznanka,
firma
"#,
request.adresar_id,
request.c_dokladu,
datum as chrono::NaiveDate,
request.c_faktury,
request.obsah,
request.stredisko,
request.c_uctu,
request.md,
request.identif,
request.poznanka,
request.firma,
false
)
.fetch_one(db_pool)
.await
.map_err(|e| Status::internal(e.to_string()))?;
// Return the response with formatted date
Ok(UctovnictvoResponse {
id: uctovnictvo.id,
adresar_id: uctovnictvo.adresar_id,
c_dokladu: uctovnictvo.c_dokladu,
datum: uctovnictvo.datum.format("%d.%m.%Y").to_string(), // Standard Slovak format
c_faktury: uctovnictvo.c_faktury,
obsah: uctovnictvo.obsah.unwrap_or_default(),
stredisko: uctovnictvo.stredisko.unwrap_or_default(),
c_uctu: uctovnictvo.c_uctu.unwrap_or_default(),
md: uctovnictvo.md.unwrap_or_default(),
identif: uctovnictvo.identif.unwrap_or_default(),
poznanka: uctovnictvo.poznanka.unwrap_or_default(),
firma: uctovnictvo.firma,
})
}

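The deleted handlers lean on a shared parse_date_with_multiple_formats helper: the grpcurl transcripts above show it accepting both "01:10:2023" and "15.11.2023" on input, while responses are rendered as %d.%m.%Y. A hedged sketch of such a helper follows; the exact format list is an assumption, not the real date_utils code.

// Hypothetical sketch of a multi-format date parser like the one imported above.
use chrono::NaiveDate;

fn parse_date_with_multiple_formats(s: &str) -> Option<NaiveDate> {
    ["%d.%m.%Y", "%d:%m:%Y", "%Y-%m-%d"]
        .iter()
        .find_map(|fmt| NaiveDate::parse_from_str(s, fmt).ok())
}

fn main() {
    let d = parse_date_with_multiple_formats("01:10:2023").unwrap();
    // Responses in this codebase render dates in the Slovak %d.%m.%Y format.
    assert_eq!(d.format("%d.%m.%Y").to_string(), "01.10.2023");
}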
View File

@@ -1,78 +0,0 @@
// src/uctovnictvo/handlers/put_uctovnictvo.rs
use tonic::Status;
use sqlx::PgPool;
use crate::uctovnictvo::models::Uctovnictvo;
use common::proto::multieko2::uctovnictvo::{PutUctovnictvoRequest, UctovnictvoResponse};
use crate::shared::date_utils::parse_date_with_multiple_formats; // Import from shared module
pub async fn put_uctovnictvo(
db_pool: &PgPool,
request: PutUctovnictvoRequest,
) -> Result<UctovnictvoResponse, Status> {
let datum = parse_date_with_multiple_formats(&request.datum)
.ok_or_else(|| Status::invalid_argument("Invalid date format"))?;
let uctovnictvo = sqlx::query_as!(
Uctovnictvo,
r#"
UPDATE uctovnictvo
SET
adresar_id = $2,
c_dokladu = $3,
datum = $4,
c_faktury = $5,
obsah = $6,
stredisko = $7,
c_uctu = $8,
md = $9,
identif = $10,
poznanka = $11,
firma = $12
WHERE id = $1 AND deleted = FALSE
RETURNING
id,
deleted,
adresar_id,
c_dokladu,
datum as "datum: chrono::NaiveDate",
c_faktury,
obsah,
stredisko,
c_uctu,
md,
identif,
poznanka,
firma
"#,
request.id,
request.adresar_id,
request.c_dokladu,
datum as chrono::NaiveDate,
request.c_faktury,
request.obsah,
request.stredisko,
request.c_uctu,
request.md,
request.identif,
request.poznanka,
request.firma
)
.fetch_one(db_pool)
.await
.map_err(|e| Status::internal(e.to_string()))?;
Ok(UctovnictvoResponse {
id: uctovnictvo.id,
adresar_id: uctovnictvo.adresar_id,
c_dokladu: uctovnictvo.c_dokladu,
datum: uctovnictvo.datum.format("%d.%m.%Y").to_string(),
c_faktury: uctovnictvo.c_faktury,
obsah: uctovnictvo.obsah.unwrap_or_default(),
stredisko: uctovnictvo.stredisko.unwrap_or_default(),
c_uctu: uctovnictvo.c_uctu.unwrap_or_default(),
md: uctovnictvo.md.unwrap_or_default(),
identif: uctovnictvo.identif.unwrap_or_default(),
poznanka: uctovnictvo.poznanka.unwrap_or_default(),
firma: uctovnictvo.firma,
})
}

View File

@@ -1,4 +0,0 @@
// src/uctovnictvo/mod.rs
pub mod models;
pub mod handlers;

View File

@@ -1,21 +0,0 @@
// src/uctovnictvo/models.rs
use chrono::NaiveDate;
use serde::{Deserialize, Serialize};
#[derive(Debug, sqlx::FromRow, Serialize, Deserialize)]
pub struct Uctovnictvo {
pub id: i64,
pub deleted: bool,
pub adresar_id: i64,
pub c_dokladu: String,
pub datum: NaiveDate,
pub c_faktury: String,
pub obsah: Option<String>,
pub stredisko: Option<String>,
pub c_uctu: Option<String>,
pub md: Option<String>,
pub identif: Option<String>,
pub poznanka: Option<String>,
pub firma: String,
}

View File

@@ -1,161 +0,0 @@
// tests/adresar/delete_adresar_test.rs
use rstest::{fixture, rstest};
use server::adresar::handlers::delete_adresar;
use common::proto::multieko2::adresar::DeleteAdresarRequest;
use crate::common::setup_test_db;
use sqlx::PgPool;
use tonic;
use std::sync::Arc;
use tokio::sync::Mutex;
// Reuse the mutex from get_adresar_by_position_test or create a new one
lazy_static::lazy_static! {
static ref TEST_MUTEX: Arc<Mutex<()>> = Arc::new(Mutex::new(()));
}
// Fixtures
#[fixture]
async fn pool() -> PgPool {
setup_test_db().await
}
#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
let pool = pool.await;
pool.close().await;
pool
}
#[fixture]
async fn existing_record(#[future] pool: PgPool) -> (PgPool, i64, String) {
let pool = pool.await;
// Use a unique prefix for test data
let prefix = format!("DeleteTest_{}", chrono::Utc::now().timestamp_nanos_opt().unwrap_or_default());
let record = sqlx::query!(
r#"
INSERT INTO adresar (firma, deleted)
VALUES ($1, false)
RETURNING id
"#,
format!("{}_Company", prefix)
)
.fetch_one(&pool)
.await
.unwrap();
(pool, record.id, prefix)
}
#[fixture]
async fn existing_deleted_record(#[future] pool: PgPool) -> (PgPool, i64, String) {
let pool = pool.await;
// Use a unique prefix for test data
let prefix = format!("DeletedTest_{}", chrono::Utc::now().timestamp_nanos_opt().unwrap_or_default());
let record = sqlx::query!(
r#"
INSERT INTO adresar (firma, deleted)
VALUES ($1, true)
RETURNING id
"#,
format!("{}_Deleted", prefix)
)
.fetch_one(&pool)
.await
.unwrap();
(pool, record.id, prefix)
}
// Helper to check if the record is deleted
async fn assert_record_deleted(pool: &PgPool, id: i64) {
let db_record = sqlx::query!("SELECT deleted FROM adresar WHERE id = $1", id)
.fetch_one(pool)
.await
.unwrap();
assert!(db_record.deleted);
}
// Helper to clean up test records
async fn cleanup_test_records(pool: &PgPool, prefix: &str) {
if !prefix.is_empty() {
sqlx::query!(
"DELETE FROM adresar WHERE firma LIKE $1",
format!("{}%", prefix)
)
.execute(pool)
.await
.unwrap();
}
}
// Tests
#[rstest]
#[tokio::test]
async fn test_delete_adresar_success(
#[future] existing_record: (PgPool, i64, String),
) {
// Take a lock to prevent concurrent test execution
let _guard = TEST_MUTEX.lock().await;
let (pool, id, prefix) = existing_record.await;
let request = DeleteAdresarRequest { id };
let response = delete_adresar(&pool, request).await.unwrap();
assert!(response.success);
assert_record_deleted(&pool, id).await;
// Clean up
cleanup_test_records(&pool, &prefix).await;
}
#[rstest]
#[tokio::test]
async fn test_delete_adresar_nonexistent_id(
#[future] pool: PgPool,
) {
// Take a lock to prevent concurrent test execution
let _guard = TEST_MUTEX.lock().await;
let pool = pool.await;
let request = DeleteAdresarRequest { id: 9999 };
let response = delete_adresar(&pool, request).await.unwrap();
// Deleting a non-existent record should return success: false
assert!(!response.success);
}
#[rstest]
#[tokio::test]
async fn test_delete_adresar_already_deleted(
#[future] existing_deleted_record: (PgPool, i64, String),
) {
// Take a lock to prevent concurrent test execution
let _guard = TEST_MUTEX.lock().await;
let (pool, id, prefix) = existing_deleted_record.await;
let request = DeleteAdresarRequest { id };
let response = delete_adresar(&pool, request).await.unwrap();
// Deleting an already deleted record should return success: false
assert!(!response.success);
// Clean up
cleanup_test_records(&pool, &prefix).await;
}
#[rstest]
#[tokio::test]
async fn test_delete_adresar_database_error(
#[future] closed_pool: PgPool,
) {
// No need for mutex here as we're not modifying the database
let closed_pool = closed_pool.await;
let request = DeleteAdresarRequest { id: 1 };
let result = delete_adresar(&closed_pool, request).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

View File

@@ -1,368 +0,0 @@
// tests/adresar/get_adresar_by_position_test.rs
use rstest::{fixture, rstest};
use server::adresar::handlers::{get_adresar_by_position, get_adresar_count};
use common::proto::multieko2::common::{PositionRequest, Empty};
use crate::common::setup_test_db;
use sqlx::PgPool;
use tonic;
use std::sync::Arc;
use tokio::sync::Mutex;
// Use a global mutex to synchronize test execution
// This prevents tests from interfering with each other
lazy_static::lazy_static! {
static ref TEST_MUTEX: Arc<Mutex<()>> = Arc::new(Mutex::new(()));
}
#[fixture]
async fn pool() -> PgPool {
setup_test_db().await
}
#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
let pool = pool.await;
pool.close().await;
pool
}
// Create a test record with specific data and delete status
async fn create_test_record(pool: &PgPool, firma: &str, deleted: bool) -> i64 {
sqlx::query_scalar!(
"INSERT INTO adresar (firma, deleted) VALUES ($1, $2) RETURNING id",
firma,
deleted
)
.fetch_one(pool)
.await
.unwrap()
}
// Clean up test records after tests
async fn cleanup_test_records(pool: &PgPool, prefix: &str) {
sqlx::query!(
"DELETE FROM adresar WHERE firma LIKE $1",
format!("{}%", prefix)
)
.execute(pool)
.await
.unwrap();
}
// Find the position of a record in the database
async fn find_position_of_record(pool: &PgPool, id: i64) -> Option<i64> {
// Get all non-deleted records ordered by ID
let records = sqlx::query_scalar!(
"SELECT id FROM adresar WHERE deleted = FALSE ORDER BY id ASC"
)
.fetch_all(pool)
.await
.unwrap();
// Find the position of our record (1-based)
for (index, record_id) in records.iter().enumerate() {
if *record_id == id {
return Some((index + 1) as i64);
}
}
None
}
// Test position validation
#[rstest]
#[tokio::test]
async fn test_position_zero(#[future] pool: PgPool) {
let pool = pool.await;
// Request position 0 (invalid)
let request = PositionRequest { position: 0 };
let result = get_adresar_by_position(&pool, request).await;
// Verify it returns an error
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
}
#[rstest]
#[tokio::test]
async fn test_position_negative(#[future] pool: PgPool) {
let pool = pool.await;
// Request negative position (invalid)
let request = PositionRequest { position: -1 };
let result = get_adresar_by_position(&pool, request).await;
// Verify it returns an error
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
}
#[rstest]
#[tokio::test]
async fn test_basic_position_retrieval(#[future] pool: PgPool) {
let pool = pool.await;
// Take a lock to prevent concurrent test execution
let _guard = TEST_MUTEX.lock().await;
// Use a unique prefix for test data to prevent conflicts
let prefix = "PosBasicTest";
// Clean up any existing test data
cleanup_test_records(&pool, prefix).await;
// Create test records
let id1 = create_test_record(&pool, &format!("{}_1", prefix), false).await;
let id2 = create_test_record(&pool, &format!("{}_2", prefix), false).await;
let id3 = create_test_record(&pool, &format!("{}_3", prefix), false).await;
// Find the positions of these records in the database
let pos1 = find_position_of_record(&pool, id1).await.unwrap();
let pos2 = find_position_of_record(&pool, id2).await.unwrap();
let pos3 = find_position_of_record(&pool, id3).await.unwrap();
// Test retrieving each position
let response1 = get_adresar_by_position(&pool, PositionRequest { position: pos1 }).await.unwrap();
assert_eq!(response1.id, id1);
let response2 = get_adresar_by_position(&pool, PositionRequest { position: pos2 }).await.unwrap();
assert_eq!(response2.id, id2);
let response3 = get_adresar_by_position(&pool, PositionRequest { position: pos3 }).await.unwrap();
assert_eq!(response3.id, id3);
// Clean up test data
cleanup_test_records(&pool, prefix).await;
}
#[rstest]
#[tokio::test]
async fn test_deleted_records_excluded(#[future] pool: PgPool) {
let pool = pool.await;
// Take a lock to prevent concurrent test execution
let _guard = TEST_MUTEX.lock().await;
// Use a unique prefix for test data
let prefix = "PosDeletedTest";
// Clean up any existing test data
cleanup_test_records(&pool, prefix).await;
// Create a mix of active and deleted records
let id1 = create_test_record(&pool, &format!("{}_1", prefix), false).await;
let _id_deleted = create_test_record(&pool, &format!("{}_del", prefix), true).await;
let id2 = create_test_record(&pool, &format!("{}_2", prefix), false).await;
// Find positions
let pos1 = find_position_of_record(&pool, id1).await.unwrap();
let pos2 = find_position_of_record(&pool, id2).await.unwrap();
// Verify positions are consecutive, which means the deleted record is excluded
assert_eq!(pos2, pos1 + 1);
// Retrieve by position and verify
let response1 = get_adresar_by_position(&pool, PositionRequest { position: pos1 }).await.unwrap();
assert_eq!(response1.id, id1);
let response2 = get_adresar_by_position(&pool, PositionRequest { position: pos2 }).await.unwrap();
assert_eq!(response2.id, id2);
// Clean up test data
cleanup_test_records(&pool, prefix).await;
}
#[rstest]
#[tokio::test]
async fn test_position_changes_after_deletion(#[future] pool: PgPool) {
let pool = pool.await;
// Take a lock to prevent concurrent test execution
let _guard = TEST_MUTEX.lock().await;
// Use a unique prefix for test data
let prefix = "PosChangeTest";
// Clean up any existing test data
cleanup_test_records(&pool, prefix).await;
// Create records
let id1 = create_test_record(&pool, &format!("{}_1", prefix), false).await;
let id2 = create_test_record(&pool, &format!("{}_2", prefix), false).await;
let id3 = create_test_record(&pool, &format!("{}_3", prefix), false).await;
// Find initial positions
let _pos1 = find_position_of_record(&pool, id1).await.unwrap();
let pos2 = find_position_of_record(&pool, id2).await.unwrap();
let pos3 = find_position_of_record(&pool, id3).await.unwrap();
// Mark the first record as deleted
sqlx::query!("UPDATE adresar SET deleted = TRUE WHERE id = $1", id1)
.execute(&pool)
.await
.unwrap();
// Find new positions
let pos2_after = find_position_of_record(&pool, id2).await.unwrap();
let pos3_after = find_position_of_record(&pool, id3).await.unwrap();
// Verify positions shifted
assert!(pos2_after < pos2);
assert!(pos3_after < pos3);
// Verify by retrieving records at new positions
let response_at_first = get_adresar_by_position(&pool, PositionRequest { position: pos2_after }).await.unwrap();
assert_eq!(response_at_first.id, id2);
// Clean up test data
cleanup_test_records(&pool, prefix).await;
}
#[rstest]
#[tokio::test]
async fn test_position_out_of_bounds(#[future] pool: PgPool) {
let pool = pool.await;
// Take a lock to prevent concurrent test execution
let _guard = TEST_MUTEX.lock().await;
// Get the total count of non-deleted records
let count = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
)
.fetch_one(&pool)
.await
.unwrap()
.unwrap_or(0);
// Request a position beyond the count
let request = PositionRequest { position: count + 1 };
let result = get_adresar_by_position(&pool, request).await;
// Verify it returns an error
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}
#[rstest]
#[tokio::test]
async fn test_database_error(#[future] closed_pool: PgPool) {
let closed_pool = closed_pool.await;
// Attempt to query with a closed pool
let request = PositionRequest { position: 1 };
let result = get_adresar_by_position(&closed_pool, request).await;
// Verify it returns an internal error
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}
#[rstest]
#[tokio::test]
async fn test_position_after_adding_record(#[future] pool: PgPool) {
let pool = pool.await;
// Take a lock to prevent concurrent test execution
let _guard = TEST_MUTEX.lock().await;
// Use a unique prefix for test data
let prefix = "PosAddTest";
// Clean up any existing test data
cleanup_test_records(&pool, prefix).await;
// Create records
let id1 = create_test_record(&pool, &format!("{}_1", prefix), false).await;
let id2 = create_test_record(&pool, &format!("{}_2", prefix), false).await;
// Find positions
let pos1 = find_position_of_record(&pool, id1).await.unwrap();
let pos2 = find_position_of_record(&pool, id2).await.unwrap();
// Add a new record
let id3 = create_test_record(&pool, &format!("{}_3", prefix), false).await;
// Find its position
let pos3 = find_position_of_record(&pool, id3).await.unwrap();
// Verify retrieval by position
let response3 = get_adresar_by_position(&pool, PositionRequest { position: pos3 }).await.unwrap();
assert_eq!(response3.id, id3);
// Verify original positions still work
let response1 = get_adresar_by_position(&pool, PositionRequest { position: pos1 }).await.unwrap();
assert_eq!(response1.id, id1);
let response2 = get_adresar_by_position(&pool, PositionRequest { position: pos2 }).await.unwrap();
assert_eq!(response2.id, id2);
// Clean up test data
cleanup_test_records(&pool, prefix).await;
}
/// Test handler correctly excludes deleted records
#[rstest]
#[tokio::test]
async fn test_handler_excludes_deleted_records(#[future] pool: PgPool) {
let pool = pool.await;
// Take a lock to prevent concurrent test execution
let _guard = TEST_MUTEX.lock().await;
// Use a unique prefix for test data
let prefix = "CountTest";
// Clean up any existing test data
cleanup_test_records(&pool, prefix).await;
// Create active records
for i in 1..=3 {
create_test_record(&pool, &format!("{}_Active_{}", prefix, i), false).await;
}
// Create deleted records
for i in 1..=2 {
create_test_record(&pool, &format!("{}_Deleted_{}", prefix, i), true).await;
}
// Count our test records by deleted status
let active_test_count = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar WHERE firma LIKE $1 AND deleted = FALSE",
format!("{}%", prefix)
)
.fetch_one(&pool)
.await
.unwrap()
.unwrap_or(0);
let deleted_test_count = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar WHERE firma LIKE $1 AND deleted = TRUE",
format!("{}%", prefix)
)
.fetch_one(&pool)
.await
.unwrap()
.unwrap_or(0);
// Verify our test data was inserted correctly
assert_eq!(active_test_count, 3);
assert_eq!(deleted_test_count, 2);
// Get the total count of active records (including existing ones)
let total_active_count = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
)
.fetch_one(&pool)
.await
.unwrap()
.unwrap_or(0);
// Now call our handler and verify it returns the same count
let response = get_adresar_count(&pool, Empty {}).await.unwrap();
assert_eq!(response.count, total_active_count);
// Clean up test data
cleanup_test_records(&pool, prefix).await;
}

View File

@@ -1,284 +0,0 @@
// tests/adresar/get_adresar_count_test.rs
use rstest::{fixture, rstest};
use server::adresar::handlers::get_adresar_count;
use common::proto::multieko2::common::Empty;
use crate::common::setup_test_db;
use sqlx::PgPool;
use tonic;
// For connection pooling
#[fixture]
async fn pool() -> PgPool {
setup_test_db().await
}
#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
let pool = pool.await;
pool.close().await;
pool
}
// Create a self-contained test that runs in a transaction
// --------------------------------------------------------
// Instead of relying on table state and doing our own transaction management,
// we'll mock the database response to `get_adresar_count` and verify it behaves correctly
/// Test only that the handler returns the value from the database correctly
#[rstest]
#[tokio::test]
async fn test_handler_returns_count_from_database(#[future] pool: PgPool) {
let pool = pool.await;
// First, get whatever count the database currently has
let count_query = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
)
.fetch_one(&pool)
.await
.unwrap()
.unwrap_or(0);
// Now call our handler and verify it returns the same count
let response = get_adresar_count(&pool, Empty {}).await.unwrap();
assert_eq!(response.count, count_query);
}
/// Test handler correctly excludes deleted records
#[rstest]
#[tokio::test]
async fn test_handler_excludes_deleted_records(#[future] pool: PgPool) {
let pool = pool.await;
// Use a transaction to isolate this test completely
let mut tx = pool.begin().await.unwrap();
// Count records where deleted = TRUE
let deleted_count = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar WHERE deleted = TRUE"
)
.fetch_one(&mut *tx)
.await
.unwrap()
.unwrap_or(0);
// Count records where deleted = FALSE
let active_count = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
)
.fetch_one(&mut *tx)
.await
.unwrap()
.unwrap_or(0);
// Count all records
let total_count = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar"
)
.fetch_one(&mut *tx)
.await
.unwrap()
.unwrap_or(0);
// Verify our counts are consistent
assert_eq!(total_count, active_count + deleted_count);
// Verify our handler returns only the active count
let response = get_adresar_count(&pool, Empty {}).await.unwrap();
assert_eq!(response.count, active_count);
// Rollback transaction
tx.rollback().await.unwrap();
}
/// Test SQL query behavior with deleted flag
#[rstest]
#[tokio::test]
async fn test_deleted_flag_filters_records(#[future] pool: PgPool) {
let pool = pool.await;
// Use a transaction to isolate this test completely
let mut tx = pool.begin().await.unwrap();
// Insert test records inside this transaction
// They will be automatically rolled back at the end
sqlx::query!(
"INSERT INTO adresar (firma, deleted) VALUES ($1, FALSE)",
"Test Active Record"
)
.execute(&mut *tx)
.await
.unwrap();
sqlx::query!(
"INSERT INTO adresar (firma, deleted) VALUES ($1, TRUE)",
"Test Deleted Record"
)
.execute(&mut *tx)
.await
.unwrap();
// Count active records in the transaction
let active_count = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
)
.fetch_one(&mut *tx)
.await
.unwrap()
.unwrap_or(0);
// Count deleted records in the transaction
let deleted_count = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar WHERE deleted = TRUE"
)
.fetch_one(&mut *tx)
.await
.unwrap()
.unwrap_or(0);
// Verify at least one active and one deleted record
assert!(active_count > 0);
assert!(deleted_count > 0);
// Rollback transaction
tx.rollback().await.unwrap();
}
/// Test the handler returns an error with a closed pool
#[rstest]
#[tokio::test]
async fn test_database_error(#[future] closed_pool: PgPool) {
let closed_pool = closed_pool.await;
let result = get_adresar_count(&closed_pool, Empty {}).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}
/// Test the behavior of setting deleted to true and back
#[rstest]
#[tokio::test]
async fn test_update_of_deleted_flag(#[future] pool: PgPool) {
let pool = pool.await;
// Use a transaction for complete isolation
let mut tx = pool.begin().await.unwrap();
// Insert a test record
let id = sqlx::query_scalar!(
"INSERT INTO adresar (firma, deleted) VALUES ($1, FALSE) RETURNING id",
"Test Toggle Record"
)
.fetch_one(&mut *tx)
.await
.unwrap();
// Count active records with this new record
let active_count_before = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
)
.fetch_one(&mut *tx)
.await
.unwrap()
.unwrap_or(0);
// Mark as deleted
sqlx::query!(
"UPDATE adresar SET deleted = TRUE WHERE id = $1",
id
)
.execute(&mut *tx)
.await
.unwrap();
// Count active records after marking as deleted
let active_count_after_delete = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
)
.fetch_one(&mut *tx)
.await
.unwrap()
.unwrap_or(0);
// Verify count decreased by 1
assert_eq!(active_count_after_delete, active_count_before - 1);
// Mark as active again
sqlx::query!(
"UPDATE adresar SET deleted = FALSE WHERE id = $1",
id
)
.execute(&mut *tx)
.await
.unwrap();
// Count active records after marking as active
let active_count_after_restore = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
)
.fetch_one(&mut *tx)
.await
.unwrap()
.unwrap_or(0);
// Verify count increased back to original
assert_eq!(active_count_after_restore, active_count_before);
// Rollback transaction
tx.rollback().await.unwrap();
}
/// Test edge cases of an empty table
#[rstest]
#[tokio::test]
async fn test_edge_case_empty_table(#[future] pool: PgPool) {
let pool = pool.await;
// Not literally testing an empty table since we can't truncate due to FK constraints
// But we can verify the count response is never negative
let response = get_adresar_count(&pool, Empty {}).await.unwrap();
assert!(response.count >= 0);
}
/// Test adding a record and verifying count increases
#[rstest]
#[tokio::test]
async fn test_count_increments_after_adding_record(#[future] pool: PgPool) {
let pool = pool.await;
// Use a transaction for complete isolation
let mut tx = pool.begin().await.unwrap();
// Get initial active count inside transaction
let initial_count = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
)
.fetch_one(&mut *tx)
.await
.unwrap()
.unwrap_or(0);
// Add a record inside the transaction
sqlx::query!(
"INSERT INTO adresar (firma, deleted) VALUES ($1, FALSE)",
"Test Increment Record"
)
.execute(&mut *tx)
.await
.unwrap();
// Get new count inside transaction
let new_count = sqlx::query_scalar!(
"SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
)
.fetch_one(&mut *tx)
.await
.unwrap()
.unwrap_or(0);
// Verify count increased by exactly 1
assert_eq!(new_count, initial_count + 1);
// Rollback transaction
tx.rollback().await.unwrap();
}

View File

@@ -1,238 +0,0 @@
// tests/adresar/get_adresar_test.rs
use rstest::{fixture, rstest};
use server::adresar::handlers::get_adresar;
use common::proto::multieko2::adresar::{GetAdresarRequest, AdresarResponse};
use crate::common::setup_test_db;
use sqlx::PgPool;
use tonic;
#[fixture]
async fn pool() -> PgPool {
setup_test_db().await
}
#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
let pool = pool.await;
pool.close().await;
pool
}
#[fixture]
async fn existing_record(#[future] pool: PgPool) -> (PgPool, i64) {
let pool = pool.await;
let record = sqlx::query!(
r#"
INSERT INTO adresar (
firma, kz, drc, ulica, psc, mesto, stat, banka, ucet,
skladm, ico, kontakt, telefon, skladu, fax, deleted
)
VALUES (
'Test Company', 'KZ', 'DRC', 'Street', '12345', 'City',
'Country', 'Bank', 'Account', 'SkladM', 'ICO', 'Contact',
'+421123456789', 'SkladU', 'Fax', false
)
RETURNING id
"#
)
.fetch_one(&pool)
.await
.unwrap();
(pool, record.id)
}
#[fixture]
async fn existing_deleted_record(#[future] pool: PgPool) -> (PgPool, i64) {
let pool = pool.await;
let record = sqlx::query!(
r#"
INSERT INTO adresar (firma, deleted)
VALUES ('Deleted Company', true)
RETURNING id
"#
)
.fetch_one(&pool)
.await
.unwrap();
(pool, record.id)
}
#[fixture]
async fn existing_record_with_nulls(#[future] pool: PgPool) -> (PgPool, i64) {
let pool = pool.await;
let record = sqlx::query!(
r#"
INSERT INTO adresar (firma)
VALUES ('Null Fields Company')
RETURNING id
"#
)
.fetch_one(&pool)
.await
.unwrap();
(pool, record.id)
}
async fn assert_response_matches(pool: &PgPool, id: i64, response: &AdresarResponse) {
let db_record = sqlx::query!("SELECT * FROM adresar WHERE id = $1", id)
.fetch_one(pool)
.await
.unwrap();
assert_eq!(db_record.firma, response.firma);
assert_eq!(db_record.kz.unwrap_or_default(), response.kz);
assert_eq!(db_record.drc.unwrap_or_default(), response.drc);
assert_eq!(db_record.ulica.unwrap_or_default(), response.ulica);
assert_eq!(db_record.psc.unwrap_or_default(), response.psc);
assert_eq!(db_record.mesto.unwrap_or_default(), response.mesto);
assert_eq!(db_record.stat.unwrap_or_default(), response.stat);
assert_eq!(db_record.banka.unwrap_or_default(), response.banka);
assert_eq!(db_record.ucet.unwrap_or_default(), response.ucet);
assert_eq!(db_record.skladm.unwrap_or_default(), response.skladm);
assert_eq!(db_record.ico.unwrap_or_default(), response.ico);
assert_eq!(db_record.kontakt.unwrap_or_default(), response.kontakt);
assert_eq!(db_record.telefon.unwrap_or_default(), response.telefon);
assert_eq!(db_record.skladu.unwrap_or_default(), response.skladu);
assert_eq!(db_record.fax.unwrap_or_default(), response.fax);
}
#[rstest]
#[tokio::test]
async fn test_get_adresar_success(
#[future] existing_record: (PgPool, i64),
) {
let (pool, id) = existing_record.await;
let request = GetAdresarRequest { id };
let response = get_adresar(&pool, request).await.unwrap();
assert_eq!(response.id, id);
assert_response_matches(&pool, id, &response).await;
}
#[rstest]
#[tokio::test]
async fn test_get_optional_fields_null(
#[future] existing_record_with_nulls: (PgPool, i64),
) {
let (pool, id) = existing_record_with_nulls.await;
let request = GetAdresarRequest { id };
let response = get_adresar(&pool, request).await.unwrap();
assert_eq!(response.kz, "");
assert_eq!(response.drc, "");
assert_eq!(response.ulica, "");
assert_eq!(response.psc, "");
assert_eq!(response.mesto, "");
assert_eq!(response.stat, "");
assert_eq!(response.banka, "");
assert_eq!(response.ucet, "");
assert_eq!(response.skladm, "");
assert_eq!(response.ico, "");
assert_eq!(response.kontakt, "");
assert_eq!(response.telefon, "");
assert_eq!(response.skladu, "");
assert_eq!(response.fax, "");
}
#[rstest]
#[tokio::test]
async fn test_get_nonexistent_id(
#[future] pool: PgPool,
) {
let pool = pool.await;
let request = GetAdresarRequest { id: 9999 };
let result = get_adresar(&pool, request).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}
#[rstest]
#[tokio::test]
async fn test_get_deleted_record(
#[future] existing_deleted_record: (PgPool, i64),
) {
let (pool, id) = existing_deleted_record.await;
let request = GetAdresarRequest { id };
let result = get_adresar(&pool, request).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}
#[rstest]
#[tokio::test]
async fn test_database_error(
#[future] closed_pool: PgPool,
) {
let closed_pool = closed_pool.await;
let request = GetAdresarRequest { id: 1 };
let result = get_adresar(&closed_pool, request).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}
#[rstest]
#[tokio::test]
async fn test_get_special_characters(
#[future] pool: PgPool,
) {
let pool = pool.await;
let firma = "Náměstí ČR";
let telefon = "+420 123-456.789";
let ulica = "Křižíkova 123";
let record = sqlx::query!(
r#"
INSERT INTO adresar (firma, telefon, ulica)
VALUES ($1, $2, $3)
RETURNING id
"#,
firma,
telefon,
ulica
)
.fetch_one(&pool)
.await
.unwrap();
let request = GetAdresarRequest { id: record.id };
let response = get_adresar(&pool, request).await.unwrap();
assert_eq!(response.firma, firma);
assert_eq!(response.telefon, telefon);
assert_eq!(response.ulica, ulica);
}
#[rstest]
#[tokio::test]
async fn test_get_max_length_fields(
#[future] pool: PgPool,
) {
let pool = pool.await;
let firma = "a".repeat(255);
let telefon = "1".repeat(20);
let record = sqlx::query!(
r#"
INSERT INTO adresar (firma, telefon)
VALUES ($1, $2)
RETURNING id
"#,
firma,
telefon
)
.fetch_one(&pool)
.await
.unwrap();
let request = GetAdresarRequest { id: record.id };
let response = get_adresar(&pool, request).await.unwrap();
assert_eq!(response.firma.len(), 255);
assert_eq!(response.telefon.len(), 20);
}

View File

@@ -1,8 +0,0 @@
// server/tests/adresar/mod.rs
pub mod post_adresar_test;
pub mod put_adresar_test;
pub mod get_adresar_test;
pub mod get_adresar_count_test;
pub mod get_adresar_by_position_test;
pub mod delete_adresar_test;

View File

@@ -1,222 +0,0 @@
// tests/adresar/post_adresar_test.rs
use rstest::{fixture, rstest};
use server::adresar::handlers::post_adresar;
use common::proto::multieko2::adresar::PostAdresarRequest;
use crate::common::setup_test_db;
use sqlx::PgPool;
use tonic;
// Fixtures
#[fixture]
async fn pool() -> PgPool {
setup_test_db().await
}
#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
let pool = pool.await;
pool.close().await;
pool
}
#[fixture]
fn valid_request() -> PostAdresarRequest {
PostAdresarRequest {
firma: "Test Company".into(),
kz: "KZ123".into(),
drc: "DRC456".into(),
ulica: "Test Street".into(),
psc: "12345".into(),
mesto: "Test City".into(),
stat: "Test Country".into(),
banka: "Test Bank".into(),
ucet: "123456789".into(),
skladm: "Warehouse M".into(),
ico: "12345678".into(),
kontakt: "John Doe".into(),
telefon: "+421123456789".into(),
skladu: "Warehouse U".into(),
fax: "+421123456700".into(),
}
}
#[fixture]
fn minimal_request() -> PostAdresarRequest {
PostAdresarRequest {
firma: "Required Only".into(),
..Default::default()
}
}
// Helper to check database state
async fn assert_response_matches(pool: &PgPool, response: &common::proto::multieko2::adresar::AdresarResponse) {
let db_record = sqlx::query!("SELECT * FROM adresar WHERE id = $1", response.id)
.fetch_one(pool)
.await
.unwrap();
assert_eq!(db_record.firma, response.firma);
assert_eq!(db_record.telefon.as_deref(), Some(response.telefon.as_str()));
// Add assertions for other fields...
assert!(!db_record.deleted);
assert!(db_record.created_at.is_some());
}
// Tests
#[rstest]
#[tokio::test]
async fn test_create_adresar_success(#[future] pool: PgPool, valid_request: PostAdresarRequest) {
let pool = pool.await;
let response = post_adresar(&pool, valid_request).await.unwrap();
assert!(response.id > 0);
assert_eq!(response.firma, "Test Company");
assert_response_matches(&pool, &response).await;
}
#[rstest]
#[tokio::test]
async fn test_create_adresar_whitespace_trimming(
#[future] pool: PgPool,
valid_request: PostAdresarRequest,
) {
let pool = pool.await;
let mut request = valid_request;
request.firma = " Test Company ".into();
request.telefon = " +421123456789 ".into();
request.ulica = " Test Street ".into();
let response = post_adresar(&pool, request).await.unwrap();
assert_eq!(response.firma, "Test Company");
assert_eq!(response.telefon, "+421123456789");
assert_eq!(response.ulica, "Test Street");
}
#[rstest]
#[tokio::test]
async fn test_create_adresar_empty_optional_fields(
#[future] pool: PgPool,
valid_request: PostAdresarRequest,
) {
let pool = pool.await;
let mut request = valid_request;
request.telefon = " ".into();
let response = post_adresar(&pool, request).await.unwrap();
let db_telefon = sqlx::query_scalar!("SELECT telefon FROM adresar WHERE id = $1", response.id)
.fetch_one(&pool)
.await
.unwrap();
assert!(db_telefon.is_none());
assert_eq!(response.telefon, "");
}
#[rstest]
#[tokio::test]
async fn test_create_adresar_invalid_firma(
#[future] pool: PgPool,
valid_request: PostAdresarRequest,
) {
let pool = pool.await;
let mut request = valid_request;
request.firma = " ".into();
let result = post_adresar(&pool, request).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
}
#[rstest]
#[tokio::test]
async fn test_create_adresar_minimal_valid_request(
#[future] pool: PgPool,
minimal_request: PostAdresarRequest,
) {
let pool = pool.await;
let response = post_adresar(&pool, minimal_request).await.unwrap();
assert!(response.id > 0);
assert_eq!(response.firma, "Required Only");
assert!(response.kz.is_empty());
assert!(response.drc.is_empty());
}
#[rstest]
#[tokio::test]
async fn test_create_adresar_empty_firma(
#[future] pool: PgPool,
minimal_request: PostAdresarRequest,
) {
let pool = pool.await;
let mut request = minimal_request;
request.firma = "".into();
let result = post_adresar(&pool, request).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
}
#[rstest]
#[tokio::test]
async fn test_create_adresar_database_error(
#[future] closed_pool: PgPool,
minimal_request: PostAdresarRequest,
) {
let closed_pool = closed_pool.await;
let result = post_adresar(&closed_pool, minimal_request).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}
#[rstest]
#[tokio::test]
async fn test_create_adresar_field_length_limits(
#[future] pool: PgPool,
valid_request: PostAdresarRequest,
) {
let pool = pool.await;
let mut request = valid_request;
request.firma = "a".repeat(255);
request.telefon = "1".repeat(20);
let response = post_adresar(&pool, request).await.unwrap();
assert_eq!(response.firma.len(), 255);
assert_eq!(response.telefon.len(), 20);
}
#[rstest]
#[tokio::test]
async fn test_create_adresar_special_characters(
#[future] pool: PgPool,
valid_request: PostAdresarRequest,
) {
let pool = pool.await;
let mut request = valid_request;
request.telefon = "+420 123-456.789".into();
request.ulica = "Náměstí 28. října".into();
let response = post_adresar(&pool, request.clone()).await.unwrap();
assert_eq!(response.telefon, request.telefon);
assert_eq!(response.ulica, request.ulica);
}
#[rstest]
#[tokio::test]
async fn test_create_adresar_optional_fields_null_vs_empty(
#[future] pool: PgPool,
valid_request: PostAdresarRequest,
) {
let pool = pool.await;
let mut request = valid_request;
request.telefon = String::new();
let response = post_adresar(&pool, request).await.unwrap();
let db_telefon = sqlx::query_scalar!("SELECT telefon FROM adresar WHERE id = $1", response.id)
.fetch_one(&pool)
.await
.unwrap();
assert!(db_telefon.is_none());
}
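The last two tests pin down an empty-string-to-NULL convention: blank optional fields are stored as SQL NULL but surfaced as "" in the response. A minimal sketch of that normalization, with names that are illustrative rather than taken from the handler itself:

// Illustrative only: how the handler plausibly normalizes optional fields.
fn normalize_optional(input: &str) -> Option<String> {
    let trimmed = input.trim();
    if trimmed.is_empty() {
        None // persisted as SQL NULL
    } else {
        Some(trimmed.to_string()) // persisted trimmed
    }
}
// On the way out, NULL becomes an empty string in the proto response:
// response.telefon = db_record.telefon.unwrap_or_default();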

View File

@@ -1,266 +0,0 @@
// tests/adresar/put_adresar_test.rs
use rstest::{fixture, rstest};
use server::adresar::handlers::put_adresar;
use common::proto::multieko2::adresar::PutAdresarRequest;
use crate::common::setup_test_db;
use sqlx::PgPool;
use tonic;
// Fixtures
#[fixture]
async fn pool() -> PgPool {
setup_test_db().await
}
#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
let pool = pool.await;
pool.close().await;
pool
}
#[fixture]
async fn existing_record(#[future] pool: PgPool) -> (PgPool, i64) {
let pool = pool.await;
// Create a test record in the database
let record = sqlx::query!(
r#"
INSERT INTO adresar (
firma, kz, drc, ulica, psc, mesto, stat, banka, ucet,
skladm, ico, kontakt, telefon, skladu, fax, deleted
)
VALUES (
'Original Company', 'Original KZ', 'Original DRC', 'Original Street',
'12345', 'Original City', 'Original Country', 'Original Bank',
'Original Account', 'Original SkladM', 'Original ICO',
'Original Contact', '+421123456789', 'Original SkladU', 'Original Fax',
false
)
RETURNING id
"#
)
.fetch_one(&pool)
.await
.unwrap();
(pool, record.id)
}
#[fixture]
fn valid_request_template() -> PutAdresarRequest {
PutAdresarRequest {
id: 0, // This will be replaced in each test
firma: "Updated Company".into(),
kz: "Updated KZ".into(),
drc: "Updated DRC".into(),
ulica: "Updated Street".into(),
psc: "67890".into(),
mesto: "Updated City".into(),
stat: "Updated Country".into(),
banka: "Updated Bank".into(),
ucet: "987654321".into(),
skladm: "Updated SkladM".into(),
ico: "87654321".into(),
kontakt: "Jane Doe".into(),
telefon: "+421987654321".into(),
skladu: "Updated SkladU".into(),
fax: "+421987654300".into(),
}
}
// Helper to check database state
async fn assert_response_matches(pool: &PgPool, id: i64, response: &common::proto::multieko2::adresar::AdresarResponse) {
let db_record = sqlx::query!("SELECT * FROM adresar WHERE id = $1", id)
.fetch_one(pool)
.await
.unwrap();
assert_eq!(db_record.firma, response.firma);
assert_eq!(db_record.kz.unwrap_or_default(), response.kz);
assert_eq!(db_record.drc.unwrap_or_default(), response.drc);
assert_eq!(db_record.ulica.unwrap_or_default(), response.ulica);
assert_eq!(db_record.psc.unwrap_or_default(), response.psc);
assert_eq!(db_record.mesto.unwrap_or_default(), response.mesto);
assert_eq!(db_record.stat.unwrap_or_default(), response.stat);
assert_eq!(db_record.banka.unwrap_or_default(), response.banka);
assert_eq!(db_record.ucet.unwrap_or_default(), response.ucet);
assert_eq!(db_record.skladm.unwrap_or_default(), response.skladm);
assert_eq!(db_record.ico.unwrap_or_default(), response.ico);
assert_eq!(db_record.kontakt.unwrap_or_default(), response.kontakt);
assert_eq!(db_record.telefon.unwrap_or_default(), response.telefon);
assert_eq!(db_record.skladu.unwrap_or_default(), response.skladu);
assert_eq!(db_record.fax.unwrap_or_default(), response.fax);
assert!(!db_record.deleted, "Record should not be deleted");
}
// Tests
#[rstest]
#[tokio::test]
async fn test_update_adresar_success(#[future] existing_record: (PgPool, i64), valid_request_template: PutAdresarRequest) {
let (pool, id) = existing_record.await;
let mut request = valid_request_template;
request.id = id;
let response = put_adresar(&pool, request).await.unwrap();
assert_eq!(response.id, id);
assert_response_matches(&pool, id, &response).await;
}
#[rstest]
#[tokio::test]
async fn test_update_whitespace_fields(#[future] existing_record: (PgPool, i64), valid_request_template: PutAdresarRequest) {
let (pool, id) = existing_record.await;
let mut request = valid_request_template;
request.id = id;
request.firma = " Updated Company ".into();
request.telefon = " +421987654321 ".into();
let response = put_adresar(&pool, request).await.unwrap();
// Verify trimmed values in response
assert_eq!(response.firma, "Updated Company");
assert_eq!(response.telefon, "+421987654321");
// Verify raw values in database
let db_record = sqlx::query!("SELECT firma, telefon FROM adresar WHERE id = $1", id)
.fetch_one(&pool)
.await
.unwrap();
assert_eq!(db_record.firma, "Updated Company"); // Trimmed
assert_eq!(db_record.telefon.unwrap(), "+421987654321"); // Trimmed
}
#[rstest]
#[tokio::test]
async fn test_update_empty_required_field(#[future] existing_record: (PgPool, i64), valid_request_template: PutAdresarRequest) {
let (pool, id) = existing_record.await;
let mut request = valid_request_template;
request.id = id;
request.firma = "".into();
let result = put_adresar(&pool, request).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
}
#[rstest]
#[tokio::test]
async fn test_update_nonexistent_id(#[future] pool: PgPool) {
let pool = pool.await;
let request = PutAdresarRequest {
id: 9999, // Non-existent ID
firma: "Updated Company".into(),
kz: "Updated KZ".into(),
drc: "Updated DRC".into(),
ulica: "Updated Street".into(),
psc: "67890".into(),
mesto: "Updated City".into(),
stat: "Updated Country".into(),
banka: "Updated Bank".into(),
ucet: "987654321".into(),
skladm: "Updated SkladM".into(),
ico: "87654321".into(),
kontakt: "Jane Doe".into(),
telefon: "+421987654321".into(),
skladu: "Updated SkladU".into(),
fax: "+421987654300".into(),
};
let result = put_adresar(&pool, request).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}
#[rstest]
#[tokio::test]
async fn test_update_deleted_record(#[future] existing_record: (PgPool, i64), valid_request_template: PutAdresarRequest) {
let (pool, id) = existing_record.await;
// Mark the record as deleted
sqlx::query!("UPDATE adresar SET deleted = true WHERE id = $1", id)
.execute(&pool)
.await
.unwrap();
let mut request = valid_request_template;
request.id = id;
let result = put_adresar(&pool, request).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}
#[rstest]
#[tokio::test]
async fn test_clear_optional_fields(#[future] existing_record: (PgPool, i64), valid_request_template: PutAdresarRequest) {
let (pool, id) = existing_record.await;
let mut request = valid_request_template;
request.id = id;
request.telefon = String::new();
request.ulica = String::new();
let response = put_adresar(&pool, request).await.unwrap();
// Check response contains empty strings
assert!(response.telefon.is_empty());
assert!(response.ulica.is_empty());
// Check database contains NULL
let db_record = sqlx::query!("SELECT telefon, ulica FROM adresar WHERE id = $1", id)
.fetch_one(&pool)
.await
.unwrap();
assert!(db_record.telefon.is_none());
assert!(db_record.ulica.is_none());
}
#[rstest]
#[tokio::test]
async fn test_max_length_fields(#[future] existing_record: (PgPool, i64), valid_request_template: PutAdresarRequest) {
let (pool, id) = existing_record.await;
let mut request = valid_request_template;
request.id = id;
request.firma = "a".repeat(255);
request.telefon = "1".repeat(20);
let _response = put_adresar(&pool, request).await.unwrap();
let db_record = sqlx::query!("SELECT firma, telefon FROM adresar WHERE id = $1", id)
.fetch_one(&pool)
.await
.unwrap();
assert_eq!(db_record.firma.len(), 255);
assert_eq!(db_record.telefon.unwrap().len(), 20);
}
#[rstest]
#[tokio::test]
async fn test_special_characters(#[future] existing_record: (PgPool, i64), valid_request_template: PutAdresarRequest) {
let (pool, id) = existing_record.await;
let mut request = valid_request_template;
request.id = id;
request.ulica = "Náměstí 28. října".into();
request.telefon = "+420 123-456.789".into();
let _response = put_adresar(&pool, request).await.unwrap();
let db_record = sqlx::query!("SELECT ulica, telefon FROM adresar WHERE id = $1", id)
.fetch_one(&pool)
.await
.unwrap();
assert_eq!(db_record.ulica.unwrap(), "Náměstí 28. října");
assert_eq!(db_record.telefon.unwrap(), "+420 123-456.789");
}

View File

@@ -1,56 +1,88 @@
 // tests/common/mod.rs
-use dotenvy;
-use sqlx::{postgres::PgPoolOptions, PgPool};
+use dotenvy::dotenv;
+use rand::distr::Alphanumeric;
+use rand::Rng;
+use sqlx::{postgres::PgPoolOptions, Connection, Executor, PgConnection, PgPool};
 use std::env;
-use std::path::Path;
-pub async fn setup_test_db() -> PgPool {
-// Get path to server directory
-let manifest_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR must be set");
-let env_path = Path::new(&manifest_dir).join(".env_test");
-// Load environment variables
-dotenvy::from_path(env_path).ok();
+fn get_database_url() -> String {
+dotenv().ok();
+env::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set")
+}
+async fn get_root_connection() -> PgConnection {
+PgConnection::connect(&get_database_url())
+.await
+.expect("Failed to create root connection to test database")
+}
+/// The primary test setup function.
+/// Creates a new, unique schema and returns a connection pool that is scoped to that schema.
+/// This is the key to test isolation.
+pub async fn setup_isolated_db() -> PgPool {
+let mut root_conn = get_root_connection().await;
+// Make schema names more unique - include timestamp + random
+let schema_name = format!(
+"test_{}_{}",
+std::time::SystemTime::now()
+.duration_since(std::time::UNIX_EPOCH)
+.unwrap()
+.as_nanos(),
+rand::rng()
+.sample_iter(&Alphanumeric)
+.take(8)
+.map(char::from)
+.collect::<String>()
+.to_lowercase()
+);
+root_conn
+.execute(format!("CREATE SCHEMA \"{}\"", schema_name).as_str())
+.await
+.unwrap_or_else(|_| panic!("Failed to create schema: {}", schema_name));
+root_conn
+.execute("CREATE SCHEMA IF NOT EXISTS \"default\"")
+.await
+.unwrap();
-// Create connection pool
-let database_url = env::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set");
 let pool = PgPoolOptions::new()
 .max_connections(5)
-.connect(&database_url)
+.after_connect(move |conn, _meta| {
+let schema_name = schema_name.clone();
+Box::pin(async move {
+conn.execute(format!("SET search_path TO \"{}\", \"default\", \"public\"", schema_name).as_str())
+.await?;
+Ok(())
+})
+})
+.connect(&get_database_url())
 .await
-.expect("Failed to create pool");
+.expect("Failed to create isolated pool");
 // Run migrations
 sqlx::migrate!()
 .run(&pool)
 .await
-.expect("Migrations failed");
+.expect("Migrations failed in isolated schema");
 // Insert default profile if it doesn't exist
-let profile = sqlx::query!(
+sqlx::query!(
 r#"
-INSERT INTO profiles (name)
+INSERT INTO schemas (name)
 VALUES ('default')
 ON CONFLICT (name) DO NOTHING
-RETURNING id
 "#
 )
-.fetch_optional(&pool)
+.execute(&pool)
 .await
-.expect("Failed to insert test profile");
-let profile_id = if let Some(profile) = profile {
-profile.id
-} else {
-// If the profile already exists, fetch its ID
-sqlx::query!(
-"SELECT id FROM profiles WHERE name = 'default'"
-)
-.fetch_one(&pool)
-.await
-.expect("Failed to fetch default profile ID")
-.id
-};
+.expect("Failed to insert test profile in isolated schema");
 pool
 }
+/// Compatibility alias for the old function name
+/// This allows existing tests to continue working without modification
+pub async fn setup_test_db() -> PgPool {
+setup_isolated_db().await
+}
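A short sketch of what the new setup buys, assuming the migrations create the `schemas` table with a unique `name` column (as the `ON CONFLICT (name)` above implies):

// Each call to setup_isolated_db() yields a pool whose search_path points at
// a fresh schema, so writes through one pool are invisible to another.
#[tokio::test]
async fn isolation_smoke_test() {
    let pool_a = setup_isolated_db().await;
    let pool_b = setup_isolated_db().await;
    sqlx::query("INSERT INTO schemas (name) VALUES ('only_in_a')")
        .execute(&pool_a)
        .await
        .unwrap();
    let seen: Option<String> = sqlx::query_scalar(
        "SELECT name FROM schemas WHERE name = 'only_in_a'",
    )
    .fetch_optional(&pool_b)
    .await
    .unwrap();
    assert!(seen.is_none(), "pools must not share state");
}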

View File

@@ -1,5 +1,4 @@
// tests/mod.rs
pub mod adresar;
pub mod tables_data;
pub mod common;
pub mod table_definition;

View File

@@ -0,0 +1,3 @@
// server/tests/table_definition/mod.rs
pub mod post_table_definition_test;

View File

@@ -0,0 +1,601 @@
// tests/table_definition/post_table_definition_test.rs
// Keep all your normal use statements
use common::proto::multieko2::table_definition::{
ColumnDefinition, PostTableDefinitionRequest, TableLink,
};
use rstest::{fixture, rstest};
use server::table_definition::handlers::post_table_definition;
use sqlx::{postgres::PgPoolOptions, Connection, Executor, PgConnection, PgPool, Row}; // Add PgConnection etc.
use tonic::Code;
// Add these extra use statements for the isolation logic
use rand::distr::Alphanumeric;
use rand::Rng;
use std::env;
use dotenvy;
use std::path::Path;
async fn setup_isolated_gen_schema_db() -> PgPool {
let manifest_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR must be set");
let env_path = Path::new(&manifest_dir).join(".env_test");
dotenvy::from_path(env_path).ok();
let database_url = env::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set");
let unique_schema_name = format!(
"test_{}",
rand::rng()
.sample_iter(&Alphanumeric)
.take(12)
.map(char::from)
.collect::<String>()
);
let mut root_conn = PgConnection::connect(&database_url).await.unwrap();
// Create the test schema
root_conn
.execute(format!("CREATE SCHEMA \"{}\"", unique_schema_name).as_str())
.await
.unwrap();
// Create schemas A and B for cross-profile tests
root_conn
.execute("CREATE SCHEMA IF NOT EXISTS \"A\"")
.await
.unwrap();
root_conn
.execute("CREATE SCHEMA IF NOT EXISTS \"B\"")
.await
.unwrap();
// IMPORTANT: Create the "default" schema if it doesn't exist
root_conn
.execute("CREATE SCHEMA IF NOT EXISTS \"default\"")
.await
.unwrap();
let pool = PgPoolOptions::new()
.max_connections(5)
.after_connect(move |conn, _meta| {
let schema = unique_schema_name.clone();
Box::pin(async move {
// Set search path to include test schema, default, A, B, and public
conn.execute(format!("SET search_path = '{}', 'default', 'A', 'B', 'public'", schema).as_str())
.await?;
Ok(())
})
})
.connect(&database_url)
.await
.expect("Failed to create isolated pool");
sqlx::migrate!()
.run(&pool)
.await
.expect("Migrations failed in isolated schema");
// Insert into the schemas table - use INSERT ... ON CONFLICT to avoid duplicates
sqlx::query!(
"INSERT INTO schemas (name) VALUES ('default'), ('A'), ('B') ON CONFLICT (name) DO NOTHING"
)
.execute(&pool)
.await
.expect("Failed to insert test schemas");
pool
}
// ========= Fixtures for THIS FILE ONLY =========
#[fixture]
async fn pool() -> PgPool {
// This fixture now calls the LOCAL, SPECIALIZED setup function.
setup_isolated_gen_schema_db().await
}
#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
let pool = pool.await;
pool.close().await;
pool
}
/// This fixture now works perfectly and is also isolated,
/// because it depends on the `pool` fixture above. No changes needed here!
#[fixture]
async fn pool_with_preexisting_table(#[future] pool: PgPool) -> PgPool {
let pool = pool.await;
let create_customers_req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "customers".into(),
columns: vec![ColumnDefinition {
name: "customer_name".into(),
field_type: "text".into(),
}],
indexes: vec!["customer_name".into()],
links: vec![],
};
post_table_definition(&pool, create_customers_req)
.await
.expect("Failed to create pre-requisite 'customers' table");
pool
}
// ========= Helper Functions =========
/// Checks the PostgreSQL information_schema to verify a table and its columns exist.
async fn assert_table_structure_is_correct(
pool: &PgPool,
schema_name: &str, // ADD: schema parameter
table_name: &str,
expected_cols: &[(&str, &str)],
) {
let table_exists = sqlx::query_scalar::<_, bool>(
"SELECT EXISTS (
SELECT FROM information_schema.tables
WHERE table_schema = $1 AND table_name = $2
)",
)
.bind(schema_name) // CHANGE: use dynamic schema instead of 'gen'
.bind(table_name)
.fetch_one(pool)
.await
.unwrap();
assert!(table_exists, "Table '{}.{}' was not created", schema_name, table_name); // CHANGE: dynamic schema in error message
for (col_name, col_type) in expected_cols {
let record = sqlx::query(
"SELECT data_type FROM information_schema.columns
WHERE table_schema = $1 AND table_name = $2 AND column_name = $3",
)
.bind(schema_name) // CHANGE: use dynamic schema instead of 'gen'
.bind(table_name)
.bind(col_name)
.fetch_optional(pool)
.await
.unwrap();
let found_type = record.unwrap_or_else(|| panic!("Column '{}' not found in table '{}.{}'", col_name, schema_name, table_name)).get::<String, _>("data_type"); // CHANGE: dynamic schema in error message
// Handle type mappings, e.g., TEXT -> character varying, NUMERIC -> numeric
let normalized_found_type = found_type.to_lowercase();
let normalized_expected_type = col_type.to_lowercase();
assert!(
normalized_found_type.contains(&normalized_expected_type),
"Column '{}' has wrong type. Expected: {}, Found: {}",
col_name,
col_type,
found_type
);
}
}
// ========= Tests =========
#[rstest]
#[tokio::test]
async fn test_create_table_success(#[future] pool: PgPool) {
// Arrange
let pool = pool.await;
let request = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "invoices".into(),
columns: vec![
ColumnDefinition {
name: "invoice_number".into(),
field_type: "text".into(),
},
ColumnDefinition {
name: "amount".into(),
field_type: "decimal(10, 2)".into(),
},
],
indexes: vec!["invoice_number".into()],
links: vec![],
};
// Act
let response = post_table_definition(&pool, request).await.unwrap();
// Assert
assert!(response.success);
assert!(response.sql.contains("CREATE TABLE \"default\".\"invoices\""));
assert!(response.sql.contains("\"invoice_number\" TEXT"));
assert!(response.sql.contains("\"amount\" NUMERIC(10, 2)"));
assert!(response
.sql
.contains("CREATE INDEX \"idx_invoices_invoice_number\""));
// Verify actual DB state - FIXED: Added schema parameter
assert_table_structure_is_correct(
&pool,
"default", // Schema name parameter
"invoices",
&[
("id", "bigint"),
("deleted", "boolean"),
("invoice_number", "text"),
("amount", "numeric"),
("created_at", "timestamp with time zone"),
],
)
.await;
}
#[rstest]
#[tokio::test]
async fn test_fail_on_invalid_decimal_format(#[future] pool: PgPool) {
let pool = pool.await;
let invalid_types = vec![
"decimal(0,0)", // precision too small
"decimal(5,10)", // scale > precision
"decimal(10)", // missing scale
"decimal(a,b)", // non-numeric
];
for invalid_type in invalid_types {
let request = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: format!("table_{}", invalid_type),
columns: vec![ColumnDefinition {
name: "amount".into(),
field_type: invalid_type.into(),
}],
..Default::default()
};
let result = post_table_definition(&pool, request).await;
assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
}
}
#[rstest]
#[tokio::test]
async fn test_create_table_with_link(
#[future] pool_with_preexisting_table: PgPool,
) {
// Arrange
let pool = pool_with_preexisting_table.await;
let request = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "orders".into(),
columns: vec![],
indexes: vec![],
links: vec![TableLink { // CORRECTED
linked_table_name: "customers".into(),
required: true,
}],
};
// Act
let response = post_table_definition(&pool, request).await.unwrap();
// Assert
assert!(response.success);
assert!(response.sql.contains(
"\"customers_id\" BIGINT NOT NULL REFERENCES \"default\".\"customers\"(id)"
));
assert!(response
.sql
.contains("CREATE INDEX \"idx_orders_customers_fk\""));
// Verify actual DB state - FIXED: Added schema parameter
assert_table_structure_is_correct(
&pool,
"default", // Schema name parameter
"orders",
&[("customers_id", "bigint")],
)
.await;
}
#[rstest]
#[tokio::test]
async fn test_fail_on_duplicate_table_name(#[future] pool: PgPool) {
// Arrange
let pool = pool.await;
let request = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "reused_name".into(),
..Default::default()
};
// Create it once
post_table_definition(&pool, request.clone()).await.unwrap();
// Act: Try to create it again
let result = post_table_definition(&pool, request).await;
// Assert
let err = result.unwrap_err();
assert_eq!(err.code(), Code::AlreadyExists);
assert_eq!(err.message(), "Table already exists in this profile");
}
#[rstest]
#[tokio::test]
async fn test_fail_on_invalid_table_name(#[future] pool: PgPool) {
let pool = pool.await;
let mut request = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "ends_with_id".into(), // Invalid name
..Default::default()
};
let result = post_table_definition(&pool, request.clone()).await;
assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
request.table_name = "deleted".into(); // Reserved name
let result = post_table_definition(&pool, request.clone()).await;
assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
}
#[rstest]
#[tokio::test]
async fn test_fail_on_invalid_column_type(#[future] pool: PgPool) {
// Arrange
let pool = pool.await;
let request = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "bad_col_type".into(),
columns: vec![ColumnDefinition {
name: "some_col".into(),
field_type: "super_string_9000".into(), // Invalid type
}],
..Default::default()
};
// Act
let result = post_table_definition(&pool, request).await;
// Assert
let err = result.unwrap_err();
assert_eq!(err.code(), Code::InvalidArgument);
assert!(err.message().contains("Invalid field type"));
}
#[rstest]
#[tokio::test]
async fn test_fail_on_index_for_nonexistent_column(#[future] pool: PgPool) {
// Arrange
let pool = pool.await;
let request = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "bad_index".into(),
columns: vec![ColumnDefinition {
name: "real_column".into(),
field_type: "text".into(),
}],
indexes: vec!["fake_column".into()], // Index on a column not in the list
..Default::default()
};
let result = post_table_definition(&pool, request).await;
assert!(result.is_err());
if let Err(err) = result {
assert!(err.message().contains("Index column 'fake_column' not found"));
}
}
#[rstest]
#[tokio::test]
async fn test_fail_on_link_to_nonexistent_table(#[future] pool: PgPool) {
// Arrange
let pool = pool.await;
let request = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "bad_link".into(),
links: vec![TableLink { // CORRECTED
linked_table_name: "i_do_not_exist".into(),
required: false,
}],
..Default::default()
};
// Act
let result = post_table_definition(&pool, request).await;
// Assert
let err = result.unwrap_err();
assert_eq!(err.code(), Code::NotFound);
assert!(err.message().contains("Linked table i_do_not_exist not found"));
}
#[rstest]
#[tokio::test]
async fn test_database_error_on_closed_pool(
#[future] closed_pool: PgPool,
) {
// Arrange
let pool = closed_pool.await;
let request = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "wont_be_created".into(),
..Default::default()
};
// Act
let result = post_table_definition(&pool, request).await;
// Assert
assert_eq!(result.unwrap_err().code(), Code::Internal);
}
// Tests that minimal, uppercase and whitespace-padded decimal specs
// are accepted and correctly mapped to NUMERIC(p, s).
#[rstest]
#[tokio::test]
async fn test_valid_decimal_variants(#[future] pool: PgPool) {
let pool = pool.await;
let cases = vec![
("decimal(1,1)", "NUMERIC(1, 1)"),
("decimal(1,0)", "NUMERIC(1, 0)"),
("DECIMAL(5,2)", "NUMERIC(5, 2)"),
("decimal( 5 , 2 )", "NUMERIC(5, 2)"),
];
for (i, (typ, expect)) in cases.into_iter().enumerate() {
let request = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: format!("dec_valid_{}", i),
columns: vec![ColumnDefinition {
name: "amount".into(),
field_type: typ.into(),
}],
..Default::default()
};
let resp = post_table_definition(&pool, request).await.unwrap();
assert!(resp.success, "{}", typ);
assert!(
resp.sql.contains(expect),
"expected `{}` to map to {}, got `{}`",
typ,
expect,
resp.sql
);
}
}
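Taken together with the rejection cases, these variants pin down a parser contract; a minimal sketch of a `decimal(p, s)` validator that would satisfy both tests (illustrative, not the server's actual code):

// Accepts "decimal(p, s)" in any casing with optional inner whitespace;
// rejects missing or non-numeric parts, p == 0, overflowing p, and s > p.
fn parse_decimal_spec(input: &str) -> Result<(u32, u32), String> {
    let lower = input.trim().to_lowercase();
    let body = lower
        .strip_prefix("decimal(")
        .and_then(|rest| rest.strip_suffix(')'))
        .ok_or("not a decimal(p, s) spec")?;
    let mut parts = body.split(',');
    let p: u32 = parts.next().ok_or("missing precision")?.trim()
        .parse().map_err(|_| "invalid precision")?;
    let s: u32 = parts.next().ok_or("missing scale")?.trim()
        .parse().map_err(|_| "invalid scale")?;
    if parts.next().is_some() {
        return Err("too many arguments".into());
    }
    if p == 0 || p > 1000 {
        return Err("invalid precision".into()); // PostgreSQL caps NUMERIC at 1000
    }
    if s > p {
        return Err("scale exceeds precision".into());
    }
    Ok((p, s))
}
// parse_decimal_spec("DECIMAL( 5 , 2 )") == Ok((5, 2)), while
// "decimal(5,10)", "decimal(10)", and "decimal(9999999999,1)" all fail.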
// Tests that malformed decimal inputs are rejected with InvalidArgument.
#[rstest]
#[tokio::test]
async fn test_fail_on_malformed_decimal_inputs(#[future] pool: PgPool) {
let pool = pool.await;
let bad = vec!["decimal", "decimal()", "decimal(5,)", "decimal(,2)", "decimal(, )"];
for (i, typ) in bad.into_iter().enumerate() {
let request = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: format!("dec_bad_{}", i),
columns: vec![ColumnDefinition {
name: "amt".into(),
field_type: typ.into(),
}],
..Default::default()
};
let err = post_table_definition(&pool, request).await.unwrap_err();
assert_eq!(err.code(), Code::InvalidArgument, "{}", typ);
}
}
// Tests that obviously invalid column identifiers are rejected
// (start with digit/underscore, contain space or hyphen, or are empty).
#[rstest]
#[tokio::test]
async fn test_fail_on_invalid_column_names(#[future] pool: PgPool) {
let pool = pool.await;
let bad_names = vec!["1col", "_col", "col name", "col-name", ""];
for name in bad_names {
let request = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "tbl_invalid_cols".into(),
columns: vec![ColumnDefinition {
name: name.into(),
field_type: "text".into(),
}],
..Default::default()
};
let err = post_table_definition(&pool, request).await.unwrap_err();
assert_eq!(err.code(), Code::InvalidArgument, "{}", name);
}
}
// Tests that a user-supplied column ending in "_id" is rejected
// to avoid collision with system-generated FKs.
#[rstest]
#[tokio::test]
async fn test_fail_on_column_name_suffix_id(#[future] pool: PgPool) {
let pool = pool.await;
let request = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "valid_table".into(), // FIXED: Use valid table name
columns: vec![ColumnDefinition {
name: "invalid_column_id".into(), // FIXED: Test invalid COLUMN name
field_type: "text".into(),
}],
..Default::default()
};
let result = post_table_definition(&pool, request).await;
assert!(result.is_err());
if let Err(status) = result {
// UPDATED: Should mention column, not table
assert!(status.message().contains("Column name") &&
status.message().contains("end with '_id'"));
}
}
#[rstest]
#[tokio::test]
async fn test_invalid_characters_are_rejected(#[future] pool: PgPool) {
// RENAMED: was test_name_sanitization
let pool = pool.await;
let req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "My-Table!".into(), // Invalid characters
columns: vec![ColumnDefinition {
name: "col".into(),
field_type: "text".into(),
}],
..Default::default()
};
// CHANGED: Now expects error instead of sanitization
let result = post_table_definition(&pool, req).await;
assert!(result.is_err());
if let Err(status) = result {
assert_eq!(status.code(), tonic::Code::InvalidArgument);
assert!(status.message().contains("Table name contains invalid characters"));
}
}
#[rstest]
#[tokio::test]
async fn test_unicode_characters_are_rejected(#[future] pool: PgPool) {
// RENAMED: was test_sanitization_of_unicode_and_special_chars
let pool = pool.await;
let request = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "produits_😂".into(), // Invalid unicode
columns: vec![ColumnDefinition {
name: "col_normal".into(), // Valid name
field_type: "text".into(),
}],
..Default::default()
};
// CHANGED: Now expects error instead of sanitization
let result = post_table_definition(&pool, request).await;
assert!(result.is_err());
if let Err(status) = result {
assert_eq!(status.code(), tonic::Code::InvalidArgument);
assert!(status.message().contains("Table name contains invalid characters"));
}
}
#[rstest]
#[tokio::test]
async fn test_sql_injection_attempts_are_rejected(#[future] pool: PgPool) {
let pool = pool.await;
let req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "users; DROP TABLE users;".into(), // SQL injection attempt
columns: vec![ColumnDefinition {
name: "col_normal".into(), // Valid name
field_type: "text".into(),
}],
..Default::default()
};
// CHANGED: Now expects error instead of sanitization
let result = post_table_definition(&pool, req).await;
assert!(result.is_err());
if let Err(status) = result {
assert_eq!(status.code(), tonic::Code::InvalidArgument);
assert!(status.message().contains("Table name contains invalid characters"));
}
}
include!("post_table_definition_test2.rs");
include!("post_table_definition_test3.rs");
include!("post_table_definition_test4.rs");
include!("post_table_definition_test5.rs");
include!("post_table_definition_test6.rs");

View File

@@ -0,0 +1,510 @@
// ============================================================================
// Additional edge-case tests for PostTableDefinition
// ============================================================================
// 1) Field-type mapping for every predefined key, in various casing.
#[rstest]
#[tokio::test]
async fn test_field_type_mapping_various_casing(#[future] pool: PgPool) {
let pool = pool.await;
let cases = vec![
("text", "TEXT", "text"),
("TEXT", "TEXT", "text"),
("TeXt", "TEXT", "text"),
("string", "TEXT", "text"),
("boolean", "BOOLEAN", "boolean"),
("Boolean", "BOOLEAN", "boolean"),
("timestamp", "TIMESTAMPTZ", "timestamp with time zone"),
("time", "TIMESTAMPTZ", "timestamp with time zone"),
("money", "NUMERIC(14, 4)", "numeric"),
("integer", "INTEGER", "integer"),
("date", "DATE", "date"),
];
for (i, &(input, expected_sql, expected_db)) in cases.iter().enumerate() {
let tbl = format!("ftm_{}", i);
let req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: tbl.clone(),
columns: vec![ColumnDefinition {
name: "col".into(),
field_type: input.into(),
}],
..Default::default()
};
let resp = post_table_definition(&pool, req).await.unwrap();
assert!(
resp.sql.contains(&format!("\"col\" {}", expected_sql)),
"fieldtype {:?} did not map to {} in `{}`",
input,
expected_sql,
resp.sql
);
assert_table_structure_is_correct(
&pool,
"default", // FIXED: Added schema parameter
&tbl,
&[
("id", "bigint"),
("deleted", "boolean"),
("col", expected_db),
("created_at", "timestamp with time zone"),
],
)
.await;
}
}
// 3) Invalid index names must be rejected.
#[rstest]
#[tokio::test]
async fn test_fail_on_invalid_index_names(#[future] pool: PgPool) {
let pool = pool.await;
let test_cases = vec![
("1col", "Index name cannot start with a number"),
("_col", "Index name cannot start with underscore"),
("col-name", "Index name contains invalid characters"),
];
for (idx, expected_error) in test_cases {
let req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "idx_bad".into(),
columns: vec![ColumnDefinition {
name: "good".into(),
field_type: "text".into(),
}],
indexes: vec![idx.into()],
..Default::default()
};
let result = post_table_definition(&pool, req).await;
assert!(result.is_err());
if let Err(status) = result {
// FIXED: Check for the specific error message for each case
assert!(status.message().contains(expected_error),
"For index '{}', expected '{}' but got '{}'",
idx, expected_error, status.message());
}
}
}
// 4) More invalid-table-name cases: starts with digit/underscore or sanitizes to empty.
#[rstest]
#[tokio::test]
async fn test_fail_on_more_invalid_table_names(#[future] pool: PgPool) {
let pool = pool.await;
let cases = vec![
("1tbl", "invalid table name"),
("_tbl", "invalid table name"),
];
for (name, expected_msg) in cases {
let req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: name.into(),
..Default::default()
};
let result = post_table_definition(&pool, req).await;
assert!(result.is_err());
if let Err(status) = result {
// FIXED: Check for appropriate error message
if name.starts_with('_') {
assert!(status.message().contains("Table name cannot start with underscore"));
} else if name.chars().next().unwrap().is_ascii_digit() {
assert!(status.message().contains("Table name cannot start with a number"));
}
}
}
}
// 5) Name sanitization is gone: mixed-case table names with invalid characters are rejected outright.
#[rstest]
#[tokio::test]
async fn test_name_sanitization(#[future] pool: PgPool) {
let pool = pool.await;
let req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "My-Table!123".into(), // Invalid characters
columns: vec![ColumnDefinition {
name: "user_name".into(),
field_type: "text".into(),
}],
..Default::default()
};
// FIXED: Now expect error instead of success
let result = post_table_definition(&pool, req).await;
assert!(result.is_err());
if let Err(status) = result {
assert!(status.message().contains("Table name contains invalid characters"));
}
}
// 6) Creating a table with no custom columns, indexes, or links → only system columns.
#[rstest]
#[tokio::test]
async fn test_create_minimal_table(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = "test_minimal";
let req = PostTableDefinitionRequest {
profile_name: profile_name.into(),
table_name: "minimal".into(),
..Default::default()
};
let resp = post_table_definition(&pool, req).await.unwrap();
assert!(resp.sql.contains("id BIGSERIAL PRIMARY KEY"));
assert!(resp.sql.contains("deleted BOOLEAN NOT NULL"));
assert!(resp.sql.contains("created_at TIMESTAMPTZ"));
assert_table_structure_is_correct(
&pool,
profile_name,
"minimal",
&[
("id", "bigint"),
("deleted", "boolean"),
("created_at", "timestamp with time zone"),
],
)
.await;
}
// 7) Required & optional links: NOT NULL vs NULL.
#[rstest]
#[tokio::test]
async fn test_nullable_and_multiple_links(#[future] pool: PgPool) {
let pool = pool.await;
// FIXED: Use different prefixes to avoid FK column collisions
let unique_suffix = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_millis() % 1000000;
let customers_table = format!("customers_{}", unique_suffix);
let suppliers_table = format!("suppliers_{}", unique_suffix); // Different prefix
let orders_table = format!("orders_{}", unique_suffix);
// Create customers table
let customers_req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: customers_table.clone(),
columns: vec![ColumnDefinition {
name: "name".into(),
field_type: "text".into(),
}],
..Default::default()
};
post_table_definition(&pool, customers_req).await
.expect("Failed to create customers table");
// Create suppliers table
let suppliers_req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: suppliers_table.clone(),
columns: vec![ColumnDefinition {
name: "name".into(),
field_type: "text".into(),
}],
..Default::default()
};
post_table_definition(&pool, suppliers_req).await
.expect("Failed to create suppliers table");
// Create orders table that links to both
let orders_req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: orders_table.clone(),
columns: vec![ColumnDefinition {
name: "amount".into(),
field_type: "text".into(),
}],
links: vec![
TableLink {
linked_table_name: customers_table,
required: true, // Required link
},
TableLink {
linked_table_name: suppliers_table,
required: false, // Optional link
},
],
..Default::default()
};
let resp = post_table_definition(&pool, orders_req).await
.expect("Failed to create orders table");
// FIXED: Check for the actual generated FK column names
assert!(
resp.sql.contains(&format!("\"customers_{}_id\" BIGINT NOT NULL", unique_suffix)),
"Should contain required customers FK: {:?}",
resp.sql
);
assert!(
resp.sql.contains(&format!("\"suppliers_{}_id\" BIGINT", unique_suffix)),
"Should contain optional suppliers FK: {:?}",
resp.sql
);
// Check database-level nullability for optional FK
let is_nullable: String = sqlx::query_scalar!(
"SELECT is_nullable \
FROM information_schema.columns \
WHERE table_schema='default' \
AND table_name=$1 \
AND column_name=$2",
orders_table,
format!("suppliers_{}_id", unique_suffix)
)
.fetch_one(&pool)
.await
.unwrap()
.unwrap();
assert_eq!(is_nullable, "YES");
}
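The assertions above imply the generated FK column is named `<linked_table>_id` and is NOT NULL only for required links; a sketch of that rendering with illustrative names, not the server's actual code:

// Illustrative: how the FK column DDL for a link is plausibly rendered.
fn fk_column_sql(linked_table: &str, schema: &str, required: bool) -> String {
    format!(
        "\"{}_id\" BIGINT{} REFERENCES \"{}\".\"{}\"(id)",
        linked_table,
        if required { " NOT NULL" } else { "" },
        schema,
        linked_table
    )
}
// fk_column_sql("customers", "default", true)
//   == "\"customers_id\" BIGINT NOT NULL REFERENCES \"default\".\"customers\"(id)"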
// 8) Duplicate links in one request → InvalidArgument.
#[rstest]
#[tokio::test]
async fn test_fail_on_duplicate_links(#[future] pool: PgPool) {
let pool = pool.await;
let unique_id = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos();
let customers_table = format!("customers_{}", unique_id);
// Create the prerequisite table
let prereq_req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: customers_table.clone(),
columns: vec![],
links: vec![],
indexes: vec![],
};
post_table_definition(&pool, prereq_req).await.expect("Failed to create prerequisite table");
// Now, test the duplicate link scenario
let req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: format!("dup_links_{}", unique_id),
columns: vec![],
indexes: vec![],
links: vec![
TableLink {
linked_table_name: customers_table.clone(),
required: true,
},
TableLink {
linked_table_name: customers_table.clone(),
required: false,
},
],
};
let err = post_table_definition(&pool, req).await.unwrap_err();
assert_eq!(err.code(), Code::InvalidArgument);
assert!(err.message().contains(&format!("Duplicate link to table '{}'", customers_table)));
}
// 9) Self-referential FK: link child back to same-profile parent.
#[rstest]
#[tokio::test]
async fn test_self_referential_link(#[future] pool: PgPool) {
let pool = pool.await;
post_table_definition(
&pool,
PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "selfref".into(),
..Default::default()
},
)
.await
.unwrap();
let resp = post_table_definition(
&pool,
PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "selfref_child".into(),
links: vec![TableLink {
linked_table_name: "selfref".into(),
required: true,
}],
..Default::default()
},
)
.await
.unwrap();
assert!(
resp
.sql
.contains("\"selfref_id\" BIGINT NOT NULL REFERENCES \"default\".\"selfref\"(id)"), // FIXED: Changed from gen to "default"
"{:?}",
resp.sql
);
}
// 11) Cross-profile uniqueness & link isolation.
#[rstest]
#[tokio::test]
async fn test_cross_profile_uniqueness_and_link_isolation(#[future] pool: PgPool) {
let pool = pool.await;
// Profile a: foo (CHANGED: lowercase)
post_table_definition(&pool, PostTableDefinitionRequest {
profile_name: "a".into(), // CHANGED: was "A"
table_name: "foo".into(),
columns: vec![ColumnDefinition { name: "col".into(), field_type: "text".into() }],
..Default::default()
}).await.unwrap();
// Profile b: foo, bar (CHANGED: lowercase)
post_table_definition(&pool, PostTableDefinitionRequest {
profile_name: "b".into(), // CHANGED: was "B"
table_name: "foo".into(),
columns: vec![ColumnDefinition { name: "col".into(), field_type: "text".into() }],
..Default::default()
}).await.unwrap();
post_table_definition(&pool, PostTableDefinitionRequest {
profile_name: "b".into(), // CHANGED: was "B"
table_name: "bar".into(),
columns: vec![ColumnDefinition { name: "col".into(), field_type: "text".into() }],
..Default::default()
}).await.unwrap();
// a linking to b.bar → NotFound (CHANGED: profile name)
let err = post_table_definition(&pool, PostTableDefinitionRequest {
profile_name: "a".into(), // CHANGED: was "A"
table_name: "linker".into(),
columns: vec![ColumnDefinition { name: "col".into(), field_type: "text".into() }],
links: vec![TableLink {
linked_table_name: "bar".into(),
required: false,
}],
..Default::default()
}).await.unwrap_err();
assert_eq!(err.code(), Code::NotFound);
}
// 12) SQL-injection attempts are rejected.
#[rstest]
#[tokio::test]
async fn test_sql_injection_sanitization(#[future] pool: PgPool) {
let pool = pool.await;
let req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "users; DROP TABLE users;".into(), // SQL injection attempt
columns: vec![ColumnDefinition {
name: "col_drop".into(),
field_type: "text".into(),
}],
..Default::default()
};
// FIXED: Now expect error instead of success
let result = post_table_definition(&pool, req).await;
assert!(result.is_err());
if let Err(status) = result {
assert!(status.message().contains("Table name contains invalid characters"));
}
}
// 13) Reserved-column shadowing: id, deleted, created_at cannot be user-defined.
#[rstest]
#[tokio::test]
async fn test_reserved_column_shadowing(#[future] pool: PgPool) {
let pool = pool.await;
for col in &["id", "deleted", "created_at"] {
let req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: format!("tbl_{}", col),
columns: vec![ColumnDefinition {
name: (*col).into(),
field_type: "text".into(),
}],
..Default::default()
};
let err = post_table_definition(&pool, req).await.unwrap_err();
assert_eq!(err.code(), Code::InvalidArgument, "{:?}", col); // FIXED: Changed from Internal to InvalidArgument
}
}
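A guard of roughly this shape (a sketch of what the test implies, not the server's code) would produce the asserted InvalidArgument:

// Illustrative: the three system columns may not be shadowed by user columns.
const RESERVED_COLUMNS: &[&str] = &["id", "deleted", "created_at"];

fn reject_reserved_column(name: &str) -> Result<(), tonic::Status> {
    if RESERVED_COLUMNS.contains(&name) {
        return Err(tonic::Status::invalid_argument(format!(
            "Column name '{}' is reserved",
            name
        )));
    }
    Ok(())
}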
// 14) Identifier-length overflow (>63 chars) yields InvalidArgument.
#[rstest]
#[tokio::test]
async fn test_long_identifier_length(#[future] pool: PgPool) {
let pool = pool.await;
let long = "a".repeat(64);
let req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: long.clone(),
columns: vec![ColumnDefinition {
name: long.clone(),
field_type: "text".into(),
}],
..Default::default()
};
let err = post_table_definition(&pool, req).await.unwrap_err();
assert_eq!(err.code(), Code::InvalidArgument);
}
// 15) Decimal precision overflow must be caught by our parser.
#[rstest]
#[tokio::test]
async fn test_decimal_precision_overflow(#[future] pool: PgPool) {
let pool = pool.await;
let req = PostTableDefinitionRequest {
profile_name: "default".into(),
table_name: "dp_overflow".into(),
columns: vec![ColumnDefinition {
name: "amount".into(),
field_type: "decimal(9999999999,1)".into(),
}],
..Default::default()
};
let err = post_table_definition(&pool, req).await.unwrap_err();
assert_eq!(err.code(), Code::InvalidArgument);
assert!(
err
.message()
.to_lowercase()
.contains("invalid precision"),
"{}",
err.message()
);
}
// 16) Repeated profile insertion only creates one profile row.
#[rstest]
#[tokio::test]
async fn test_repeated_profile_insertion(#[future] pool: PgPool) {
let pool = pool.await;
let prof = "repeat_prof";
post_table_definition(
&pool,
PostTableDefinitionRequest {
profile_name: prof.into(),
table_name: "t1".into(),
..Default::default()
},
)
.await
.unwrap();
post_table_definition(
&pool,
PostTableDefinitionRequest {
profile_name: prof.into(),
table_name: "t2".into(),
..Default::default()
},
)
.await
.unwrap();
let cnt: i64 = sqlx::query_scalar!(
"SELECT COUNT(*) FROM schemas WHERE name = $1", // FIXED: Changed from profiles to schemas
prof
)
.fetch_one(&pool)
.await
.unwrap()
.unwrap();
assert_eq!(cnt, 1);
}

Some files were not shown because too many files have changed in this diff.