Compare commits

43 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 625c9b3e09 | |
| | e20623ed53 | |
| | aa9adf7348 | |
| | 2e82aba0d1 | |
| | b7a3f0f8d9 | |
| | 38c82389f7 | |
| | cb0a2bee17 | |
| | dc99131794 | |
| | 5c23f61a10 | |
| | f87e3c03cb | |
| | d346670839 | |
| | 560d8b7234 | |
| | b297c2b311 | |
| | d390c567d5 | |
| | 029e614b9c | |
| | f9a78e4eec | |
| | d8758f7531 | |
| | 4e86ecff84 | |
| | 070d091e07 | |
| | 7403b3c3f8 | |
| | 1b1e7b7205 | |
| | 1b8f19f1ce | |
| | 2a14eadf34 | |
| | fd36cd5795 | |
| | f4286ac3c9 | |
| | 92d5eb4844 | |
| | 87b9f6ab87 | |
| | 06d98aab5c | |
| | 298f56a53c | |
| | 714a5f2f1c | |
| | 4e29d0084f | |
| | 63f1b4da2e | |
| | 9477f53432 | |
| | ed786f087c | |
| | 8e22ea05ff | |
| | 8414657224 | |
| | e25213ed1b | |
| | 4843b0778c | |
| | f5fae98c69 | |
| | 6faf0a4a31 | |
| | 011fafc0ff | |
| | 8ebe74484c | |
| | 3eb9523103 | |
Cargo.lock (generated, 235 lines changed)
@@ -65,6 +65,17 @@ version = "2.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
 
+[[package]]
+name = "ahash"
+version = "0.7.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9"
+dependencies = [
+ "getrandom 0.2.15",
+ "once_cell",
+ "version_check",
+]
+
 [[package]]
 name = "ahash"
 version = "0.8.11"

@@ -125,6 +136,12 @@ version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
 
+[[package]]
+name = "arrayvec"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
+
 [[package]]
 name = "as_derive_utils"
 version = "0.11.0"

@@ -312,6 +329,18 @@ dependencies = [
 "crunchy",
 ]
 
+[[package]]
+name = "bitvec"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
+dependencies = [
+ "funty",
+ "radium",
+ "tap",
+ "wyz",
+]
+
 [[package]]
 name = "block-buffer"
 version = "0.10.4"

@@ -356,12 +385,57 @@ dependencies = [
 "syn 2.0.100",
 ]
 
+[[package]]
+name = "borsh"
+version = "1.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce"
+dependencies = [
+ "borsh-derive",
+ "cfg_aliases",
+]
+
+[[package]]
+name = "borsh-derive"
+version = "1.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3"
+dependencies = [
+ "once_cell",
+ "proc-macro-crate",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.100",
+]
+
 [[package]]
 name = "bumpalo"
 version = "3.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf"
 
+[[package]]
+name = "bytecheck"
+version = "0.6.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2"
+dependencies = [
+ "bytecheck_derive",
+ "ptr_meta",
+ "simdutf8",
+]
+
+[[package]]
+name = "bytecheck_derive"
+version = "0.6.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "byteorder"
 version = "1.5.0"

@@ -412,6 +486,12 @@ version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 
+[[package]]
+name = "cfg_aliases"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
+
 [[package]]
 name = "chrono"
 version = "0.4.40"

@@ -967,6 +1047,27 @@ dependencies = [
 "windows-sys 0.52.0",
 ]
 
+[[package]]
+name = "funty"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
+
+[[package]]
+name = "futures"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
 [[package]]
 name = "futures-channel"
 version = "0.3.31"

@@ -1046,6 +1147,7 @@ version = "0.3.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
 dependencies = [
+ "futures-channel",
 "futures-core",
 "futures-io",
 "futures-macro",

@@ -1148,6 +1250,9 @@ name = "hashbrown"
 version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+dependencies = [
+ "ahash 0.7.8",
+]
 
 [[package]]
 name = "hashbrown"

@@ -1155,7 +1260,7 @@ version = "0.14.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
 dependencies = [
- "ahash",
+ "ahash 0.8.11",
 "allocator-api2",
 "serde",
 ]

@@ -1659,7 +1764,7 @@ version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6e14eda50a3494b3bf7b9ce51c52434a761e383d7238ce1dd5dcec2fbc13e9fb"
 dependencies = [
- "ahash",
+ "ahash 0.8.11",
 "dashmap",
 "hashbrown 0.14.5",
 "serde",

@@ -2253,7 +2358,7 @@ version = "0.12.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ac98773b7109bc75f475ab5a134c9b64b87e59d776d31098d8f346922396a477"
 dependencies = [
- "arrayvec",
+ "arrayvec 0.5.2",
 "typed-arena",
 "unicode-width 0.1.14",
 ]

@@ -2360,6 +2465,26 @@ dependencies = [
 "prost",
 ]
 
+[[package]]
+name = "ptr_meta"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1"
+dependencies = [
+ "ptr_meta_derive",
+]
+
+[[package]]
+name = "ptr_meta_derive"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "quickscope"
 version = "0.2.0"

@@ -2385,6 +2510,12 @@ version = "5.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
 
+[[package]]
+name = "radium"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
+
 [[package]]
 name = "radix_fmt"
 version = "1.0.0"

@@ -2576,6 +2707,15 @@ version = "1.9.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2"
 
+[[package]]
+name = "rend"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c"
+dependencies = [
+ "bytecheck",
+]
+
 [[package]]
 name = "repr_offset"
 version = "0.2.2"

@@ -2599,6 +2739,35 @@ dependencies = [
 "windows-sys 0.52.0",
 ]
 
+[[package]]
+name = "rkyv"
+version = "0.7.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b"
+dependencies = [
+ "bitvec",
+ "bytecheck",
+ "bytes",
+ "hashbrown 0.12.3",
+ "ptr_meta",
+ "rend",
+ "rkyv_derive",
+ "seahash",
+ "tinyvec",
+ "uuid",
+]
+
+[[package]]
+name = "rkyv_derive"
+version = "0.7.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "rsa"
 version = "0.9.8"

@@ -2659,6 +2828,32 @@ dependencies = [
 "serde_derive",
 ]
 
+[[package]]
+name = "rust_decimal"
+version = "1.37.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b203a6425500a03e0919c42d3c47caca51e79f1132046626d2c8871c5092035d"
+dependencies = [
+ "arrayvec 0.7.6",
+ "borsh",
+ "bytes",
+ "num-traits",
+ "rand 0.8.5",
+ "rkyv",
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "rust_decimal_macros"
+version = "1.37.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6268b74858287e1a062271b988a0c534bf85bbeb567fe09331bf40ed78113d5"
+dependencies = [
+ "quote",
+ "syn 2.0.100",
+]
+
 [[package]]
 name = "rustc-demangle"
 version = "0.1.24"

@@ -2733,6 +2928,12 @@ version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
 
+[[package]]
+name = "seahash"
+version = "4.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b"
+
 [[package]]
 name = "search"
 version = "0.3.13"

@@ -2842,13 +3043,17 @@ dependencies = [
 "common",
 "dashmap",
 "dotenvy",
+ "futures",
 "jsonwebtoken",
 "lazy_static",
 "prost",
 "prost-types",
+ "rand 0.9.1",
 "regex",
 "rstest",
 "rust-stemmers",
+ "rust_decimal",
+ "rust_decimal_macros",
 "search",
 "serde",
 "serde_json",

@@ -2943,6 +3148,12 @@ dependencies = [
 "rand_core 0.6.4",
 ]
 
+[[package]]
+name = "simdutf8"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e"
+
 [[package]]
 name = "simple_asn1"
 version = "0.6.3"

@@ -3059,6 +3270,7 @@ dependencies = [
 "native-tls",
 "once_cell",
 "percent-encoding",
+ "rust_decimal",
 "serde",
 "serde_json",
 "sha2",

@@ -3142,6 +3354,7 @@ dependencies = [
 "percent-encoding",
 "rand 0.8.5",
 "rsa",
+ "rust_decimal",
 "serde",
 "sha1",
 "sha2",

@@ -3182,6 +3395,7 @@ dependencies = [
 "memchr",
 "once_cell",
 "rand 0.8.5",
+ "rust_decimal",
 "serde",
 "serde_json",
 "sha2",

@@ -3559,6 +3773,12 @@ dependencies = [
 "serde",
 ]
 
+[[package]]
+name = "tap"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
+
 [[package]]
 name = "tempfile"
 version = "3.19.1"

@@ -4505,6 +4725,15 @@ version = "0.5.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51"
 
+[[package]]
+name = "wyz"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed"
+dependencies = [
+ "tap",
+]
+
 [[package]]
 name = "yoke"
 version = "0.7.5"
@@ -16,7 +16,7 @@ dotenvy = "0.15.7"
 prost = "0.13.5"
 serde = { version = "1.0.219", features = ["derive"] }
 serde_json = "1.0.140"
-sqlx = { version = "0.8.5", features = ["chrono", "postgres", "runtime-tokio", "runtime-tokio-native-tls", "time", "uuid"] }
+sqlx = { version = "0.8.5", features = ["chrono", "postgres", "runtime-tokio", "runtime-tokio-native-tls", "rust_decimal", "time", "uuid"] }
 tokio = { version = "1.44.2", features = ["full", "macros"] }
 tonic = "0.13.0"
 tonic-reflection = "0.13.0"

@@ -33,6 +33,8 @@ validator = { version = "0.20.0", features = ["derive"] }
 uuid = { version = "1.16.0", features = ["serde", "v4"] }
 jsonwebtoken = "9.3.1"
 rust-stemmers = "1.2.0"
+rust_decimal = "1.37.2"
+rust_decimal_macros = "1.37.1"
 
 [lib]
 name = "server"

@@ -42,3 +44,5 @@ path = "src/lib.rs"
 tokio = { version = "1.44", features = ["full", "test-util"] }
 rstest = "0.25.0"
 lazy_static = "1.5.0"
+rand = "0.9.1"
+futures = "0.3.31"
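The `rust_decimal` feature flag on sqlx, together with the new `rust_decimal`/`rust_decimal_macros` dependencies, is what lets Postgres NUMERIC columns decode into `Decimal` values. A minimal sketch of the intended usage; the `items` table and its `price` column are hypothetical stand-ins, not part of this changeset:

```rust
use rust_decimal::Decimal;
use rust_decimal_macros::dec;
use sqlx::PgPool;

// Sum a NUMERIC(14, 4) column into a rust_decimal::Decimal.
// Requires sqlx's "rust_decimal" feature, as enabled in Cargo.toml above.
async fn total_price(pool: &PgPool) -> Result<Decimal, sqlx::Error> {
    // SUM over zero rows yields NULL, hence the Option.
    let sum: Option<Decimal> = sqlx::query_scalar("SELECT SUM(price) FROM items")
        .fetch_one(pool)
        .await?;
    Ok(sum.unwrap_or(dec!(0)))
}
```

NUMERIC(14, 4) matches the `money` field type that post_table_definition.rs maps later in this diff.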
server/Makefile (new file, 13 lines)

@@ -0,0 +1,13 @@
+# Makefile
+
+test: reset_db run_tests
+
+reset_db:
+	@echo "Resetting test database..."
+	@./scripts/reset_test_db.sh
+
+run_tests:
+	@echo "Running tests..."
+	@cargo test --test mod -- --test-threads=1
+
+.PHONY: test
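With this Makefile, `make test` resets the test database and then runs the suite; `--test-threads=1` forces the tests to run serially, presumably because they all share the single database created by scripts/reset_test_db.sh below.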
@@ -1,24 +0,0 @@
--- Add migration script here
-CREATE TABLE adresar (
-    id BIGSERIAL PRIMARY KEY,
-    deleted BOOLEAN NOT NULL DEFAULT FALSE,
-    firma TEXT NOT NULL,
-    kz TEXT,
-    drc TEXT,
-    ulica TEXT,
-    psc TEXT,
-    mesto TEXT,
-    stat TEXT,
-    banka TEXT,
-    ucet TEXT,
-    skladm TEXT,
-    ico TEXT,
-    kontakt TEXT,
-    telefon TEXT,
-    skladu TEXT,
-    fax TEXT,
-    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
-);
-
-CREATE INDEX idx_adresar_firma ON adresar (firma);
-CREATE INDEX idx_adresar_mesto ON adresar (mesto);
@@ -1,22 +0,0 @@
--- Add migration script here
-CREATE TABLE uctovnictvo (
-    id BIGSERIAL PRIMARY KEY,
-    deleted BOOLEAN NOT NULL DEFAULT FALSE,
-    adresar_id BIGINT NOT NULL REFERENCES adresar(id), -- Link to adresar table
-    c_dokladu TEXT NOT NULL,
-    datum DATE NOT NULL,
-    c_faktury TEXT NOT NULL,
-    obsah TEXT,
-    stredisko TEXT,
-    c_uctu TEXT,
-    md TEXT,
-    identif TEXT,
-    poznanka TEXT,
-    firma TEXT NOT NULL,
-    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
-);
-
-CREATE INDEX idx_uctovnictvo_adresar_id ON uctovnictvo (adresar_id);
-CREATE INDEX idx_uctovnictvo_firma ON uctovnictvo (firma);
-CREATE INDEX idx_uctovnictvo_c_dokladu ON uctovnictvo (c_dokladu);
-CREATE INDEX idx_uctovnictvo_poznanka ON uctovnictvo (poznanka);
@@ -1,9 +1,12 @@
 -- Add migration script here
-CREATE TABLE profiles (
+CREATE TABLE schemas (
     id BIGSERIAL PRIMARY KEY,
     name TEXT NOT NULL UNIQUE,
-    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
+    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
+    description TEXT,
+    is_active BOOLEAN DEFAULT TRUE
 );
 
 -- Create default profile for existing data
-INSERT INTO profiles (name) VALUES ('default');
+INSERT INTO schemas (name) VALUES ('default');
+CREATE SCHEMA IF NOT EXISTS "default";
@@ -1,4 +1,5 @@
 -- Main table definitions
+
 CREATE TABLE table_definitions (
     id BIGSERIAL PRIMARY KEY,
     deleted BOOLEAN NOT NULL DEFAULT FALSE,

@@ -6,7 +7,7 @@ CREATE TABLE table_definitions (
     columns JSONB NOT NULL,
     indexes JSONB NOT NULL,
     created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
-    profile_id BIGINT NOT NULL REFERENCES profiles(id) DEFAULT 1
+    schema_id BIGINT NOT NULL REFERENCES schemas(id)
 );
 
 -- Relationship table for multiple links

@@ -18,9 +19,10 @@ CREATE TABLE table_definition_links (
     PRIMARY KEY (source_table_id, linked_table_id)
 );
 
--- Create composite unique index for profile+table combination
-CREATE UNIQUE INDEX idx_table_definitions_profile_table
-    ON table_definitions (profile_id, table_name);
+-- Create composite unique index for schema+table combination
+CREATE UNIQUE INDEX idx_table_definitions_schema_table
+    ON table_definitions (schema_id, table_name);
 
 CREATE INDEX idx_links_source ON table_definition_links (source_table_id);
 CREATE INDEX idx_links_target ON table_definition_links (linked_table_id);
@@ -8,7 +8,7 @@ CREATE TABLE table_scripts (
     script TEXT NOT NULL,
     description TEXT,
     created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    profile_id BIGINT NOT NULL REFERENCES profiles(id) DEFAULT 1,
+    schema_id BIGINT NOT NULL REFERENCES schemas(id),
     UNIQUE(table_definitions_id, target_column)
 );
@@ -1,3 +0,0 @@
--- Add migration script here
-
-CREATE SCHEMA IF NOT EXISTS gen;
server/scripts/reset_test_db.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
+#!/bin/bash
+# scripts/reset_test_db.sh
+
+DATABASE_URL=${TEST_DATABASE_URL:-"postgres://multi_psql_dev:3@localhost:5432/multi_rust_test"}
+
+echo "Reset db script"
+yes | sqlx database drop --database-url "$DATABASE_URL"
+sqlx database create --database-url "$DATABASE_URL"
+echo "Test database reset complete."
@@ -1,156 +0,0 @@
-❯ grpcurl -plaintext -d '{"id": 1}' localhost:50051 multieko2.adresar.Adresar/GetAdresar
-{
-  "id": "1",
-  "firma": "Updated Firma",
-  "kz": "Updated KZ",
-  "drc": "Updated DRC",
-  "ulica": "Updated Ulica",
-  "psc": "Updated PSC",
-  "mesto": "Updated Mesto",
-  "stat": "Updated Stat",
-  "banka": "Updated Banka",
-  "ucet": "Updated Ucet",
-  "skladm": "Updated Skladm",
-  "ico": "Updated ICO",
-  "kontakt": "Updated Kontakt",
-  "telefon": "Updated Telefon",
-  "skladu": "Updated Skladu",
-  "fax": "Updated Fax"
-}
-❯ grpcurl -plaintext -d '{"id": 2}' localhost:50051 multieko2.adresar.Adresar/GetAdresar
-{
-  "id": "2",
-  "firma": "asdfasf",
-  "kz": " ",
-  "drc": " ",
-  "ulica": " ",
-  "psc": "sdfasdf",
-  "mesto": "asf",
-  "stat": "as",
-  "banka": "df",
-  "ucet": "asf",
-  "skladm": "f",
-  "ico": "f",
-  "kontakt": "f",
-  "telefon": "f",
-  "skladu": "f",
-  "fax": " "
-}
-❯ grpcurl -plaintext -d '{"id": 1}' localhost:50051 multieko2.adresar.Adresar/DeleteAdresar
-{
-  "success": true
-}
-❯ grpcurl -plaintext -d '{"id": 1}' localhost:50051 multieko2.adresar.Adresar/GetAdresar
-ERROR:
-  Code: NotFound
-  Message: no rows returned by a query that expected to return at least one row
-❯ grpcurl -plaintext -d '{"id": 2}' localhost:50051 multieko2.adresar.Adresar/GetAdresar
-{
-  "id": "2",
-  "firma": "asdfasf",
-  "kz": " ",
-  "drc": " ",
-  "ulica": " ",
-  "psc": "sdfasdf",
-  "mesto": "asf",
-  "stat": "as",
-  "banka": "df",
-  "ucet": "asf",
-  "skladm": "f",
-  "ico": "f",
-  "kontakt": "f",
-  "telefon": "f",
-  "skladu": "f",
-  "fax": " "
-}
-
-❯ grpcurl -plaintext -d '{
-  "firma": "New Firma",
-  "kz": "New KZ",
-  "drc": "New DRC",
-  "ulica": "New Ulica",
-  "psc": "New PSC",
-  "mesto": "New Mesto",
-  "stat": "New Stat",
-  "banka": "New Banka",
-  "ucet": "New Ucet",
-  "skladm": "New Skladm",
-  "ico": "New ICO",
-  "kontakt": "New Kontakt",
-  "telefon": "New Telefon",
-  "skladu": "New Skladu",
-  "fax": "New Fax"
-}' localhost:50051 multieko2.adresar.Adresar/PostAdresar
-{
-  "id": "43",
-  "firma": "New Firma",
-  "kz": "New KZ",
-  "drc": "New DRC",
-  "ulica": "New Ulica",
-  "psc": "New PSC",
-  "mesto": "New Mesto",
-  "stat": "New Stat",
-  "banka": "New Banka",
-  "ucet": "New Ucet",
-  "skladm": "New Skladm",
-  "ico": "New ICO",
-  "kontakt": "New Kontakt",
-  "telefon": "New Telefon",
-  "skladu": "New Skladu",
-  "fax": "New Fax"
-}
-❯ grpcurl -plaintext -d '{
-  "id": 43,
-  "firma": "Updated Firma",
-  "kz": "Updated KZ",
-  "drc": "Updated DRC",
-  "ulica": "Updated Ulica",
-  "psc": "Updated PSC",
-  "mesto": "Updated Mesto",
-  "stat": "Updated Stat",
-  "banka": "Updated Banka",
-  "ucet": "Updated Ucet",
-  "skladm": "Updated Skladm",
-  "ico": "Updated ICO",
-  "kontakt": "Updated Kontakt",
-  "telefon": "Updated Telefon",
-  "skladu": "Updated Skladu",
-  "fax": "Updated Fax"
-}' localhost:50051 multieko2.adresar.Adresar/PutAdresar
-{
-  "id": "43",
-  "firma": "Updated Firma",
-  "kz": "Updated KZ",
-  "drc": "Updated DRC",
-  "ulica": "Updated Ulica",
-  "psc": "Updated PSC",
-  "mesto": "Updated Mesto",
-  "stat": "Updated Stat",
-  "banka": "Updated Banka",
-  "ucet": "Updated Ucet",
-  "skladm": "Updated Skladm",
-  "ico": "Updated ICO",
-  "kontakt": "Updated Kontakt",
-  "telefon": "Updated Telefon",
-  "skladu": "Updated Skladu",
-  "fax": "Updated Fax"
-}
-❯ grpcurl -plaintext -d '{"id": 43}' localhost:50051 multieko2.adresar.Adresar/GetAdresar
-{
-  "id": "43",
-  "firma": "Updated Firma",
-  "kz": "Updated KZ",
-  "drc": "Updated DRC",
-  "ulica": "Updated Ulica",
-  "psc": "Updated PSC",
-  "mesto": "Updated Mesto",
-  "stat": "Updated Stat",
-  "banka": "Updated Banka",
-  "ucet": "Updated Ucet",
-  "skladm": "Updated Skladm",
-  "ico": "Updated ICO",
-  "kontakt": "Updated Kontakt",
-  "telefon": "Updated Telefon",
-  "skladu": "Updated Skladu",
-  "fax": "Updated Fax"
-}
@@ -1,29 +0,0 @@
-
-# TOTAL items in the adresar
-❯ grpcurl -plaintext localhost:50051 multieko2.adresar.Adresar/GetAdresarCount
-{
-  "count": "5"
-}
-# Item at this count. If there are 43 items, number 1 is the first item
-❯ grpcurl -plaintext -d '{"position": 1}' localhost:50051 multieko2.adresar.Adresar/GetAdresarByPosition
-{
-  "id": "1",
-  "firma": "ks555",
-  "kz": "f",
-  "drc": "asdf",
-  "ulica": "as",
-  "psc": "f",
-  "mesto": "asf",
-  "stat": "as",
-  "banka": "fa",
-  "telefon": "a",
-  "skladu": "fd",
-  "fax": "asf"
-}
-# Item fetched by id. The first item was created and marked as deleted, therefore id 1 shouldn't be fetched.
-❯ grpcurl -plaintext -d '{"id": 1}' localhost:50051 multieko2.adresar.Adresar/GetAdresar
-ERROR:
-  Code: NotFound
-  Message: no rows returned by a query that expected to return at least one row
@@ -1,15 +0,0 @@
-// src/adresar/handlers.rs
-
-pub mod post_adresar;
-pub mod get_adresar;
-pub mod put_adresar;
-pub mod delete_adresar;
-pub mod get_adresar_count;
-pub mod get_adresar_by_position;
-
-pub use post_adresar::post_adresar;
-pub use get_adresar::get_adresar;
-pub use put_adresar::put_adresar;
-pub use delete_adresar::delete_adresar;
-pub use get_adresar_count::get_adresar_count;
-pub use get_adresar_by_position::get_adresar_by_position;
@@ -1,27 +0,0 @@
-// src/adresar/handlers/delete_adresar.rs
-use tonic::Status;
-use sqlx::PgPool;
-use common::proto::multieko2::adresar::{DeleteAdresarRequest, DeleteAdresarResponse};
-
-pub async fn delete_adresar(
-    db_pool: &PgPool,
-    request: DeleteAdresarRequest,
-) -> Result<DeleteAdresarResponse, Status> {
-    let rows_affected = sqlx::query!(
-        r#"
-        UPDATE adresar
-        SET deleted = true
-        WHERE id = $1 AND deleted = false
-        "#,
-        request.id
-    )
-    .execute(db_pool)
-    .await
-    .map_err(|e| Status::internal(e.to_string()))?
-    .rows_affected();
-
-    Ok(DeleteAdresarResponse {
-        success: rows_affected > 0,
-    })
-}
@@ -1,63 +0,0 @@
-// src/adresar/handlers/get_adresar.rs
-use tonic::Status;
-use sqlx::PgPool;
-use crate::adresar::models::Adresar;
-use common::proto::multieko2::adresar::{GetAdresarRequest, AdresarResponse};
-
-pub async fn get_adresar(
-    db_pool: &PgPool,
-    request: GetAdresarRequest,
-) -> Result<AdresarResponse, Status> {
-    let adresar = sqlx::query_as!(
-        Adresar,
-        r#"
-        SELECT
-            id,
-            deleted,
-            firma,
-            kz,
-            drc,
-            ulica,
-            psc,
-            mesto,
-            stat,
-            banka,
-            ucet,
-            skladm,
-            ico,
-            kontakt,
-            telefon,
-            skladu,
-            fax
-        FROM adresar
-        WHERE id = $1 AND deleted = false
-        "#,
-        request.id
-    )
-    .fetch_one(db_pool)
-    .await
-    .map_err(|e| match e {
-        sqlx::Error::RowNotFound => Status::not_found("Record not found"),
-        _ => Status::internal(format!("Database error: {}", e)),
-    })?;
-
-    Ok(AdresarResponse {
-        id: adresar.id,
-        firma: adresar.firma,
-        kz: adresar.kz.unwrap_or_default(),
-        drc: adresar.drc.unwrap_or_default(),
-        ulica: adresar.ulica.unwrap_or_default(),
-        psc: adresar.psc.unwrap_or_default(),
-        mesto: adresar.mesto.unwrap_or_default(),
-        stat: adresar.stat.unwrap_or_default(),
-        banka: adresar.banka.unwrap_or_default(),
-        ucet: adresar.ucet.unwrap_or_default(),
-        skladm: adresar.skladm.unwrap_or_default(),
-        ico: adresar.ico.unwrap_or_default(),
-        kontakt: adresar.kontakt.unwrap_or_default(),
-        telefon: adresar.telefon.unwrap_or_default(),
-        skladu: adresar.skladu.unwrap_or_default(),
-        fax: adresar.fax.unwrap_or_default(),
-    })
-}
@@ -1,35 +0,0 @@
-// src/adresar/handlers/get_adresar_by_position.rs
-use tonic::{Status};
-use sqlx::PgPool;
-use common::proto::multieko2::adresar::{AdresarResponse, GetAdresarRequest};
-use common::proto::multieko2::common::PositionRequest;
-use super::get_adresar;
-
-pub async fn get_adresar_by_position(
-    db_pool: &PgPool,
-    request: PositionRequest,
-) -> Result<AdresarResponse, Status> {
-    if request.position < 1 {
-        return Err(Status::invalid_argument("Position must be at least 1"));
-    }
-
-    // Find the ID of the Nth non-deleted record
-    let id: i64 = sqlx::query_scalar!(
-        r#"
-        SELECT id
-        FROM adresar
-        WHERE deleted = FALSE
-        ORDER BY id ASC
-        OFFSET $1
-        LIMIT 1
-        "#,
-        request.position - 1
-    )
-    .fetch_optional(db_pool)
-    .await
-    .map_err(|e| Status::internal(e.to_string()))?
-    .ok_or_else(|| Status::not_found("Position out of bounds"))?;
-
-    // Now fetch the complete record using the existing get_adresar function
-    get_adresar(db_pool, GetAdresarRequest { id }).await
-}
@@ -1,23 +0,0 @@
-// src/adresar/handlers/get_adresar_count.rs
-use tonic::Status;
-use sqlx::PgPool;
-use common::proto::multieko2::common::{CountResponse, Empty};
-
-pub async fn get_adresar_count(
-    db_pool: &PgPool,
-    _request: Empty,
-) -> Result<CountResponse, Status> {
-    let count: i64 = sqlx::query_scalar!(
-        r#"
-        SELECT COUNT(*) AS count
-        FROM adresar
-        WHERE deleted = FALSE
-        "#
-    )
-    .fetch_one(db_pool)
-    .await
-    .map_err(|e| Status::internal(e.to_string()))?
-    .unwrap_or(0);
-
-    Ok(CountResponse { count })
-}
@@ -1,99 +0,0 @@
-// src/adresar/handlers/post_adresar.rs
-use tonic::Status;
-use sqlx::PgPool;
-use crate::adresar::models::Adresar;
-use common::proto::multieko2::adresar::{PostAdresarRequest, AdresarResponse};
-
-// Helper function to sanitize inputs
-fn sanitize_input(input: &str) -> Option<String> {
-    let trimmed = input.trim().to_string();
-    if trimmed.is_empty() {
-        None
-    } else {
-        Some(trimmed)
-    }
-}
-
-pub async fn post_adresar(
-    db_pool: &PgPool,
-    mut request: PostAdresarRequest,
-) -> Result<AdresarResponse, Status> {
-    request.firma = request.firma.trim().to_string();
-    if request.firma.is_empty() {
-        return Err(Status::invalid_argument("Firma je povinne pole"));
-    }
-
-    // Sanitize optional fields
-    let kz = sanitize_input(&request.kz);
-    let drc = sanitize_input(&request.drc);
-    let ulica = sanitize_input(&request.ulica);
-    let psc = sanitize_input(&request.psc);
-    let mesto = sanitize_input(&request.mesto);
-    let stat = sanitize_input(&request.stat);
-    let banka = sanitize_input(&request.banka);
-    let ucet = sanitize_input(&request.ucet);
-    let skladm = sanitize_input(&request.skladm);
-    let ico = sanitize_input(&request.ico);
-    let kontakt = sanitize_input(&request.kontakt);
-    let telefon = sanitize_input(&request.telefon);
-    let skladu = sanitize_input(&request.skladu);
-    let fax = sanitize_input(&request.fax);
-
-    let adresar = sqlx::query_as!(
-        Adresar,
-        r#"
-        INSERT INTO adresar (
-            firma, kz, drc, ulica, psc, mesto, stat, banka, ucet,
-            skladm, ico, kontakt, telefon, skladu, fax, deleted
-        )
-        VALUES (
-            $1, $2, $3, $4, $5, $6, $7, $8, $9,
-            $10, $11, $12, $13, $14, $15, $16
-        )
-        RETURNING
-            id, deleted, firma, kz, drc, ulica, psc, mesto, stat,
-            banka, ucet, skladm, ico, kontakt, telefon, skladu, fax
-        "#,
-        request.firma,
-        kz,
-        drc,
-        ulica,
-        psc,
-        mesto,
-        stat,
-        banka,
-        ucet,
-        skladm,
-        ico,
-        kontakt,
-        telefon,
-        skladu,
-        fax,
-        false
-    )
-    .fetch_one(db_pool)
-    .await
-    .map_err(|e| Status::internal(e.to_string()))?;
-
-    Ok(AdresarResponse {
-        id: adresar.id,
-        // Do not include `deleted` in the response since it's not
-        // defined in the proto message.
-        firma: adresar.firma,
-        kz: adresar.kz.unwrap_or_default(),
-        drc: adresar.drc.unwrap_or_default(),
-        ulica: adresar.ulica.unwrap_or_default(),
-        psc: adresar.psc.unwrap_or_default(),
-        mesto: adresar.mesto.unwrap_or_default(),
-        stat: adresar.stat.unwrap_or_default(),
-        banka: adresar.banka.unwrap_or_default(),
-        ucet: adresar.ucet.unwrap_or_default(),
-        skladm: adresar.skladm.unwrap_or_default(),
-        ico: adresar.ico.unwrap_or_default(),
-        kontakt: adresar.kontakt.unwrap_or_default(),
-        telefon: adresar.telefon.unwrap_or_default(),
-        skladu: adresar.skladu.unwrap_or_default(),
-        fax: adresar.fax.unwrap_or_default(),
-    })
-}
@@ -1,122 +0,0 @@
-// src/adresar/handlers/put_adresar.rs
-use tonic::Status;
-use sqlx::PgPool;
-use crate::adresar::models::Adresar;
-use common::proto::multieko2::adresar::{PutAdresarRequest, AdresarResponse};
-
-// Add the same sanitize_input helper as in POST handler
-fn sanitize_input(input: &str) -> Option<String> {
-    let trimmed = input.trim().to_string();
-    if trimmed.is_empty() {
-        None
-    } else {
-        Some(trimmed)
-    }
-}
-
-pub async fn put_adresar(
-    db_pool: &PgPool,
-    mut request: PutAdresarRequest,
-) -> Result<AdresarResponse, Status> {
-    // Add validation for required fields like in POST
-    request.firma = request.firma.trim().to_string();
-    if request.firma.is_empty() {
-        return Err(Status::invalid_argument("Firma je povinne pole"));
-    }
-
-    // Sanitize optional fields like in POST
-    let kz = sanitize_input(&request.kz);
-    let drc = sanitize_input(&request.drc);
-    let ulica = sanitize_input(&request.ulica);
-    let psc = sanitize_input(&request.psc);
-    let mesto = sanitize_input(&request.mesto);
-    let stat = sanitize_input(&request.stat);
-    let banka = sanitize_input(&request.banka);
-    let ucet = sanitize_input(&request.ucet);
-    let skladm = sanitize_input(&request.skladm);
-    let ico = sanitize_input(&request.ico);
-    let kontakt = sanitize_input(&request.kontakt);
-    let telefon = sanitize_input(&request.telefon);
-    let skladu = sanitize_input(&request.skladu);
-    let fax = sanitize_input(&request.fax);
-
-    let adresar = sqlx::query_as!(
-        Adresar,
-        r#"
-        UPDATE adresar
-        SET
-            firma = $2,
-            kz = $3,
-            drc = $4,
-            ulica = $5,
-            psc = $6,
-            mesto = $7,
-            stat = $8,
-            banka = $9,
-            ucet = $10,
-            skladm = $11,
-            ico = $12,
-            kontakt = $13,
-            telefon = $14,
-            skladu = $15,
-            fax = $16
-        WHERE id = $1 AND deleted = FALSE
-        RETURNING
-            id,
-            deleted,
-            firma,
-            kz,
-            drc,
-            ulica,
-            psc,
-            mesto,
-            stat,
-            banka,
-            ucet,
-            skladm,
-            ico,
-            kontakt,
-            telefon,
-            skladu,
-            fax
-        "#,
-        request.id,
-        request.firma,
-        kz,
-        drc,
-        ulica,
-        psc,
-        mesto,
-        stat,
-        banka,
-        ucet,
-        skladm,
-        ico,
-        kontakt,
-        telefon,
-        skladu,
-        fax
-    )
-    .fetch_one(db_pool)
-    .await
-    .map_err(|e| Status::internal(e.to_string()))?;
-
-    Ok(AdresarResponse {
-        id: adresar.id,
-        firma: adresar.firma,
-        kz: adresar.kz.unwrap_or_default(),
-        drc: adresar.drc.unwrap_or_default(),
-        ulica: adresar.ulica.unwrap_or_default(),
-        psc: adresar.psc.unwrap_or_default(),
-        mesto: adresar.mesto.unwrap_or_default(),
-        stat: adresar.stat.unwrap_or_default(),
-        banka: adresar.banka.unwrap_or_default(),
-        ucet: adresar.ucet.unwrap_or_default(),
-        skladm: adresar.skladm.unwrap_or_default(),
-        ico: adresar.ico.unwrap_or_default(),
-        kontakt: adresar.kontakt.unwrap_or_default(),
-        telefon: adresar.telefon.unwrap_or_default(),
-        skladu: adresar.skladu.unwrap_or_default(),
-        fax: adresar.fax.unwrap_or_default(),
-    })
-}
@@ -1,7 +0,0 @@
-// src/adresar/mod.rs
-
-pub mod models;
-pub mod handlers;
-
-// #[cfg(test)]
-// pub mod tests;
@@ -1,23 +0,0 @@
-// src/adresar/models.rs
-use serde::{Deserialize, Serialize};
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct Adresar {
-    pub id: i64,
-    pub deleted: bool,
-    pub firma: String,
-    pub kz: Option<String>,
-    pub drc: Option<String>,
-    pub ulica: Option<String>,
-    pub psc: Option<String>,
-    pub mesto: Option<String>,
-    pub stat: Option<String>,
-    pub banka: Option<String>,
-    pub ucet: Option<String>,
-    pub skladm: Option<String>,
-    pub ico: Option<String>,
-    pub kontakt: Option<String>,
-    pub telefon: Option<String>,
-    pub skladu: Option<String>,
-    pub fax: Option<String>,
-}
@@ -3,6 +3,8 @@
 use tower::ServiceBuilder;
 use crate::auth::logic::rbac;
 
+// TODO: redesign this; adresar and uctovnictvo no longer exist, but the code is
+// kept for reference. Adjust the RBAC rules in the future.
 pub async fn run_server(db_pool: sqlx::PgPool) -> Result<(), Box<dyn std::error::Error>> {
     // ... existing setup code ...
@@ -8,8 +8,6 @@ use tracing::{error, info, warn};
 use tantivy::schema::Schema;
 use crate::search_schema;
 
-const INDEX_DIR: &str = "./tantivy_indexes";
-
 /// Defines the commands that can be sent to the indexer task.
 #[derive(Debug)]
 pub enum IndexCommand {
@@ -4,8 +4,6 @@ pub mod auth;
 pub mod indexer;
 pub mod search_schema;
 pub mod server;
-pub mod adresar;
-pub mod uctovnictvo;
 pub mod shared;
 pub mod table_structure;
 pub mod table_definition;
@@ -1,4 +1,2 @@
 // src/server/handlers.rs
-pub use crate::server::services::adresar_service::AdresarService;
-pub use crate::server::services::uctovnictvo_service::UctovnictvoService;
 pub use crate::server::services::table_structure_service::TableStructureHandler;
@@ -6,8 +6,6 @@ use crate::indexer::{indexer_task, IndexCommand};
 
 use common::proto::multieko2::FILE_DESCRIPTOR_SET;
 use crate::server::services::{
-    AdresarService,
-    UctovnictvoService,
     TableStructureHandler,
     TableDefinitionService,
     TablesDataService,

@@ -15,8 +13,6 @@ use crate::server::services::{
     AuthServiceImpl
 };
 use common::proto::multieko2::{
-    adresar::adresar_server::AdresarServer,
-    uctovnictvo::uctovnictvo_server::UctovnictvoServer,
     table_structure::table_structure_service_server::TableStructureServiceServer,
     table_definition::table_definition_server::TableDefinitionServer,
     tables_data::tables_data_server::TablesDataServer,

@@ -47,7 +43,7 @@ pub async fn run_server(db_pool: sqlx::PgPool) -> Result<(), Box<dyn std::error:
     let table_definition_service = TableDefinitionService { db_pool: db_pool.clone() };
     let tables_data_service = TablesDataService {
         db_pool: db_pool.clone(),
-        indexer_tx: indexer_tx.clone(), // Pass the sender
+        indexer_tx: indexer_tx.clone(),
     };
     let table_script_service = TableScriptService { db_pool: db_pool.clone() };
     let auth_service = AuthServiceImpl { db_pool: db_pool.clone() };

@@ -56,14 +52,12 @@ pub async fn run_server(db_pool: sqlx::PgPool) -> Result<(), Box<dyn std::error:
     let search_service = SearcherService { pool: db_pool.clone() };
 
     Server::builder()
-        .add_service(AdresarServer::new(AdresarService { db_pool: db_pool.clone() }))
-        .add_service(UctovnictvoServer::new(UctovnictvoService { db_pool: db_pool.clone() }))
         .add_service(TableStructureServiceServer::new(TableStructureHandler { db_pool: db_pool.clone() }))
         .add_service(TableDefinitionServer::new(table_definition_service))
         .add_service(TablesDataServer::new(tables_data_service))
         .add_service(TableScriptServer::new(table_script_service))
        .add_service(AuthServiceServer::new(auth_service))
-        .add_service(SearcherServer::new(search_service)) // This now works correctly
+        .add_service(SearcherServer::new(search_service))
         .add_service(reflection_service)
         .serve(addr)
         .await?;
@@ -1,69 +0,0 @@
-// src/server/services/adresar_service.rs
-use tonic::{Request, Response, Status};
-use common::proto::multieko2::adresar::{
-    adresar_server::Adresar,
-    PostAdresarRequest, AdresarResponse, GetAdresarRequest, PutAdresarRequest,
-    DeleteAdresarRequest, DeleteAdresarResponse,
-};
-use common::proto::multieko2::common::{Empty, CountResponse, PositionRequest};
-use crate::adresar::handlers::{
-    post_adresar, get_adresar, put_adresar, delete_adresar,
-    get_adresar_count, get_adresar_by_position,
-};
-use sqlx::PgPool;
-
-#[derive(Debug)]
-pub struct AdresarService {
-    pub db_pool: PgPool,
-}
-
-#[tonic::async_trait]
-impl Adresar for AdresarService {
-    async fn post_adresar(
-        &self,
-        request: Request<PostAdresarRequest>,
-    ) -> Result<Response<AdresarResponse>, Status> {
-        let response = post_adresar(&self.db_pool, request.into_inner()).await?;
-        Ok(Response::new(response))
-    }
-
-    async fn get_adresar(
-        &self,
-        request: Request<GetAdresarRequest>,
-    ) -> Result<Response<AdresarResponse>, Status> {
-        let response = get_adresar(&self.db_pool, request.into_inner()).await?;
-        Ok(Response::new(response))
-    }
-
-    async fn put_adresar(
-        &self,
-        request: Request<PutAdresarRequest>,
-    ) -> Result<Response<AdresarResponse>, Status> {
-        let response = put_adresar(&self.db_pool, request.into_inner()).await?;
-        Ok(Response::new(response))
-    }
-
-    async fn delete_adresar(
-        &self,
-        request: Request<DeleteAdresarRequest>,
-    ) -> Result<Response<DeleteAdresarResponse>, Status> {
-        let response = delete_adresar(&self.db_pool, request.into_inner()).await?;
-        Ok(Response::new(response))
-    }
-
-    async fn get_adresar_count(
-        &self,
-        request: Request<Empty>,
-    ) -> Result<Response<CountResponse>, Status> {
-        let response = get_adresar_count(&self.db_pool, request.into_inner()).await?;
-        Ok(Response::new(response))
-    }
-
-    async fn get_adresar_by_position(
-        &self,
-        request: Request<PositionRequest>,
-    ) -> Result<Response<AdresarResponse>, Status> {
-        let response = get_adresar_by_position(&self.db_pool, request.into_inner()).await?;
-        Ok(Response::new(response))
-    }
-}
@@ -1,16 +1,12 @@
 // src/server/services/mod.rs
 
-pub mod adresar_service;
 pub mod table_structure_service;
-pub mod uctovnictvo_service;
 pub mod table_definition_service;
 pub mod tables_data_service;
 pub mod table_script_service;
 pub mod auth_service;
 
-pub use adresar_service::AdresarService;
 pub use table_structure_service::TableStructureHandler;
-pub use uctovnictvo_service::UctovnictvoService;
 pub use table_definition_service::TableDefinitionService;
 pub use tables_data_service::TablesDataService;
 pub use table_script_service::TableScriptService;
@@ -41,14 +41,17 @@ impl TablesData for TablesDataService {
         Ok(Response::new(response))
     }
 
-    // You will later apply the same pattern to put_table_data...
     async fn put_table_data(
         &self,
         request: Request<PutTableDataRequest>,
     ) -> Result<Response<PutTableDataResponse>, Status> {
         let request = request.into_inner();
-        // TODO: Update put_table_data handler to accept and use indexer_tx
-        let response = put_table_data(&self.db_pool, request).await?;
+        let response = put_table_data(
+            &self.db_pool,
+            request,
+            &self.indexer_tx,
+        )
+        .await?;
         Ok(Response::new(response))
     }
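The handler now receives the indexer channel alongside the pool. As a generic sketch of the pattern (this helper is illustrative, not part of the diff), forwarding a command to a background task over a tokio mpsc channel looks like:

```rust
use tokio::sync::mpsc;
use tonic::Status;

// Illustrative helper: forward a command to a background worker task over a
// tokio mpsc channel, mapping a closed channel onto a gRPC status. In this
// codebase the command type would be the IndexCommand enum from src/indexer.rs.
async fn notify_worker<C: Send>(tx: &mpsc::Sender<C>, cmd: C) -> Result<(), Status> {
    tx.send(cmd)
        .await
        .map_err(|_| Status::internal("indexer task is not running"))
}
```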
@@ -1,60 +0,0 @@
-// src/server/services/uctovnictvo_service.rs
-use tonic::{Request, Response, Status};
-use common::proto::multieko2::uctovnictvo::{
-    uctovnictvo_server::Uctovnictvo,
-    PostUctovnictvoRequest, UctovnictvoResponse, GetUctovnictvoRequest, PutUctovnictvoRequest,
-};
-use crate::uctovnictvo::handlers::{
-    post_uctovnictvo, get_uctovnictvo, get_uctovnictvo_count,
-    get_uctovnictvo_by_position, put_uctovnictvo,
-};
-use common::proto::multieko2::common::{Empty, CountResponse, PositionRequest};
-use sqlx::PgPool;
-
-#[derive(Debug)]
-pub struct UctovnictvoService {
-    pub db_pool: PgPool,
-}
-
-#[tonic::async_trait]
-impl Uctovnictvo for UctovnictvoService {
-    async fn post_uctovnictvo(
-        &self,
-        request: Request<PostUctovnictvoRequest>,
-    ) -> Result<Response<UctovnictvoResponse>, Status> {
-        let response = post_uctovnictvo(&self.db_pool, request.into_inner()).await?;
-        Ok(Response::new(response))
-    }
-
-    async fn get_uctovnictvo(
-        &self,
-        request: Request<GetUctovnictvoRequest>,
-    ) -> Result<Response<UctovnictvoResponse>, Status> {
-        let response = get_uctovnictvo(&self.db_pool, request.into_inner()).await?;
-        Ok(Response::new(response))
-    }
-
-    async fn get_uctovnictvo_count(
-        &self,
-        request: Request<Empty>,
-    ) -> Result<Response<CountResponse>, Status> {
-        let response = get_uctovnictvo_count(&self.db_pool, request.into_inner()).await?;
-        Ok(Response::new(response))
-    }
-
-    async fn get_uctovnictvo_by_position(
-        &self,
-        request: Request<PositionRequest>,
-    ) -> Result<Response<UctovnictvoResponse>, Status> {
-        let response = get_uctovnictvo_by_position(&self.db_pool, request.into_inner()).await?;
-        Ok(Response::new(response))
-    }
-
-    async fn put_uctovnictvo(
-        &self,
-        request: Request<PutUctovnictvoRequest>,
-    ) -> Result<Response<UctovnictvoResponse>, Status> {
-        let response = put_uctovnictvo(&self.db_pool, request.into_inner()).await?;
-        Ok(Response::new(response))
-    }
-}
@@ -1,34 +1,50 @@
 // src/shared/schema_qualifier.rs
+use sqlx::PgPool;
 use tonic::Status;
 
-/// Qualifies table names with the appropriate schema
+// TODO in the future, remove the database query on every request and implement
+// caching for a scalable solution with lots of data and requests
+
+/// Qualifies a table name by checking for its existence in the table_definitions table.
+/// This is the robust, "source of truth" approach.
 ///
 /// Rules:
-/// - Tables created via PostTableDefinition (dynamically created tables) are in 'gen' schema
-/// - System tables (like users, profiles) remain in 'public' schema
-pub fn qualify_table_name(table_name: &str) -> String {
-    // Check if table matches the pattern of dynamically created tables (e.g., 2025_something)
-    if table_name.starts_with(|c: char| c.is_ascii_digit()) && table_name.contains('_') {
-        format!("gen.\"{}\"", table_name)
+/// - If a table is found in `table_definitions`, it is qualified with the 'gen' schema.
+/// - Otherwise, it is assumed to be a system table in the 'public' schema.
+pub async fn qualify_table_name(
+    db_pool: &PgPool,
+    profile_name: &str,
+    table_name: &str,
+) -> Result<String, Status> {
+    // Check if a definition exists for this table in the given profile.
+    let definition_exists = sqlx::query!(
+        r#"SELECT EXISTS (
+            SELECT 1 FROM table_definitions td
+            JOIN schemas s ON td.schema_id = s.id
+            WHERE s.name = $1 AND td.table_name = $2
+        )"#,
+        profile_name,
+        table_name
+    )
+    .fetch_one(db_pool)
+    .await
+    .map_err(|e| Status::internal(format!("Schema lookup failed: {}", e)))?
+    .exists
+    .unwrap_or(false);
+
+    if definition_exists {
+        Ok(format!("{}.\"{}\"", profile_name, table_name))
     } else {
-        format!("\"{}\"", table_name)
+        // It's not a user-defined table, so it must be a system table in 'public'.
+        Ok(format!("\"{}\"", table_name))
     }
 }
 
 /// Qualifies table names for data operations
-pub fn qualify_table_name_for_data(table_name: &str) -> Result<String, Status> {
-    Ok(qualify_table_name(table_name))
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_qualify_table_name() {
-        assert_eq!(qualify_table_name("2025_test_schema3"), "gen.\"2025_test_schema3\"");
-        assert_eq!(qualify_table_name("users"), "\"users\"");
-        assert_eq!(qualify_table_name("profiles"), "\"profiles\"");
-        assert_eq!(qualify_table_name("adresar"), "\"adresar\"");
-    }
-}
+pub async fn qualify_table_name_for_data(
+    db_pool: &PgPool,
+    profile_name: &str,
+    table_name: &str,
+) -> Result<String, Status> {
+    qualify_table_name(db_pool, profile_name, table_name).await
+}
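For orientation, a hedged sketch of how a data handler might call the new async qualifier before interpolating the result into SQL; the surrounding function and the COUNT query are assumptions for illustration, with `qualify_table_name` assumed to be in scope:

```rust
use sqlx::PgPool;
use tonic::Status;

// Hypothetical caller: resolve `profile."table"` (user-defined) or
// `"table"` (public system table) before building the query string.
async fn count_rows(
    db_pool: &PgPool,
    profile_name: &str,
    table_name: &str,
) -> Result<i64, Status> {
    let qualified = qualify_table_name(db_pool, profile_name, table_name).await?;
    sqlx::query_scalar::<_, i64>(&format!("SELECT COUNT(*) FROM {}", qualified))
        .fetch_one(db_pool)
        .await
        .map_err(|e| Status::internal(e.to_string()))
}
```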
@@ -21,7 +21,8 @@ pub enum FunctionError {
 #[derive(Clone)]
 pub struct SteelContext {
     pub current_table: String,
-    pub profile_id: i64,
+    pub schema_id: i64,
+    pub schema_name: String,
     pub row_data: HashMap<String, String>,
     pub db_pool: Arc<PgPool>,
 }

@@ -30,8 +31,8 @@ impl SteelContext {
     pub async fn get_related_table_name(&self, base_name: &str) -> Result<String, FunctionError> {
         let table_def = sqlx::query!(
             r#"SELECT table_name FROM table_definitions
-            WHERE profile_id = $1 AND table_name LIKE $2"#,
-            self.profile_id,
+            WHERE schema_id = $1 AND table_name LIKE $2"#,
+            self.schema_id,
             format!("%_{}", base_name)
         )
         .fetch_optional(&*self.db_pool)

@@ -66,7 +67,7 @@ impl SteelContext {
 
         // Add quotes around the table name
         sqlx::query_scalar::<_, String>(
-            &format!("SELECT {} FROM \"{}\" WHERE id = $1", column, actual_table)
+            &format!("SELECT {} FROM \"{}\".\"{}\" WHERE id = $1", column, self.schema_name, actual_table)
         )
         .bind(fk_value.parse::<i64>().map_err(|_|
             SteelVal::StringV("Invalid foreign key format".into()))?)
@@ -1,4 +1,4 @@
-// server/src/table_definition/handlers/delete_table.rs
+// src/table_definition/handlers/delete_table.rs
 use tonic::Status;
 use sqlx::PgPool;
 use common::proto::multieko2::table_definition::{DeleteTableRequest, DeleteTableResponse};

@@ -10,25 +10,25 @@ pub async fn delete_table(
     let mut transaction = db_pool.begin().await
         .map_err(|e| Status::internal(format!("Failed to start transaction: {}", e)))?;
 
-    // Step 1: Get profile and validate existence
-    let profile = sqlx::query!(
-        "SELECT id FROM profiles WHERE name = $1",
+    // Step 1: Get schema and validate existence
+    let schema = sqlx::query!(
+        "SELECT id, name FROM schemas WHERE name = $1",
         request.profile_name
     )
     .fetch_optional(&mut *transaction)
     .await
-    .map_err(|e| Status::internal(format!("Profile lookup failed: {}", e)))?;
+    .map_err(|e| Status::internal(format!("Schema lookup failed: {}", e)))?;
 
-    let profile_id = match profile {
-        Some(p) => p.id,
+    let (schema_id, schema_name) = match schema {
+        Some(s) => (s.id, s.name),
         None => return Err(Status::not_found("Profile not found")),
     };
 
     // Step 2: Get table definition and validate existence
     let table_def = sqlx::query!(
-        "SELECT id FROM table_definitions
-         WHERE profile_id = $1 AND table_name = $2",
-        profile_id,
+        "SELECT id FROM table_definitions
+         WHERE schema_id = $1 AND table_name = $2",
+        schema_id,
         request.table_name
     )
     .fetch_optional(&mut *transaction)

@@ -40,8 +40,9 @@ pub async fn delete_table(
         None => return Err(Status::not_found("Table not found in profile")),
     };
 
-    // Step 3: Drop the actual PostgreSQL table with CASCADE
-    sqlx::query(&format!(r#"DROP TABLE IF EXISTS "{}" CASCADE"#, request.table_name))
+    // Step 3: Drop the actual PostgreSQL table with CASCADE (schema-qualified)
+    let drop_table_sql = format!(r#"DROP TABLE IF EXISTS "{}"."{}" CASCADE"#, schema_name, request.table_name);
+    sqlx::query(&drop_table_sql)
         .execute(&mut *transaction)
         .await
         .map_err(|e| Status::internal(format!("Table drop failed: {}", e)))?;

@@ -55,23 +56,31 @@ pub async fn delete_table(
     .await
     .map_err(|e| Status::internal(format!("Definition deletion failed: {}", e)))?;
 
-    // Step 5: Check and clean up profile if empty
+    // Step 5: Check and clean up schema if empty
     let remaining = sqlx::query!(
-        "SELECT COUNT(*) as count FROM table_definitions WHERE profile_id = $1",
-        profile_id
+        "SELECT COUNT(*) as count FROM table_definitions WHERE schema_id = $1",
+        schema_id
    )
     .fetch_one(&mut *transaction)
     .await
     .map_err(|e| Status::internal(format!("Count query failed: {}", e)))?;
 
     if remaining.count.unwrap_or(1) == 0 {
+        // Drop the PostgreSQL schema if empty
+        let drop_schema_sql = format!(r#"DROP SCHEMA IF EXISTS "{}" CASCADE"#, schema_name);
+        sqlx::query(&drop_schema_sql)
+            .execute(&mut *transaction)
+            .await
+            .map_err(|e| Status::internal(format!("Schema drop failed: {}", e)))?;
+
+        // Delete the schema record
         sqlx::query!(
-            "DELETE FROM profiles WHERE id = $1",
-            profile_id
+            "DELETE FROM schemas WHERE id = $1",
+            schema_id
         )
         .execute(&mut *transaction)
         .await
-        .map_err(|e| Status::internal(format!("Profile cleanup failed: {}", e)))?;
+        .map_err(|e| Status::internal(format!("Schema cleanup failed: {}", e)))?;
     }
 
     transaction.commit().await
@@ -15,13 +15,15 @@ pub async fn get_profile_tree(
|
||||
) -> Result<Response<ProfileTreeResponse>, Status> {
|
||||
let mut profiles = Vec::new();
|
||||
|
||||
// Get all profiles
|
||||
let profile_records = sqlx::query!("SELECT id, name FROM profiles")
|
||||
.fetch_all(db_pool)
|
||||
.await
|
||||
.map_err(|e| Status::internal(format!("Failed to fetch profiles: {}", e)))?;
|
||||
// Get all schemas (internally changed from profiles to schemas)
|
||||
let schema_records = sqlx::query!(
|
||||
"SELECT id, name FROM schemas ORDER BY name"
|
||||
)
|
||||
.fetch_all(db_pool)
|
||||
.await
|
||||
.map_err(|e| Status::internal(format!("Failed to fetch schemas: {}", e)))?;
|
||||
|
||||
for profile in profile_records {
|
||||
for schema in schema_records {
|
||||
// Get all tables with their dependencies from the links table
|
||||
let tables = sqlx::query!(
|
||||
r#"
|
||||
@@ -35,15 +37,16 @@ pub async fn get_profile_tree(
|
||||
'required', tdl.is_required
|
||||
)
|
||||
) FILTER (WHERE ltd.id IS NOT NULL),
|
||||
'[]'
|
||||
'[]'::json
|
||||
) as dependencies
|
||||
FROM table_definitions td
|
||||
LEFT JOIN table_definition_links tdl ON td.id = tdl.source_table_id
|
||||
LEFT JOIN table_definitions ltd ON tdl.linked_table_id = ltd.id
|
||||
WHERE td.profile_id = $1
|
||||
WHERE td.schema_id = $1
|
||||
GROUP BY td.id, td.table_name
|
||||
ORDER BY td.table_name
|
||||
"#,
|
||||
profile.id
|
||||
schema.id
|
||||
)
|
||||
.fetch_all(db_pool)
|
||||
.await
|
||||
@@ -70,8 +73,9 @@ pub async fn get_profile_tree(
|
||||
})
|
||||
.collect();
|
||||
|
||||
// External API still returns "profiles" for compatibility
|
||||
profiles.push(Profile {
|
||||
name: profile.name,
|
||||
name: schema.name,
|
||||
tables: proto_tables
|
||||
});
|
||||
}
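
Note: the '[]'::json change in this hunk is easy to miss. COALESCE needs both branches to resolve to one type, and json_agg(...) FILTER yields json while the bare '[]' literal is untyped; the explicit cast removes the ambiguity (which sqlx's compile-time query checking would otherwise trip over). A minimal sketch of the pattern, assuming the same table layout:

    // The aggregate falls back to an empty JSON array, never SQL NULL.
    let dependencies_expr = r#"
        COALESCE(
            json_agg(json_build_object('table', ltd.table_name, 'required', tdl.is_required))
                FILTER (WHERE ltd.id IS NOT NULL),
            '[]'::json
        ) AS dependencies
    "#;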

@@ -1,48 +1,170 @@
// src/table_definition/handlers/post_table_definition.rs

use tonic::Status;
use sqlx::{PgPool, Transaction, Postgres};
use serde_json::json;
use time::OffsetDateTime;
use common::proto::multieko2::table_definition::{PostTableDefinitionRequest, TableDefinitionResponse};

const GENERATED_SCHEMA_NAME: &str = "gen";

const PREDEFINED_FIELD_TYPES: &[(&str, &str)] = &[
("text", "TEXT"),
("psc", "TEXT"),
("phone", "VARCHAR(15)"),
("address", "TEXT"),
("email", "VARCHAR(255)"),
("string", "TEXT"),
("boolean", "BOOLEAN"),
("timestamp", "TIMESTAMPTZ"),
("timestamptz", "TIMESTAMPTZ"),
("time", "TIMESTAMPTZ"),
("money", "NUMERIC(14, 4)"),
("integer", "INTEGER"),
("int", "INTEGER"),
("biginteger", "BIGINT"),
("bigint", "BIGINT"),
("date", "DATE"),
];

fn is_valid_identifier(s: &str) -> bool {
!s.is_empty() &&
s.chars().all(|c| c.is_ascii_alphanumeric() || c == '_') &&
!s.starts_with('_') &&
!s.chars().next().unwrap().is_ascii_digit()
// NEW: Helper function to provide detailed error messages
fn validate_identifier_format(s: &str, identifier_type: &str) -> Result<(), Status> {
if s.is_empty() {
return Err(Status::invalid_argument(format!("{} cannot be empty", identifier_type)));
}

if s.starts_with('_') {
return Err(Status::invalid_argument(format!("{} cannot start with underscore", identifier_type)));
}

if s.chars().next().unwrap().is_ascii_digit() {
return Err(Status::invalid_argument(format!("{} cannot start with a number", identifier_type)));
}

// Check for invalid characters
let invalid_chars: Vec<char> = s.chars()
.filter(|c| !c.is_ascii_lowercase() && !c.is_ascii_digit() && *c != '_')
.collect();

if !invalid_chars.is_empty() {
return Err(Status::invalid_argument(format!(
"{} contains invalid characters: {:?}. Only lowercase letters, numbers, and underscores are allowed",
identifier_type, invalid_chars
)));
}

// Check for uppercase letters specifically to give a helpful message
if s.chars().any(|c| c.is_ascii_uppercase()) {
return Err(Status::invalid_argument(format!(
"{} contains uppercase letters. Only lowercase letters are allowed",
identifier_type
)));
}

Ok(())
}
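
Note: a quick sketch of how the new helper behaves (hypothetical inputs). One observation: the uppercase-specific branch is shadowed, because uppercase letters already fail the invalid-character filter above it, so that friendlier message never fires:

    assert!(validate_identifier_format("customer_orders", "Table name").is_ok());
    assert!(validate_identifier_format("_orders", "Table name").is_err());  // cannot start with underscore
    assert!(validate_identifier_format("9orders", "Table name").is_err());  // cannot start with a number
    assert!(validate_identifier_format("Orders", "Table name").is_err());   // caught by the invalid-character check, not the uppercase branch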

fn sanitize_table_name(s: &str) -> String {
let year = OffsetDateTime::now_utc().year();
let cleaned = s.replace(|c: char| !c.is_ascii_alphanumeric() && c != '_', "")
.trim()
.to_lowercase();
format!("{}_{}", year, cleaned)
fn validate_decimal_number_format(num_str: &str, param_name: &str) -> Result<(), Status> {
if num_str.is_empty() {
return Err(Status::invalid_argument(format!(
"{} cannot be empty",
param_name
)));
}

// Check for explicit signs
if num_str.starts_with('+') || num_str.starts_with('-') {
return Err(Status::invalid_argument(format!(
"{} cannot have explicit positive or negative signs",
param_name
)));
}

// Check for decimal points
if num_str.contains('.') {
return Err(Status::invalid_argument(format!(
"{} must be a whole number (no decimal points)",
param_name
)));
}

// Check for leading zeros (but allow "0" itself)
if num_str.len() > 1 && num_str.starts_with('0') {
let trimmed = num_str.trim_start_matches('0');
let suggestion = if trimmed.is_empty() { "0" } else { trimmed };
return Err(Status::invalid_argument(format!(
"{} cannot have leading zeros (use '{}' instead of '{}')",
param_name,
suggestion,
num_str
)));
}

// Check that all characters are digits
if !num_str.chars().all(|c| c.is_ascii_digit()) {
return Err(Status::invalid_argument(format!(
"{} contains invalid characters. Only digits 0-9 are allowed",
param_name
)));
}

Ok(())
}
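
Note: sample behaviour of the format pre-check (hypothetical inputs), which rejects everything u32::parse would silently accept or coerce:

    assert!(validate_decimal_number_format("10", "precision").is_ok());
    assert!(validate_decimal_number_format("+10", "precision").is_err()); // explicit sign
    assert!(validate_decimal_number_format("1.5", "scale").is_err());     // decimal point
    assert!(validate_decimal_number_format("007", "scale").is_err());     // leading zeros (use '7')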

fn sanitize_identifier(s: &str) -> String {
s.replace(|c: char| !c.is_ascii_alphanumeric() && c != '_', "")
.trim()
.to_lowercase()
}
fn map_field_type(field_type: &str) -> Result<String, Status> {
let lower_field_type = field_type.to_lowercase();

fn map_field_type(field_type: &str) -> Result<&str, Status> {
// Special handling for "decimal(precision, scale)"
if lower_field_type.starts_with("decimal(") && lower_field_type.ends_with(')') {
// Extract the part inside the parentheses, e.g., "10, 2"
let args = lower_field_type
.strip_prefix("decimal(")
.and_then(|s| s.strip_suffix(')'))
.unwrap_or(""); // Should always succeed due to the checks above

// Split into precision and scale parts
if let Some((p_str, s_str)) = args.split_once(',') {
let precision_str = p_str.trim();
let scale_str = s_str.trim();

// NEW: Validate format BEFORE parsing
validate_decimal_number_format(precision_str, "precision")?;
validate_decimal_number_format(scale_str, "scale")?;

// Parse precision, returning an error if it's not a valid number
let precision = precision_str.parse::<u32>().map_err(|_| {
Status::invalid_argument("Invalid precision in decimal type")
})?;

// Parse scale, returning an error if it's not a valid number
let scale = scale_str.parse::<u32>().map_err(|_| {
Status::invalid_argument("Invalid scale in decimal type")
})?;

// Add validation based on PostgreSQL rules
if precision < 1 {
return Err(Status::invalid_argument("Precision must be at least 1"));
}
if scale > precision {
return Err(Status::invalid_argument(
"Scale cannot be greater than precision",
));
}

// If everything is valid, build and return the NUMERIC type string
return Ok(format!("NUMERIC({}, {})", precision, scale));
} else {
// The format was wrong, e.g., "decimal(10)" or "decimal()"
return Err(Status::invalid_argument(
"Invalid decimal format. Expected: decimal(precision, scale)",
));
}
}

// If not a decimal, fall back to the predefined list
PREDEFINED_FIELD_TYPES
.iter()
.find(|(key, _)| *key == field_type.to_lowercase().as_str())
.map(|(_, sql_type)| *sql_type)
.ok_or_else(|| Status::invalid_argument(format!("Invalid field type: {}", field_type)))
.find(|(key, _)| *key == lower_field_type.as_str())
.map(|(_, sql_type)| sql_type.to_string()) // Convert to an owned String
.ok_or_else(|| {
Status::invalid_argument(format!(
"Invalid field type: {}",
field_type
))
})
}
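
Note: the return type change to Result<String, Status> is what allows the dynamically built NUMERIC(p, s) string alongside the static table entries. End-to-end behaviour of the reworked mapper, assuming the constants above:

    assert_eq!(map_field_type("decimal(10, 2)").unwrap(), "NUMERIC(10, 2)");
    assert_eq!(map_field_type("MONEY").unwrap(), "NUMERIC(14, 4)"); // lookup is case-insensitive
    assert!(map_field_type("decimal(2, 5)").is_err());              // scale > precision
    assert!(map_field_type("decimal(10)").is_err());                // missing scale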

fn is_invalid_table_name(table_name: &str) -> bool {
@@ -52,33 +174,65 @@ fn is_invalid_table_name(table_name: &str) -> bool {
table_name == "created_at"
}

fn is_reserved_schema(schema_name: &str) -> bool {
let lower = schema_name.to_lowercase();
lower == "public" ||
lower == "information_schema" ||
lower.starts_with("pg_")
}

pub async fn post_table_definition(
db_pool: &PgPool,
request: PostTableDefinitionRequest,
) -> Result<TableDefinitionResponse, Status> {
let base_name = sanitize_table_name(&request.table_name);
let user_part_cleaned = request.table_name
.replace(|c: char| !c.is_ascii_alphanumeric() && c != '_', "")
.trim_matches('_')
.to_lowercase();
// Create owned copies of the strings after validation
let profile_name = {
let trimmed = request.profile_name.trim();
validate_identifier_format(trimmed, "Profile name")?;
trimmed.to_string()
};

// New validation check
if is_invalid_table_name(&user_part_cleaned) {
return Err(Status::invalid_argument(
"Table name cannot be 'id', 'deleted', 'created_at' or end with '_id'"
));
// Add validation to prevent reserved schemas
if is_reserved_schema(&profile_name) {
return Err(Status::invalid_argument("Profile name is reserved and cannot be used"));
}

if !user_part_cleaned.is_empty() && !is_valid_identifier(&user_part_cleaned) {
return Err(Status::invalid_argument("Invalid table name"));
} else if user_part_cleaned.is_empty() {
return Err(Status::invalid_argument("Table name cannot be empty"));
const MAX_IDENTIFIER_LENGTH: usize = 63;

if profile_name.len() > MAX_IDENTIFIER_LENGTH {
return Err(Status::invalid_argument(format!(
"Profile name '{}' exceeds the {} character limit.",
profile_name,
MAX_IDENTIFIER_LENGTH
)));
}

let table_name = {
let trimmed = request.table_name.trim();
validate_identifier_format(trimmed, "Table name")?;

if trimmed.len() > MAX_IDENTIFIER_LENGTH {
return Err(Status::invalid_argument(format!(
"Table name '{}' exceeds the {} character limit.",
trimmed,
MAX_IDENTIFIER_LENGTH
)));
}

// Check invalid table names on the original input
if is_invalid_table_name(trimmed) {
return Err(Status::invalid_argument(
"Table name cannot be 'id', 'deleted', 'created_at' or end with '_id'"
));
}

trimmed.to_string()
};
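
Note: the 63-character ceiling mirrors PostgreSQL's NAMEDATALEN - 1 limit, and str::len() counting bytes is fine here because validation has already restricted names to ASCII. Rejecting long names up front matters because the server would otherwise truncate silently (sketch, hypothetical names):

    const MAX_IDENTIFIER_LENGTH: usize = 63; // NAMEDATALEN (64) minus the NUL terminator

    // Without the check, two distinct requests could map onto the same
    // physical identifier after server-side truncation:
    let long_a = format!("{}_a", "x".repeat(62)); // 64 chars
    let long_b = format!("{}_b", "x".repeat(62)); // 64 chars
    // PostgreSQL would truncate both to the same 63-char prefix and collide.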

let mut tx = db_pool.begin().await
.map_err(|e| Status::internal(format!("Failed to start transaction: {}", e)))?;

match execute_table_definition(&mut tx, request, base_name).await {
match execute_table_definition(&mut tx, request, table_name, profile_name).await {
Ok(response) => {
tx.commit().await
.map_err(|e| Status::internal(format!("Failed to commit transaction: {}", e)))?;
@@ -95,23 +249,42 @@ async fn execute_table_definition(
tx: &mut Transaction<'_, Postgres>,
mut request: PostTableDefinitionRequest,
table_name: String,
profile_name: String,
) -> Result<TableDefinitionResponse, Status> {
let profile = sqlx::query!(
"INSERT INTO profiles (name) VALUES ($1)
// Use the validated profile_name for schema insertion
let schema = sqlx::query!(
"INSERT INTO schemas (name) VALUES ($1)
ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name
RETURNING id",
request.profile_name
profile_name // Use the validated profile name
)
.fetch_one(&mut **tx)
.await
.map_err(|e| Status::internal(format!("Profile error: {}", e)))?;
.map_err(|e| Status::internal(format!("Schema error: {}", e)))?;

// Create PostgreSQL schema if it doesn't exist
let create_schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", profile_name);
sqlx::query(&create_schema_sql)
.execute(&mut **tx)
.await
.map_err(|e| Status::internal(format!("Schema creation failed: {}", e)))?;
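
Note: the ON CONFLICT ... DO UPDATE SET name = EXCLUDED.name upsert looks like a no-op, but it is what guarantees RETURNING id yields a row whether the schema record is new or pre-existing; DO NOTHING returns no row on conflict. A hedged sketch of the DO NOTHING alternative, assuming the same schemas table, showing why the no-op update is the simpler choice:

    let id: i64 = sqlx::query_scalar(
        "WITH ins AS (
             INSERT INTO schemas (name) VALUES ($1)
             ON CONFLICT (name) DO NOTHING
             RETURNING id
         )
         SELECT id FROM ins
         UNION ALL
         SELECT id FROM schemas WHERE name = $1
         LIMIT 1",
    )
    .bind(&profile_name)
    .fetch_one(&mut **tx)
    .await
    .map_err(|e| Status::internal(format!("Schema error: {}", e)))?;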

let mut links = Vec::new();
let mut seen_tables = std::collections::HashSet::new();

for link in request.links.drain(..) {
// Check for duplicate link
if !seen_tables.insert(link.linked_table_name.clone()) {
return Err(Status::invalid_argument(format!(
"Duplicate link to table '{}'",
link.linked_table_name
)));
}

let linked_table = sqlx::query!(
"SELECT id FROM table_definitions
WHERE profile_id = $1 AND table_name = $2",
profile.id,
WHERE schema_id = $1 AND table_name = $2",
schema.id,
link.linked_table_name
)
.fetch_optional(&mut **tx)
@@ -127,34 +300,40 @@ async fn execute_table_definition(

let mut columns = Vec::new();
for col_def in request.columns.drain(..) {
let col_name = sanitize_identifier(&col_def.name);
if !is_valid_identifier(&col_def.name) {
return Err(Status::invalid_argument("Invalid column name"));
let col_name = col_def.name.trim().to_string();
validate_identifier_format(&col_name, "Column name")?;

if col_name.ends_with("_id") || col_name == "id" || col_name == "deleted" || col_name == "created_at" {
return Err(Status::invalid_argument(format!(
"Column name '{}' cannot be 'id', 'deleted', 'created_at' or end with '_id'",
col_name
)));
}

let sql_type = map_field_type(&col_def.field_type)?;
columns.push(format!("\"{}\" {}", col_name, sql_type));
}

let mut indexes = Vec::new();
for idx in request.indexes.drain(..) {
let idx_name = sanitize_identifier(&idx);
if !is_valid_identifier(&idx) {
return Err(Status::invalid_argument(format!("Invalid index name: {}", idx)));
}
let idx_name = idx.trim().to_string();
validate_identifier_format(&idx_name, "Index name")?;

if !columns.iter().any(|c| c.starts_with(&format!("\"{}\"", idx_name))) {
return Err(Status::invalid_argument(format!("Index column {} not found", idx_name)));
return Err(Status::invalid_argument(format!("Index column '{}' not found", idx_name)));
}
indexes.push(idx_name);
}

let (create_sql, index_sql) = generate_table_sql(tx, &table_name, &columns, &indexes, &links).await?;
let (create_sql, index_sql) = generate_table_sql(tx, &profile_name, &table_name, &columns, &indexes, &links).await?;

// Use schema_id instead of profile_id
let table_def = sqlx::query!(
r#"INSERT INTO table_definitions
(profile_id, table_name, columns, indexes)
(schema_id, table_name, columns, indexes)
VALUES ($1, $2, $3, $4)
RETURNING id"#,
profile.id,
schema.id,
&table_name,
json!(columns),
json!(indexes)
@@ -163,7 +342,8 @@ async fn execute_table_definition(
.await
.map_err(|e| {
if let Some(db_err) = e.as_database_error() {
if db_err.constraint() == Some("idx_table_definitions_profile_table") {
// Update constraint name to match new schema
if db_err.constraint() == Some("idx_table_definitions_schema_table") {
return Status::already_exists("Table already exists in this profile");
}
}
@@ -204,13 +384,13 @@ async fn execute_table_definition(

async fn generate_table_sql(
tx: &mut Transaction<'_, Postgres>,
profile_name: &str,
table_name: &str,
columns: &[String],
indexes: &[String],
links: &[(i64, bool)],
) -> Result<(String, Vec<String>), Status> {
let qualified_table = format!("{}.\"{}\"", GENERATED_SCHEMA_NAME, table_name);

let qualified_table = format!("\"{}\".\"{}\"", profile_name, table_name);
let mut system_columns = vec![
"id BIGSERIAL PRIMARY KEY".to_string(),
"deleted BOOLEAN NOT NULL DEFAULT FALSE".to_string(),
@@ -218,16 +398,13 @@ async fn generate_table_sql(

for (linked_id, required) in links {
let linked_table = get_table_name_by_id(tx, *linked_id).await?;
let qualified_linked_table = format!("{}.\"{}\"", GENERATED_SCHEMA_NAME, linked_table);
let base_name = linked_table.split_once('_')
.map(|(_, rest)| rest)
.unwrap_or(&linked_table)
.to_string();
let null_clause = if *required { "NOT NULL" } else { "" };
let qualified_linked_table = format!("\"{}\".\"{}\"", profile_name, linked_table);

// Simply use the full table name - no truncation!
let null_clause = if *required { "NOT NULL" } else { "" };
system_columns.push(
format!("\"{0}_id\" BIGINT {1} REFERENCES {2}(id)",
base_name, null_clause, qualified_linked_table
format!("\"{}_id\" BIGINT {} REFERENCES {}(id)",
linked_table, null_clause, qualified_linked_table
)
);
}
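
Note: switching the generated FK column from the stripped base name to the full linked table name is what makes links to similarly-suffixed tables safe; the old split_once('_') stripping could collide (sketch, hypothetical table names):

    // Old behaviour: both links produce the same column name.
    // "crm_customer"  -> base "customer" -> "customer_id"
    // "shop_customer" -> base "customer" -> "customer_id"   // duplicate column!
    // New behaviour keeps them distinct:
    // "crm_customer"  -> "crm_customer_id"
    // "shop_customer" -> "shop_customer_id"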
@@ -247,13 +424,9 @@ async fn generate_table_sql(
let mut all_indexes = Vec::new();
for (linked_id, _) in links {
let linked_table = get_table_name_by_id(tx, *linked_id).await?;
let base_name = linked_table.split_once('_')
.map(|(_, rest)| rest)
.unwrap_or(&linked_table)
.to_string();
all_indexes.push(format!(
"CREATE INDEX \"idx_{}_{}_fk\" ON {} (\"{}_id\")",
table_name, base_name, qualified_table, base_name
table_name, linked_table, qualified_table, linked_table
));
}


@@ -49,7 +49,7 @@ pub async fn post_table_script(
) -> Result<TableScriptResponse, Status> {
// Fetch the table definition
let table_def = sqlx::query!(
r#"SELECT id, table_name, columns, profile_id
r#"SELECT id, table_name, columns, schema_id
FROM table_definitions WHERE id = $1"#,
request.table_definition_id
)
@@ -76,7 +76,7 @@ pub async fn post_table_script(
let script_record = sqlx::query!(
r#"INSERT INTO table_scripts
(table_definitions_id, target_table, target_column,
target_column_type, script, description, profile_id)
target_column_type, script, description, schema_id)
VALUES ($1, $2, $3, $4, $5, $6, $7)
RETURNING id"#,
request.table_definition_id,
@@ -85,7 +85,7 @@ pub async fn post_table_script(
column_type,
parsed_script,
request.description,
table_def.profile_id
table_def.schema_id
)
.fetch_one(db_pool)
.await

@@ -20,11 +20,11 @@ pub async fn get_table_structure(
) -> Result<TableStructureResponse, Status> {
let profile_name = request.profile_name;
let table_name = request.table_name;
let table_schema = "gen";
let table_schema = &profile_name;

// 1. Validate Profile
let profile = sqlx::query!(
"SELECT id FROM profiles WHERE name = $1",
let schema = sqlx::query!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_optional(db_pool)
@@ -36,8 +36,8 @@ pub async fn get_table_structure(
))
})?;

let profile_id = match profile {
Some(p) => p.id,
let schema_id = match schema {
Some(s) => s.id,
None => {
return Err(Status::not_found(format!(
"Profile '{}' not found",
@@ -48,8 +48,8 @@ pub async fn get_table_structure(

// 2. Validate Table within Profile
sqlx::query!(
"SELECT id FROM table_definitions WHERE profile_id = $1 AND table_name = $2",
profile_id,
"SELECT id FROM table_definitions WHERE schema_id = $1 AND table_name = $2",
schema_id,
table_name
)
.fetch_optional(db_pool)

@@ -9,24 +9,24 @@ pub async fn delete_table_data(
request: DeleteTableDataRequest,
) -> Result<DeleteTableDataResponse, Status> {
// Lookup profile
let profile = sqlx::query!(
"SELECT id FROM profiles WHERE name = $1",
let schema = sqlx::query!(
"SELECT id FROM schemas WHERE name = $1",
request.profile_name
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(format!("Profile lookup error: {}", e)))?;

let profile_id = match profile {
Some(p) => p.id,
let schema_id = match schema {
Some(s) => s.id,
None => return Err(Status::not_found("Profile not found")),
};

// Verify table exists in profile
let table_exists = sqlx::query!(
"SELECT 1 AS exists FROM table_definitions
WHERE profile_id = $1 AND table_name = $2",
profile_id,
WHERE schema_id = $1 AND table_name = $2",
schema_id,
request.table_name
)
.fetch_optional(db_pool)
@@ -38,7 +38,12 @@ pub async fn delete_table_data(
}

// Qualify table name with schema
let qualified_table = qualify_table_name_for_data(&request.table_name)?;
let qualified_table = qualify_table_name_for_data(
db_pool,
&request.profile_name,
&request.table_name,
)
.await?;
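
Note: every call site now passes the pool and profile name and awaits the result, so the qualifier has evidently become async and schema-aware. The real implementation lives in crate::shared::schema_qualifier and is not shown in this diff; one plausible shape consistent with the call sites (a sketch only, with assumed table layout):

    use sqlx::PgPool;
    use tonic::Status;

    pub async fn qualify_table_name_for_data(
        db_pool: &PgPool,
        profile_name: &str,
        table_name: &str,
    ) -> Result<String, Status> {
        // Verify the (schema, table) pair is actually defined before trusting it.
        let defined: bool = sqlx::query_scalar(
            "SELECT EXISTS(
                 SELECT 1 FROM table_definitions td
                 JOIN schemas s ON s.id = td.schema_id
                 WHERE s.name = $1 AND td.table_name = $2
             )",
        )
        .bind(profile_name)
        .bind(table_name)
        .fetch_one(db_pool)
        .await
        .map_err(|e| Status::internal(format!("Qualifier lookup error: {}", e)))?;

        if !defined {
            return Err(Status::not_found("Table not found in profile"));
        }
        Ok(format!("\"{}\".\"{}\"", profile_name, table_name))
    }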

// Perform soft delete using qualified table name
let query = format!(

@@ -15,21 +15,21 @@ pub async fn get_table_data(
let record_id = request.id;

// Lookup profile
let profile = sqlx::query!(
"SELECT id FROM profiles WHERE name = $1",
let schema = sqlx::query!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(format!("Profile lookup error: {}", e)))?;

let profile_id = profile.ok_or_else(|| Status::not_found("Profile not found"))?.id;
let schema_id = schema.ok_or_else(|| Status::not_found("Profile not found"))?.id;

// Lookup table_definition
let table_def = sqlx::query!(
r#"SELECT id, columns FROM table_definitions
WHERE profile_id = $1 AND table_name = $2"#,
profile_id,
WHERE schema_id = $1 AND table_name = $2"#,
schema_id,
table_name
)
.fetch_optional(db_pool)
@@ -66,11 +66,11 @@ pub async fn get_table_data(
.await
.map_err(|e| Status::internal(format!("Foreign key lookup error: {}", e)))?;

// 2. Build the list of foreign key column names
// 2. Build the list of foreign key column names using full table names
let mut foreign_key_columns = Vec::new();
for fk in fk_columns_query {
let base_name = fk.table_name.split_once('_').map_or(fk.table_name.as_str(), |(_, rest)| rest);
foreign_key_columns.push(format!("{}_id", base_name));
// Use the full table name, not a stripped version
foreign_key_columns.push(format!("{}_id", fk.table_name));
}

// 3. Prepare a complete list of all columns to select
@@ -88,7 +88,12 @@ pub async fn get_table_data(
// --- END OF FIX ---

// Qualify table name with schema
let qualified_table = qualify_table_name_for_data(&table_name)?;
let qualified_table = qualify_table_name_for_data(
db_pool,
&profile_name,
&table_name,
)
.await?;

let sql = format!(
"SELECT {} FROM {} WHERE id = $1 AND deleted = false",

@@ -18,22 +18,22 @@ pub async fn get_table_data_by_position(
return Err(Status::invalid_argument("Position must be at least 1"));
}

let profile = sqlx::query!(
"SELECT id FROM profiles WHERE name = $1",
let schema = sqlx::query!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(format!("Profile lookup error: {}", e)))?;

let profile_id = profile.ok_or_else(|| Status::not_found("Profile not found"))?.id;
let schema_id = schema.ok_or_else(|| Status::not_found("Profile not found"))?.id;

let table_exists = sqlx::query_scalar!(
r#"SELECT EXISTS(
SELECT 1 FROM table_definitions
WHERE profile_id = $1 AND table_name = $2
WHERE schema_id = $1 AND table_name = $2
) AS "exists!""#,
profile_id,
schema_id,
table_name
)
.fetch_one(db_pool)
@@ -45,7 +45,12 @@ pub async fn get_table_data_by_position(
}

// Qualify table name with schema
let qualified_table = qualify_table_name_for_data(&table_name)?;
let qualified_table = qualify_table_name_for_data(
db_pool,
&profile_name,
&table_name,
)
.await?;

let id_result = sqlx::query_scalar(
&format!(

@@ -12,15 +12,15 @@ pub async fn get_table_data_count(
// We still need to verify that the table is logically defined for the profile.
// The schema qualifier handles *how* to access it physically, but this check
// ensures the request is valid in the context of the application's definitions.
let profile = sqlx::query!(
"SELECT id FROM profiles WHERE name = $1",
let schema = sqlx::query!(
"SELECT id FROM schemas WHERE name = $1",
request.profile_name
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(format!("Profile lookup error for '{}': {}", request.profile_name, e)))?;

let profile_id = match profile {
let schema_id = match schema {
Some(p) => p.id,
None => return Err(Status::not_found(format!("Profile '{}' not found", request.profile_name))),
};
@@ -28,9 +28,9 @@ pub async fn get_table_data_count(
let table_defined_for_profile = sqlx::query_scalar!(
r#"SELECT EXISTS(
SELECT 1 FROM table_definitions
WHERE profile_id = $1 AND table_name = $2
) AS "exists!" "#, // Added AS "exists!" for clarity with sqlx macro
profile_id,
WHERE schema_id = $1 AND table_name = $2
) AS "exists!" "#,
schema_id,
request.table_name
)
.fetch_one(db_pool)
@@ -47,7 +47,12 @@ pub async fn get_table_data_count(
}

// 2. QUALIFY THE TABLE NAME using the imported function
let qualified_table_name = qualify_table_name_for_data(&request.table_name)?;
let qualified_table = qualify_table_name_for_data(
db_pool,
&request.profile_name,
&request.table_name,
)
.await?;

// 3. USE THE QUALIFIED NAME in the SQL query
let query_sql = format!(
@@ -56,7 +61,7 @@ pub async fn get_table_data_count(
FROM {}
WHERE deleted = FALSE
"#,
qualified_table_name // Use the schema-qualified name here
qualified_table
);

// The rest of the logic remains largely the same, but error messages can be more specific.
@@ -81,14 +86,14 @@ pub async fn get_table_data_count(
// even though it was defined in table_definitions. This is an inconsistency.
return Err(Status::internal(format!(
"Table '{}' is defined but does not physically exist in the database as {}.",
request.table_name, qualified_table_name
request.table_name, qualified_table
)));
}
}
// For other errors, provide a general message.
Err(Status::internal(format!(
"Count query failed for table {}: {}",
qualified_table_name, e
qualified_table, e
)))
}
}

@@ -7,8 +7,9 @@ use chrono::{DateTime, Utc};
use common::proto::multieko2::tables_data::{PostTableDataRequest, PostTableDataResponse};
use std::collections::HashMap;
use std::sync::Arc;
use crate::shared::schema_qualifier::qualify_table_name_for_data;
use prost_types::value::Kind; // NEW: Import the Kind enum
use prost_types::value::Kind;
use rust_decimal::Decimal;
use std::str::FromStr;

use crate::steel::server::execution::{self, Value};
use crate::steel::server::functions::SteelContext;
@@ -25,22 +26,20 @@ pub async fn post_table_data(
let profile_name = request.profile_name;
let table_name = request.table_name;

// Lookup profile
let profile = sqlx::query!(
"SELECT id FROM profiles WHERE name = $1",
let schema = sqlx::query!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(format!("Profile lookup error: {}", e)))?;

let profile_id = profile.ok_or_else(|| Status::not_found("Profile not found"))?.id;
let schema_id = schema.ok_or_else(|| Status::not_found("Profile not found"))?.id;

// Lookup table_definition
let table_def = sqlx::query!(
r#"SELECT id, columns FROM table_definitions
WHERE profile_id = $1 AND table_name = $2"#,
profile_id,
WHERE schema_id = $1 AND table_name = $2"#,
schema_id,
table_name
)
.fetch_optional(db_pool)
@@ -49,7 +48,6 @@ pub async fn post_table_data(

let table_def = table_def.ok_or_else(|| Status::not_found("Table not found"))?;

// Parse columns from JSON
let columns_json: Vec<String> = serde_json::from_value(table_def.columns.clone())
.map_err(|e| Status::internal(format!("Column parsing error: {}", e)))?;

@@ -64,7 +62,6 @@ pub async fn post_table_data(
columns.push((name, sql_type));
}

// Get all foreign key columns for this table
let fk_columns = sqlx::query!(
r#"SELECT ltd.table_name
FROM table_definition_links tdl
@@ -76,17 +73,13 @@ pub async fn post_table_data(
.await
.map_err(|e| Status::internal(format!("Foreign key lookup error: {}", e)))?;

// Build system columns with foreign keys
let mut system_columns = vec!["deleted".to_string()];
for fk in fk_columns {
let base_name = fk.table_name.split_once('_').map_or(fk.table_name.as_str(), |(_, rest)| rest);
system_columns.push(format!("{}_id", base_name));
system_columns.push(format!("{}_id", fk.table_name));
}

// Convert to HashSet for faster lookups
let system_columns_set: std::collections::HashSet<_> = system_columns.iter().map(|s| s.as_str()).collect();

// Validate all data columns
let user_columns: Vec<&String> = columns.iter().map(|(name, _)| name).collect();
for key in request.data.keys() {
if !system_columns_set.contains(key.as_str()) &&
@@ -95,17 +88,18 @@ pub async fn post_table_data(
}
}

// ========================================================================
// FIX #1: SCRIPT VALIDATION LOOP
// This loop now correctly handles JSON `null` (which becomes `None`).
// ========================================================================
let mut string_data_for_scripts = HashMap::new();
for (key, proto_value) in &request.data {
let str_val = match &proto_value.kind {
Some(Kind::StringValue(s)) => s.clone(),
Some(Kind::StringValue(s)) => {
let trimmed = s.trim();
if trimmed.is_empty() {
continue;
}
trimmed.to_string()
},
Some(Kind::NumberValue(n)) => n.to_string(),
Some(Kind::BoolValue(b)) => b.to_string(),
// This now correctly skips both protobuf `NULL` and JSON `null`.
Some(Kind::NullValue(_)) | None => continue,
Some(Kind::StructValue(_)) | Some(Kind::ListValue(_)) => {
return Err(Status::invalid_argument(format!("Unsupported type for script validation in column '{}'", key)));
@@ -114,7 +108,6 @@ pub async fn post_table_data(
string_data_for_scripts.insert(key.clone(), str_val);
}

// Validate Steel scripts
let scripts = sqlx::query!(
"SELECT target_column, script FROM table_scripts WHERE table_definitions_id = $1",
table_def.id
@@ -133,7 +126,8 @@ pub async fn post_table_data(

let context = SteelContext {
current_table: table_name.clone(),
profile_id,
schema_id,
schema_name: profile_name.clone(),
row_data: string_data_for_scripts.clone(),
db_pool: Arc::new(db_pool.clone()),
};
@@ -163,17 +157,11 @@ pub async fn post_table_data(
}
}

// Prepare SQL parameters
let mut params = PgArguments::default();
let mut columns_list = Vec::new();
let mut placeholders = Vec::new();
let mut param_idx = 1;

// ========================================================================
// FIX #2: DATABASE INSERTION LOOP
// This loop now correctly handles JSON `null` (which becomes `None`)
// without crashing and correctly inserts a SQL NULL.
// ========================================================================
for (col, proto_value) in request.data {
let sql_type = if system_columns_set.contains(col.as_str()) {
match col.as_str() {
@@ -188,67 +176,122 @@ pub async fn post_table_data(
.ok_or_else(|| Status::invalid_argument(format!("Column not found: {}", col)))?
};

// Check for `None` (from JSON null) or `Some(NullValue)` first.
let kind = match &proto_value.kind {
None | Some(Kind::NullValue(_)) => {
// It's a null value. Add the correct SQL NULL type and continue.
match sql_type {
"BOOLEAN" => params.add(None::<bool>),
"TEXT" | "VARCHAR(15)" | "VARCHAR(255)" => params.add(None::<String>),
"TEXT" => params.add(None::<String>),
"TIMESTAMPTZ" => params.add(None::<DateTime<Utc>>),
"BIGINT" => params.add(None::<i64>),
"INTEGER" => params.add(None::<i32>),
s if s.starts_with("NUMERIC") => params.add(None::<Decimal>),
_ => return Err(Status::invalid_argument(format!("Unsupported type for null value: {}", sql_type))),
}.map_err(|e| Status::internal(format!("Failed to add null parameter for {}: {}", col, e)))?;

columns_list.push(format!("\"{}\"", col));
placeholders.push(format!("${}", param_idx));
param_idx += 1;
continue; // Skip to the next column in the loop
continue;
}
// If it's not null, just pass the inner `Kind` through.
Some(k) => k,
};

// From here, we know `kind` is not a null type.
match sql_type {
"TEXT" | "VARCHAR(15)" | "VARCHAR(255)" => {
if let Kind::StringValue(value) = kind {
if let Some(max_len) = sql_type.strip_prefix("VARCHAR(").and_then(|s| s.strip_suffix(')')).and_then(|s| s.parse::<usize>().ok()) {
if value.len() > max_len {
return Err(Status::internal(format!("Value too long for {}", col)));
}
if sql_type == "TEXT" {
if let Kind::StringValue(value) = kind {
let trimmed_value = value.trim();

if trimmed_value.is_empty() {
params.add(None::<String>).map_err(|e| Status::internal(format!("Failed to add null parameter for {}: {}", col, e)))?;
} else {
if col == "telefon" && trimmed_value.len() > 15 {
return Err(Status::internal(format!("Value too long for {}", col)));
}
params.add(value).map_err(|e| Status::invalid_argument(format!("Failed to add text parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected string for column '{}'", col)));
params.add(trimmed_value).map_err(|e| Status::invalid_argument(format!("Failed to add text parameter for {}: {}", col, e)))?;
}
},
"BOOLEAN" => {
if let Kind::BoolValue(val) = kind {
params.add(val).map_err(|e| Status::invalid_argument(format!("Failed to add boolean parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected boolean for column '{}'", col)));
} else {
return Err(Status::invalid_argument(format!("Expected string for column '{}'", col)));
}
} else if sql_type == "BOOLEAN" {
if let Kind::BoolValue(val) = kind {
params.add(val).map_err(|e| Status::invalid_argument(format!("Failed to add boolean parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected boolean for column '{}'", col)));
}
} else if sql_type == "TIMESTAMPTZ" {
if let Kind::StringValue(value) = kind {
let dt = DateTime::parse_from_rfc3339(value).map_err(|_| Status::invalid_argument(format!("Invalid timestamp for {}", col)))?;
params.add(dt.with_timezone(&Utc)).map_err(|e| Status::invalid_argument(format!("Failed to add timestamp parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected ISO 8601 string for column '{}'", col)));
}
} else if sql_type == "BIGINT" {
if let Kind::NumberValue(val) = kind {
if val.fract() != 0.0 {
return Err(Status::invalid_argument(format!("Expected integer for column '{}', but got a float", col)));
}
},
"TIMESTAMPTZ" => {
if let Kind::StringValue(value) = kind {
let dt = DateTime::parse_from_rfc3339(value).map_err(|_| Status::invalid_argument(format!("Invalid timestamp for {}", col)))?;
params.add(dt.with_timezone(&Utc)).map_err(|e| Status::invalid_argument(format!("Failed to add timestamp parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected ISO 8601 string for column '{}'", col)));

// Simple universal check: try the conversion and verify it's reversible
// This handles ALL edge cases: infinity, NaN, overflow, underflow, precision loss
let as_i64 = *val as i64;
if (as_i64 as f64) != *val {
return Err(Status::invalid_argument(format!("Integer value out of range for BIGINT column '{}'", col)));
}
},
"BIGINT" => {
if let Kind::NumberValue(val) = kind {
if val.fract() != 0.0 {
return Err(Status::invalid_argument(format!("Expected integer for column '{}', but got a float", col)));

params.add(as_i64).map_err(|e| Status::invalid_argument(format!("Failed to add bigint parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected number for column '{}'", col)));
}
} else if sql_type == "INTEGER" {
if let Kind::NumberValue(val) = kind {
if val.fract() != 0.0 {
return Err(Status::invalid_argument(format!("Expected integer for column '{}', but got a float", col)));
}

// Simple universal check: try the conversion and verify it's reversible
// This handles ALL edge cases: infinity, NaN, overflow, underflow, precision loss
let as_i32 = *val as i32;
if (as_i32 as f64) != *val {
return Err(Status::invalid_argument(format!("Integer value out of range for INTEGER column '{}'", col)));
}

params.add(as_i32).map_err(|e| Status::invalid_argument(format!("Failed to add integer parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected number for column '{}'", col)));
}
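
Note: the cast-and-compare guard deserves a closer look, since it replaces a pile of explicit range checks. In Rust, `as` from f64 to an integer saturates and maps NaN to 0, so comparing the round-trip against the original catches every bad case at once. A standalone sketch:

    fn f64_to_i64_exact(val: f64) -> Option<i64> {
        let as_i64 = val as i64;            // saturating cast; NaN becomes 0
        if (as_i64 as f64) == val { Some(as_i64) } else { None }
    }

    assert_eq!(f64_to_i64_exact(42.0), Some(42));
    assert_eq!(f64_to_i64_exact(f64::NAN), None);       // 0.0 != NaN
    assert_eq!(f64_to_i64_exact(f64::INFINITY), None);  // saturates to i64::MAX
    assert_eq!(f64_to_i64_exact(1e300), None);          // far out of range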
} else if sql_type.starts_with("NUMERIC") {
// MODIFIED: This block is now stricter.
let decimal_val = match kind {
Kind::StringValue(s) => {
let trimmed = s.trim();
if trimmed.is_empty() {
None // Treat empty string as NULL
} else {
// This is the only valid path: parse from a string.
Some(Decimal::from_str(trimmed).map_err(|_| {
Status::invalid_argument(format!(
"Invalid decimal string format for column '{}': {}",
col, s
))
})?)
}
params.add(*val as i64).map_err(|e| Status::invalid_argument(format!("Failed to add integer parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected number for column '{}'", col)));
}
},
_ => return Err(Status::invalid_argument(format!("Unsupported type {}", sql_type))),
// CATCH-ALL: Reject NumberValue, BoolValue, etc. for NUMERIC fields.
_ => {
return Err(Status::invalid_argument(format!(
"Expected a string representation for decimal column '{}', but received a different type.",
col
)));
}
};

params.add(decimal_val).map_err(|e| {
Status::invalid_argument(format!(
"Failed to add decimal parameter for {}: {}",
col, e
))
})?;
} else {
return Err(Status::invalid_argument(format!("Unsupported type {}", sql_type)));
}
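
Note: restricting NUMERIC columns to string input is deliberate: a protobuf NumberValue is an f64, and most decimal fractions have no exact binary representation, so accepting numbers would bake rounding error into money values. Parsing the textual form keeps the exact decimal (sketch):

    use rust_decimal::Decimal;
    use std::str::FromStr;

    let exact = Decimal::from_str("0.1").unwrap(); // exactly 1/10
    // The f64 nearest to 0.1 is 0.1000000000000000055511151231257827...,
    // which is why Kind::NumberValue is rejected for NUMERIC columns.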

columns_list.push(format!("\"{}\"", col));
@@ -260,8 +303,12 @@ pub async fn post_table_data(
return Err(Status::invalid_argument("No valid columns to insert"));
}

// Qualify table name with schema
let qualified_table = qualify_table_name_for_data(&table_name)?;
let qualified_table = crate::shared::schema_qualifier::qualify_table_name_for_data(
db_pool,
&profile_name,
&table_name,
)
.await?;

let sql = format!(
"INSERT INTO {} ({}) VALUES ({}) RETURNING id",
@@ -278,6 +325,12 @@ pub async fn post_table_data(
Ok(id) => id,
Err(e) => {
if let Some(db_err) = e.as_database_error() {
if db_err.code() == Some(std::borrow::Cow::Borrowed("22P02")) ||
db_err.code() == Some(std::borrow::Cow::Borrowed("22003")) {
return Err(Status::invalid_argument(format!(
"Numeric field overflow or invalid format. Check precision and scale. Details: {}", db_err.message()
)));
}
if db_err.code() == Some(std::borrow::Cow::Borrowed("42P01")) {
return Err(Status::internal(format!(
"Table '{}' is defined but does not physically exist in the database as {}",

@@ -1,41 +1,56 @@
// src/tables_data/handlers/put_table_data.rs

use tonic::Status;
use sqlx::{PgPool, Arguments, Postgres};
use sqlx::{PgPool, Arguments};
use sqlx::postgres::PgArguments;
use chrono::{DateTime, Utc};
use common::proto::multieko2::tables_data::{PutTableDataRequest, PutTableDataResponse};
use crate::shared::schema_qualifier::qualify_table_name_for_data;
use std::collections::HashMap;
use std::sync::Arc;
use prost_types::value::Kind;
use rust_decimal::Decimal;
use std::str::FromStr;

use crate::steel::server::execution::{self, Value};
use crate::steel::server::functions::SteelContext;
use crate::indexer::{IndexCommand, IndexCommandData};
use tokio::sync::mpsc;
use tracing::error;

pub async fn put_table_data(
db_pool: &PgPool,
request: PutTableDataRequest,
indexer_tx: &mpsc::Sender<IndexCommand>,
) -> Result<PutTableDataResponse, Status> {
let profile_name = request.profile_name;
let table_name = request.table_name;
let record_id = request.id;

// If no data is provided to update, it's an invalid request.
// An update with no fields is a no-op; we can return success early.
if request.data.is_empty() {
return Err(Status::invalid_argument("No fields provided to update."));
return Ok(PutTableDataResponse {
success: true,
message: "No fields to update.".into(),
updated_id: record_id,
});
}

// Lookup profile
let profile = sqlx::query!(
"SELECT id FROM profiles WHERE name = $1",
// --- Start of logic copied and adapted from post_table_data ---

let schema = sqlx::query!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(format!("Profile lookup error: {}", e)))?;

let profile_id = profile.ok_or_else(|| Status::not_found("Profile not found"))?.id;
let schema_id = schema.ok_or_else(|| Status::not_found("Profile not found"))?.id;

// Lookup table_definition
let table_def = sqlx::query!(
r#"SELECT id, columns FROM table_definitions
WHERE profile_id = $1 AND table_name = $2"#,
profile_id,
WHERE schema_id = $1 AND table_name = $2"#,
schema_id,
table_name
)
.fetch_optional(db_pool)
@@ -44,7 +59,6 @@ pub async fn put_table_data(

let table_def = table_def.ok_or_else(|| Status::not_found("Table not found"))?;

// Parse columns from JSON
let columns_json: Vec<String> = serde_json::from_value(table_def.columns.clone())
.map_err(|e| Status::internal(format!("Column parsing error: {}", e)))?;

@@ -59,7 +73,6 @@ pub async fn put_table_data(
columns.push((name, sql_type));
}

// Get all foreign key columns for this table (needed for validation)
let fk_columns = sqlx::query!(
r#"SELECT ltd.table_name
FROM table_definition_links tdl
@@ -73,20 +86,85 @@ pub async fn put_table_data(

let mut system_columns = vec!["deleted".to_string()];
for fk in fk_columns {
let base_name = fk.table_name.split_once('_').map_or(fk.table_name.as_str(), |(_, rest)| rest);
system_columns.push(format!("{}_id", base_name));
system_columns.push(format!("{}_id", fk.table_name));
}
let system_columns_set: std::collections::HashSet<_> = system_columns.iter().map(|s| s.as_str()).collect();
let user_columns: Vec<&String> = columns.iter().map(|(name, _)| name).collect();

// Validate input columns
let system_columns_set: std::collections::HashSet<_> = system_columns.iter().map(|s| s.as_str()).collect();

let user_columns: Vec<&String> = columns.iter().map(|(name, _)| name).collect();
for key in request.data.keys() {
if !system_columns_set.contains(key.as_str()) && !user_columns.contains(&key) {
if !system_columns_set.contains(key.as_str()) &&
!user_columns.contains(&&key.to_string()) {
return Err(Status::invalid_argument(format!("Invalid column: {}", key)));
}
}

// Prepare SQL parameters
let mut string_data_for_scripts = HashMap::new();
for (key, proto_value) in &request.data {
let str_val = match &proto_value.kind {
Some(Kind::StringValue(s)) => {
let trimmed = s.trim();
if trimmed.is_empty() {
continue;
}
trimmed.to_string()
},
Some(Kind::NumberValue(n)) => n.to_string(),
Some(Kind::BoolValue(b)) => b.to_string(),
Some(Kind::NullValue(_)) | None => continue,
Some(Kind::StructValue(_)) | Some(Kind::ListValue(_)) => {
return Err(Status::invalid_argument(format!("Unsupported type for script validation in column '{}'", key)));
}
};
string_data_for_scripts.insert(key.clone(), str_val);
}

let scripts = sqlx::query!(
"SELECT target_column, script FROM table_scripts WHERE table_definitions_id = $1",
table_def.id
)
.fetch_all(db_pool)
.await
.map_err(|e| Status::internal(format!("Failed to fetch scripts: {}", e)))?;

for script_record in scripts {
let target_column = script_record.target_column;

if let Some(user_value) = string_data_for_scripts.get(&target_column) {
let context = SteelContext {
current_table: table_name.clone(),
schema_id,
schema_name: profile_name.clone(),
row_data: string_data_for_scripts.clone(),
db_pool: Arc::new(db_pool.clone()),
};

let script_result = execution::execute_script(
script_record.script,
"STRINGS",
Arc::new(db_pool.clone()),
context,
)
.map_err(|e| Status::invalid_argument(
format!("Script execution failed for '{}': {}", target_column, e)
))?;

let Value::Strings(mut script_output) = script_result else {
return Err(Status::internal("Script must return string values"));
};

let expected_value = script_output.pop()
.ok_or_else(|| Status::internal("Script returned no values"))?;

if user_value != &expected_value {
return Err(Status::invalid_argument(format!(
"Validation failed for column '{}': Expected '{}', Got '{}'",
target_column, expected_value, user_value
)));
}
}
}

let mut params = PgArguments::default();
let mut set_clauses = Vec::new();
let mut param_idx = 1;
@@ -105,92 +183,177 @@ pub async fn put_table_data(
.ok_or_else(|| Status::invalid_argument(format!("Column not found: {}", col)))?
};

// A provided value cannot be null or empty in a PUT request.
// To clear a field, it should be set to an empty string "" for text,
// or a specific value for other types if needed (though typically not done).
// For now, we reject nulls.
let kind = proto_value.kind.ok_or_else(|| {
Status::invalid_argument(format!("Value for column '{}' cannot be empty in a PUT request. To clear a text field, send an empty string.", col))
})?;
let kind = match &proto_value.kind {
None | Some(Kind::NullValue(_)) => {
match sql_type {
"BOOLEAN" => params.add(None::<bool>),
"TEXT" => params.add(None::<String>),
"TIMESTAMPTZ" => params.add(None::<DateTime<Utc>>),
"BIGINT" => params.add(None::<i64>),
"INTEGER" => params.add(None::<i32>),
s if s.starts_with("NUMERIC") => params.add(None::<Decimal>),
_ => return Err(Status::invalid_argument(format!("Unsupported type for null value: {}", sql_type))),
}.map_err(|e| Status::internal(format!("Failed to add null parameter for {}: {}", col, e)))?;

match sql_type {
"TEXT" | "VARCHAR(15)" | "VARCHAR(255)" => {
if let Kind::StringValue(value) = kind {
params.add(value)
.map_err(|e| Status::internal(format!("Failed to add text parameter for {}: {}", col, e)))?;
set_clauses.push(format!("\"{}\" = ${}", col, param_idx));
param_idx += 1;
continue;
}
Some(k) => k,
};

if sql_type == "TEXT" {
if let Kind::StringValue(value) = kind {
let trimmed_value = value.trim();

if trimmed_value.is_empty() {
params.add(None::<String>).map_err(|e| Status::internal(format!("Failed to add null parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected string for column '{}'", col)));
}
},
"BOOLEAN" => {
if let Kind::BoolValue(val) = kind {
params.add(val)
.map_err(|e| Status::internal(format!("Failed to add boolean parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected boolean for column '{}'", col)));
}
},
"TIMESTAMPTZ" => {
if let Kind::StringValue(value) = kind {
let dt = DateTime::parse_from_rfc3339(&value)
.map_err(|_| Status::invalid_argument(format!("Invalid timestamp for {}", col)))?;
params.add(dt.with_timezone(&Utc))
.map_err(|e| Status::internal(format!("Failed to add timestamp parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected ISO 8601 string for column '{}'", col)));
}
},
"BIGINT" => {
if let Kind::NumberValue(val) = kind {
if val.fract() != 0.0 {
return Err(Status::invalid_argument(format!("Expected integer for column '{}', but got a float", col)));
if col == "telefon" && trimmed_value.len() > 15 {
return Err(Status::internal(format!("Value too long for {}", col)));
}
params.add(val as i64)
.map_err(|e| Status::internal(format!("Failed to add integer parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected number for column '{}'", col)));
params.add(trimmed_value).map_err(|e| Status::invalid_argument(format!("Failed to add text parameter for {}: {}", col, e)))?;
}
},
_ => return Err(Status::invalid_argument(format!("Unsupported type {}", sql_type))),
} else {
return Err(Status::invalid_argument(format!("Expected string for column '{}'", col)));
}
} else if sql_type == "BOOLEAN" {
if let Kind::BoolValue(val) = kind {
params.add(val).map_err(|e| Status::invalid_argument(format!("Failed to add boolean parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected boolean for column '{}'", col)));
}
} else if sql_type == "TIMESTAMPTZ" {
if let Kind::StringValue(value) = kind {
let dt = DateTime::parse_from_rfc3339(value).map_err(|_| Status::invalid_argument(format!("Invalid timestamp for {}", col)))?;
params.add(dt.with_timezone(&Utc)).map_err(|e| Status::invalid_argument(format!("Failed to add timestamp parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected ISO 8601 string for column '{}'", col)));
}
} else if sql_type == "BIGINT" {
if let Kind::NumberValue(val) = kind {
if val.fract() != 0.0 {
return Err(Status::invalid_argument(format!("Expected integer for column '{}', but got a float", col)));
}
let as_i64 = *val as i64;
if (as_i64 as f64) != *val {
return Err(Status::invalid_argument(format!("Integer value out of range for BIGINT column '{}'", col)));
}
params.add(as_i64).map_err(|e| Status::invalid_argument(format!("Failed to add bigint parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected number for column '{}'", col)));
}
} else if sql_type == "INTEGER" {
if let Kind::NumberValue(val) = kind {
if val.fract() != 0.0 {
return Err(Status::invalid_argument(format!("Expected integer for column '{}', but got a float", col)));
}
let as_i32 = *val as i32;
if (as_i32 as f64) != *val {
return Err(Status::invalid_argument(format!("Integer value out of range for INTEGER column '{}'", col)));
}
params.add(as_i32).map_err(|e| Status::invalid_argument(format!("Failed to add integer parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected number for column '{}'", col)));
}
} else if sql_type.starts_with("NUMERIC") {
let decimal_val = match kind {
Kind::StringValue(s) => {
let trimmed = s.trim();
if trimmed.is_empty() {
None
} else {
Some(Decimal::from_str(trimmed).map_err(|_| {
Status::invalid_argument(format!(
"Invalid decimal string format for column '{}': {}",
col, s
))
})?)
}
}
_ => {
return Err(Status::invalid_argument(format!(
"Expected a string representation for decimal column '{}', but received a different type.",
col
)));
}
};
params.add(decimal_val).map_err(|e| {
Status::invalid_argument(format!(
"Failed to add decimal parameter for {}: {}",
col, e
))
})?;
} else {
return Err(Status::invalid_argument(format!("Unsupported type {}", sql_type)));
}

set_clauses.push(format!("\"{}\" = ${}", col, param_idx));
param_idx += 1;
}

params.add(record_id)
.map_err(|e| Status::internal(format!("Failed to add record_id parameter: {}", e)))?;
// --- End of copied logic ---

if set_clauses.is_empty() {
return Ok(PutTableDataResponse {
success: true,
message: "No valid fields to update after processing.".into(),
updated_id: record_id,
});
}

let qualified_table = crate::shared::schema_qualifier::qualify_table_name_for_data(
db_pool,
&profile_name,
&table_name,
)
.await?;

let qualified_table = qualify_table_name_for_data(&table_name)?;
let set_clause = set_clauses.join(", ");
let sql = format!(
"UPDATE {} SET {} WHERE id = ${} AND deleted = FALSE RETURNING id",
"UPDATE {} SET {} WHERE id = ${} RETURNING id",
qualified_table,
set_clause,
param_idx
);
|
||||
|
||||
let result = sqlx::query_scalar_with::<Postgres, i64, _>(&sql, params)
|
||||
params.add(record_id).map_err(|e| Status::internal(format!("Failed to add record_id parameter: {}", e)))?;
|
||||
|
||||
let result = sqlx::query_scalar_with::<_, i64, _>(&sql, params)
|
||||
.fetch_optional(db_pool)
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(Some(updated_id)) => Ok(PutTableDataResponse {
|
||||
success: true,
|
||||
message: "Data updated successfully".into(),
|
||||
updated_id,
|
||||
}),
|
||||
Ok(None) => Err(Status::not_found("Record not found or already deleted")),
|
||||
let updated_id = match result {
|
||||
Ok(Some(id)) => id,
|
||||
Ok(None) => return Err(Status::not_found("Record not found")),
|
||||
Err(e) => {
|
||||
if let Some(db_err) = e.as_database_error() {
|
||||
if db_err.code() == Some(std::borrow::Cow::Borrowed("42P01")) {
|
||||
return Err(Status::internal(format!(
|
||||
"Table '{}' is defined but does not physically exist in the database as {}",
|
||||
table_name, qualified_table
|
||||
if db_err.code() == Some(std::borrow::Cow::Borrowed("22P02")) ||
|
||||
db_err.code() == Some(std::borrow::Cow::Borrowed("22003")) {
|
||||
return Err(Status::invalid_argument(format!(
|
||||
"Numeric field overflow or invalid format. Check precision and scale. Details: {}", db_err.message()
|
||||
)));
|
||||
}
|
||||
}
|
||||
Err(Status::internal(format!("Update failed: {}", e)))
|
||||
return Err(Status::internal(format!("Update failed: {}", e)));
|
||||
}
|
||||
};
|
||||
|
||||
let command = IndexCommand::AddOrUpdate(IndexCommandData {
|
||||
table_name: table_name.clone(),
|
||||
row_id: updated_id,
|
||||
});
|
||||
|
||||
if let Err(e) = indexer_tx.send(command).await {
|
||||
error!(
|
||||
"CRITICAL: DB update for table '{}' (id: {}) succeeded but failed to queue for indexing: {}. Search index is now inconsistent.",
|
||||
table_name, updated_id, e
|
||||
);
|
||||
}
|
||||
|
||||
Ok(PutTableDataResponse {
|
||||
success: true,
|
||||
message: "Data updated successfully".into(),
|
||||
updated_id,
|
||||
})
|
||||
}
|
||||
|
||||
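Note: the BIGINT and INTEGER branches above rely on an f64 -> integer -> f64 round trip to detect out-of-range JSON numbers. A minimal, self-contained sketch of that check (the helper name is illustrative, not part of the handler):

// Sketch: JSON numbers arrive as f64; f64 represents integers exactly only
// up to 2^53, and Rust's float-to-int `as` casts saturate, so a value
// survives the round trip unchanged exactly when it was an in-range,
// exactly representable integer.
fn f64_to_i64_checked(val: f64) -> Option<i64> {
    if val.fract() != 0.0 {
        return None; // reject fractional values
    }
    let as_i64 = val as i64; // saturating cast
    if (as_i64 as f64) != val {
        return None; // saturated or lost precision: out of range
    }
    Some(as_i64)
}

fn main() {
    assert_eq!(f64_to_i64_checked(42.0), Some(42));
    assert_eq!(f64_to_i64_checked(1.5), None);
    assert_eq!(f64_to_i64_checked(1e19), None); // exceeds i64::MAX
}
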
@@ -1,58 +0,0 @@
POST
❯ grpcurl -plaintext -d '{
  "adresar_id": 1,
  "c_dokladu": "DOC123",
  "datum": "01:10:2023",
  "c_faktury": "INV123",
  "obsah": "Sample content",
  "stredisko": "Center A",
  "c_uctu": "ACC123",
  "md": "MD123",
  "identif": "ID123",
  "poznanka": "Sample note",
  "firma": "AAA"
}' localhost:50051 multieko2.uctovnictvo.Uctovnictvo/PostUctovnictvo
{
  "id": "3",
  "adresarId": "1",
  "cDokladu": "DOC123",
  "datum": "2023-10-01",
  "cFaktury": "INV123",
  "obsah": "Sample content",
  "stredisko": "Center A",
  "cUctu": "ACC123",
  "md": "MD123",
  "identif": "ID123",
  "poznanka": "Sample note",
  "firma": "AAA"
}

PUT
❯ grpcurl -plaintext -d '{
  "id": 1,
  "adresar_id": 1,
  "c_dokladu": "UPDATED-DOC",
  "datum": "15.11.2023",
  "c_faktury": "UPDATED-INV",
  "obsah": "Updated content",
  "stredisko": "Updated Center",
  "c_uctu": "UPD-ACC",
  "md": "UPD-MD",
  "identif": "UPD-ID",
  "poznanka": "Updated note",
  "firma": "UPD"
}' localhost:50051 multieko2.uctovnictvo.Uctovnictvo/PutUctovnictvo
{
  "id": "1",
  "adresarId": "1",
  "cDokladu": "UPDATED-DOC",
  "datum": "15.11.2023",
  "cFaktury": "UPDATED-INV",
  "obsah": "Updated content",
  "stredisko": "Updated Center",
  "cUctu": "UPD-ACC",
  "md": "UPD-MD",
  "identif": "UPD-ID",
  "poznanka": "Updated note",
  "firma": "UPD"
}
@@ -1,41 +0,0 @@
❯ grpcurl -plaintext -d '{}' localhost:50051 multieko2.uctovnictvo.Uctovnictvo/GetUctovnictvoCount

{
  "count": "4"
}
❯ grpcurl -plaintext -d '{
  "position": 2
}' localhost:50051 multieko2.uctovnictvo.Uctovnictvo/GetUctovnictvoByPosition

{
  "id": "2",
  "adresarId": "1",
  "cDokladu": "DOC123",
  "datum": "01.10.2023",
  "cFaktury": "INV123",
  "obsah": "Sample content",
  "stredisko": "Center A",
  "cUctu": "ACC123",
  "md": "MD123",
  "identif": "ID123",
  "poznanka": "Sample note",
  "firma": "AAA"
}
❯ grpcurl -plaintext -d '{
  "id": 1
}' localhost:50051 multieko2.uctovnictvo.Uctovnictvo/GetUctovnictvo
{
  "id": "1",
  "adresarId": "1",
  "cDokladu": "DOC123",
  "datum": "01.10.2023",
  "cFaktury": "INV123",
  "obsah": "Sample content",
  "stredisko": "Center A",
  "cUctu": "ACC123",
  "md": "MD123",
  "identif": "ID123",
  "poznanka": "Sample note",
  "firma": "AAA"
}

@@ -1,12 +0,0 @@
// src/uctovnictvo/handlers.rs
pub mod post_uctovnictvo;
pub mod get_uctovnictvo;
pub mod get_uctovnictvo_count;
pub mod get_uctovnictvo_by_position;
pub mod put_uctovnictvo;

pub use post_uctovnictvo::post_uctovnictvo;
pub use get_uctovnictvo::get_uctovnictvo;
pub use get_uctovnictvo_count::get_uctovnictvo_count;
pub use get_uctovnictvo_by_position::get_uctovnictvo_by_position;
pub use put_uctovnictvo::put_uctovnictvo;
@@ -1,51 +0,0 @@
// src/uctovnictvo/handlers/get_uctovnictvo.rs
use tonic::Status;
use sqlx::PgPool;
use crate::uctovnictvo::models::Uctovnictvo;
use common::proto::multieko2::uctovnictvo::{GetUctovnictvoRequest, UctovnictvoResponse};

pub async fn get_uctovnictvo(
    db_pool: &PgPool,
    request: GetUctovnictvoRequest,
) -> Result<UctovnictvoResponse, Status> {
    let uctovnictvo = sqlx::query_as!(
        Uctovnictvo,
        r#"
        SELECT
            id,
            deleted,
            adresar_id,
            c_dokladu,
            datum as "datum: chrono::NaiveDate",
            c_faktury,
            obsah,
            stredisko,
            c_uctu,
            md,
            identif,
            poznanka,
            firma
        FROM uctovnictvo
        WHERE id = $1
        "#,
        request.id
    )
    .fetch_one(db_pool)
    .await
    .map_err(|e| Status::not_found(e.to_string()))?;

    Ok(UctovnictvoResponse {
        id: uctovnictvo.id,
        adresar_id: uctovnictvo.adresar_id,
        c_dokladu: uctovnictvo.c_dokladu,
        datum: uctovnictvo.datum.format("%d.%m.%Y").to_string(),
        c_faktury: uctovnictvo.c_faktury,
        obsah: uctovnictvo.obsah.unwrap_or_default(),
        stredisko: uctovnictvo.stredisko.unwrap_or_default(),
        c_uctu: uctovnictvo.c_uctu.unwrap_or_default(),
        md: uctovnictvo.md.unwrap_or_default(),
        identif: uctovnictvo.identif.unwrap_or_default(),
        poznanka: uctovnictvo.poznanka.unwrap_or_default(),
        firma: uctovnictvo.firma,
    })
}
@@ -1,34 +0,0 @@
// src/uctovnictvo/handlers/get_uctovnictvo_by_position.rs
use tonic::Status;
use sqlx::PgPool;
use common::proto::multieko2::common::PositionRequest;
use super::get_uctovnictvo;

pub async fn get_uctovnictvo_by_position(
    db_pool: &PgPool,
    request: PositionRequest,
) -> Result<common::proto::multieko2::uctovnictvo::UctovnictvoResponse, Status> {
    if request.position < 1 {
        return Err(Status::invalid_argument("Position must be at least 1"));
    }

    // Find the ID of the Nth non-deleted record
    let id: i64 = sqlx::query_scalar!(
        r#"
        SELECT id
        FROM uctovnictvo
        WHERE deleted = FALSE
        ORDER BY id ASC
        OFFSET $1
        LIMIT 1
        "#,
        request.position - 1
    )
    .fetch_optional(db_pool)
    .await
    .map_err(|e| Status::internal(e.to_string()))?
    .ok_or_else(|| Status::not_found("Position out of bounds"))?;

    // Now fetch the complete record using the existing get_uctovnictvo function
    get_uctovnictvo(db_pool, common::proto::multieko2::uctovnictvo::GetUctovnictvoRequest { id }).await
}
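The lookup above maps the API's 1-based position onto SQL's 0-based OFFSET. A standalone sketch of the same pattern using sqlx's unchecked query form (the `items` table name is illustrative, not from this codebase):

// Sketch: fetch the id of the Nth visible row (1-based). OFFSET skips
// position - 1 rows, LIMIT 1 takes the row at that position, and
// fetch_optional yields None when the position is out of bounds.
async fn nth_visible_id(pool: &sqlx::PgPool, position: i64) -> Result<Option<i64>, sqlx::Error> {
    sqlx::query_scalar::<_, i64>(
        "SELECT id FROM items WHERE deleted = FALSE ORDER BY id ASC OFFSET $1 LIMIT 1",
    )
    .bind(position - 1)
    .fetch_optional(pool)
    .await
}
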
@@ -1,23 +0,0 @@
// src/uctovnictvo/handlers/get_uctovnictvo_count.rs
use tonic::Status;
use sqlx::PgPool;
use common::proto::multieko2::common::{CountResponse, Empty};

pub async fn get_uctovnictvo_count(
    db_pool: &PgPool,
    _request: Empty,
) -> Result<CountResponse, Status> {
    let count: i64 = sqlx::query_scalar!(
        r#"
        SELECT COUNT(*) AS count
        FROM uctovnictvo
        WHERE deleted = FALSE
        "#
    )
    .fetch_one(db_pool)
    .await
    .map_err(|e| Status::internal(e.to_string()))?
    .unwrap_or(0);

    Ok(CountResponse { count })
}
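Aside: sqlx's query_scalar! types COUNT(*) as Option<i64> because Postgres reports aggregate output columns as nullable, hence the .unwrap_or(0). A sketch of the alternative, using the sqlx macros' non-null override suffix on the column alias:

// Sketch: the `!` in the alias tells the sqlx macros to treat the column
// as non-null, so the scalar comes back as i64 instead of Option<i64>.
let count: i64 = sqlx::query_scalar!(
    r#"SELECT COUNT(*) AS "count!" FROM uctovnictvo WHERE deleted = FALSE"#
)
.fetch_one(db_pool)
.await
.map_err(|e| Status::internal(e.to_string()))?;
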
@@ -1,73 +0,0 @@
// src/uctovnictvo/handlers/post_uctovnictvo.rs
use tonic::Status;
use sqlx::PgPool;
use crate::uctovnictvo::models::Uctovnictvo;
use common::proto::multieko2::uctovnictvo::{PostUctovnictvoRequest, UctovnictvoResponse};
use crate::shared::date_utils::parse_date_with_multiple_formats; // Import from shared module

pub async fn post_uctovnictvo(
    db_pool: &PgPool,
    request: PostUctovnictvoRequest,
) -> Result<UctovnictvoResponse, Status> {
    let datum = parse_date_with_multiple_formats(&request.datum)
        .ok_or_else(|| Status::invalid_argument(format!("Invalid date format: {}", request.datum)))?;

    // Pass the NaiveDate value directly.
    let uctovnictvo = sqlx::query_as!(
        Uctovnictvo,
        r#"
        INSERT INTO uctovnictvo (
            adresar_id, c_dokladu, datum, c_faktury, obsah, stredisko,
            c_uctu, md, identif, poznanka, firma, deleted
        )
        VALUES (
            $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12
        )
        RETURNING
            id,
            deleted,
            adresar_id,
            c_dokladu,
            datum as "datum: chrono::NaiveDate",
            c_faktury,
            obsah,
            stredisko,
            c_uctu,
            md,
            identif,
            poznanka,
            firma
        "#,
        request.adresar_id,
        request.c_dokladu,
        datum as chrono::NaiveDate,
        request.c_faktury,
        request.obsah,
        request.stredisko,
        request.c_uctu,
        request.md,
        request.identif,
        request.poznanka,
        request.firma,
        false
    )
    .fetch_one(db_pool)
    .await
    .map_err(|e| Status::internal(e.to_string()))?;

    // Return the response with formatted date
    Ok(UctovnictvoResponse {
        id: uctovnictvo.id,
        adresar_id: uctovnictvo.adresar_id,
        c_dokladu: uctovnictvo.c_dokladu,
        datum: uctovnictvo.datum.format("%d.%m.%Y").to_string(), // Standard Slovak format
        c_faktury: uctovnictvo.c_faktury,
        obsah: uctovnictvo.obsah.unwrap_or_default(),
        stredisko: uctovnictvo.stredisko.unwrap_or_default(),
        c_uctu: uctovnictvo.c_uctu.unwrap_or_default(),
        md: uctovnictvo.md.unwrap_or_default(),
        identif: uctovnictvo.identif.unwrap_or_default(),
        poznanka: uctovnictvo.poznanka.unwrap_or_default(),
        firma: uctovnictvo.firma,
    })
}
@@ -1,78 +0,0 @@
// src/uctovnictvo/handlers/put_uctovnictvo.rs
use tonic::Status;
use sqlx::PgPool;
use crate::uctovnictvo::models::Uctovnictvo;
use common::proto::multieko2::uctovnictvo::{PutUctovnictvoRequest, UctovnictvoResponse};
use crate::shared::date_utils::parse_date_with_multiple_formats; // Import from shared module

pub async fn put_uctovnictvo(
    db_pool: &PgPool,
    request: PutUctovnictvoRequest,
) -> Result<UctovnictvoResponse, Status> {
    let datum = parse_date_with_multiple_formats(&request.datum)
        .ok_or_else(|| Status::invalid_argument("Invalid date format"))?;

    let uctovnictvo = sqlx::query_as!(
        Uctovnictvo,
        r#"
        UPDATE uctovnictvo
        SET
            adresar_id = $2,
            c_dokladu = $3,
            datum = $4,
            c_faktury = $5,
            obsah = $6,
            stredisko = $7,
            c_uctu = $8,
            md = $9,
            identif = $10,
            poznanka = $11,
            firma = $12
        WHERE id = $1 AND deleted = FALSE
        RETURNING
            id,
            deleted,
            adresar_id,
            c_dokladu,
            datum as "datum: chrono::NaiveDate",
            c_faktury,
            obsah,
            stredisko,
            c_uctu,
            md,
            identif,
            poznanka,
            firma
        "#,
        request.id,
        request.adresar_id,
        request.c_dokladu,
        datum as chrono::NaiveDate,
        request.c_faktury,
        request.obsah,
        request.stredisko,
        request.c_uctu,
        request.md,
        request.identif,
        request.poznanka,
        request.firma
    )
    .fetch_one(db_pool)
    .await
    .map_err(|e| Status::internal(e.to_string()))?;

    Ok(UctovnictvoResponse {
        id: uctovnictvo.id,
        adresar_id: uctovnictvo.adresar_id,
        c_dokladu: uctovnictvo.c_dokladu,
        datum: uctovnictvo.datum.format("%d.%m.%Y").to_string(),
        c_faktury: uctovnictvo.c_faktury,
        obsah: uctovnictvo.obsah.unwrap_or_default(),
        stredisko: uctovnictvo.stredisko.unwrap_or_default(),
        c_uctu: uctovnictvo.c_uctu.unwrap_or_default(),
        md: uctovnictvo.md.unwrap_or_default(),
        identif: uctovnictvo.identif.unwrap_or_default(),
        poznanka: uctovnictvo.poznanka.unwrap_or_default(),
        firma: uctovnictvo.firma,
    })
}
@@ -1,4 +0,0 @@
// src/uctovnictvo/mod.rs

pub mod models;
pub mod handlers;
@@ -1,21 +0,0 @@
// src/uctovnictvo/models.rs
use chrono::NaiveDate;
use serde::{Deserialize, Serialize};

#[derive(Debug, sqlx::FromRow, Serialize, Deserialize)]
pub struct Uctovnictvo {
    pub id: i64,
    pub deleted: bool,
    pub adresar_id: i64,
    pub c_dokladu: String,
    pub datum: NaiveDate,
    pub c_faktury: String,
    pub obsah: Option<String>,
    pub stredisko: Option<String>,
    pub c_uctu: Option<String>,
    pub md: Option<String>,
    pub identif: Option<String>,
    pub poznanka: Option<String>,
    pub firma: String,
}

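The Option<String> fields above correspond to nullable columns; the handlers flatten them to empty strings on the wire via unwrap_or_default(). A minimal sketch of that convention in isolation (the helper name is illustrative):

// Sketch: NULL in the database becomes "" in the proto response, so gRPC
// clients never have to distinguish a missing string from an empty one.
fn flatten(nullable: Option<String>) -> String {
    nullable.unwrap_or_default()
}

fn main() {
    assert_eq!(flatten(None), "");
    assert_eq!(flatten(Some("Center A".into())), "Center A");
}
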
@@ -1,161 +0,0 @@
// tests/adresar/delete_adresar_test.rs
use rstest::{fixture, rstest};
use server::adresar::handlers::delete_adresar;
use common::proto::multieko2::adresar::DeleteAdresarRequest;
use crate::common::setup_test_db;
use sqlx::PgPool;
use tonic;
use std::sync::Arc;
use tokio::sync::Mutex;

// Reuse the mutex from get_adresar_by_position_test or create a new one
lazy_static::lazy_static! {
    static ref TEST_MUTEX: Arc<Mutex<()>> = Arc::new(Mutex::new(()));
}

// Fixtures
#[fixture]
async fn pool() -> PgPool {
    setup_test_db().await
}

#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
    let pool = pool.await;
    pool.close().await;
    pool
}

#[fixture]
async fn existing_record(#[future] pool: PgPool) -> (PgPool, i64, String) {
    let pool = pool.await;
    // Use a unique prefix for test data
    let prefix = format!("DeleteTest_{}", chrono::Utc::now().timestamp_nanos_opt().unwrap_or_default());

    let record = sqlx::query!(
        r#"
        INSERT INTO adresar (firma, deleted)
        VALUES ($1, false)
        RETURNING id
        "#,
        format!("{}_Company", prefix)
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    (pool, record.id, prefix)
}

#[fixture]
async fn existing_deleted_record(#[future] pool: PgPool) -> (PgPool, i64, String) {
    let pool = pool.await;
    // Use a unique prefix for test data
    let prefix = format!("DeletedTest_{}", chrono::Utc::now().timestamp_nanos_opt().unwrap_or_default());

    let record = sqlx::query!(
        r#"
        INSERT INTO adresar (firma, deleted)
        VALUES ($1, true)
        RETURNING id
        "#,
        format!("{}_Deleted", prefix)
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    (pool, record.id, prefix)
}

// Helper to check if the record is deleted
async fn assert_record_deleted(pool: &PgPool, id: i64) {
    let db_record = sqlx::query!("SELECT deleted FROM adresar WHERE id = $1", id)
        .fetch_one(pool)
        .await
        .unwrap();

    assert!(db_record.deleted);
}

// Helper to clean up test records
async fn cleanup_test_records(pool: &PgPool, prefix: &str) {
    if !prefix.is_empty() {
        sqlx::query!(
            "DELETE FROM adresar WHERE firma LIKE $1",
            format!("{}%", prefix)
        )
        .execute(pool)
        .await
        .unwrap();
    }
}

// Tests
#[rstest]
#[tokio::test]
async fn test_delete_adresar_success(
    #[future] existing_record: (PgPool, i64, String),
) {
    // Take a lock to prevent concurrent test execution
    let _guard = TEST_MUTEX.lock().await;

    let (pool, id, prefix) = existing_record.await;
    let request = DeleteAdresarRequest { id };
    let response = delete_adresar(&pool, request).await.unwrap();

    assert!(response.success);
    assert_record_deleted(&pool, id).await;

    // Clean up
    cleanup_test_records(&pool, &prefix).await;
}

#[rstest]
#[tokio::test]
async fn test_delete_adresar_nonexistent_id(
    #[future] pool: PgPool,
) {
    // Take a lock to prevent concurrent test execution
    let _guard = TEST_MUTEX.lock().await;

    let pool = pool.await;
    let request = DeleteAdresarRequest { id: 9999 };
    let response = delete_adresar(&pool, request).await.unwrap();

    // Deleting a non-existent record should return success: false
    assert!(!response.success);
}

#[rstest]
#[tokio::test]
async fn test_delete_adresar_already_deleted(
    #[future] existing_deleted_record: (PgPool, i64, String),
) {
    // Take a lock to prevent concurrent test execution
    let _guard = TEST_MUTEX.lock().await;

    let (pool, id, prefix) = existing_deleted_record.await;
    let request = DeleteAdresarRequest { id };
    let response = delete_adresar(&pool, request).await.unwrap();

    // Deleting an already deleted record should return success: false
    assert!(!response.success);

    // Clean up
    cleanup_test_records(&pool, &prefix).await;
}

#[rstest]
#[tokio::test]
async fn test_delete_adresar_database_error(
    #[future] closed_pool: PgPool,
) {
    // No need for mutex here as we're not modifying the database
    let closed_pool = closed_pool.await;
    let request = DeleteAdresarRequest { id: 1 };
    let result = delete_adresar(&closed_pool, request).await;

    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}
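The lazy_static! mutex above serializes tests that mutate shared tables. The same thing can be written with once_cell (assuming that crate is a dependency; this is an alternative sketch, not what the tests use):

use once_cell::sync::Lazy;
use tokio::sync::Mutex;

// Sketch: a static already has 'static lifetime, so no Arc is needed.
static TEST_MUTEX: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));

#[tokio::main]
async fn main() {
    let _guard = TEST_MUTEX.lock().await;
    // ... test body runs here while the lock is held ...
}
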
@@ -1,368 +0,0 @@
// tests/adresar/get_adresar_by_position_test.rs
use rstest::{fixture, rstest};
use server::adresar::handlers::{get_adresar_by_position, get_adresar_count};
use common::proto::multieko2::common::{PositionRequest, Empty};
use crate::common::setup_test_db;
use sqlx::PgPool;
use tonic;
use std::sync::Arc;
use tokio::sync::Mutex;

// Use a global mutex to synchronize test execution
// This prevents tests from interfering with each other
lazy_static::lazy_static! {
    static ref TEST_MUTEX: Arc<Mutex<()>> = Arc::new(Mutex::new(()));
}

#[fixture]
async fn pool() -> PgPool {
    setup_test_db().await
}

#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
    let pool = pool.await;
    pool.close().await;
    pool
}

// Create a test record with specific data and delete status
async fn create_test_record(pool: &PgPool, firma: &str, deleted: bool) -> i64 {
    sqlx::query_scalar!(
        "INSERT INTO adresar (firma, deleted) VALUES ($1, $2) RETURNING id",
        firma,
        deleted
    )
    .fetch_one(pool)
    .await
    .unwrap()
}

// Clean up test records after tests
async fn cleanup_test_records(pool: &PgPool, prefix: &str) {
    sqlx::query!(
        "DELETE FROM adresar WHERE firma LIKE $1",
        format!("{}%", prefix)
    )
    .execute(pool)
    .await
    .unwrap();
}

// Find the position of a record in the database
async fn find_position_of_record(pool: &PgPool, id: i64) -> Option<i64> {
    // Get all non-deleted records ordered by ID
    let records = sqlx::query_scalar!(
        "SELECT id FROM adresar WHERE deleted = FALSE ORDER BY id ASC"
    )
    .fetch_all(pool)
    .await
    .unwrap();

    // Find the position of our record (1-based)
    for (index, record_id) in records.iter().enumerate() {
        if *record_id == id {
            return Some((index + 1) as i64);
        }
    }

    None
}

// Test position validation
#[rstest]
#[tokio::test]
async fn test_position_zero(#[future] pool: PgPool) {
    let pool = pool.await;

    // Request position 0 (invalid)
    let request = PositionRequest { position: 0 };
    let result = get_adresar_by_position(&pool, request).await;

    // Verify it returns an error
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
}

#[rstest]
#[tokio::test]
async fn test_position_negative(#[future] pool: PgPool) {
    let pool = pool.await;

    // Request negative position (invalid)
    let request = PositionRequest { position: -1 };
    let result = get_adresar_by_position(&pool, request).await;

    // Verify it returns an error
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
}

#[rstest]
#[tokio::test]
async fn test_basic_position_retrieval(#[future] pool: PgPool) {
    let pool = pool.await;

    // Take a lock to prevent concurrent test execution
    let _guard = TEST_MUTEX.lock().await;

    // Use a unique prefix for test data to prevent conflicts
    let prefix = "PosBasicTest";

    // Clean up any existing test data
    cleanup_test_records(&pool, prefix).await;

    // Create test records
    let id1 = create_test_record(&pool, &format!("{}_1", prefix), false).await;
    let id2 = create_test_record(&pool, &format!("{}_2", prefix), false).await;
    let id3 = create_test_record(&pool, &format!("{}_3", prefix), false).await;

    // Find the positions of these records in the database
    let pos1 = find_position_of_record(&pool, id1).await.unwrap();
    let pos2 = find_position_of_record(&pool, id2).await.unwrap();
    let pos3 = find_position_of_record(&pool, id3).await.unwrap();

    // Test retrieving each position
    let response1 = get_adresar_by_position(&pool, PositionRequest { position: pos1 }).await.unwrap();
    assert_eq!(response1.id, id1);

    let response2 = get_adresar_by_position(&pool, PositionRequest { position: pos2 }).await.unwrap();
    assert_eq!(response2.id, id2);

    let response3 = get_adresar_by_position(&pool, PositionRequest { position: pos3 }).await.unwrap();
    assert_eq!(response3.id, id3);

    // Clean up test data
    cleanup_test_records(&pool, prefix).await;
}

#[rstest]
#[tokio::test]
async fn test_deleted_records_excluded(#[future] pool: PgPool) {
    let pool = pool.await;

    // Take a lock to prevent concurrent test execution
    let _guard = TEST_MUTEX.lock().await;

    // Use a unique prefix for test data
    let prefix = "PosDeletedTest";

    // Clean up any existing test data
    cleanup_test_records(&pool, prefix).await;

    // Create a mix of active and deleted records
    let id1 = create_test_record(&pool, &format!("{}_1", prefix), false).await;
    let _id_deleted = create_test_record(&pool, &format!("{}_del", prefix), true).await;
    let id2 = create_test_record(&pool, &format!("{}_2", prefix), false).await;

    // Find positions
    let pos1 = find_position_of_record(&pool, id1).await.unwrap();
    let pos2 = find_position_of_record(&pool, id2).await.unwrap();

    // Verify positions are consecutive, which means the deleted record is excluded
    assert_eq!(pos2, pos1 + 1);

    // Retrieve by position and verify
    let response1 = get_adresar_by_position(&pool, PositionRequest { position: pos1 }).await.unwrap();
    assert_eq!(response1.id, id1);

    let response2 = get_adresar_by_position(&pool, PositionRequest { position: pos2 }).await.unwrap();
    assert_eq!(response2.id, id2);

    // Clean up test data
    cleanup_test_records(&pool, prefix).await;
}

#[rstest]
#[tokio::test]
async fn test_position_changes_after_deletion(#[future] pool: PgPool) {
    let pool = pool.await;

    // Take a lock to prevent concurrent test execution
    let _guard = TEST_MUTEX.lock().await;

    // Use a unique prefix for test data
    let prefix = "PosChangeTest";

    // Clean up any existing test data
    cleanup_test_records(&pool, prefix).await;

    // Create records
    let id1 = create_test_record(&pool, &format!("{}_1", prefix), false).await;
    let id2 = create_test_record(&pool, &format!("{}_2", prefix), false).await;
    let id3 = create_test_record(&pool, &format!("{}_3", prefix), false).await;

    // Find initial positions
    let _pos1 = find_position_of_record(&pool, id1).await.unwrap();
    let pos2 = find_position_of_record(&pool, id2).await.unwrap();
    let pos3 = find_position_of_record(&pool, id3).await.unwrap();

    // Mark the first record as deleted
    sqlx::query!("UPDATE adresar SET deleted = TRUE WHERE id = $1", id1)
        .execute(&pool)
        .await
        .unwrap();

    // Find new positions
    let pos2_after = find_position_of_record(&pool, id2).await.unwrap();
    let pos3_after = find_position_of_record(&pool, id3).await.unwrap();

    // Verify positions shifted
    assert!(pos2_after < pos2);
    assert!(pos3_after < pos3);

    // Verify by retrieving records at new positions
    let response_at_first = get_adresar_by_position(&pool, PositionRequest { position: pos2_after }).await.unwrap();
    assert_eq!(response_at_first.id, id2);

    // Clean up test data
    cleanup_test_records(&pool, prefix).await;
}

#[rstest]
#[tokio::test]
async fn test_position_out_of_bounds(#[future] pool: PgPool) {
    let pool = pool.await;

    // Take a lock to prevent concurrent test execution
    let _guard = TEST_MUTEX.lock().await;

    // Get the total count of non-deleted records
    let count = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
    )
    .fetch_one(&pool)
    .await
    .unwrap()
    .unwrap_or(0);

    // Request a position beyond the count
    let request = PositionRequest { position: count + 1 };
    let result = get_adresar_by_position(&pool, request).await;

    // Verify it returns an error
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_database_error(#[future] closed_pool: PgPool) {
    let closed_pool = closed_pool.await;

    // Attempt to query with a closed pool
    let request = PositionRequest { position: 1 };
    let result = get_adresar_by_position(&closed_pool, request).await;

    // Verify it returns an internal error
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

#[rstest]
#[tokio::test]
async fn test_position_after_adding_record(#[future] pool: PgPool) {
    let pool = pool.await;

    // Take a lock to prevent concurrent test execution
    let _guard = TEST_MUTEX.lock().await;

    // Use a unique prefix for test data
    let prefix = "PosAddTest";

    // Clean up any existing test data
    cleanup_test_records(&pool, prefix).await;

    // Create records
    let id1 = create_test_record(&pool, &format!("{}_1", prefix), false).await;
    let id2 = create_test_record(&pool, &format!("{}_2", prefix), false).await;

    // Find positions
    let pos1 = find_position_of_record(&pool, id1).await.unwrap();
    let pos2 = find_position_of_record(&pool, id2).await.unwrap();

    // Add a new record
    let id3 = create_test_record(&pool, &format!("{}_3", prefix), false).await;

    // Find its position
    let pos3 = find_position_of_record(&pool, id3).await.unwrap();

    // Verify retrieval by position
    let response3 = get_adresar_by_position(&pool, PositionRequest { position: pos3 }).await.unwrap();
    assert_eq!(response3.id, id3);

    // Verify original positions still work
    let response1 = get_adresar_by_position(&pool, PositionRequest { position: pos1 }).await.unwrap();
    assert_eq!(response1.id, id1);

    let response2 = get_adresar_by_position(&pool, PositionRequest { position: pos2 }).await.unwrap();
    assert_eq!(response2.id, id2);

    // Clean up test data
    cleanup_test_records(&pool, prefix).await;
}

/// Test handler correctly excludes deleted records
#[rstest]
#[tokio::test]
async fn test_handler_excludes_deleted_records(#[future] pool: PgPool) {
    let pool = pool.await;

    // Take a lock to prevent concurrent test execution
    let _guard = TEST_MUTEX.lock().await;

    // Use a unique prefix for test data
    let prefix = "CountTest";

    // Clean up any existing test data
    cleanup_test_records(&pool, prefix).await;

    // Create active records
    for i in 1..=3 {
        create_test_record(&pool, &format!("{}_Active_{}", prefix, i), false).await;
    }

    // Create deleted records
    for i in 1..=2 {
        create_test_record(&pool, &format!("{}_Deleted_{}", prefix, i), true).await;
    }

    // Count our test records by deleted status
    let active_test_count = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar WHERE firma LIKE $1 AND deleted = FALSE",
        format!("{}%", prefix)
    )
    .fetch_one(&pool)
    .await
    .unwrap()
    .unwrap_or(0);

    let deleted_test_count = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar WHERE firma LIKE $1 AND deleted = TRUE",
        format!("{}%", prefix)
    )
    .fetch_one(&pool)
    .await
    .unwrap()
    .unwrap_or(0);

    // Verify our test data was inserted correctly
    assert_eq!(active_test_count, 3);
    assert_eq!(deleted_test_count, 2);

    // Get the total count of active records (including existing ones)
    let total_active_count = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
    )
    .fetch_one(&pool)
    .await
    .unwrap()
    .unwrap_or(0);

    // Now call our handler and verify it returns the same count
    let response = get_adresar_count(&pool, Empty {}).await.unwrap();
    assert_eq!(response.count, total_active_count);

    // Clean up test data
    cleanup_test_records(&pool, prefix).await;
}
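The find_position_of_record helper above fetches every visible id and scans in Rust. A sketch that pushes the work into SQL instead (unchecked query form so it compiles without a live schema; the logic mirrors the helper's 1-based ordering):

// Sketch: a visible record's 1-based position among non-deleted rows
// ordered by id equals the number of non-deleted rows with id <= its own.
async fn position_of(pool: &sqlx::PgPool, id: i64) -> Result<Option<i64>, sqlx::Error> {
    let exists: bool = sqlx::query_scalar(
        "SELECT EXISTS (SELECT 1 FROM adresar WHERE deleted = FALSE AND id = $1)",
    )
    .bind(id)
    .fetch_one(pool)
    .await?;
    if !exists {
        return Ok(None); // missing or soft-deleted: no position
    }
    let position: i64 = sqlx::query_scalar(
        "SELECT COUNT(*) FROM adresar WHERE deleted = FALSE AND id <= $1",
    )
    .bind(id)
    .fetch_one(pool)
    .await?;
    Ok(Some(position))
}
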
@@ -1,284 +0,0 @@
// tests/adresar/get_adresar_count_test.rs
use rstest::{fixture, rstest};
use server::adresar::handlers::get_adresar_count;
use common::proto::multieko2::common::Empty;
use crate::common::setup_test_db;
use sqlx::PgPool;
use tonic;

// For connection pooling
#[fixture]
async fn pool() -> PgPool {
    setup_test_db().await
}

#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
    let pool = pool.await;
    pool.close().await;
    pool
}

// Create a self-contained test that runs in a transaction
// --------------------------------------------------------
// Instead of relying on table state and doing our own transaction management,
// we compare the handler's result against a direct count query and verify it behaves correctly

/// Test only that the handler returns the value from the database correctly
#[rstest]
#[tokio::test]
async fn test_handler_returns_count_from_database(#[future] pool: PgPool) {
    let pool = pool.await;

    // First, get whatever count the database currently has
    let count_query = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
    )
    .fetch_one(&pool)
    .await
    .unwrap()
    .unwrap_or(0);

    // Now call our handler and verify it returns the same count
    let response = get_adresar_count(&pool, Empty {}).await.unwrap();
    assert_eq!(response.count, count_query);
}

/// Test handler correctly excludes deleted records
#[rstest]
#[tokio::test]
async fn test_handler_excludes_deleted_records(#[future] pool: PgPool) {
    let pool = pool.await;

    // Use a transaction to isolate this test completely
    let mut tx = pool.begin().await.unwrap();

    // Count records where deleted = TRUE
    let deleted_count = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar WHERE deleted = TRUE"
    )
    .fetch_one(&mut *tx)
    .await
    .unwrap()
    .unwrap_or(0);

    // Count records where deleted = FALSE
    let active_count = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
    )
    .fetch_one(&mut *tx)
    .await
    .unwrap()
    .unwrap_or(0);

    // Count all records
    let total_count = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar"
    )
    .fetch_one(&mut *tx)
    .await
    .unwrap()
    .unwrap_or(0);

    // Verify our counts are consistent
    assert_eq!(total_count, active_count + deleted_count);

    // Verify our handler returns only the active count
    let response = get_adresar_count(&pool, Empty {}).await.unwrap();
    assert_eq!(response.count, active_count);

    // Rollback transaction
    tx.rollback().await.unwrap();
}

/// Test SQL query behavior with deleted flag
#[rstest]
#[tokio::test]
async fn test_deleted_flag_filters_records(#[future] pool: PgPool) {
    let pool = pool.await;

    // Use a transaction to isolate this test completely
    let mut tx = pool.begin().await.unwrap();

    // Insert test records inside this transaction
    // They will be automatically rolled back at the end

    sqlx::query!(
        "INSERT INTO adresar (firma, deleted) VALUES ($1, FALSE)",
        "Test Active Record"
    )
    .execute(&mut *tx)
    .await
    .unwrap();

    sqlx::query!(
        "INSERT INTO adresar (firma, deleted) VALUES ($1, TRUE)",
        "Test Deleted Record"
    )
    .execute(&mut *tx)
    .await
    .unwrap();

    // Count active records in the transaction
    let active_count = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
    )
    .fetch_one(&mut *tx)
    .await
    .unwrap()
    .unwrap_or(0);

    // Count deleted records in the transaction
    let deleted_count = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar WHERE deleted = TRUE"
    )
    .fetch_one(&mut *tx)
    .await
    .unwrap()
    .unwrap_or(0);

    // Verify at least one active and one deleted record
    assert!(active_count > 0);
    assert!(deleted_count > 0);

    // Rollback transaction
    tx.rollback().await.unwrap();
}

/// Test the handler returns an error with a closed pool
#[rstest]
#[tokio::test]
async fn test_database_error(#[future] closed_pool: PgPool) {
    let closed_pool = closed_pool.await;
    let result = get_adresar_count(&closed_pool, Empty {}).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

/// Test the behavior of setting deleted to true and back
#[rstest]
#[tokio::test]
async fn test_update_of_deleted_flag(#[future] pool: PgPool) {
    let pool = pool.await;

    // Use a transaction for complete isolation
    let mut tx = pool.begin().await.unwrap();

    // Insert a test record
    let id = sqlx::query_scalar!(
        "INSERT INTO adresar (firma, deleted) VALUES ($1, FALSE) RETURNING id",
        "Test Toggle Record"
    )
    .fetch_one(&mut *tx)
    .await
    .unwrap();

    // Count active records with this new record
    let active_count_before = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
    )
    .fetch_one(&mut *tx)
    .await
    .unwrap()
    .unwrap_or(0);

    // Mark as deleted
    sqlx::query!(
        "UPDATE adresar SET deleted = TRUE WHERE id = $1",
        id
    )
    .execute(&mut *tx)
    .await
    .unwrap();

    // Count active records after marking as deleted
    let active_count_after_delete = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
    )
    .fetch_one(&mut *tx)
    .await
    .unwrap()
    .unwrap_or(0);

    // Verify count decreased by 1
    assert_eq!(active_count_after_delete, active_count_before - 1);

    // Mark as active again
    sqlx::query!(
        "UPDATE adresar SET deleted = FALSE WHERE id = $1",
        id
    )
    .execute(&mut *tx)
    .await
    .unwrap();

    // Count active records after marking as active
    let active_count_after_restore = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
    )
    .fetch_one(&mut *tx)
    .await
    .unwrap()
    .unwrap_or(0);

    // Verify count increased back to original
    assert_eq!(active_count_after_restore, active_count_before);

    // Rollback transaction
    tx.rollback().await.unwrap();
}

/// Test edge cases of an empty table
#[rstest]
#[tokio::test]
async fn test_edge_case_empty_table(#[future] pool: PgPool) {
    let pool = pool.await;

    // Not literally testing an empty table since we can't truncate due to FK constraints
    // But we can verify the count response is never negative
    let response = get_adresar_count(&pool, Empty {}).await.unwrap();
    assert!(response.count >= 0);
}

/// Test adding a record and verifying count increases
#[rstest]
#[tokio::test]
async fn test_count_increments_after_adding_record(#[future] pool: PgPool) {
    let pool = pool.await;

    // Use a transaction for complete isolation
    let mut tx = pool.begin().await.unwrap();

    // Get initial active count inside transaction
    let initial_count = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
    )
    .fetch_one(&mut *tx)
    .await
    .unwrap()
    .unwrap_or(0);

    // Add a record inside the transaction
    sqlx::query!(
        "INSERT INTO adresar (firma, deleted) VALUES ($1, FALSE)",
        "Test Increment Record"
    )
    .execute(&mut *tx)
    .await
    .unwrap();

    // Get new count inside transaction
    let new_count = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM adresar WHERE deleted = FALSE"
    )
    .fetch_one(&mut *tx)
    .await
    .unwrap()
    .unwrap_or(0);

    // Verify count increased by exactly 1
    assert_eq!(new_count, initial_count + 1);

    // Rollback transaction
    tx.rollback().await.unwrap();
}
@@ -1,238 +0,0 @@
// tests/adresar/get_adresar_test.rs
use rstest::{fixture, rstest};
use server::adresar::handlers::get_adresar;
use common::proto::multieko2::adresar::{GetAdresarRequest, AdresarResponse};
use crate::common::setup_test_db;
use sqlx::PgPool;
use tonic;

#[fixture]
async fn pool() -> PgPool {
    setup_test_db().await
}

#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
    let pool = pool.await;
    pool.close().await;
    pool
}

#[fixture]
async fn existing_record(#[future] pool: PgPool) -> (PgPool, i64) {
    let pool = pool.await;
    let record = sqlx::query!(
        r#"
        INSERT INTO adresar (
            firma, kz, drc, ulica, psc, mesto, stat, banka, ucet,
            skladm, ico, kontakt, telefon, skladu, fax, deleted
        )
        VALUES (
            'Test Company', 'KZ', 'DRC', 'Street', '12345', 'City',
            'Country', 'Bank', 'Account', 'SkladM', 'ICO', 'Contact',
            '+421123456789', 'SkladU', 'Fax', false
        )
        RETURNING id
        "#
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    (pool, record.id)
}

#[fixture]
async fn existing_deleted_record(#[future] pool: PgPool) -> (PgPool, i64) {
    let pool = pool.await;
    let record = sqlx::query!(
        r#"
        INSERT INTO adresar (firma, deleted)
        VALUES ('Deleted Company', true)
        RETURNING id
        "#
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    (pool, record.id)
}

#[fixture]
async fn existing_record_with_nulls(#[future] pool: PgPool) -> (PgPool, i64) {
    let pool = pool.await;
    let record = sqlx::query!(
        r#"
        INSERT INTO adresar (firma)
        VALUES ('Null Fields Company')
        RETURNING id
        "#
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    (pool, record.id)
}

async fn assert_response_matches(pool: &PgPool, id: i64, response: &AdresarResponse) {
    let db_record = sqlx::query!("SELECT * FROM adresar WHERE id = $1", id)
        .fetch_one(pool)
        .await
        .unwrap();

    assert_eq!(db_record.firma, response.firma);
    assert_eq!(db_record.kz.unwrap_or_default(), response.kz);
    assert_eq!(db_record.drc.unwrap_or_default(), response.drc);
    assert_eq!(db_record.ulica.unwrap_or_default(), response.ulica);
    assert_eq!(db_record.psc.unwrap_or_default(), response.psc);
    assert_eq!(db_record.mesto.unwrap_or_default(), response.mesto);
    assert_eq!(db_record.stat.unwrap_or_default(), response.stat);
    assert_eq!(db_record.banka.unwrap_or_default(), response.banka);
    assert_eq!(db_record.ucet.unwrap_or_default(), response.ucet);
    assert_eq!(db_record.skladm.unwrap_or_default(), response.skladm);
    assert_eq!(db_record.ico.unwrap_or_default(), response.ico);
    assert_eq!(db_record.kontakt.unwrap_or_default(), response.kontakt);
    assert_eq!(db_record.telefon.unwrap_or_default(), response.telefon);
    assert_eq!(db_record.skladu.unwrap_or_default(), response.skladu);
    assert_eq!(db_record.fax.unwrap_or_default(), response.fax);
}

#[rstest]
#[tokio::test]
async fn test_get_adresar_success(
    #[future] existing_record: (PgPool, i64),
) {
    let (pool, id) = existing_record.await;
    let request = GetAdresarRequest { id };
    let response = get_adresar(&pool, request).await.unwrap();

    assert_eq!(response.id, id);
    assert_response_matches(&pool, id, &response).await;
}

#[rstest]
#[tokio::test]
async fn test_get_optional_fields_null(
    #[future] existing_record_with_nulls: (PgPool, i64),
) {
    let (pool, id) = existing_record_with_nulls.await;
    let request = GetAdresarRequest { id };
    let response = get_adresar(&pool, request).await.unwrap();

    assert_eq!(response.kz, "");
    assert_eq!(response.drc, "");
    assert_eq!(response.ulica, "");
    assert_eq!(response.psc, "");
    assert_eq!(response.mesto, "");
    assert_eq!(response.stat, "");
    assert_eq!(response.banka, "");
    assert_eq!(response.ucet, "");
    assert_eq!(response.skladm, "");
    assert_eq!(response.ico, "");
    assert_eq!(response.kontakt, "");
    assert_eq!(response.telefon, "");
    assert_eq!(response.skladu, "");
    assert_eq!(response.fax, "");
}

#[rstest]
#[tokio::test]
async fn test_get_nonexistent_id(
    #[future] pool: PgPool,
) {
    let pool = pool.await;
    let request = GetAdresarRequest { id: 9999 };
    let result = get_adresar(&pool, request).await;

    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_get_deleted_record(
    #[future] existing_deleted_record: (PgPool, i64),
) {
    let (pool, id) = existing_deleted_record.await;
    let request = GetAdresarRequest { id };
    let result = get_adresar(&pool, request).await;

    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_database_error(
    #[future] closed_pool: PgPool,
) {
    let closed_pool = closed_pool.await;
    let request = GetAdresarRequest { id: 1 };
    let result = get_adresar(&closed_pool, request).await;

    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

#[rstest]
#[tokio::test]
async fn test_get_special_characters(
    #[future] pool: PgPool,
) {
    let pool = pool.await;
    let firma = "Náměstí ČR";
    let telefon = "+420 123-456.789";
    let ulica = "Křižíkova 123";

    let record = sqlx::query!(
        r#"
        INSERT INTO adresar (firma, telefon, ulica)
        VALUES ($1, $2, $3)
        RETURNING id
        "#,
        firma,
        telefon,
        ulica
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    let request = GetAdresarRequest { id: record.id };
    let response = get_adresar(&pool, request).await.unwrap();

    assert_eq!(response.firma, firma);
    assert_eq!(response.telefon, telefon);
    assert_eq!(response.ulica, ulica);
}

#[rstest]
#[tokio::test]
async fn test_get_max_length_fields(
    #[future] pool: PgPool,
) {
    let pool = pool.await;
    let firma = "a".repeat(255);
    let telefon = "1".repeat(20);

    let record = sqlx::query!(
        r#"
        INSERT INTO adresar (firma, telefon)
        VALUES ($1, $2)
        RETURNING id
        "#,
        firma,
        telefon
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    let request = GetAdresarRequest { id: record.id };
    let response = get_adresar(&pool, request).await.unwrap();

    assert_eq!(response.firma.len(), 255);
    assert_eq!(response.telefon.len(), 20);
}
@@ -1,8 +0,0 @@
// server/tests/adresar/mod.rs

pub mod post_adresar_test;
pub mod put_adresar_test;
pub mod get_adresar_test;
pub mod get_adresar_count_test;
pub mod get_adresar_by_position_test;
pub mod delete_adresar_test;
@@ -1,222 +0,0 @@
|
||||
// tests/adresar/post_adresar_test.rs
|
||||
use rstest::{fixture, rstest};
|
||||
use server::adresar::handlers::post_adresar;
|
||||
use common::proto::multieko2::adresar::PostAdresarRequest;
|
||||
use crate::common::setup_test_db;
|
||||
use sqlx::PgPool;
|
||||
use tonic;
|
||||
|
||||
// Fixtures
|
||||
#[fixture]
|
||||
async fn pool() -> PgPool {
|
||||
setup_test_db().await
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
|
||||
let pool = pool.await;
|
||||
pool.close().await;
|
||||
pool
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
fn valid_request() -> PostAdresarRequest {
|
||||
PostAdresarRequest {
|
||||
firma: "Test Company".into(),
|
||||
kz: "KZ123".into(),
|
||||
drc: "DRC456".into(),
|
||||
ulica: "Test Street".into(),
|
||||
psc: "12345".into(),
|
||||
mesto: "Test City".into(),
|
||||
stat: "Test Country".into(),
|
||||
banka: "Test Bank".into(),
|
||||
ucet: "123456789".into(),
|
||||
skladm: "Warehouse M".into(),
|
||||
ico: "12345678".into(),
|
||||
kontakt: "John Doe".into(),
|
||||
telefon: "+421123456789".into(),
|
||||
skladu: "Warehouse U".into(),
|
||||
fax: "+421123456700".into(),
|
||||
}
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
fn minimal_request() -> PostAdresarRequest {
|
||||
PostAdresarRequest {
|
||||
firma: "Required Only".into(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
// Helper to check database state
|
||||
async fn assert_response_matches(pool: &PgPool, response: &common::proto::multieko2::adresar::AdresarResponse) {
|
||||
let db_record = sqlx::query!("SELECT * FROM adresar WHERE id = $1", response.id)
|
||||
.fetch_one(pool)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(db_record.firma, response.firma);
|
||||
assert_eq!(db_record.telefon.as_deref(), Some(response.telefon.as_str()));
|
||||
// Add assertions for other fields...
|
||||
assert!(!db_record.deleted);
|
||||
assert!(db_record.created_at.is_some());
|
||||
}
|
||||
|
||||
// Tests
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_create_adresar_success(#[future] pool: PgPool, valid_request: PostAdresarRequest) {
|
||||
let pool = pool.await;
|
||||
let response = post_adresar(&pool, valid_request).await.unwrap();
|
||||
|
||||
assert!(response.id > 0);
|
||||
assert_eq!(response.firma, "Test Company");
|
||||
assert_response_matches(&pool, &response).await;
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_create_adresar_whitespace_trimming(
|
||||
#[future] pool: PgPool,
|
||||
valid_request: PostAdresarRequest,
|
||||
) {
|
||||
let pool = pool.await;
|
||||
let mut request = valid_request;
|
||||
request.firma = " Test Company ".into();
|
||||
request.telefon = " +421123456789 ".into();
|
||||
request.ulica = " Test Street ".into();
|
||||
|
||||
let response = post_adresar(&pool, request).await.unwrap();
|
||||
assert_eq!(response.firma, "Test Company");
|
||||
assert_eq!(response.telefon, "+421123456789");
|
||||
assert_eq!(response.ulica, "Test Street");
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_create_adresar_empty_optional_fields(
|
||||
#[future] pool: PgPool,
|
||||
valid_request: PostAdresarRequest,
|
||||
) {
|
||||
let pool = pool.await;
|
||||
let mut request = valid_request;
|
||||
request.telefon = " ".into();
|
||||
|
||||
let response = post_adresar(&pool, request).await.unwrap();
|
||||
    let db_telefon = sqlx::query_scalar!("SELECT telefon FROM adresar WHERE id = $1", response.id)
        .fetch_one(&pool)
        .await
        .unwrap();

    assert!(db_telefon.is_none());
    assert_eq!(response.telefon, "");
}

#[rstest]
#[tokio::test]
async fn test_create_adresar_invalid_firma(
    #[future] pool: PgPool,
    valid_request: PostAdresarRequest,
) {
    let pool = pool.await;
    let mut request = valid_request;
    request.firma = " ".into();
    let result = post_adresar(&pool, request).await;

    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
}

#[rstest]
#[tokio::test]
async fn test_create_adresar_minimal_valid_request(
    #[future] pool: PgPool,
    minimal_request: PostAdresarRequest,
) {
    let pool = pool.await;
    let response = post_adresar(&pool, minimal_request).await.unwrap();

    assert!(response.id > 0);
    assert_eq!(response.firma, "Required Only");
    assert!(response.kz.is_empty());
    assert!(response.drc.is_empty());
}

#[rstest]
#[tokio::test]
async fn test_create_adresar_empty_firma(
    #[future] pool: PgPool,
    minimal_request: PostAdresarRequest,
) {
    let pool = pool.await;
    let mut request = minimal_request;
    request.firma = "".into();
    let result = post_adresar(&pool, request).await;

    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
}

#[rstest]
#[tokio::test]
async fn test_create_adresar_database_error(
    #[future] closed_pool: PgPool,
    minimal_request: PostAdresarRequest,
) {
    let closed_pool = closed_pool.await;
    let result = post_adresar(&closed_pool, minimal_request).await;

    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

#[rstest]
#[tokio::test]
async fn test_create_adresar_field_length_limits(
    #[future] pool: PgPool,
    valid_request: PostAdresarRequest,
) {
    let pool = pool.await;
    let mut request = valid_request;
    request.firma = "a".repeat(255);
    request.telefon = "1".repeat(20);

    let response = post_adresar(&pool, request).await.unwrap();
    assert_eq!(response.firma.len(), 255);
    assert_eq!(response.telefon.len(), 20);
}

#[rstest]
#[tokio::test]
async fn test_create_adresar_special_characters(
    #[future] pool: PgPool,
    valid_request: PostAdresarRequest,
) {
    let pool = pool.await;
    let mut request = valid_request;
    request.telefon = "+420 123-456.789".into();
    request.ulica = "Náměstí 28. října".into();

    let response = post_adresar(&pool, request.clone()).await.unwrap();
    assert_eq!(response.telefon, request.telefon);
    assert_eq!(response.ulica, request.ulica);
}

#[rstest]
#[tokio::test]
async fn test_create_adresar_optional_fields_null_vs_empty(
    #[future] pool: PgPool,
    valid_request: PostAdresarRequest,
) {
    let pool = pool.await;
    let mut request = valid_request;
    request.telefon = String::new();
    let response = post_adresar(&pool, request).await.unwrap();

    let db_telefon = sqlx::query_scalar!("SELECT telefon FROM adresar WHERE id = $1", response.id)
        .fetch_one(&pool)
        .await
        .unwrap();

    assert!(db_telefon.is_none());
}
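
// Reading of the two NULL-vs-empty tests above (a convention the asserts pin down,
// not new behavior added here): an empty string in the request is persisted as SQL
// NULL, and the handler maps NULL back to "" in the response, so gRPC clients never
// see nulls for optional text fields.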
@@ -1,266 +0,0 @@
// tests/adresar/put_adresar_test.rs
use rstest::{fixture, rstest};
use server::adresar::handlers::put_adresar;
use common::proto::multieko2::adresar::PutAdresarRequest;
use crate::common::setup_test_db;
use sqlx::PgPool;
use tonic;

// Fixtures
#[fixture]
async fn pool() -> PgPool {
    setup_test_db().await
}

#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
    let pool = pool.await;
    pool.close().await;
    pool
}

#[fixture]
async fn existing_record(#[future] pool: PgPool) -> (PgPool, i64) {
    let pool = pool.await;

    // Create a test record in the database
    let record = sqlx::query!(
        r#"
        INSERT INTO adresar (
            firma, kz, drc, ulica, psc, mesto, stat, banka, ucet,
            skladm, ico, kontakt, telefon, skladu, fax, deleted
        )
        VALUES (
            'Original Company', 'Original KZ', 'Original DRC', 'Original Street',
            '12345', 'Original City', 'Original Country', 'Original Bank',
            'Original Account', 'Original SkladM', 'Original ICO',
            'Original Contact', '+421123456789', 'Original SkladU', 'Original Fax',
            false
        )
        RETURNING id
        "#
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    (pool, record.id)
}

#[fixture]
fn valid_request_template() -> PutAdresarRequest {
    PutAdresarRequest {
        id: 0, // This will be replaced in each test
        firma: "Updated Company".into(),
        kz: "Updated KZ".into(),
        drc: "Updated DRC".into(),
        ulica: "Updated Street".into(),
        psc: "67890".into(),
        mesto: "Updated City".into(),
        stat: "Updated Country".into(),
        banka: "Updated Bank".into(),
        ucet: "987654321".into(),
        skladm: "Updated SkladM".into(),
        ico: "87654321".into(),
        kontakt: "Jane Doe".into(),
        telefon: "+421987654321".into(),
        skladu: "Updated SkladU".into(),
        fax: "+421987654300".into(),
    }
}

// Helper to check database state
async fn assert_response_matches(pool: &PgPool, id: i64, response: &common::proto::multieko2::adresar::AdresarResponse) {
    let db_record = sqlx::query!("SELECT * FROM adresar WHERE id = $1", id)
        .fetch_one(pool)
        .await
        .unwrap();

    assert_eq!(db_record.firma, response.firma);
    assert_eq!(db_record.kz.unwrap_or_default(), response.kz);
    assert_eq!(db_record.drc.unwrap_or_default(), response.drc);
    assert_eq!(db_record.ulica.unwrap_or_default(), response.ulica);
    assert_eq!(db_record.psc.unwrap_or_default(), response.psc);
    assert_eq!(db_record.mesto.unwrap_or_default(), response.mesto);
    assert_eq!(db_record.stat.unwrap_or_default(), response.stat);
    assert_eq!(db_record.banka.unwrap_or_default(), response.banka);
    assert_eq!(db_record.ucet.unwrap_or_default(), response.ucet);
    assert_eq!(db_record.skladm.unwrap_or_default(), response.skladm);
    assert_eq!(db_record.ico.unwrap_or_default(), response.ico);
    assert_eq!(db_record.kontakt.unwrap_or_default(), response.kontakt);
    assert_eq!(db_record.telefon.unwrap_or_default(), response.telefon);
    assert_eq!(db_record.skladu.unwrap_or_default(), response.skladu);
    assert_eq!(db_record.fax.unwrap_or_default(), response.fax);
    assert!(!db_record.deleted, "Record should not be deleted");
}

// Tests
#[rstest]
#[tokio::test]
async fn test_update_adresar_success(#[future] existing_record: (PgPool, i64), valid_request_template: PutAdresarRequest) {
    let (pool, id) = existing_record.await;

    let mut request = valid_request_template;
    request.id = id;

    let response = put_adresar(&pool, request).await.unwrap();

    assert_eq!(response.id, id);
    assert_response_matches(&pool, id, &response).await;
}

#[rstest]
#[tokio::test]
async fn test_update_whitespace_fields(#[future] existing_record: (PgPool, i64), valid_request_template: PutAdresarRequest) {
    let (pool, id) = existing_record.await;

    let mut request = valid_request_template;
    request.id = id;
    request.firma = " Updated Company ".into();
    request.telefon = " +421987654321 ".into();

    let response = put_adresar(&pool, request).await.unwrap();

    // Verify trimmed values in response
    assert_eq!(response.firma, "Updated Company");
    assert_eq!(response.telefon, "+421987654321");

    // Verify stored values in the database
    let db_record = sqlx::query!("SELECT firma, telefon FROM adresar WHERE id = $1", id)
        .fetch_one(&pool)
        .await
        .unwrap();

    assert_eq!(db_record.firma, "Updated Company"); // Trimmed
    assert_eq!(db_record.telefon.unwrap(), "+421987654321"); // Trimmed
}

#[rstest]
#[tokio::test]
async fn test_update_empty_required_field(#[future] existing_record: (PgPool, i64), valid_request_template: PutAdresarRequest) {
    let (pool, id) = existing_record.await;

    let mut request = valid_request_template;
    request.id = id;
    request.firma = "".into();

    let result = put_adresar(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
}

#[rstest]
#[tokio::test]
async fn test_update_nonexistent_id(#[future] pool: PgPool) {
    let pool = pool.await;

    let request = PutAdresarRequest {
        id: 9999, // Non-existent ID
        firma: "Updated Company".into(),
        kz: "Updated KZ".into(),
        drc: "Updated DRC".into(),
        ulica: "Updated Street".into(),
        psc: "67890".into(),
        mesto: "Updated City".into(),
        stat: "Updated Country".into(),
        banka: "Updated Bank".into(),
        ucet: "987654321".into(),
        skladm: "Updated SkladM".into(),
        ico: "87654321".into(),
        kontakt: "Jane Doe".into(),
        telefon: "+421987654321".into(),
        skladu: "Updated SkladU".into(),
        fax: "+421987654300".into(),
    };

    let result = put_adresar(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

#[rstest]
#[tokio::test]
async fn test_update_deleted_record(#[future] existing_record: (PgPool, i64), valid_request_template: PutAdresarRequest) {
    let (pool, id) = existing_record.await;

    // Mark the record as deleted
    sqlx::query!("UPDATE adresar SET deleted = true WHERE id = $1", id)
        .execute(&pool)
        .await
        .unwrap();

    let mut request = valid_request_template;
    request.id = id;

    let result = put_adresar(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

#[rstest]
#[tokio::test]
async fn test_clear_optional_fields(#[future] existing_record: (PgPool, i64), valid_request_template: PutAdresarRequest) {
    let (pool, id) = existing_record.await;

    let mut request = valid_request_template;
    request.id = id;
    request.telefon = String::new();
    request.ulica = String::new();

    let response = put_adresar(&pool, request).await.unwrap();

    // Check response contains empty strings
    assert!(response.telefon.is_empty());
    assert!(response.ulica.is_empty());

    // Check database contains NULL
    let db_record = sqlx::query!("SELECT telefon, ulica FROM adresar WHERE id = $1", id)
        .fetch_one(&pool)
        .await
        .unwrap();

    assert!(db_record.telefon.is_none());
    assert!(db_record.ulica.is_none());
}

#[rstest]
#[tokio::test]
async fn test_max_length_fields(#[future] existing_record: (PgPool, i64), valid_request_template: PutAdresarRequest) {
    let (pool, id) = existing_record.await;

    let mut request = valid_request_template;
    request.id = id;
    request.firma = "a".repeat(255);
    request.telefon = "1".repeat(20);

    let _response = put_adresar(&pool, request).await.unwrap();

    let db_record = sqlx::query!("SELECT firma, telefon FROM adresar WHERE id = $1", id)
        .fetch_one(&pool)
        .await
        .unwrap();

    assert_eq!(db_record.firma.len(), 255);
    assert_eq!(db_record.telefon.unwrap().len(), 20);
}

#[rstest]
#[tokio::test]
async fn test_special_characters(#[future] existing_record: (PgPool, i64), valid_request_template: PutAdresarRequest) {
    let (pool, id) = existing_record.await;

    let mut request = valid_request_template;
    request.id = id;
    request.ulica = "Náměstí 28. října".into();
    request.telefon = "+420 123-456.789".into();

    let _response = put_adresar(&pool, request).await.unwrap();

    let db_record = sqlx::query!("SELECT ulica, telefon FROM adresar WHERE id = $1", id)
        .fetch_one(&pool)
        .await
        .unwrap();

    assert_eq!(db_record.ulica.unwrap(), "Náměstí 28. října");
    assert_eq!(db_record.telefon.unwrap(), "+420 123-456.789");
}
@@ -1,56 +1,88 @@
// tests/common/mod.rs
use dotenvy;
use sqlx::{postgres::PgPoolOptions, PgPool};

use dotenvy::dotenv;
use rand::distr::Alphanumeric;
use rand::Rng;
use sqlx::{postgres::PgPoolOptions, Connection, Executor, PgConnection, PgPool};
use std::env;
use std::path::Path;

pub async fn setup_test_db() -> PgPool {
    // Get path to server directory
    let manifest_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR must be set");
    let env_path = Path::new(&manifest_dir).join(".env_test");
fn get_database_url() -> String {
    dotenv().ok();
    env::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set")
}

    // Load environment variables
    dotenvy::from_path(env_path).ok();
async fn get_root_connection() -> PgConnection {
    PgConnection::connect(&get_database_url())
        .await
        .expect("Failed to create root connection to test database")
}

/// The primary test setup function.
/// Creates a new, unique schema and returns a connection pool that is scoped to that schema.
/// This is the key to test isolation.
pub async fn setup_isolated_db() -> PgPool {
    let mut root_conn = get_root_connection().await;

    // Make schema names more unique - include timestamp + random
    let schema_name = format!(
        "test_{}_{}",
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_nanos(),
        rand::rng()
            .sample_iter(&Alphanumeric)
            .take(8)
            .map(char::from)
            .collect::<String>()
            .to_lowercase()
    );

    root_conn
        .execute(format!("CREATE SCHEMA \"{}\"", schema_name).as_str())
        .await
        .unwrap_or_else(|_| panic!("Failed to create schema: {}", schema_name));

    root_conn
        .execute("CREATE SCHEMA IF NOT EXISTS \"default\"")
        .await
        .unwrap();

    // Create connection pool
    let database_url = env::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set");
    let pool = PgPoolOptions::new()
        .max_connections(5)
        .connect(&database_url)
        .after_connect(move |conn, _meta| {
            let schema_name = schema_name.clone();
            Box::pin(async move {
                conn.execute(format!("SET search_path TO \"{}\", \"default\", \"public\"", schema_name).as_str())
                    .await?;
                Ok(())
            })
        })
        .connect(&get_database_url())
        .await
        .expect("Failed to create pool");
        .expect("Failed to create isolated pool");

    // Run migrations
    sqlx::migrate!()
        .run(&pool)
        .await
        .expect("Migrations failed");
        .expect("Migrations failed in isolated schema");

    // Insert default profile if it doesn't exist
    let profile = sqlx::query!(
    sqlx::query!(
        r#"
        INSERT INTO profiles (name)
        INSERT INTO schemas (name)
        VALUES ('default')
        ON CONFLICT (name) DO NOTHING
        RETURNING id
        "#
    )
    .fetch_optional(&pool)
    .execute(&pool)
    .await
    .expect("Failed to insert test profile");

    let profile_id = if let Some(profile) = profile {
        profile.id
    } else {
        // If the profile already exists, fetch its ID
        sqlx::query!(
            "SELECT id FROM profiles WHERE name = 'default'"
        )
        .fetch_one(&pool)
        .await
        .expect("Failed to fetch default profile ID")
        .id
    };
    .expect("Failed to insert test profile in isolated schema");

    pool
}

/// Compatibility alias for the old function name
/// This allows existing tests to continue working without modification
pub async fn setup_test_db() -> PgPool {
    setup_isolated_db().await
}
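
// Usage sketch (hypothetical test, not part of this change): any test that takes a
// `pool` fixture built on setup_isolated_db() now runs against its own schema, so
// tests can execute in parallel without clashing on table data:
//
//     #[rstest]
//     #[tokio::test]
//     async fn my_isolated_test(#[future] pool: PgPool) {
//         let pool = pool.await; // search_path already points at a fresh test_* schema
//         // ... exercise handlers against this pool ...
//     }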
@@ -1,5 +1,4 @@
// tests/mod.rs
pub mod adresar;
pub mod tables_data;
pub mod common;

// pub mod table_definition;
3
server/tests/table_definition/mod.rs
Normal file
@@ -0,0 +1,3 @@
// server/tests/table_definition/mod.rs

pub mod post_table_definition_test;
601
server/tests/table_definition/post_table_definition_test.rs
Normal file
@@ -0,0 +1,601 @@
// tests/table_definition/post_table_definition_test.rs

// Keep all your normal use statements
use common::proto::multieko2::table_definition::{
    ColumnDefinition, PostTableDefinitionRequest, TableLink,
};
use rstest::{fixture, rstest};
use server::table_definition::handlers::post_table_definition;
use sqlx::{postgres::PgPoolOptions, Connection, Executor, PgConnection, PgPool, Row}; // Add PgConnection etc.
use tonic::Code;
// Additional use statements for the schema-isolation logic
use rand::distr::Alphanumeric;
use rand::Rng;
use std::env;
use dotenvy;
use std::path::Path;

async fn setup_isolated_gen_schema_db() -> PgPool {
    let manifest_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR must be set");
    let env_path = Path::new(&manifest_dir).join(".env_test");
    dotenvy::from_path(env_path).ok();

    let database_url = env::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set");

    let unique_schema_name = format!(
        "test_{}",
        rand::rng()
            .sample_iter(&Alphanumeric)
            .take(12)
            .map(char::from)
            .collect::<String>()
    );

    let mut root_conn = PgConnection::connect(&database_url).await.unwrap();

    // Create the test schema
    root_conn
        .execute(format!("CREATE SCHEMA \"{}\"", unique_schema_name).as_str())
        .await
        .unwrap();

    // Create schemas A and B for cross-profile tests
    root_conn
        .execute("CREATE SCHEMA IF NOT EXISTS \"A\"")
        .await
        .unwrap();

    root_conn
        .execute("CREATE SCHEMA IF NOT EXISTS \"B\"")
        .await
        .unwrap();

    // IMPORTANT: Create the "default" schema if it doesn't exist
    root_conn
        .execute("CREATE SCHEMA IF NOT EXISTS \"default\"")
        .await
        .unwrap();

    let pool = PgPoolOptions::new()
        .max_connections(5)
        .after_connect(move |conn, _meta| {
            let schema = unique_schema_name.clone();
            Box::pin(async move {
                // Set search path to include test schema, default, A, B, and public
                conn.execute(format!("SET search_path = '{}', 'default', 'A', 'B', 'public'", schema).as_str())
                    .await?;
                Ok(())
            })
        })
        .connect(&database_url)
        .await
        .expect("Failed to create isolated pool");

    sqlx::migrate!()
        .run(&pool)
        .await
        .expect("Migrations failed in isolated schema");

    // Insert into the schemas table - use INSERT ... ON CONFLICT to avoid duplicates
    sqlx::query!(
        "INSERT INTO schemas (name) VALUES ('default'), ('A'), ('B') ON CONFLICT (name) DO NOTHING"
    )
    .execute(&pool)
    .await
    .expect("Failed to insert test schemas");

    pool
}

// ========= Fixtures for THIS FILE ONLY =========

#[fixture]
async fn pool() -> PgPool {
    // This fixture calls the LOCAL, SPECIALIZED setup function.
    setup_isolated_gen_schema_db().await
}

#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
    let pool = pool.await;
    pool.close().await;
    pool
}

/// This fixture is also isolated, because it depends on the `pool`
/// fixture above. No changes needed here.
#[fixture]
async fn pool_with_preexisting_table(#[future] pool: PgPool) -> PgPool {
    let pool = pool.await;
    let create_customers_req = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "customers".into(),
        columns: vec![ColumnDefinition {
            name: "customer_name".into(),
            field_type: "text".into(),
        }],
        indexes: vec!["customer_name".into()],
        links: vec![],
    };
    post_table_definition(&pool, create_customers_req)
        .await
        .expect("Failed to create pre-requisite 'customers' table");
    pool
}


// ========= Helper Functions =========

/// Checks the PostgreSQL information_schema to verify a table and its columns exist.
async fn assert_table_structure_is_correct(
    pool: &PgPool,
    schema_name: &str, // ADD: schema parameter
    table_name: &str,
    expected_cols: &[(&str, &str)],
) {
    let table_exists = sqlx::query_scalar::<_, bool>(
        "SELECT EXISTS (
            SELECT FROM information_schema.tables
            WHERE table_schema = $1 AND table_name = $2
        )",
    )
    .bind(schema_name) // CHANGE: use dynamic schema instead of 'gen'
    .bind(table_name)
    .fetch_one(pool)
    .await
    .unwrap();

    assert!(table_exists, "Table '{}.{}' was not created", schema_name, table_name); // CHANGE: dynamic schema in error message

    for (col_name, col_type) in expected_cols {
        let record = sqlx::query(
            "SELECT data_type FROM information_schema.columns
             WHERE table_schema = $1 AND table_name = $2 AND column_name = $3",
        )
        .bind(schema_name) // CHANGE: use dynamic schema instead of 'gen'
        .bind(table_name)
        .bind(col_name)
        .fetch_optional(pool)
        .await
        .unwrap();

        let found_type = record
            .unwrap_or_else(|| panic!("Column '{}' not found in table '{}.{}'", col_name, schema_name, table_name))
            .get::<String, _>("data_type"); // CHANGE: dynamic schema in error message

        // Handle type mappings, e.g., TEXT -> character varying, NUMERIC -> numeric
        let normalized_found_type = found_type.to_lowercase();
        let normalized_expected_type = col_type.to_lowercase();

        assert!(
            normalized_found_type.contains(&normalized_expected_type),
            "Column '{}' has wrong type. Expected: {}, Found: {}",
            col_name,
            col_type,
            found_type
        );
    }
}
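
// The substring comparison above is intentionally lenient: information_schema
// reports canonical type names ("timestamp with time zone", "numeric"), so matching
// on a contained fragment lets callers pass either the full canonical name or a
// shorter fragment of it without hard-coding every alias.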

// ========= Tests =========

#[rstest]
#[tokio::test]
async fn test_create_table_success(#[future] pool: PgPool) {
    // Arrange
    let pool = pool.await;
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "invoices".into(),
        columns: vec![
            ColumnDefinition {
                name: "invoice_number".into(),
                field_type: "text".into(),
            },
            ColumnDefinition {
                name: "amount".into(),
                field_type: "decimal(10, 2)".into(),
            },
        ],
        indexes: vec!["invoice_number".into()],
        links: vec![],
    };

    // Act
    let response = post_table_definition(&pool, request).await.unwrap();

    // Assert
    assert!(response.success);
    assert!(response.sql.contains("CREATE TABLE \"default\".\"invoices\""));
    assert!(response.sql.contains("\"invoice_number\" TEXT"));
    assert!(response.sql.contains("\"amount\" NUMERIC(10, 2)"));
    assert!(response
        .sql
        .contains("CREATE INDEX \"idx_invoices_invoice_number\""));

    // Verify actual DB state - FIXED: Added schema parameter
    assert_table_structure_is_correct(
        &pool,
        "default", // Schema name parameter
        "invoices",
        &[
            ("id", "bigint"),
            ("deleted", "boolean"),
            ("invoice_number", "text"),
            ("amount", "numeric"),
            ("created_at", "timestamp with time zone"),
        ],
    )
    .await;
}
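
// Pieced together from the fragments asserted above, the generated DDL is roughly
// the following (illustrative reconstruction, not captured verbatim from the handler):
//
//     CREATE TABLE "default"."invoices" (
//         id BIGSERIAL PRIMARY KEY,
//         deleted BOOLEAN NOT NULL,
//         "invoice_number" TEXT,
//         "amount" NUMERIC(10, 2),
//         created_at TIMESTAMPTZ
//     );
//     CREATE INDEX "idx_invoices_invoice_number" ON ...;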

#[rstest]
#[tokio::test]
async fn test_fail_on_invalid_decimal_format(#[future] pool: PgPool) {
    let pool = pool.await;
    let invalid_types = vec![
        "decimal(0,0)",  // precision too small
        "decimal(5,10)", // scale > precision
        "decimal(10)",   // missing scale
        "decimal(a,b)",  // non-numeric
    ];

    for invalid_type in invalid_types {
        let request = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: format!("table_{}", invalid_type),
            columns: vec![ColumnDefinition {
                name: "amount".into(),
                field_type: invalid_type.into(),
            }],
            ..Default::default()
        };

        let result = post_table_definition(&pool, request).await;
        assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
    }
}
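
// Taken together, the cases above imply the accepted decimal grammar: decimal(p, s)
// with both arguments numeric, p >= 1, and 0 <= s <= p; anything else yields
// InvalidArgument.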

#[rstest]
#[tokio::test]
async fn test_create_table_with_link(
    #[future] pool_with_preexisting_table: PgPool,
) {
    // Arrange
    let pool = pool_with_preexisting_table.await;
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "orders".into(),
        columns: vec![],
        indexes: vec![],
        links: vec![TableLink { // CORRECTED
            linked_table_name: "customers".into(),
            required: true,
        }],
    };

    // Act
    let response = post_table_definition(&pool, request).await.unwrap();

    // Assert
    assert!(response.success);
    assert!(response.sql.contains(
        "\"customers_id\" BIGINT NOT NULL REFERENCES \"default\".\"customers\"(id)"
    ));
    assert!(response
        .sql
        .contains("CREATE INDEX \"idx_orders_customers_fk\""));

    // Verify actual DB state - FIXED: Added schema parameter
    assert_table_structure_is_correct(
        &pool,
        "default", // Schema name parameter
        "orders",
        &[("customers_id", "bigint")],
    )
    .await;
}

#[rstest]
#[tokio::test]
async fn test_fail_on_duplicate_table_name(#[future] pool: PgPool) {
    // Arrange
    let pool = pool.await;
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "reused_name".into(),
        ..Default::default()
    };
    // Create it once
    post_table_definition(&pool, request.clone()).await.unwrap();

    // Act: Try to create it again
    let result = post_table_definition(&pool, request).await;

    // Assert
    let err = result.unwrap_err();
    assert_eq!(err.code(), Code::AlreadyExists);
    assert_eq!(err.message(), "Table already exists in this profile");
}

#[rstest]
#[tokio::test]
async fn test_fail_on_invalid_table_name(#[future] pool: PgPool) {
    let pool = pool.await;
    let mut request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "ends_with_id".into(), // Invalid name
        ..Default::default()
    };

    let result = post_table_definition(&pool, request.clone()).await;
    assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);

    request.table_name = "deleted".into(); // Reserved name
    let result = post_table_definition(&pool, request.clone()).await;
    assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
}

#[rstest]
#[tokio::test]
async fn test_fail_on_invalid_column_type(#[future] pool: PgPool) {
    // Arrange
    let pool = pool.await;
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "bad_col_type".into(),
        columns: vec![ColumnDefinition {
            name: "some_col".into(),
            field_type: "super_string_9000".into(), // Invalid type
        }],
        ..Default::default()
    };

    // Act
    let result = post_table_definition(&pool, request).await;

    // Assert
    let err = result.unwrap_err();
    assert_eq!(err.code(), Code::InvalidArgument);
    assert!(err.message().contains("Invalid field type"));
}

#[rstest]
#[tokio::test]
async fn test_fail_on_index_for_nonexistent_column(#[future] pool: PgPool) {
    // Arrange
    let pool = pool.await;
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "bad_index".into(),
        columns: vec![ColumnDefinition {
            name: "real_column".into(),
            field_type: "text".into(),
        }],
        indexes: vec!["fake_column".into()], // Index on a column not in the list
        ..Default::default()
    };

    let result = post_table_definition(&pool, request).await;
    assert!(result.is_err());
    if let Err(err) = result {
        assert!(err.message().contains("Index column 'fake_column' not found"));
    }
}

#[rstest]
#[tokio::test]
async fn test_fail_on_link_to_nonexistent_table(#[future] pool: PgPool) {
    // Arrange
    let pool = pool.await;
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "bad_link".into(),
        links: vec![TableLink { // CORRECTED
            linked_table_name: "i_do_not_exist".into(),
            required: false,
        }],
        ..Default::default()
    };

    // Act
    let result = post_table_definition(&pool, request).await;

    // Assert
    let err = result.unwrap_err();
    assert_eq!(err.code(), Code::NotFound);
    assert!(err.message().contains("Linked table i_do_not_exist not found"));
}

#[rstest]
#[tokio::test]
async fn test_database_error_on_closed_pool(
    #[future] closed_pool: PgPool,
) {
    // Arrange
    let pool = closed_pool.await;
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "wont_be_created".into(),
        ..Default::default()
    };

    // Act
    let result = post_table_definition(&pool, request).await;

    // Assert
    assert_eq!(result.unwrap_err().code(), Code::Internal);
}

// Tests that minimal, uppercase, and whitespace-padded decimal specs
// are accepted and correctly mapped to NUMERIC(p, s).
#[rstest]
#[tokio::test]
async fn test_valid_decimal_variants(#[future] pool: PgPool) {
    let pool = pool.await;
    let cases = vec![
        ("decimal(1,1)", "NUMERIC(1, 1)"),
        ("decimal(1,0)", "NUMERIC(1, 0)"),
        ("DECIMAL(5,2)", "NUMERIC(5, 2)"),
        ("decimal( 5 , 2 )", "NUMERIC(5, 2)"),
    ];
    for (i, (typ, expect)) in cases.into_iter().enumerate() {
        let request = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: format!("dec_valid_{}", i),
            columns: vec![ColumnDefinition {
                name: "amount".into(),
                field_type: typ.into(),
            }],
            ..Default::default()
        };
        let resp = post_table_definition(&pool, request).await.unwrap();
        assert!(resp.success, "{}", typ);
        assert!(
            resp.sql.contains(expect),
            "expected `{}` to map to {}, got `{}`",
            typ,
            expect,
            resp.sql
        );
    }
}

// Tests that malformed decimal inputs are rejected with InvalidArgument.
#[rstest]
#[tokio::test]
async fn test_fail_on_malformed_decimal_inputs(#[future] pool: PgPool) {
    let pool = pool.await;
    let bad = vec!["decimal", "decimal()", "decimal(5,)", "decimal(,2)", "decimal(, )"];
    for (i, typ) in bad.into_iter().enumerate() {
        let request = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: format!("dec_bad_{}", i),
            columns: vec![ColumnDefinition {
                name: "amt".into(),
                field_type: typ.into(),
            }],
            ..Default::default()
        };
        let err = post_table_definition(&pool, request).await.unwrap_err();
        assert_eq!(err.code(), Code::InvalidArgument, "{}", typ);
    }
}

// Tests that obviously invalid column identifiers are rejected
// (start with digit/underscore, contain space or hyphen, or are empty).
#[rstest]
#[tokio::test]
async fn test_fail_on_invalid_column_names(#[future] pool: PgPool) {
    let pool = pool.await;
    let bad_names = vec!["1col", "_col", "col name", "col-name", ""];
    for name in bad_names {
        let request = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: "tbl_invalid_cols".into(),
            columns: vec![ColumnDefinition {
                name: name.into(),
                field_type: "text".into(),
            }],
            ..Default::default()
        };
        let err = post_table_definition(&pool, request).await.unwrap_err();
        assert_eq!(err.code(), Code::InvalidArgument, "{}", name);
    }
}

// Tests that a user-supplied column ending in "_id" is rejected
// to avoid collision with system-generated FKs.
#[rstest]
#[tokio::test]
async fn test_fail_on_column_name_suffix_id(#[future] pool: PgPool) {
    let pool = pool.await;
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "valid_table".into(), // FIXED: Use a valid table name
        columns: vec![ColumnDefinition {
            name: "invalid_column_id".into(), // FIXED: Test an invalid COLUMN name
            field_type: "text".into(),
        }],
        ..Default::default()
    };
    let result = post_table_definition(&pool, request).await;
    assert!(result.is_err());
    if let Err(status) = result {
        // UPDATED: Should mention the column, not the table
        assert!(status.message().contains("Column name") &&
            status.message().contains("end with '_id'"));
    }
}

#[rstest]
#[tokio::test]
async fn test_invalid_characters_are_rejected(#[future] pool: PgPool) {
    // RENAMED: was test_name_sanitization
    let pool = pool.await;
    let req = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "My-Table!".into(), // Invalid characters
        columns: vec![ColumnDefinition {
            name: "col".into(),
            field_type: "text".into(),
        }],
        ..Default::default()
    };
    // CHANGED: Now expects an error instead of sanitization
    let result = post_table_definition(&pool, req).await;
    assert!(result.is_err());
    if let Err(status) = result {
        assert_eq!(status.code(), tonic::Code::InvalidArgument);
        assert!(status.message().contains("Table name contains invalid characters"));
    }
}

#[rstest]
#[tokio::test]
async fn test_unicode_characters_are_rejected(#[future] pool: PgPool) {
    // RENAMED: was test_sanitization_of_unicode_and_special_chars
    let pool = pool.await;
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "produits_😂".into(), // Invalid unicode
        columns: vec![ColumnDefinition {
            name: "col_normal".into(), // Valid name
            field_type: "text".into(),
        }],
        ..Default::default()
    };
    // CHANGED: Now expects an error instead of sanitization
    let result = post_table_definition(&pool, request).await;
    assert!(result.is_err());
    if let Err(status) = result {
        assert_eq!(status.code(), tonic::Code::InvalidArgument);
        assert!(status.message().contains("Table name contains invalid characters"));
    }
}

#[rstest]
#[tokio::test]
async fn test_sql_injection_attempts_are_rejected(#[future] pool: PgPool) {
    let pool = pool.await;
    let req = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "users; DROP TABLE users;".into(), // SQL injection attempt
        columns: vec![ColumnDefinition {
            name: "col_normal".into(), // Valid name
            field_type: "text".into(),
        }],
        ..Default::default()
    };
    // CHANGED: Now expects an error instead of sanitization
    let result = post_table_definition(&pool, req).await;
    assert!(result.is_err());
    if let Err(status) = result {
        assert_eq!(status.code(), tonic::Code::InvalidArgument);
        assert!(status.message().contains("Table name contains invalid characters"));
    }
}

include!("post_table_definition_test2.rs");
include!("post_table_definition_test3.rs");
include!("post_table_definition_test4.rs");
include!("post_table_definition_test5.rs");
include!("post_table_definition_test6.rs");
510
server/tests/table_definition/post_table_definition_test2.rs
Normal file
@@ -0,0 +1,510 @@
// ============================================================================
// Additional edge-case tests for PostTableDefinition
// ============================================================================

// 1) Field-type mapping for every predefined key, in various casing.
#[rstest]
#[tokio::test]
async fn test_field_type_mapping_various_casing(#[future] pool: PgPool) {
    let pool = pool.await;
    let cases = vec![
        ("text", "TEXT", "text"),
        ("TEXT", "TEXT", "text"),
        ("TeXt", "TEXT", "text"),
        ("string", "TEXT", "text"),
        ("boolean", "BOOLEAN", "boolean"),
        ("Boolean", "BOOLEAN", "boolean"),
        ("timestamp", "TIMESTAMPTZ", "timestamp with time zone"),
        ("time", "TIMESTAMPTZ", "timestamp with time zone"),
        ("money", "NUMERIC(14, 4)", "numeric"),
        ("integer", "INTEGER", "integer"),
        ("date", "DATE", "date"),
    ];
    for (i, &(input, expected_sql, expected_db)) in cases.iter().enumerate() {
        let tbl = format!("ftm_{}", i);
        let req = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: tbl.clone(),
            columns: vec![ColumnDefinition {
                name: "col".into(),
                field_type: input.into(),
            }],
            ..Default::default()
        };
        let resp = post_table_definition(&pool, req).await.unwrap();
        assert!(
            resp.sql.contains(&format!("\"col\" {}", expected_sql)),
            "field‐type {:?} did not map to {} in `{}`",
            input,
            expected_sql,
            resp.sql
        );
        assert_table_structure_is_correct(
            &pool,
            "default", // FIXED: Added schema parameter
            &tbl,
            &[
                ("id", "bigint"),
                ("deleted", "boolean"),
                ("col", expected_db),
                ("created_at", "timestamp with time zone"),
            ],
        )
        .await;
    }
}

// 3) Invalid index names must be rejected.
#[rstest]
#[tokio::test]
async fn test_fail_on_invalid_index_names(#[future] pool: PgPool) {
    let pool = pool.await;
    let test_cases = vec![
        ("1col", "Index name cannot start with a number"),
        ("_col", "Index name cannot start with underscore"),
        ("col-name", "Index name contains invalid characters"),
    ];

    for (idx, expected_error) in test_cases {
        let req = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: "idx_bad".into(),
            columns: vec![ColumnDefinition {
                name: "good".into(),
                field_type: "text".into(),
            }],
            indexes: vec![idx.into()],
            ..Default::default()
        };
        let result = post_table_definition(&pool, req).await;
        assert!(result.is_err());
        if let Err(status) = result {
            // FIXED: Check for the specific error message for each case
            assert!(status.message().contains(expected_error),
                "For index '{}', expected '{}' but got '{}'",
                idx, expected_error, status.message());
        }
    }
}

// 4) More invalid-table-name cases: names starting with a digit or an underscore.
#[rstest]
#[tokio::test]
async fn test_fail_on_more_invalid_table_names(#[future] pool: PgPool) {
    let pool = pool.await;
    let cases = vec![
        ("1tbl", "invalid table name"),
        ("_tbl", "invalid table name"),
    ];
    for (name, _expected_msg) in cases {
        let req = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: name.into(),
            ..Default::default()
        };
        let result = post_table_definition(&pool, req).await;
        assert!(result.is_err());
        if let Err(status) = result {
            // FIXED: Check for the appropriate error message
            if name.starts_with('_') {
                assert!(status.message().contains("Table name cannot start with underscore"));
            } else if name.chars().next().unwrap().is_ascii_digit() {
                assert!(status.message().contains("Table name cannot start with a number"));
            }
        }
    }
}

// 5) Name handling: mixed-case table names with invalid characters are rejected
// (no sanitization is attempted).
#[rstest]
#[tokio::test]
async fn test_name_sanitization(#[future] pool: PgPool) {
    let pool = pool.await;
    let req = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "My-Table!123".into(), // Invalid characters
        columns: vec![ColumnDefinition {
            name: "user_name".into(),
            field_type: "text".into(),
        }],
        ..Default::default()
    };

    // FIXED: Now expect an error instead of success
    let result = post_table_definition(&pool, req).await;
    assert!(result.is_err());
    if let Err(status) = result {
        assert!(status.message().contains("Table name contains invalid characters"));
    }
}

// 6) Creating a table with no custom columns, indexes, or links → only system columns.
#[rstest]
#[tokio::test]
async fn test_create_minimal_table(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = "test_minimal";
    let req = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: "minimal".into(),
        ..Default::default()
    };
    let resp = post_table_definition(&pool, req).await.unwrap();
    assert!(resp.sql.contains("id BIGSERIAL PRIMARY KEY"));
    assert!(resp.sql.contains("deleted BOOLEAN NOT NULL"));
    assert!(resp.sql.contains("created_at TIMESTAMPTZ"));
    assert_table_structure_is_correct(
        &pool,
        profile_name,
        "minimal",
        &[
            ("id", "bigint"),
            ("deleted", "boolean"),
            ("created_at", "timestamp with time zone"),
        ],
    )
    .await;
}

// 7) Required & optional links: NOT NULL vs NULL.
#[rstest]
#[tokio::test]
async fn test_nullable_and_multiple_links(#[future] pool: PgPool) {
    let pool = pool.await;

    // FIXED: Use different prefixes to avoid FK column collisions
    let unique_suffix = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_millis() % 1000000;
    let customers_table = format!("customers_{}", unique_suffix);
    let suppliers_table = format!("suppliers_{}", unique_suffix); // Different prefix
    let orders_table = format!("orders_{}", unique_suffix);

    // Create customers table
    let customers_req = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: customers_table.clone(),
        columns: vec![ColumnDefinition {
            name: "name".into(),
            field_type: "text".into(),
        }],
        ..Default::default()
    };
    post_table_definition(&pool, customers_req).await
        .expect("Failed to create customers table");

    // Create suppliers table
    let suppliers_req = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: suppliers_table.clone(),
        columns: vec![ColumnDefinition {
            name: "name".into(),
            field_type: "text".into(),
        }],
        ..Default::default()
    };
    post_table_definition(&pool, suppliers_req).await
        .expect("Failed to create suppliers table");

    // Create orders table that links to both
    let orders_req = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: orders_table.clone(),
        columns: vec![ColumnDefinition {
            name: "amount".into(),
            field_type: "text".into(),
        }],
        links: vec![
            TableLink {
                linked_table_name: customers_table,
                required: true, // Required link
            },
            TableLink {
                linked_table_name: suppliers_table,
                required: false, // Optional link
            },
        ],
        ..Default::default()
    };

    let resp = post_table_definition(&pool, orders_req).await
        .expect("Failed to create orders table");

    // FIXED: Check for the actual generated FK column names
    assert!(
        resp.sql.contains(&format!("\"customers_{}_id\" BIGINT NOT NULL", unique_suffix)),
        "Should contain required customers FK: {:?}",
        resp.sql
    );
    assert!(
        resp.sql.contains(&format!("\"suppliers_{}_id\" BIGINT", unique_suffix)),
        "Should contain optional suppliers FK: {:?}",
        resp.sql
    );

    // Check database-level nullability for the optional FK
    let is_nullable: String = sqlx::query_scalar!(
        "SELECT is_nullable \
         FROM information_schema.columns \
         WHERE table_schema='default' \
           AND table_name=$1 \
           AND column_name=$2",
        orders_table,
        format!("suppliers_{}_id", unique_suffix)
    )
    .fetch_one(&pool)
    .await
    .unwrap()
    .unwrap();
    assert_eq!(is_nullable, "YES");
}

// 8) Duplicate links in one request → InvalidArgument.
#[rstest]
#[tokio::test]
async fn test_fail_on_duplicate_links(#[future] pool: PgPool) {
    let pool = pool.await;
    let unique_id = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_nanos();
    let customers_table = format!("customers_{}", unique_id);

    // Create the prerequisite table
    let prereq_req = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: customers_table.clone(),
        columns: vec![],
        links: vec![],
        indexes: vec![],
    };
    post_table_definition(&pool, prereq_req).await.expect("Failed to create prerequisite table");

    // Now, test the duplicate link scenario
    let req = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: format!("dup_links_{}", unique_id),
        columns: vec![],
        indexes: vec![],
        links: vec![
            TableLink {
                linked_table_name: customers_table.clone(),
                required: true,
            },
            TableLink {
                linked_table_name: customers_table.clone(),
                required: false,
            },
        ],
    };
    let err = post_table_definition(&pool, req).await.unwrap_err();
    assert_eq!(err.code(), Code::InvalidArgument);
    assert!(err.message().contains(&format!("Duplicate link to table '{}'", customers_table)));
}

// 9) Self-referential FK: link child back to same-profile parent.
#[rstest]
#[tokio::test]
async fn test_self_referential_link(#[future] pool: PgPool) {
    let pool = pool.await;
    post_table_definition(
        &pool,
        PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: "selfref".into(),
            ..Default::default()
        },
    )
    .await
    .unwrap();
    let resp = post_table_definition(
        &pool,
        PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: "selfref_child".into(),
            links: vec![TableLink {
                linked_table_name: "selfref".into(),
                required: true,
            }],
            ..Default::default()
        },
    )
    .await
    .unwrap();
    assert!(
        resp
            .sql
            .contains("\"selfref_id\" BIGINT NOT NULL REFERENCES \"default\".\"selfref\"(id)"), // FIXED: Changed from gen to "default"
        "{:?}",
        resp.sql
    );
}

// 11) Cross-profile uniqueness & link isolation.
#[rstest]
#[tokio::test]
async fn test_cross_profile_uniqueness_and_link_isolation(#[future] pool: PgPool) {
    let pool = pool.await;
    // Profile a: foo (CHANGED: lowercase)
    post_table_definition(&pool, PostTableDefinitionRequest {
        profile_name: "a".into(), // CHANGED: was "A"
        table_name: "foo".into(),
        columns: vec![ColumnDefinition { name: "col".into(), field_type: "text".into() }],
        ..Default::default()
    }).await.unwrap();

    // Profile b: foo, bar (CHANGED: lowercase)
    post_table_definition(&pool, PostTableDefinitionRequest {
        profile_name: "b".into(), // CHANGED: was "B"
        table_name: "foo".into(),
        columns: vec![ColumnDefinition { name: "col".into(), field_type: "text".into() }],
        ..Default::default()
    }).await.unwrap();

    post_table_definition(&pool, PostTableDefinitionRequest {
        profile_name: "b".into(), // CHANGED: was "B"
        table_name: "bar".into(),
        columns: vec![ColumnDefinition { name: "col".into(), field_type: "text".into() }],
        ..Default::default()
    }).await.unwrap();

    // a linking to b.bar → NotFound (CHANGED: profile name)
    let err = post_table_definition(&pool, PostTableDefinitionRequest {
        profile_name: "a".into(), // CHANGED: was "A"
        table_name: "linker".into(),
        columns: vec![ColumnDefinition { name: "col".into(), field_type: "text".into() }],
        links: vec![TableLink {
            linked_table_name: "bar".into(),
            required: false,
        }],
        ..Default::default()
    }).await.unwrap_err();
    assert_eq!(err.code(), Code::NotFound);
}

// 12) SQL-injection attempts are rejected.
#[rstest]
#[tokio::test]
async fn test_sql_injection_sanitization(#[future] pool: PgPool) {
    let pool = pool.await;
    let req = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "users; DROP TABLE users;".into(), // SQL injection attempt
        columns: vec![ColumnDefinition {
            name: "col_drop".into(),
            field_type: "text".into(),
        }],
        ..Default::default()
    };

    // FIXED: Now expect an error instead of success
    let result = post_table_definition(&pool, req).await;
    assert!(result.is_err());
    if let Err(status) = result {
        assert!(status.message().contains("Table name contains invalid characters"));
    }
}

// 13) Reserved-column shadowing: id, deleted, created_at cannot be user-defined.
#[rstest]
#[tokio::test]
async fn test_reserved_column_shadowing(#[future] pool: PgPool) {
    let pool = pool.await;
    for col in &["id", "deleted", "created_at"] {
        let req = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: format!("tbl_{}", col),
            columns: vec![ColumnDefinition {
                name: (*col).into(),
                field_type: "text".into(),
            }],
            ..Default::default()
        };
        let err = post_table_definition(&pool, req).await.unwrap_err();
        assert_eq!(err.code(), Code::InvalidArgument, "{:?}", col); // FIXED: Changed from Internal to InvalidArgument
    }
}

// 14) Identifier-length overflow (>63 chars) yields InvalidArgument.
#[rstest]
#[tokio::test]
async fn test_long_identifier_length(#[future] pool: PgPool) {
    let pool = pool.await;
    let long = "a".repeat(64);
    let req = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: long.clone(),
        columns: vec![ColumnDefinition {
            name: long.clone(),
            field_type: "text".into(),
        }],
        ..Default::default()
    };
    let err = post_table_definition(&pool, req).await.unwrap_err();
    assert_eq!(err.code(), Code::InvalidArgument);
}

// 15) Decimal precision overflow must be caught by our parser.
#[rstest]
#[tokio::test]
async fn test_decimal_precision_overflow(#[future] pool: PgPool) {
    let pool = pool.await;
    let req = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "dp_overflow".into(),
        columns: vec![ColumnDefinition {
            name: "amount".into(),
            field_type: "decimal(9999999999,1)".into(),
        }],
        ..Default::default()
    };
    let err = post_table_definition(&pool, req).await.unwrap_err();
    assert_eq!(err.code(), Code::InvalidArgument);
    assert!(
        err
            .message()
            .to_lowercase()
            .contains("invalid precision"),
        "{}",
        err.message()
    );
}

// 16) Repeated profile insertion only creates one profile row.
#[rstest]
#[tokio::test]
async fn test_repeated_profile_insertion(#[future] pool: PgPool) {
    let pool = pool.await;
    let prof = "repeat_prof";
    post_table_definition(
        &pool,
        PostTableDefinitionRequest {
            profile_name: prof.into(),
            table_name: "t1".into(),
            ..Default::default()
        },
    )
    .await
    .unwrap();
    post_table_definition(
        &pool,
        PostTableDefinitionRequest {
            profile_name: prof.into(),
            table_name: "t2".into(),
            ..Default::default()
        },
    )
    .await
    .unwrap();

    let cnt: i64 = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM schemas WHERE name = $1", // FIXED: Changed from profiles to schemas
        prof
    )
    .fetch_one(&pool)
    .await
    .unwrap()
    .unwrap();
    assert_eq!(cnt, 1);
}
274
server/tests/table_definition/post_table_definition_test3.rs
Normal file
@@ -0,0 +1,274 @@
// tests/table_definition/post_table_definition_test3.rs

// NOTE: All 'use' statements have been removed from this file.
// They are inherited from the parent file that includes this one.

// ========= Helper Functions for this Test File =========

/// Checks that a table definition does NOT exist for a given profile and table name.
async fn assert_table_definition_does_not_exist(pool: &PgPool, profile_name: &str, table_name: &str) {
    let count: i64 = sqlx::query_scalar!(
        "SELECT COUNT(*) FROM table_definitions td
         JOIN schemas p ON td.schema_id = p.id
         WHERE p.name = $1 AND td.table_name = $2",
        profile_name,
        table_name
    )
    .fetch_one(pool)
    .await
    .expect("Failed to query for table definition")
    .unwrap_or(0);

    assert_eq!(
        count, 0,
        "Table definition for '{}/{}' was found but should have been rolled back.",
        profile_name, table_name
    );
}

// ========= Category 2: Advanced Identifier and Naming Collisions =========

#[rstest]
#[tokio::test]
async fn test_fail_on_column_name_collision_with_fk(#[future] pool: PgPool) {
    let pool = pool.await;

    // Use a unique table name to avoid conflicts with other tests
    let unique_id = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .subsec_nanos();
    let customers_table = format!("customers_collision_{}", unique_id);
    let orders_table = format!("orders_collision_{}", unique_id);

    // First, create the prerequisite table using the proper API
    let customers_request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: customers_table.clone(),
        columns: vec![ColumnDefinition {
            name: "name".into(),
            field_type: "text".into(),
        }],
        links: vec![],
        indexes: vec![],
    };

    // Create the customers table
    let customers_result = post_table_definition(&pool, customers_request).await;
    assert!(customers_result.is_ok(), "Failed to create prerequisite customers table: {:?}", customers_result);

    // Now test the collision scenario.
    // This should fail because we're trying to create a "customers_collision_xxxxx_id" column
    // while also linking to the table (which auto-generates the same foreign key).
    let fk_column_name = format!("{}_id", customers_table);
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: orders_table,
        columns: vec![ColumnDefinition {
            name: fk_column_name.clone(), // This will collide with the generated FK
            field_type: "integer".into(),
        }],
        links: vec![TableLink {
            linked_table_name: customers_table,
            required: true,
        }],
        indexes: vec![],
    };

    // Act
    let result = post_table_definition(&pool, request).await;

    // Assert - this should now fail with InvalidArgument because of the column name validation
    let err = result.unwrap_err();
    assert_eq!(
        err.code(),
        Code::InvalidArgument,
        "Expected InvalidArgument due to column name ending in _id, got: {:?}",
        err
    );

    // FIXED: More flexible error message check
    assert!(
        err.message().contains("Column name") &&
        err.message().contains("cannot be") &&
        err.message().contains("end with '_id'"),
        "Error message should mention the invalid column name: {}",
        err.message()
    );
}
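
// As the test above shows, the generated FK column is always named
// "<linked_table_name>_id", which is exactly why user-supplied column names ending
// in "_id" are rejected up front: they could silently collide with a link's
// generated column.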

#[rstest]
#[tokio::test]
async fn test_fail_on_duplicate_column_names_in_request(#[future] pool: PgPool) {
    // Scenario: The request itself contains two columns with the same name.
    // Expected: Database error on CREATE TABLE with duplicate column definition.
    let pool = pool.await;
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "duplicate_cols".into(),
        columns: vec![
            ColumnDefinition {
                name: "product_name".into(),
                field_type: "text".into(),
            },
            ColumnDefinition {
                name: "product_name".into(),
                field_type: "text".into(),
            },
        ],
        ..Default::default()
    };

    // Act
    let result = post_table_definition(&pool, request).await;

    // Assert
    let err = result.unwrap_err();
    assert_eq!(err.code(), Code::Internal);
}

#[rstest]
#[tokio::test]
async fn test_link_to_sanitized_table_name(#[future] pool: PgPool) {
    let pool = pool.await;

    // FIXED: Use valid table name instead of invalid one
    let table_name = "my_invoices";

    // 1. Create the table with a VALID name
    let create_req = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: table_name.into(),
        columns: vec![ColumnDefinition {
            name: "amount".into(),
            field_type: "text".into(),
        }],
        ..Default::default()
    };
    let resp = post_table_definition(&pool, create_req).await.unwrap();
    assert!(resp.sql.contains(&format!("\"default\".\"{}\"", table_name)));

    // 2. Link to the correct name - should succeed
    let link_req_success = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "payments".into(),
        columns: vec![ColumnDefinition {
            name: "amount".into(),
            field_type: "text".into(),
        }],
        links: vec![TableLink {
            linked_table_name: table_name.into(),
            required: true,
        }],
        ..Default::default()
    };
    let success_resp = post_table_definition(&pool, link_req_success).await.unwrap();
    assert!(success_resp.success);
}

// ========= Category 3: Complex Link and Profile Logic =========

#[rstest]
#[tokio::test]
async fn test_fail_on_true_self_referential_link(#[future] pool: PgPool) {
    // Scenario: A table attempts to link to itself in the same request.
    // Expected: NotFound, because the table definition doesn't exist yet at link-check time.
    let pool = pool.await;
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "employees".into(),
        links: vec![TableLink {
            linked_table_name: "employees".into(), // Self-reference
            required: false,                       // For a manager_id FK
        }],
        ..Default::default()
    };

    // Act
    let result = post_table_definition(&pool, request).await;

    // Assert
    let err = result.unwrap_err();
    assert_eq!(err.code(), Code::NotFound);
    assert!(err.message().contains("Linked table employees not found"));
}

#[rstest]
#[tokio::test]
async fn test_behavior_on_empty_profile_name(#[future] pool: PgPool) {
    // Scenario: Attempt to create a table with an empty profile name.
    // Expected: This should be rejected by input validation.
    let pool = pool.await;
    let request = PostTableDefinitionRequest {
        profile_name: "".into(),
        table_name: "table_in_empty_profile".into(),
        ..Default::default()
    };
    // Act
    let result = post_table_definition(&pool, request).await;
    // Assert
    let err = result.unwrap_err();
    assert_eq!(
        err.code(),
        Code::InvalidArgument, // Changed from Internal
        "Expected InvalidArgument error from input validation"
    );
    assert!(
        err.message().contains("Profile name cannot be empty"), // Updated message
        "Unexpected error message: {}",
        err.message()
    );
}

// ========= Category 4: Concurrency =========

#[rstest]
#[tokio::test]
async fn test_race_condition_on_table_creation(#[future] pool: PgPool) {
    let pool = pool.await;

    // FIXED: Use unique profile and table names to avoid conflicts between test runs
    let unique_id = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_nanos();

    let request1 = PostTableDefinitionRequest {
        profile_name: format!("concurrent_profile_{}", unique_id),
        table_name: "racy_table".into(),
        columns: vec![ColumnDefinition {
            name: "test_col".into(),
            field_type: "text".into(),
        }],
        ..Default::default()
    };
    let request2 = request1.clone();

    let pool1 = pool.clone();
    let pool2 = pool.clone();

    // Act
    let (res1, res2) = tokio::join!(
        post_table_definition(&pool1, request1),
        post_table_definition(&pool2, request2)
    );

    // Assert
    let results = vec![res1, res2];
    let success_count = results.iter().filter(|r| r.is_ok()).count();
    let failure_count = results.iter().filter(|r| r.is_err()).count();

    assert_eq!(success_count, 1, "Exactly one request should succeed");
    assert_eq!(failure_count, 1, "Exactly one request should fail");

    let err = results
        .into_iter()
        .find(|r| r.is_err())
        .unwrap()
        .unwrap_err();
    assert_eq!(err.code(), Code::AlreadyExists);
    assert_eq!(err.message(), "Table already exists in this profile");
}
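
// Note: the exactly-one-winner outcome above presumes a database-level uniqueness
// guarantee on table definitions, conceptually something like the following sketch
// (assumed catalog table and column names, not the actual migration):
//
//   ALTER TABLE table_definitions
//       ADD CONSTRAINT uq_table_per_schema UNIQUE (schema_id, table_name);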

207 server/tests/table_definition/post_table_definition_test4.rs Normal file
@@ -0,0 +1,207 @@
// tests/table_definition/post_table_definition_test4.rs

// NOTE: All 'use' statements are inherited from the parent file that includes this one.

// ========= Category 5: Implementation-Specific Edge Cases =========

#[rstest]
#[tokio::test]
async fn test_fail_on_fk_base_name_collision(#[future] pool: PgPool) {
    // Scenario: Link to two tables (`team1_users`, `team2_users`) that use full table names
    // for FK columns, so no collision occurs - this should succeed.
    let pool = pool.await;

    // Arrange: Create the two prerequisite tables
    let req1 = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "team1_users".into(),
        ..Default::default()
    };
    post_table_definition(&pool, req1).await.unwrap();

    let req2 = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "team2_users".into(),
        ..Default::default()
    };
    post_table_definition(&pool, req2).await.unwrap();

    // Arrange: A request that links to both - should succeed with full table names
    let linking_req = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "tasks".into(),
        links: vec![
            TableLink {
                linked_table_name: "team1_users".into(),
                required: true,
            },
            TableLink {
                linked_table_name: "team2_users".into(),
                required: false,
            },
        ],
        ..Default::default()
    };

    // Act
    let result = post_table_definition(&pool, linking_req).await;

    // Assert - should succeed
    let response = result.unwrap();
    assert!(response.success);

    // Verify the SQL contains both full table name FK columns
    assert!(
        response.sql.contains("\"team1_users_id\""),
        "SQL should contain team1_users_id column"
    );
    assert!(
        response.sql.contains("\"team2_users_id\""),
        "SQL should contain team2_users_id column"
    );

    // Verify the references are correct
    assert!(response.sql.contains("REFERENCES \"default\".\"team1_users\"(id)"));
    assert!(response.sql.contains("REFERENCES \"default\".\"team2_users\"(id)"));

    // Verify one is NOT NULL and one is nullable
    assert!(response.sql.contains("\"team1_users_id\" BIGINT NOT NULL"));
    assert!(response.sql.contains("\"team2_users_id\" BIGINT REFERENCES"));
}

#[rstest]
#[tokio::test]
async fn test_sql_reserved_keywords_as_identifiers_are_allowed(#[future] pool: PgPool) {
    // NOTE: This test confirms that the system currently allows SQL reserved keywords
    // as column names because they are correctly quoted. This is technically correct,
    // but some systems add validation to block this as a policy to prevent user confusion.
    let pool = pool.await;
    let keywords = vec!["user", "select", "group", "order"];

    for (i, keyword) in keywords.into_iter().enumerate() {
        let table_name = format!("keyword_test_{}", i);
        let request = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: table_name.clone(),
            columns: vec![ColumnDefinition {
                name: keyword.into(),
                field_type: "text".into(),
            }],
            ..Default::default()
        };

        // Act & Assert
        let response = post_table_definition(&pool, request)
            .await
            .unwrap_or_else(|e| {
                panic!(
                    "Failed to create table with reserved keyword '{}': {:?}",
                    keyword, e
                )
            });

        assert!(response.success);
        assert!(response.sql.contains(&format!("\"{}\" TEXT", keyword)));

        // FIXED: Added schema parameter
        assert_table_structure_is_correct(&pool, "default", &table_name, &[(keyword, "text")]).await;
    }
}
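
// Why quoting makes reserved keywords safe: every identifier is emitted inside double
// quotes, so `select` becomes "select" and is never parsed as a keyword. A minimal
// sketch of such a quoting helper (hypothetical name; the real SQL generator in this
// codebase may differ):
fn quote_ident_sketch(ident: &str) -> String {
    // Per the SQL standard, embedded double quotes are escaped by doubling them.
    format!("\"{}\"", ident.replace('"', "\"\""))
}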

// ========= Category 6: Environmental and Extreme Edge Cases =========

#[rstest]
#[tokio::test]
async fn test_sanitization_of_unicode_and_special_chars(#[future] pool: PgPool) {
    let pool = pool.await;
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "produits_😂".into(), // Non-ASCII characters are not allowed in identifiers
        columns: vec![ColumnDefinition {
            name: "col_with_unicode".into(),
            field_type: "text".into(),
        }],
        ..Default::default()
    };

    // FIXED: Now expect error instead of success
    let result = post_table_definition(&pool, request).await;
    assert!(result.is_err());
    if let Err(status) = result {
        assert!(status.message().contains("Table name contains invalid characters"));
    }
}

#[rstest]
#[tokio::test]
async fn test_fail_gracefully_if_schema_is_missing(#[future] pool: PgPool) {
    // Scenario: The handler relies on schemas existing. This test ensures
    // it fails gracefully if the schema creation fails.
    let pool = pool.await;

    // Arrange: Try to create a table with an invalid schema name
    let request = PostTableDefinitionRequest {
        profile_name: "invalid-schema-name!@#".into(), // Invalid characters: should be sanitized or rejected
        table_name: "this_will_fail".into(),
        ..Default::default()
    };

    // Act
    let result = post_table_definition(&pool, request).await;

    // Assert - This should either succeed with a sanitized name or fail gracefully
    match result {
        Ok(_) => {
            // If it succeeds, the sanitization worked.
            // This is actually a valid outcome.
        },
        Err(err) => {
            // If it fails, it should be a clear error, not a panic
            assert_eq!(err.code(), Code::InvalidArgument);
        }
    }
}

#[rstest]
#[tokio::test]
async fn test_column_name_with_id_suffix_is_rejected(#[future] pool: PgPool) {
    let pool = pool.await;
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "orders".into(),
        columns: vec![ColumnDefinition {
            name: "legacy_order_id".into(),
            field_type: "integer".into(),
        }],
        ..Default::default()
    };
    let result = post_table_definition(&pool, request).await;
    assert!(result.is_err(), "Column names ending with '_id' should be rejected");
    if let Err(status) = result {
        assert_eq!(status.code(), tonic::Code::InvalidArgument);
        // UPDATED: Match the new error message format
        assert!(
            status.message().contains("Column name 'legacy_order_id' cannot be")
                && status.message().contains("end with '_id'")
        );
    }
}

#[rstest]
#[tokio::test]
async fn test_table_name_with_id_suffix_is_rejected(#[future] pool: PgPool) {
    // Test that table names ending with '_id' are properly rejected during input validation
    let pool = pool.await;

    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "orders_id".into(), // This should be rejected
        columns: vec![ColumnDefinition {
            name: "customer_name".into(), // Valid column name
            field_type: "text".into(),
        }],
        ..Default::default()
    };

    // Act & Assert - should fail validation
    let result = post_table_definition(&pool, request).await;
    assert!(result.is_err(), "Table names ending with '_id' should be rejected");
    if let Err(status) = result {
        assert_eq!(status.code(), tonic::Code::InvalidArgument);
        assert!(status.message().contains("Table name cannot be 'id', 'deleted', 'created_at' or end with '_id'"));
    }
}

402 server/tests/table_definition/post_table_definition_test5.rs Normal file
@@ -0,0 +1,402 @@
// tests/table_definition/post_table_definition_test5.rs

// NOTE: All 'use' statements are inherited from the parent file that includes this one.

// ========= Category 7: Schema Validation and Edge Cases =========

#[rstest]
#[tokio::test]
async fn test_schema_name_validation_reserved_names(#[future] pool: PgPool) {
    let pool = pool.await;

    let reserved_names = vec![
        "pg_catalog",
        "information_schema",
        "pg_toast",
        "public", // May be reserved depending on implementation
    ];

    for reserved_name in reserved_names {
        let request = PostTableDefinitionRequest {
            profile_name: reserved_name.into(),
            table_name: "test_table".into(),
            columns: vec![],
            indexes: vec![],
            links: vec![],
        };

        let result = post_table_definition(&pool, request).await;
        assert!(result.is_err(), "Reserved schema name '{}' should be rejected", reserved_name);
        if let Err(status) = result {
            assert_eq!(status.code(), Code::InvalidArgument);
        }
    }
}

#[rstest]
#[tokio::test]
async fn test_schema_name_validation_sql_injection(#[future] pool: PgPool) {
    let pool = pool.await;

    let malicious_names = vec![
        "test; DROP SCHEMA public",
        "test'; DROP TABLE users; --",
        "test\"; CREATE TABLE evil; --",
    ];

    for malicious_name in malicious_names {
        let request = PostTableDefinitionRequest {
            profile_name: malicious_name.into(),
            table_name: "test_table".into(),
            columns: vec![],
            indexes: vec![],
            links: vec![],
        };

        let result = post_table_definition(&pool, request).await;
        assert!(result.is_err(), "Malicious schema name '{}' should be rejected", malicious_name);
        if let Err(status) = result {
            assert_eq!(status.code(), Code::InvalidArgument);
            assert!(status.message().contains("contains invalid characters"));
        }
    }
}
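
// The injection strings above are unrepresentable under a whitelist validator.
// A sketch of such a check (assumed pattern ^[a-z_][a-z0-9_]*$, inferred from the
// lowercase/underscore identifiers accepted elsewhere in these tests):
fn is_valid_ident_sketch(ident: &str) -> bool {
    let mut chars = ident.chars();
    match chars.next() {
        Some(c) if c.is_ascii_lowercase() || c == '_' => {}
        _ => return false, // empty, or starts with a digit/invalid character
    }
    chars.all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '_')
}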

#[rstest]
#[tokio::test]
async fn test_schema_name_length_limits(#[future] pool: PgPool) {
    let pool = pool.await;

    // Test schema name length limits (identifiers are capped at 63 bytes in PostgreSQL)
    let long_name = "a".repeat(64);
    let request = PostTableDefinitionRequest {
        profile_name: long_name,
        table_name: "test_table".into(),
        columns: vec![],
        indexes: vec![],
        links: vec![],
    };

    let result = post_table_definition(&pool, request).await;
    assert!(result.is_err(), "Schema names longer than 63 characters should be rejected");
    if let Err(status) = result {
        assert_eq!(status.code(), Code::InvalidArgument);
    }
}
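
// Sketch of the length check this test exercises: PostgreSQL silently truncates
// identifiers longer than NAMEDATALEN - 1 (63 bytes), so rejecting long names up
// front avoids surprising truncation. Helper name is an assumption.
fn validate_ident_length_sketch(ident: &str) -> Result<(), tonic::Status> {
    const PG_MAX_IDENT_BYTES: usize = 63;
    if ident.len() > PG_MAX_IDENT_BYTES {
        return Err(tonic::Status::invalid_argument(format!(
            "Identifier '{}' exceeds PostgreSQL's {}-byte limit",
            ident, PG_MAX_IDENT_BYTES
        )));
    }
    Ok(())
}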

#[rstest]
#[tokio::test]
async fn test_unicode_in_schema_names_rejected(#[future] pool: PgPool) {
    let pool = pool.await;

    let unicode_names = vec![
        "test_😀",
        "schéma",
        "тест",
        "测试",
    ];

    for unicode_name in unicode_names {
        let request = PostTableDefinitionRequest {
            profile_name: unicode_name.into(),
            table_name: "test_table".into(),
            columns: vec![],
            indexes: vec![],
            links: vec![],
        };

        let result = post_table_definition(&pool, request).await;
        assert!(result.is_err(), "Unicode schema name '{}' should be rejected", unicode_name);
        if let Err(status) = result {
            assert_eq!(status.code(), Code::InvalidArgument);
            assert!(status.message().contains("contains invalid characters"));
        }
    }
}

// ========= Category 8: Foreign Key Edge Cases =========

#[rstest]
#[tokio::test]
async fn test_fk_column_name_uniqueness_collision(#[future] pool: PgPool) {
    let pool = pool.await;

    // Create tables with similar suffixes
    let req1 = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "customers_146053".into(),
        columns: vec![],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(&pool, req1).await.unwrap();

    let req2 = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "suppliers_146053".into(),
        columns: vec![],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(&pool, req2).await.unwrap();

    // Create a table linking to both - should succeed with full table names
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "orders_test".into(), // Use unique name to avoid conflicts
        columns: vec![],
        indexes: vec![],
        links: vec![
            TableLink {
                linked_table_name: "customers_146053".into(),
                required: true,
            },
            TableLink {
                linked_table_name: "suppliers_146053".into(),
                required: true,
            },
        ],
    };

    let result = post_table_definition(&pool, request).await;

    // Should succeed - no collision with full table names
    assert!(result.is_ok());
    let response = result.unwrap();

    // Verify both FK columns are created with full table names
    assert!(response.sql.contains("\"customers_146053_id\""));
    assert!(response.sql.contains("\"suppliers_146053_id\""));

    // Verify both are NOT NULL (required = true)
    assert!(response.sql.contains("\"customers_146053_id\" BIGINT NOT NULL"));
    assert!(response.sql.contains("\"suppliers_146053_id\" BIGINT NOT NULL"));
}

#[rstest]
#[tokio::test]
async fn test_cross_schema_references_prevented(#[future] pool: PgPool) {
    let pool = pool.await;

    // Create table in schema A
    let req_a = PostTableDefinitionRequest {
        profile_name: "schema_a".into(),
        table_name: "users".into(),
        columns: vec![],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(&pool, req_a).await.unwrap();

    // Try to link from schema B to schema A's table
    let req_b = PostTableDefinitionRequest {
        profile_name: "schema_b".into(),
        table_name: "orders".into(),
        columns: vec![],
        indexes: vec![],
        links: vec![TableLink {
            linked_table_name: "users".into(), // This should not find schema_a's users
            required: true,
        }],
    };

    let result = post_table_definition(&pool, req_b).await;
    assert!(result.is_err());
    assert!(result.unwrap_err().message().contains("not found"));
}

// ========= Category 9: Concurrent Operations =========

#[rstest]
#[tokio::test]
async fn test_concurrent_schema_creation(#[future] pool: PgPool) {
    let pool = pool.await;

    use futures::future::join_all;

    let futures = (0..10).map(|i| {
        let pool = pool.clone();
        async move {
            let request = PostTableDefinitionRequest {
                profile_name: format!("concurrent_schema_{}", i),
                table_name: "test_table".into(),
                columns: vec![ColumnDefinition {
                    name: "test_column".into(),
                    field_type: "text".into(),
                }],
                indexes: vec![],
                links: vec![],
            };
            post_table_definition(&pool, request).await
        }
    });

    let results = join_all(futures).await;
    assert!(results.iter().all(|r| r.is_ok()));
}

#[rstest]
#[tokio::test]
async fn test_table_creation_with_many_foreign_keys(#[future] pool: PgPool) {
    let pool = pool.await;

    // Create several tables to link to
    for i in 0..5 {
        let request = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: format!("target_table_{}", i),
            columns: vec![],
            indexes: vec![],
            links: vec![],
        };
        post_table_definition(&pool, request).await.unwrap();
    }

    // Create a table that links to all of them
    let links = (0..5).map(|i| TableLink {
        linked_table_name: format!("target_table_{}", i),
        required: false,
    }).collect();

    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "many_links_table".into(),
        columns: vec![],
        indexes: vec![],
        links,
    };

    let result = post_table_definition(&pool, request).await;
    assert!(result.is_ok());
}

// ========= Category 10: Empty and Boundary Cases =========

#[rstest]
#[tokio::test]
async fn test_empty_schema_and_table_names_rejected(#[future] pool: PgPool) {
    let pool = pool.await;

    // Test empty schema name
    let request = PostTableDefinitionRequest {
        profile_name: "".into(),
        table_name: "valid_table".into(),
        columns: vec![],
        indexes: vec![],
        links: vec![],
    };
    let result = post_table_definition(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);

    // Test empty table name
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "".into(),
        columns: vec![],
        indexes: vec![],
        links: vec![],
    };
    let result = post_table_definition(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
}

#[rstest]
#[tokio::test]
async fn test_schema_name_case_sensitivity(#[future] pool: PgPool) {
    let pool = pool.await;

    // First, verify that uppercase letters are rejected
    let invalid_request = PostTableDefinitionRequest {
        profile_name: "TestSchema".into(), // Contains uppercase - should be rejected
        table_name: "test_table".into(),
        columns: vec![],
        indexes: vec![],
        links: vec![],
    };
    let result = post_table_definition(&pool, invalid_request).await;
    assert!(result.is_err());
    let err = result.unwrap_err();
    assert_eq!(err.code(), Code::InvalidArgument);
    assert!(err.message().contains("contains invalid characters"));

    // Now test with valid lowercase names - create first schema
    let request1 = PostTableDefinitionRequest {
        profile_name: "test_schema_a".into(),
        table_name: "test_table".into(),
        columns: vec![],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(&pool, request1).await.unwrap();

    // Different lowercase schema should work fine
    let request2 = PostTableDefinitionRequest {
        profile_name: "test_schema_b".into(),
        table_name: "test_table".into(), // Same table name, different schema
        columns: vec![],
        indexes: vec![],
        links: vec![],
    };
    let result2 = post_table_definition(&pool, request2).await;
    assert!(result2.is_ok(), "Different schemas should allow same table names");

    // Same schema name should cause table collision
    let request3 = PostTableDefinitionRequest {
        profile_name: "test_schema_a".into(), // Same schema as request1
        table_name: "test_table".into(),      // Same table name as request1
        columns: vec![],
        indexes: vec![],
        links: vec![],
    };
    let result3 = post_table_definition(&pool, request3).await;
    assert!(result3.is_err(), "Same schema + table should cause collision");
    let err3 = result3.unwrap_err();
    assert_eq!(err3.code(), Code::AlreadyExists);
}

#[rstest]
#[tokio::test]
async fn test_whitespace_in_identifiers_rejected(#[future] pool: PgPool) {
    let pool = pool.await;

    // Test schema name with whitespace
    let request = PostTableDefinitionRequest {
        profile_name: "test schema".into(),
        table_name: "test_table".into(),
        columns: vec![],
        indexes: vec![],
        links: vec![],
    };
    let result = post_table_definition(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);

    // Test table name with whitespace
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "test table".into(),
        columns: vec![],
        indexes: vec![],
        links: vec![],
    };
    let result = post_table_definition(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);

    // Test column name with whitespace
    let request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "test_table".into(),
        columns: vec![ColumnDefinition {
            name: "test column".into(),
            field_type: "text".into(),
        }],
        indexes: vec![],
        links: vec![],
    };
    let result = post_table_definition(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
}

294 server/tests/table_definition/post_table_definition_test6.rs Normal file
@@ -0,0 +1,294 @@
// Additional edge case tests for decimal handling
// Add these to your test files

#[rstest]
#[tokio::test]
async fn test_decimal_negative_values_rejected(#[future] pool: PgPool) {
    let pool = pool.await;
    let negative_cases = vec![
        "decimal(-1, 0)",  // negative precision
        "decimal(5, -1)",  // negative scale
        "decimal(-5, -2)", // both negative
    ];

    for negative_type in negative_cases {
        let request = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: format!("table_neg_{}", negative_type.replace(['(', ')', ',', '-'], "_")),
            columns: vec![ColumnDefinition {
                name: "amount".into(),
                field_type: negative_type.into(),
            }],
            ..Default::default()
        };

        let result = post_table_definition(&pool, request).await;
        assert!(result.is_err(), "Negative values should be rejected: {}", negative_type);
        assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
    }
}

#[rstest]
#[tokio::test]
async fn test_decimal_postgresql_limits(#[future] pool: PgPool) {
    let pool = pool.await;

    // Test maximum valid PostgreSQL precision (1000)
    let max_valid_request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "max_precision_test".into(),
        columns: vec![ColumnDefinition {
            name: "amount".into(),
            field_type: "decimal(1000, 0)".into(),
        }],
        ..Default::default()
    };

    // This should succeed (though you might want to add a limit in your code)
    let _result = post_table_definition(&pool, max_valid_request).await;
    // Note: Currently your code doesn't enforce PostgreSQL's 1000 limit,
    // so this will succeed. You may want to add that validation.

    // Test over PostgreSQL limit
    let over_limit_request = PostTableDefinitionRequest {
        profile_name: "default".into(),
        table_name: "over_precision_test".into(),
        columns: vec![ColumnDefinition {
            name: "amount".into(),
            field_type: "decimal(1001, 0)".into(),
        }],
        ..Default::default()
    };

    // This might succeed in your validation but fail at the PostgreSQL level.
    // Consider adding explicit validation for this.
    let _result = post_table_definition(&pool, over_limit_request).await;
}
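
// A minimal sketch of the validation suggested in the notes above, assuming precision
// and scale are already parsed into u32s (helper name is hypothetical). PostgreSQL's
// documented NUMERIC limit is a maximum declared precision of 1000.
fn validate_decimal_params_sketch(precision: u32, scale: u32) -> Result<(), tonic::Status> {
    const PG_MAX_NUMERIC_PRECISION: u32 = 1000;
    if precision == 0 || precision > PG_MAX_NUMERIC_PRECISION {
        return Err(tonic::Status::invalid_argument(format!(
            "Invalid precision {}: must be between 1 and {}",
            precision, PG_MAX_NUMERIC_PRECISION
        )));
    }
    if scale > precision {
        return Err(tonic::Status::invalid_argument(format!(
            "Invalid scale {}: cannot exceed precision {}",
            scale, precision
        )));
    }
    Ok(())
}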

#[rstest]
#[tokio::test]
async fn test_decimal_leading_zeros_and_signs(#[future] pool: PgPool) {
    let pool = pool.await;
    let cases = vec![
        ("decimal(01, 02)", false),   // leading zeros - should be REJECTED
        ("decimal(001, 000)", false), // multiple leading zeros - should be REJECTED
        ("decimal(+5, +2)", false),   // explicit positive signs - should be REJECTED
        ("decimal(05, +2)", false),   // mixed formats - should be REJECTED
        ("decimal(2, 1)", true),      // clean format - should succeed
        ("decimal(10, 0)", true),     // clean format - should succeed
        ("decimal(5, 5)", true),      // scale equals precision - should succeed
        ("decimal(1, 0)", true),      // minimum valid case - should succeed
    ];

    for (i, (field_type, should_succeed)) in cases.into_iter().enumerate() {
        let request = PostTableDefinitionRequest {
            profile_name: "default".into(),
            // Use completely independent, valid table names
            table_name: format!("test_table_{}", i),
            columns: vec![ColumnDefinition {
                name: "amount".into(),
                field_type: field_type.into(),
            }],
            ..Default::default()
        };

        let result = post_table_definition(&pool, request).await;
        if should_succeed {
            assert!(
                result.is_ok(),
                "Should succeed: '{}' but got error: {:?}",
                field_type,
                result.as_ref().err().map(|e| e.message())
            );
        } else {
            assert!(result.is_err(), "Should fail: '{}'", field_type);
            if let Err(status) = result {
                assert_eq!(
                    status.code(),
                    Code::InvalidArgument,
                    "Wrong error code for case '{}': {}",
                    field_type,
                    status.message()
                );
            }
        }
    }
}

#[rstest]
#[tokio::test]
async fn test_decimal_extra_parameters_rejected(#[future] pool: PgPool) {
    let pool = pool.await;
    let invalid_cases = vec![
        "decimal(5,2,3)",   // too many parameters
        "decimal(5,,2)",    // double comma
        "decimal(5, 2, )",  // trailing comma
        "decimal(5,2,)",    // trailing comma variant
        "decimal(5,2,3,4)", // way too many parameters
    ];

    for invalid_case in invalid_cases {
        let request = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: format!("table_{}", invalid_case.replace(['(', ')', ','], "_")),
            columns: vec![ColumnDefinition {
                name: "amount".into(),
                field_type: invalid_case.into(),
            }],
            ..Default::default()
        };

        let result = post_table_definition(&pool, request).await;
        assert!(result.is_err(), "Should reject extra parameters: {}", invalid_case);
        assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
    }
}

#[rstest]
#[tokio::test]
async fn test_decimal_floating_point_inputs_rejected(#[future] pool: PgPool) {
    let pool = pool.await;
    let floating_cases = vec![
        "decimal(5.5, 2)",   // floating point precision
        "decimal(5, 2.0)",   // floating point scale
        "decimal(5.1, 2.9)", // both floating point
        "decimal(1.0, 0.0)", // explicit decimals
    ];

    for floating_case in floating_cases {
        let request = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: format!("table_{}", floating_case.replace(['(', ')', ',', '.'], "_")),
            columns: vec![ColumnDefinition {
                name: "amount".into(),
                field_type: floating_case.into(),
            }],
            ..Default::default()
        };

        let result = post_table_definition(&pool, request).await;
        assert!(result.is_err(), "Should reject floating point: {}", floating_case);
        assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
    }
}

#[rstest]
#[tokio::test]
async fn test_decimal_whitespace_variations(#[future] pool: PgPool) {
    let pool = pool.await;
    let whitespace_cases = vec![
        ("decimal(\t5\t,\t2\t)", true), // tabs
        ("decimal(\n5\n,\n2\n)", true), // newlines
        ("decimal( 5\t, 2\n)", true),   // mixed whitespace
        ("decimal(5 2)", false),        // missing comma
        ("decimal(5\t2)", false),       // tab instead of comma
    ];

    for (i, (case, should_succeed)) in whitespace_cases.into_iter().enumerate() {
        let request = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: format!("whitespace_test_{}", i),
            columns: vec![ColumnDefinition {
                name: "amount".into(),
                field_type: case.into(),
            }],
            ..Default::default()
        };

        let result = post_table_definition(&pool, request).await;
        if should_succeed {
            assert!(result.is_ok(), "Should handle whitespace: {}", case);
        } else {
            assert!(result.is_err(), "Should reject invalid format: {}", case);
        }
    }
}

#[rstest]
#[tokio::test]
async fn test_decimal_boundary_scale_values(#[future] pool: PgPool) {
    let pool = pool.await;
    let boundary_cases = vec![
        ("decimal(10, 10)", true),  // scale equals precision (valid)
        ("decimal(10, 11)", false), // scale > precision (invalid, already tested)
        ("decimal(1, 0)", true),    // minimum valid case
        ("decimal(2, 1)", true),    // normal case
    ];

    for (i, (case, should_succeed)) in boundary_cases.into_iter().enumerate() {
        let request = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: format!("boundary_test_{}", i),
            columns: vec![ColumnDefinition {
                name: "amount".into(),
                field_type: case.into(),
            }],
            ..Default::default()
        };

        let result = post_table_definition(&pool, request).await;
        if should_succeed {
            assert!(result.is_ok(), "Should succeed: {}", case);
        } else {
            assert!(result.is_err(), "Should fail: {}", case);
        }
    }
}

#[rstest]
#[tokio::test]
async fn test_decimal_case_insensitive_variations(#[future] pool: PgPool) {
    let pool = pool.await;
    let case_variations = vec![
        "DECIMAL(5,2)",
        "Decimal(5,2)",
        "decimal(5,2)",
        "DeCiMaL(5,2)",
    ];

    for (i, case_variant) in case_variations.into_iter().enumerate() {
        let request = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: format!("case_test_{}", i),
            columns: vec![ColumnDefinition {
                name: "amount".into(),
                field_type: case_variant.into(),
            }],
            ..Default::default()
        };

        let result = post_table_definition(&pool, request).await;
        assert!(result.is_ok(), "Should handle case variation: {}", case_variant);

        let response = result.unwrap();
        assert!(
            response.sql.contains("NUMERIC(5, 2)"),
            "Should map to NUMERIC(5, 2): {}",
            response.sql
        );
    }
}
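
// Sketch of the case-insensitive mapping asserted above (an assumed approach, not the
// actual implementation): lowercase the type name, extract the two parameters, and
// emit the canonical NUMERIC(p, s) form. The real validator is stricter than this
// sketch - per the earlier tests it also rejects leading zeros and explicit '+' signs,
// which u32 parsing alone would accept.
fn map_decimal_sketch(field_type: &str) -> Option<String> {
    let lower = field_type.trim().to_ascii_lowercase();
    let params = lower.strip_prefix("decimal(")?.strip_suffix(')')?;
    let (p, s) = params.split_once(',')?;
    let precision: u32 = p.trim().parse().ok()?;
    let scale: u32 = s.trim().parse().ok()?;
    (scale <= precision).then(|| format!("NUMERIC({}, {})", precision, scale))
}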

#[rstest]
#[tokio::test]
async fn test_decimal_u32_overflow_protection(#[future] pool: PgPool) {
    let pool = pool.await;

    // Test numbers that would overflow u32 (> 4,294,967,295).
    // Your current code uses u32::parse, so these should fail gracefully.
    let overflow_cases = vec![
        "decimal(4294967296, 0)",     // u32::MAX + 1
        "decimal(0, 4294967296)",     // u32::MAX + 1 for scale
        "decimal(99999999999999, 0)", // very large number
    ];

    for overflow_case in overflow_cases {
        let request = PostTableDefinitionRequest {
            profile_name: "default".into(),
            table_name: format!("overflow_test_{}", overflow_case.len()),
            columns: vec![ColumnDefinition {
                name: "amount".into(),
                field_type: overflow_case.into(),
            }],
            ..Default::default()
        };

        let result = post_table_definition(&pool, request).await;
        assert!(result.is_err(), "Should reject overflow values: {}", overflow_case);
        let err = result.unwrap_err();
        assert_eq!(err.code(), Code::InvalidArgument);
        // Should contain either "Invalid precision" or "Invalid scale"
        assert!(
            err.message().contains("Invalid")
                && (err.message().contains("precision") || err.message().contains("scale"))
        );
    }
}
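
// The graceful failure above falls out of Rust's checked integer parsing: anything
// beyond u32::MAX (or non-numeric) fails to parse and can be mapped straight to
// InvalidArgument. Sketch with a hypothetical helper name:
fn parse_precision_sketch(raw: &str) -> Result<u32, tonic::Status> {
    raw.trim()
        .parse::<u32>() // Err for values above u32::MAX or non-numeric input
        .map_err(|_| tonic::Status::invalid_argument(format!("Invalid precision: '{}'", raw)))
}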

296 server/tests/tables_data/delete/delete_table_data_test.rs Normal file
@@ -0,0 +1,296 @@
// tests/tables_data/handlers/delete_table_data_test.rs

use rstest::{fixture, rstest};
use sqlx::{PgPool, Row};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::{mpsc, Mutex};
use serde_json::json;
use chrono::Utc;
use futures::future::join_all;
use prost_types::{value::Kind, Value};
use rand::Rng;
use rand::distr::Alphanumeric; // Corrected import

// Common imports from other modules
use common::proto::multieko2::table_definition::{
    PostTableDefinitionRequest, ColumnDefinition as TableColumnDefinition, TableLink,
};
use common::proto::multieko2::tables_data::{
    DeleteTableDataRequest, DeleteTableDataResponse, PostTableDataRequest, PutTableDataRequest,
};
use server::indexer::IndexCommand;
use server::table_definition::handlers::post_table_definition;
use server::tables_data::handlers::{delete_table_data, post_table_data, put_table_data};
use crate::common::setup_test_db;

lazy_static::lazy_static! {
    static ref TEST_MUTEX: Arc<Mutex<()>> = Arc::new(Mutex::new(()));
}

// ========= Test Helpers =========

fn generate_unique_id() -> String {
    rand::rng() // Corrected function call
        .sample_iter(&Alphanumeric)
        .take(8)
        .map(char::from)
        .collect::<String>()
        .to_lowercase()
}

fn string_to_proto_value(s: &str) -> Value {
    Value {
        kind: Some(Kind::StringValue(s.to_string())),
    }
}

// ========= Fixtures =========

#[fixture]
async fn pool() -> PgPool {
    setup_test_db().await
}

#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
    let pool = pool.await;
    pool.close().await;
    pool
}

#[fixture]
async fn existing_profile(#[future] pool: PgPool) -> (PgPool, String, i64) {
    let pool = pool.await;
    let profile_name = format!("testprofile_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());

    // FIX: The table is `schemas`, not `profiles`.
    let profile = sqlx::query!(
        "INSERT INTO schemas (name) VALUES ($1) RETURNING id",
        profile_name
    )
    .fetch_one(&pool)
    .await
    .unwrap();
    (pool, profile_name, profile.id)
}

#[fixture]
async fn existing_table(
    #[future] existing_profile: (PgPool, String, i64),
) -> (PgPool, String, i64, String) {
    let (pool, profile_name, schema_id) = existing_profile.await;
    let table_name = format!("test_table_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());

    // Use post_table_definition instead of manual table creation
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        columns: vec![
            TableColumnDefinition {
                name: "test_data".into(),
                field_type: "text".into(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(&pool, table_def_request).await.unwrap();
    (pool, profile_name, schema_id, table_name)
}

#[fixture]
async fn existing_record(
    #[future] existing_table: (PgPool, String, i64, String),
) -> (PgPool, String, String, i64) {
    let (pool, profile_name, _schema_id, table_name) = existing_table.await;

    let mut data = HashMap::new();
    data.insert("test_data".to_string(), string_to_proto_value("Test Record"));

    let post_req = PostTableDataRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        data,
    };

    let (indexer_tx, _indexer_rx) = mpsc::channel(1);
    let response = post_table_data(&pool, post_req, &indexer_tx).await.unwrap();

    (pool, profile_name, table_name, response.inserted_id)
}

#[fixture]
async fn existing_deleted_record(
    #[future] existing_table: (PgPool, String, i64, String),
) -> (PgPool, String, String, i64) {
    let (pool, profile_name, _schema_id, table_name) = existing_table.await;

    // First create a record
    let mut data = HashMap::new();
    data.insert("test_data".to_string(), string_to_proto_value("Test Deleted Record"));

    let post_req = PostTableDataRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        data,
    };

    let (indexer_tx, _indexer_rx) = mpsc::channel(1);
    let response = post_table_data(&pool, post_req, &indexer_tx).await.unwrap();
    let record_id = response.inserted_id;

    // Then delete it
    let delete_req = DeleteTableDataRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        record_id,
    };
    delete_table_data(&pool, delete_req).await.unwrap();

    (pool, profile_name, table_name, record_id)
}

// New fixture for advanced tests
#[derive(Clone)]
struct AdvancedDeleteContext {
    pool: PgPool,
    profile_name: String,
    category_table: String,
    product_table: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
    indexer_rx: Arc<tokio::sync::Mutex<mpsc::Receiver<IndexCommand>>>,
}

#[fixture]
async fn advanced_delete_context() -> AdvancedDeleteContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("adv_del_profile_{}", unique_id);
    let category_table = format!("categories_adv_del_{}", unique_id);
    let product_table = format!("products_adv_del_{}", unique_id);

    let category_def = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: category_table.clone(),
        columns: vec![TableColumnDefinition { name: "name".into(), field_type: "text".into() }],
        links: vec![], indexes: vec![],
    };
    post_table_definition(&pool, category_def).await.unwrap();

    let product_def = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: product_table.clone(),
        columns: vec![TableColumnDefinition { name: "name".into(), field_type: "text".into() }],
        links: vec![TableLink { linked_table_name: category_table.clone(), required: true }],
        indexes: vec![],
    };
    post_table_definition(&pool, product_def).await.unwrap();

    let (tx, rx) = mpsc::channel(100);
    AdvancedDeleteContext {
        pool, profile_name, category_table, product_table,
        indexer_tx: tx,
        indexer_rx: Arc::new(tokio::sync::Mutex::new(rx)),
    }
}

// ========= Basic Tests (from your original file) =========

#[rstest]
#[tokio::test]
async fn test_delete_table_data_success(
    #[future] existing_record: (PgPool, String, String, i64),
) {
    let (pool, profile_name, table_name, record_id) = existing_record.await;
    let request = DeleteTableDataRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        record_id,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(response.success);

    let query = format!("SELECT deleted FROM \"{}\".\"{}\" WHERE id = $1", profile_name, table_name);
    let row = sqlx::query(&query).bind(record_id).fetch_one(&pool).await.unwrap();
    assert!(row.get::<bool, _>("deleted"));
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_profile_not_found(#[future] pool: PgPool) {
    let pool = pool.await;
    let request = DeleteTableDataRequest {
        profile_name: "NonExistentProfile".to_string(),
        table_name: "test_table".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_table_not_found(
    #[future] existing_profile: (PgPool, String, i64),
) {
    let (pool, profile_name, _) = existing_profile.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: "non_existent_table".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_record_not_found(
    #[future] existing_table: (PgPool, String, i64, String),
) {
    let (pool, profile_name, _, table_name) = existing_table.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: table_name.clone(),
        record_id: 9999,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(!response.success);
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_already_deleted(
    #[future] existing_deleted_record: (PgPool, String, String, i64),
) {
    let (pool, profile_name, table_name, record_id) = existing_deleted_record.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: table_name.clone(),
        record_id,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(!response.success);
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_database_error(#[future] closed_pool: PgPool) {
    let closed_pool = closed_pool.await;
    let request = DeleteTableDataRequest {
        profile_name: "test".to_string(),
        table_name: "test".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&closed_pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

// Include the new, more advanced tests
include!("delete_table_data_test2.rs");
include!("delete_table_data_test3.rs");

241 server/tests/tables_data/delete/delete_table_data_test2.rs Normal file
@@ -0,0 +1,241 @@
// tests/tables_data/handlers/delete_table_data_test2.rs

// ========================================================================
// Foreign Key Integrity Tests
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_soft_delete_does_not_break_foreign_key_references(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange: Create a category and a product that links to it.
    let context = advanced_delete_context.await;
    let mut category_data = HashMap::new();
    category_data.insert("name".to_string(), string_to_proto_value("Electronics"));
    let category_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
    let category_id = category_res.inserted_id;

    let mut product_data = HashMap::new();
    product_data.insert("name".to_string(), string_to_proto_value("Laptop"));
    product_data.insert(
        format!("{}_id", context.category_table),
        Value { kind: Some(Kind::NumberValue(category_id as f64)) },
    );
    let product_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };
    let product_res = post_table_data(&context.pool, product_req, &context.indexer_tx).await.unwrap();
    let product_id = product_res.inserted_id;

    // Act: Soft-delete the category record.
    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: category_id,
    };
    let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
    assert!(delete_res.success);

    // Assert: The product record still exists and its foreign key still points
    // to the (now soft-deleted) category ID.
    let query = format!(
        r#"SELECT "{}_id" FROM "{}"."{}" WHERE id = $1"#,
        context.category_table, context.profile_name, context.product_table
    );
    let fk_id: i64 = sqlx::query_scalar(&query)
        .bind(product_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    assert_eq!(fk_id, category_id, "Foreign key reference should remain intact after soft delete.");
}

// ========================================================================
// Indexer Integration Tests
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_delete_does_not_send_indexer_command(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange
    let context = advanced_delete_context.await;
    let mut category_data = HashMap::new();
    category_data.insert("name".to_string(), string_to_proto_value("Test Category"));
    let category_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
    let category_id = category_res.inserted_id;

    // Drain the create command from the channel
    let _ = context.indexer_rx.lock().await.recv().await;

    // Act
    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: category_id,
    };

    let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
    assert!(delete_res.success);

    // Assert: Check that NO command was sent. This verifies current behavior.
    let recv_result = tokio::time::timeout(
        std::time::Duration::from_millis(50),
        context.indexer_rx.lock().await.recv()
    ).await;

    assert!(recv_result.is_err(), "Expected no indexer command to be sent on delete, but one was received.");
}

// ========================================================================
// Concurrency and State Mismatch Tests
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_concurrent_deletes_on_same_record(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange
    let context = advanced_delete_context.await;
    let mut category_data = HashMap::new();
    category_data.insert("name".to_string(), string_to_proto_value("Concurrent Delete Test"));
    let category_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
    let category_id = category_res.inserted_id;

    // Act: Spawn multiple tasks to delete the same record.
    let mut tasks = vec![];
    for _ in 0..5 {
        let pool = context.pool.clone();
        let req = DeleteTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.category_table.clone(),
            record_id: category_id,
        };
        tasks.push(tokio::spawn(async move {
            delete_table_data(&pool, req).await
        }));
    }
    let results = join_all(tasks).await;

    // Assert: Exactly one delete should succeed, the rest should fail (softly).
    let success_count = results
        .iter()
        .filter(|res| matches!(res, Ok(Ok(r)) if r.success))
        .count();

    assert_eq!(success_count, 1, "Exactly one concurrent delete operation should succeed.");
}
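
// Why exactly one winner is expected: the soft delete is presumably a conditional
// update along these lines (sketch, not the actual handler SQL), so the first task
// flips the flag and sees 1 affected row while every later attempt matches 0 rows:
//
//   UPDATE "<schema>"."<table>" SET deleted = TRUE
//   WHERE id = $1 AND deleted = FALSE;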

#[rstest]
#[tokio::test]
async fn test_delete_fails_if_physical_table_is_missing(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange: Create definitions, then manually drop the physical table to create a state mismatch.
    let context = advanced_delete_context.await;
    let qualified_table = format!("\"{}\".\"{}\"", context.profile_name, context.category_table);
    sqlx::query(&format!("DROP TABLE {} CASCADE", qualified_table))
        .execute(&context.pool)
        .await
        .unwrap();

    // Act: Attempt to delete a record from the logically-defined but physically-absent table.
    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: 1, // ID doesn't matter
    };
    let result = delete_table_data(&context.pool, delete_req).await;

    // Assert: The operation should fail with the specific internal error for a missing relation.
    assert!(result.is_err());
    let err = result.unwrap_err();
    assert_eq!(err.code(), tonic::Code::Internal);
    assert!(
        err.message().contains("is defined but does not physically exist"),
        "Error message should indicate a state mismatch."
    );
}

// ========================================================================
// Interaction with Other Endpoints
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_succeeds_on_soft_deleted_record(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange: Create and then soft-delete a record.
    let context = advanced_delete_context.await;
    let mut category_data = HashMap::new();
    category_data.insert("name".to_string(), string_to_proto_value("Original Name"));
    let category_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
    let category_id = category_res.inserted_id;

    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: category_id,
    };
    delete_table_data(&context.pool, delete_req).await.unwrap();

    // Act: Attempt to update the soft-deleted record using the PUT handler.
    let mut update_data = HashMap::new();
    update_data.insert("name".to_string(), string_to_proto_value("Updated After Delete"));
    let put_req = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        id: category_id,
        data: update_data,
    };
    let put_result = put_table_data(&context.pool, put_req, &context.indexer_tx).await;

    // Assert: This test verifies the requirement to "freeze operations" on deleted records.
    // Currently, the PUT handler does NOT check the `deleted` flag, so the update succeeds;
    // this test documents that behavior. To make such updates fail, `put_table_data` would
    // need an explicit check that the record has not already been soft-deleted.
    assert!(put_result.is_ok(), "PUT should succeed on a soft-deleted record (current behavior).");
    let put_res = put_result.unwrap();
    assert!(put_res.success);

    // Verify the name was updated, but the record remains marked as deleted.
    let row = sqlx::query(&format!(
        r#"SELECT name, deleted FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.category_table
    ))
    .bind(category_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();

    let name: String = row.get("name");
    let deleted: bool = row.get("deleted");

    assert_eq!(name, "Updated After Delete");
    assert!(deleted, "Record should remain soft-deleted after an update.");
}
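
// Illustrative sketch (hypothetical helper, not part of the current handler): to
// actually "freeze" writes to soft-deleted records, `put_table_data` could run a
// guard like the one below before applying its UPDATE; with it in place, the test
// above would assert a FailedPrecondition error instead of success.
//
//     async fn ensure_not_soft_deleted(
//         pool: &sqlx::PgPool,
//         schema: &str,
//         table: &str,
//         record_id: i64,
//     ) -> Result<(), tonic::Status> {
//         let sql = format!(r#"SELECT deleted FROM "{}"."{}" WHERE id = $1"#, schema, table);
//         let deleted: bool = sqlx::query_scalar(&sql)
//             .bind(record_id)
//             .fetch_one(pool)
//             .await
//             .map_err(|_| tonic::Status::not_found("Record not found"))?;
//         if deleted {
//             // Reject every further write to a soft-deleted record.
//             return Err(tonic::Status::failed_precondition("Record is deleted"));
//         }
//         Ok(())
//     }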
567
server/tests/tables_data/delete/delete_table_data_test3.rs
Normal file
@@ -0,0 +1,567 @@
// tests/tables_data/delete/delete_table_data_test3.rs

// ========================================================================
// Input Validation and Edge Cases
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_delete_with_negative_record_id(
    #[future] existing_table: (PgPool, String, i64, String),
) {
    let (pool, profile_name, _, table_name) = existing_table.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name,
        record_id: -1,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(!response.success, "Delete with negative ID should fail gracefully");
}

#[rstest]
#[tokio::test]
async fn test_delete_with_zero_record_id(
    #[future] existing_table: (PgPool, String, i64, String),
) {
    let (pool, profile_name, _, table_name) = existing_table.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name,
        record_id: 0,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(!response.success, "Delete with zero ID should fail gracefully");
}

#[rstest]
#[tokio::test]
async fn test_delete_with_max_int64_record_id(
    #[future] existing_table: (PgPool, String, i64, String),
) {
    let (pool, profile_name, _, table_name) = existing_table.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name,
        record_id: i64::MAX,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(!response.success, "Delete with max int64 ID should fail gracefully");
}

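// Illustrative note (assumed handler shape, inferred from the three tests above):
// out-of-range IDs need no special casing; the conditional UPDATE simply matches
// zero rows, and the handler reports that as a soft failure rather than an error:
//
//     let rows = sqlx::query(&update_sql)      // `update_sql` is hypothetical here
//         .bind(record_id)
//         .execute(pool)
//         .await
//         .map_err(|e| tonic::Status::internal(e.to_string()))?
//         .rows_affected();
//     Ok(DeleteTableDataResponse { success: rows == 1 })
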
// ========================================================================
// Malformed Input Handling
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_delete_with_empty_profile_name(#[future] pool: PgPool) {
    let pool = pool.await;
    let request = DeleteTableDataRequest {
        profile_name: "".to_string(),
        table_name: "test_table".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err(), "Empty profile name should be rejected");
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_delete_with_whitespace_only_profile_name(#[future] pool: PgPool) {
    let pool = pool.await;
    let request = DeleteTableDataRequest {
        profile_name: " ".to_string(),
        table_name: "test_table".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err(), "Whitespace-only profile name should be rejected");
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_delete_with_empty_table_name(
    #[future] existing_profile: (PgPool, String, i64),
) {
    let (pool, profile_name, _) = existing_profile.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: "".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err(), "Empty table name should be rejected");
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_delete_with_sql_injection_attempt(
    #[future] existing_profile: (PgPool, String, i64),
) {
    let (pool, profile_name, _) = existing_profile.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: "test'; DROP TABLE users; --".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err(), "SQL injection attempt should be rejected");
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

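// Illustrative note (design assumption, consistent with the NotFound assertion
// above): user-supplied table names are never spliced into SQL directly; they are
// first resolved against the stored definitions, so a hostile name simply fails the
// lookup before any dynamic SQL is built:
//
//     let table_id: Option<i64> = sqlx::query_scalar(
//         "SELECT id FROM table_definitions WHERE schema_id = $1 AND table_name = $2",
//     )
//     .bind(schema_id)
//     .bind(&request.table_name)
//     .fetch_optional(pool)
//     .await
//     .map_err(|e| tonic::Status::internal(e.to_string()))?;
//     table_id.ok_or_else(|| tonic::Status::not_found("Table not found in profile"))?;
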
// ========================================================================
// Data Integrity Verification Tests
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_delete_only_affects_target_record(
    #[future] existing_table: (PgPool, String, i64, String),
) {
    // Arrange: Create multiple records
    let (pool, profile_name, _, table_name) = existing_table.await;

    let mut record_ids = Vec::new();
    for _ in 0..5 {
        let query = format!(
            "INSERT INTO \"{}\".\"{}\" (deleted) VALUES (false) RETURNING id",
            profile_name, table_name
        );
        let row = sqlx::query(&query).fetch_one(&pool).await.unwrap();
        let id: i64 = row.get("id");
        record_ids.push(id);
    }

    let target_id = record_ids[2]; // Delete the middle record

    // Act: Delete one specific record
    let request = DeleteTableDataRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        record_id: target_id,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(response.success);

    // Assert: Verify only the target record is deleted
    for &id in &record_ids {
        let query = format!(
            "SELECT deleted FROM \"{}\".\"{}\" WHERE id = $1",
            profile_name, table_name
        );
        let row = sqlx::query(&query).bind(id).fetch_one(&pool).await.unwrap();
        let is_deleted: bool = row.get("deleted");

        if id == target_id {
            assert!(is_deleted, "Target record should be marked as deleted");
        } else {
            assert!(!is_deleted, "Non-target records should remain undeleted");
        }
    }
}

#[rstest]
#[tokio::test]
async fn test_delete_preserves_all_other_fields(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange: Create a record with rich data
    let context = advanced_delete_context.await;
    let mut category_data = HashMap::new();
    category_data.insert("name".to_string(), string_to_proto_value("Preserve Test Category"));

    let category_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
    let category_id = category_res.inserted_id;

    // Capture state before deletion
    let before_query = format!(
        "SELECT id, name, deleted, created_at FROM \"{}\".\"{}\" WHERE id = $1",
        context.profile_name, context.category_table
    );
    let before_row = sqlx::query(&before_query)
        .bind(category_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let before_id: i64 = before_row.get("id");
    let before_name: String = before_row.get("name");
    let before_deleted: bool = before_row.get("deleted");
    let before_created_at: chrono::DateTime<chrono::Utc> = before_row.get("created_at");

    // Act: Delete the record
    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: category_id,
    };
    let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
    assert!(delete_res.success);

    // Assert: Verify only 'deleted' field changed
    let after_query = format!(
        "SELECT id, name, deleted, created_at FROM \"{}\".\"{}\" WHERE id = $1",
        context.profile_name, context.category_table
    );
    let after_row = sqlx::query(&after_query)
        .bind(category_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let after_id: i64 = after_row.get("id");
    let after_name: String = after_row.get("name");
    let after_deleted: bool = after_row.get("deleted");
    let after_created_at: chrono::DateTime<chrono::Utc> = after_row.get("created_at");

    assert_eq!(before_id, after_id, "ID should not change");
    assert_eq!(before_name, after_name, "Name should not change");
    assert_eq!(before_created_at, after_created_at, "Created timestamp should not change");
    assert!(!before_deleted, "Record should initially be not deleted");
    assert!(after_deleted, "Record should be marked as deleted after operation");
}

#[rstest]
#[tokio::test]
async fn test_delete_count_verification(
    #[future] existing_table: (PgPool, String, i64, String),
) {
    // Arrange: Create records and count them
    let (pool, profile_name, _, table_name) = existing_table.await;

    // Create 3 records
    let mut record_ids = Vec::new();
    for _ in 0..3 {
        let query = format!(
            "INSERT INTO \"{}\".\"{}\" (deleted) VALUES (false) RETURNING id",
            profile_name, table_name
        );
        let row = sqlx::query(&query).fetch_one(&pool).await.unwrap();
        let id: i64 = row.get("id");
        record_ids.push(id);
    }

    // Verify initial count
    let count_query = format!(
        "SELECT COUNT(*) as total, COUNT(*) FILTER (WHERE deleted = false) as active FROM \"{}\".\"{}\"",
        profile_name, table_name
    );
    let count_row = sqlx::query(&count_query).fetch_one(&pool).await.unwrap();
    let initial_total: i64 = count_row.get("total");
    let initial_active: i64 = count_row.get("active");

    // Act: Delete one record
    let request = DeleteTableDataRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        record_id: record_ids[0],
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(response.success);

    // Assert: Verify counts after deletion
    let final_count_row = sqlx::query(&count_query).fetch_one(&pool).await.unwrap();
    let final_total: i64 = final_count_row.get("total");
    let final_active: i64 = final_count_row.get("active");

    assert_eq!(initial_total, final_total, "Total record count should not change (soft delete)");
    assert_eq!(initial_active - 1, final_active, "Active record count should decrease by 1");
}

// ========================================================================
// Multiple Operations Sequence Testing
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_delete_then_post_same_data(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange: Create and delete a record
    let context = advanced_delete_context.await;
    let mut category_data = HashMap::new();
    category_data.insert("name".to_string(), string_to_proto_value("Reusable Name"));

    let category_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data.clone(),
    };
    let first_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
    let first_id = first_res.inserted_id;

    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: first_id,
    };
    delete_table_data(&context.pool, delete_req).await.unwrap();

    // Act: Try to POST the same data again
    let second_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let second_res = post_table_data(&context.pool, second_req, &context.indexer_tx).await.unwrap();

    // Assert: Should succeed with a new ID
    assert!(second_res.success);
    assert_ne!(first_id, second_res.inserted_id, "New record should have different ID");

    // Verify both records exist in database
    let count_query = format!(
        "SELECT COUNT(*) as total FROM \"{}\".\"{}\" WHERE name = 'Reusable Name'",
        context.profile_name, context.category_table
    );
    let count: i64 = sqlx::query_scalar(&count_query).fetch_one(&context.pool).await.unwrap();
    assert_eq!(count, 2, "Should have 2 records with same name (one deleted, one active)");
}

#[rstest]
#[tokio::test]
async fn test_multiple_deletes_then_recreate_pattern(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Test a realistic pattern: create, delete, recreate multiple times
    let context = advanced_delete_context.await;
    let mut all_ids = Vec::new();

    for i in 0..3 {
        // Create
        let mut category_data = HashMap::new();
        category_data.insert("name".to_string(), string_to_proto_value(&format!("Cycle Name {}", i)));

        let create_req = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.category_table.clone(),
            data: category_data,
        };
        let create_res = post_table_data(&context.pool, create_req, &context.indexer_tx).await.unwrap();
        all_ids.push(create_res.inserted_id);

        // Delete immediately
        let delete_req = DeleteTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.category_table.clone(),
            record_id: create_res.inserted_id,
        };
        let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
        assert!(delete_res.success);
    }

    // Verify all records are marked as deleted
    for &id in &all_ids {
        let query = format!(
            "SELECT deleted FROM \"{}\".\"{}\" WHERE id = $1",
            context.profile_name, context.category_table
        );
        let is_deleted: bool = sqlx::query_scalar(&query)
            .bind(id)
            .fetch_one(&context.pool)
            .await
            .unwrap();
        assert!(is_deleted, "Record {} should be deleted", id);
    }
}

// ========================================================================
// Performance and Stress Tests
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_delete_performance_with_many_records(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange: Create many records
    let context = advanced_delete_context.await;
    let record_count = 100; // Adjust based on test environment
    let mut record_ids = Vec::new();

    for i in 0..record_count {
        let mut category_data = HashMap::new();
        category_data.insert("name".to_string(), string_to_proto_value(&format!("Perf Test {}", i)));

        let create_req = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.category_table.clone(),
            data: category_data,
        };
        let create_res = post_table_data(&context.pool, create_req, &context.indexer_tx).await.unwrap();
        record_ids.push(create_res.inserted_id);
    }

    // Act: Delete a record from the middle (worst case for performance)
    let target_id = record_ids[record_count / 2];
    let start_time = std::time::Instant::now();

    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: target_id,
    };
    let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();

    let elapsed = start_time.elapsed();

    // Assert: Operation should succeed and be reasonably fast
    assert!(delete_res.success);
    assert!(elapsed.as_millis() < 1000, "Delete should complete within 1 second even with {} records", record_count);
}

#[rstest]
#[tokio::test]
async fn test_rapid_sequential_deletes(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange: Create multiple records
    let context = advanced_delete_context.await;
    let mut record_ids = Vec::new();

    for i in 0..10 {
        let mut category_data = HashMap::new();
        category_data.insert("name".to_string(), string_to_proto_value(&format!("Rapid Delete {}", i)));

        let create_req = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.category_table.clone(),
            data: category_data,
        };
        let create_res = post_table_data(&context.pool, create_req, &context.indexer_tx).await.unwrap();
        record_ids.push(create_res.inserted_id);
    }

    // Act: Delete all records rapidly in sequence
    let start_time = std::time::Instant::now();
    for &record_id in &record_ids {
        let delete_req = DeleteTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.category_table.clone(),
            record_id,
        };
        let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
        assert!(delete_res.success, "Delete of record {} should succeed", record_id);
    }
    let elapsed = start_time.elapsed();

    // Assert: All deletes should complete in reasonable time
    assert!(elapsed.as_millis() < 5000, "10 sequential deletes should complete within 5 seconds");

    // Verify all records are deleted
    let count_query = format!(
        "SELECT COUNT(*) FILTER (WHERE deleted = true) as deleted_count FROM \"{}\".\"{}\"",
        context.profile_name, context.category_table
    );
    let deleted_count: i64 = sqlx::query_scalar(&count_query).fetch_one(&context.pool).await.unwrap();
    assert_eq!(deleted_count as usize, record_ids.len(), "All records should be marked as deleted");
}

// ========================================================================
// Error Message Quality and Handling Tests
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_error_messages_are_descriptive(#[future] pool: PgPool) {
    let pool = pool.await;

    // Test profile not found error
    let request = DeleteTableDataRequest {
        profile_name: "NonExistentProfile123".to_string(),
        table_name: "test_table".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err());
    let error = result.unwrap_err();
    assert_eq!(error.code(), tonic::Code::NotFound);
    assert_eq!(error.message(), "Profile not found");
}

#[rstest]
#[tokio::test]
async fn test_table_not_found_error_message(
    #[future] existing_profile: (PgPool, String, i64),
) {
    let (pool, profile_name, _) = existing_profile.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: "definitely_does_not_exist_12345".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err());
    let error = result.unwrap_err();
    assert_eq!(error.code(), tonic::Code::NotFound);
    assert_eq!(error.message(), "Table not found in profile");
}

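// Illustrative note (a sketch, assuming nothing beyond the assertions above): the
// two tests pin exact error strings, so the handler benefits from building them in
// one place to keep clients and tests stable:
//
//     fn profile_not_found() -> tonic::Status {
//         tonic::Status::not_found("Profile not found")
//     }
//     fn table_not_found() -> tonic::Status {
//         tonic::Status::not_found("Table not found in profile")
//     }
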
// ========================================================================
// Database State Consistency Tests
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_delete_maintains_foreign_key_constraints(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // This test ensures that soft deletes don't interfere with FK constraint validation
    let context = advanced_delete_context.await;

    // Create category
    let mut category_data = HashMap::new();
    category_data.insert("name".to_string(), string_to_proto_value("FK Test Category"));
    let category_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
    let category_id = category_res.inserted_id;

    // Create product referencing the category
    let mut product_data = HashMap::new();
    product_data.insert("name".to_string(), string_to_proto_value("FK Test Product"));
    product_data.insert(
        format!("{}_id", context.category_table),
        Value { kind: Some(Kind::NumberValue(category_id as f64)) },
    );
    let product_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };
    let product_res = post_table_data(&context.pool, product_req, &context.indexer_tx).await.unwrap();

    // Soft delete the category
    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: category_id,
    };
    let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
    assert!(delete_res.success);

    // The product should still exist and reference the soft-deleted category
    let fk_query = format!(
        "SELECT \"{}_id\" FROM \"{}\".\"{}\" WHERE id = $1",
        context.category_table, context.profile_name, context.product_table
    );
    let fk_value: i64 = sqlx::query_scalar(&fk_query)
        .bind(product_res.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    assert_eq!(fk_value, category_id, "Foreign key should still point to soft-deleted category");
}
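
// Illustrative note: the FK stays valid because a soft delete is an UPDATE, e.g.
//     UPDATE "<profile>"."<category_table>" SET deleted = true WHERE id = $1;
// and never the hard delete that would trip the constraint:
//     DELETE FROM "<profile>"."<category_table>" WHERE id = $1;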
3
server/tests/tables_data/delete/mod.rs
Normal file
@@ -0,0 +1,3 @@
// tests/tables_data/delete/mod.rs

pub mod delete_table_data_test;
@@ -3,10 +3,12 @@ use rstest::{fixture, rstest};
 use tonic;
 use sqlx::PgPool;
 use common::proto::multieko2::tables_data::GetTableDataCountRequest;
+use common::proto::multieko2::table_definition::{PostTableDefinitionRequest, ColumnDefinition};
+use common::proto::multieko2::table_definition::TableLink;
 use server::tables_data::handlers::get_table_data_count;
+use server::table_definition::handlers::post_table_definition;
 use crate::common::setup_test_db;
 use chrono::Utc;
-use serde_json::json;
 
 #[fixture]
 async fn pool() -> PgPool {
@@ -21,66 +23,62 @@ async fn closed_pool(#[future] pool: PgPool) -> PgPool {
 }
 
 async fn setup_test_environment(pool: &PgPool) -> (String, String, i64) {
-    let mut tx = pool.begin().await.unwrap();
-
     // Create unique profile and table names
-    let profile_name = format!("test_profile_{}", Utc::now().timestamp_nanos());
-    let table_name = format!("test_table_{}", Utc::now().timestamp_nanos());
+    let profile_name = format!("test_profile_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
+    let table_name = format!("test_table_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
 
-    // Create profile
-    let profile_id = sqlx::query_scalar!(
-        "INSERT INTO profiles (name) VALUES ($1) RETURNING id",
+    // Use the table definition handler to create the table properly
+    let request = PostTableDefinitionRequest {
+        profile_name: profile_name.clone(),
+        table_name: table_name.clone(),
+        columns: vec![
+            ColumnDefinition {
+                name: "firma".to_string(),
+                field_type: "text".to_string(),
+            }
+        ],
+        indexes: vec![],
+        links: vec![],
+    };
+
+    post_table_definition(pool, request).await.unwrap();
+
+    // Get the schema_id for cleanup
+    let schema_id = sqlx::query_scalar!(
+        "SELECT id FROM schemas WHERE name = $1",
        profile_name
     )
-    .fetch_one(&mut *tx)
+    .fetch_one(pool)
     .await
     .unwrap();
 
-    // Create table definition
+    (profile_name, table_name, schema_id)
+}
+
+async fn cleanup_test_environment(pool: &PgPool, schema_id: i64, profile_name: &str) {
+    let mut tx = pool.begin().await.unwrap();
+
+    // Cleanup order matters!
+    sqlx::query(&format!(r#"DROP SCHEMA IF EXISTS "{}" CASCADE"#, profile_name))
+        .execute(&mut *tx)
+        .await
+        .unwrap();
+
+    // Delete foreign key relationships first
     sqlx::query!(
-        r#"INSERT INTO table_definitions (profile_id, table_name, columns, indexes)
-        VALUES ($1, $2, $3, $4)"#,
-        profile_id,
-        table_name,
-        json!({}),
-        json!([])
+        "DELETE FROM table_definition_links WHERE source_table_id IN (SELECT id FROM table_definitions WHERE schema_id = $1) OR linked_table_id IN (SELECT id FROM table_definitions WHERE schema_id = $1)",
+        schema_id
     )
     .execute(&mut *tx)
     .await
     .unwrap();
 
-    // Create actual table
-    sqlx::query(&format!(
-        r#"CREATE TABLE "{}" (
-            id BIGSERIAL PRIMARY KEY,
-            deleted BOOLEAN NOT NULL DEFAULT false,
-            firma TEXT NOT NULL
-        )"#,
-        table_name
-    ))
-    .execute(&mut *tx)
-    .await
-    .unwrap();
-
-    tx.commit().await.unwrap();
-    (profile_name, table_name, profile_id)
-}
-
-async fn cleanup_test_environment(pool: &PgPool, profile_id: i64, table_name: &str) {
-    let mut tx = pool.begin().await.unwrap();
-
-    // Cleanup order matters!
-    sqlx::query(&format!(r#"DROP TABLE IF EXISTS "{}" CASCADE"#, table_name))
+    sqlx::query!("DELETE FROM table_definitions WHERE schema_id = $1", schema_id)
         .execute(&mut *tx)
         .await
         .unwrap();
 
-    sqlx::query!("DELETE FROM table_definitions WHERE profile_id = $1", profile_id)
-        .execute(&mut *tx)
-        .await
-        .unwrap();
-
-    sqlx::query!("DELETE FROM profiles WHERE id = $1", profile_id)
+    sqlx::query!("DELETE FROM schemas WHERE id = $1", schema_id)
        .execute(&mut *tx)
        .await
        .unwrap();
@@ -92,21 +90,21 @@ async fn cleanup_test_environment(pool: &PgPool, profile_id: i64, table_name: &s
 #[tokio::test]
 async fn test_returns_correct_count(#[future] pool: PgPool) {
     let pool = pool.await;
-    let (profile_name, table_name, profile_id) = setup_test_environment(&pool).await;
+    let (profile_name, table_name, schema_id) = setup_test_environment(&pool).await;
 
     // Insert test data
     let mut tx = pool.begin().await.unwrap();
     sqlx::query(&format!(
-        r#"INSERT INTO "{}" (firma) VALUES ('Test 1')"#,
-        table_name
+        r#"INSERT INTO "{}"."{}" (firma) VALUES ('Test 1')"#,
+        profile_name, table_name
     ))
     .execute(&mut *tx)
     .await
     .unwrap();
 
 
     sqlx::query(&format!(
-        r#"INSERT INTO "{}" (firma) VALUES ('Test 2')"#,
-        table_name
+        r#"INSERT INTO "{}"."{}" (firma) VALUES ('Test 2')"#,
+        profile_name, table_name
     ))
     .execute(&mut *tx)
     .await
@@ -121,28 +119,28 @@ async fn test_returns_correct_count(#[future] pool: PgPool) {
     let response = get_table_data_count(&pool, request).await.unwrap();
     assert_eq!(response.count, 2);
 
-    cleanup_test_environment(&pool, profile_id, &table_name).await;
+    cleanup_test_environment(&pool, schema_id, &profile_name).await;
 }
 
 #[rstest]
 #[tokio::test]
 async fn test_excludes_deleted_records(#[future] pool: PgPool) {
     let pool = pool.await;
-    let (profile_name, table_name, profile_id) = setup_test_environment(&pool).await;
+    let (profile_name, table_name, schema_id) = setup_test_environment(&pool).await;
 
     // Insert test data
     let mut tx = pool.begin().await.unwrap();
     sqlx::query(&format!(
-        r#"INSERT INTO "{}" (firma, deleted) VALUES ('Active', false)"#,
-        table_name
+        r#"INSERT INTO "{}"."{}" (firma, deleted) VALUES ('Active', false)"#,
+        profile_name, table_name
    ))
    .execute(&mut *tx)
    .await
    .unwrap();
 
 
     sqlx::query(&format!(
-        r#"INSERT INTO "{}" (firma, deleted) VALUES ('Deleted', true)"#,
-        table_name
+        r#"INSERT INTO "{}"."{}" (firma, deleted) VALUES ('Deleted', true)"#,
+        profile_name, table_name
     ))
     .execute(&mut *tx)
     .await
@@ -157,25 +155,25 @@ async fn test_excludes_deleted_records(#[future] pool: PgPool) {
     let response = get_table_data_count(&pool, request).await.unwrap();
     assert_eq!(response.count, 1);
 
-    cleanup_test_environment(&pool, profile_id, &table_name).await;
+    cleanup_test_environment(&pool, schema_id, &profile_name).await;
 }
 
 #[rstest]
 #[tokio::test]
 async fn test_table_not_in_profile(#[future] pool: PgPool) {
     let pool = pool.await;
-    let (profile_name, _, profile_id) = setup_test_environment(&pool).await;
+    let (profile_name, _, schema_id) = setup_test_environment(&pool).await;
 
     // Test with non-existent table
     let request = GetTableDataCountRequest {
-        profile_name,
+        profile_name: profile_name.clone(),
         table_name: "non_existent_table".to_string(),
     };
     let result = get_table_data_count(&pool, request).await;
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
 
-    cleanup_test_environment(&pool, profile_id, "dummy_table").await;
+    cleanup_test_environment(&pool, schema_id, &profile_name).await;
 }
 
 #[rstest]
@@ -211,48 +209,44 @@ async fn test_database_error(#[future] closed_pool: PgPool) {
 #[tokio::test]
 async fn test_empty_table_count(#[future] pool: PgPool) {
     let pool = pool.await;
-    let mut tx = pool.begin().await.unwrap();
 
+    let profile_name = format!("empty_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
+
+    // Use table definition handler to create the table
+    let request = PostTableDefinitionRequest {
+        profile_name: profile_name.clone(),
+        table_name: "adresar".to_string(),
+        columns: vec![
+            ColumnDefinition {
+                name: "name".to_string(),
+                field_type: "text".to_string(),
+            }
+        ],
+        indexes: vec![],
+        links: vec![],
+    };
 
-    // Clean up existing profiles and their table definitions
-    sqlx::query!("DELETE FROM table_definitions WHERE profile_id IN (SELECT id FROM profiles WHERE name LIKE 'empty_test%')")
-        .execute(&mut *tx)
-        .await
-        .unwrap();
-    sqlx::query!("DELETE FROM profiles WHERE name LIKE 'empty_test%'")
-        .execute(&mut *tx)
-        .await
-        .unwrap();
-
-    let profile_name = format!("empty_test_{}", Utc::now().timestamp_nanos()); // Unique profile name
-    let profile_id = sqlx::query_scalar!(
-        "INSERT INTO profiles (name) VALUES ($1) RETURNING id",
-        profile_name
-    )
-    .fetch_one(&mut *tx)
-    .await
-    .unwrap();
-
-    sqlx::query!(
-        r#"
-        INSERT INTO table_definitions (profile_id, table_name, columns, indexes)
-        VALUES ($1, $2, $3, $4)
-        "#,
-        profile_id,
-        "adresar",
-        json!({}), // columns
-        json!([]) // indexes
-    )
-    .execute(&mut *tx)
-    .await
-    .unwrap();
-
-    tx.commit().await.unwrap();
+    post_table_definition(&pool, request).await.unwrap();
 
     let request = GetTableDataCountRequest {
-        profile_name,
+        profile_name: profile_name.clone(),
         table_name: "adresar".to_string(),
     };
     let response = get_table_data_count(&pool, request).await.unwrap();
 
     assert!(response.count >= 0);
 
+    // Cleanup
+    let schema_id = sqlx::query_scalar!(
+        "SELECT id FROM schemas WHERE name = $1",
+        profile_name
+    )
+    .fetch_one(&pool)
+    .await
+    .unwrap();
+
+    cleanup_test_environment(&pool, schema_id, &profile_name).await;
 }
 
+include!("get_table_data_count_test2.rs");
+include!("get_table_data_count_test3.rs");
520
server/tests/tables_data/get/get_table_data_count_test2.rs
Normal file
@@ -0,0 +1,520 @@
// tests/tables_data/get/get_table_data_count_test2.rs

#[rstest]
#[tokio::test]
async fn test_schema_with_special_characters(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = "test_underscore_profile";
    let table_name = "test_underscore_table";

    // Use table definition handler to create the table
    let request = PostTableDefinitionRequest {
        profile_name: profile_name.to_string(),
        table_name: table_name.to_string(),
        columns: vec![
            ColumnDefinition {
                name: "name".to_string(),
                field_type: "text".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(&pool, request).await.unwrap();

    // Insert test data
    let mut tx = pool.begin().await.unwrap();
    sqlx::query(&format!(
        r#"INSERT INTO "{}"."{}" (name) VALUES ('Test Data')"#,
        profile_name, table_name
    ))
    .execute(&mut *tx)
    .await
    .unwrap();
    tx.commit().await.unwrap();

    let request = GetTableDataCountRequest {
        profile_name: profile_name.to_string(),
        table_name: table_name.to_string(),
    };

    let response = get_table_data_count(&pool, request).await.unwrap();
    assert_eq!(response.count, 1);

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    cleanup_test_environment(&pool, schema_id, profile_name).await;
}

#[rstest]
#[tokio::test]
async fn test_large_dataset_count(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = format!("large_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
    let table_name = "large_dataset_table";

    // Use table definition handler to create the table
    let request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
        columns: vec![
            ColumnDefinition {
                name: "value".to_string(),
                field_type: "integer".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(&pool, request).await.unwrap();

    // Insert 1000 records
    let mut tx = pool.begin().await.unwrap();
    for i in 1..=1000 {
        sqlx::query(&format!(
            r#"INSERT INTO "{}"."{}" (value) VALUES ({})"#,
            profile_name, table_name, i
        ))
        .execute(&mut *tx)
        .await
        .unwrap();
    }

    // Mark some as deleted
    sqlx::query(&format!(
        r#"UPDATE "{}"."{}" SET deleted = true WHERE value <= 100"#,
        profile_name, table_name
    ))
    .execute(&mut *tx)
    .await
    .unwrap();
    tx.commit().await.unwrap();

    let request = GetTableDataCountRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
    };

    let response = get_table_data_count(&pool, request).await.unwrap();
    assert_eq!(response.count, 900); // 1000 - 100 deleted

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    cleanup_test_environment(&pool, schema_id, &profile_name).await;
}

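// Illustrative sketch (optional fixture speed-up, not required by the test): the
// 1000 single-row INSERTs above can be collapsed into one statement with
// generate_series, which PostgreSQL evaluates server-side:
//
//     sqlx::query(&format!(
//         r#"INSERT INTO "{}"."{}" (value) SELECT g FROM generate_series(1, 1000) AS g"#,
//         profile_name, table_name
//     ))
//     .execute(&mut *tx)
//     .await
//     .unwrap();
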
#[rstest]
#[tokio::test]
async fn test_mixed_deleted_states(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = format!("mixed_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
    let table_name = "mixed_states_table";

    // Use table definition handler to create the table
    let request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
        columns: vec![
            ColumnDefinition {
                name: "status".to_string(),
                field_type: "text".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(&pool, request).await.unwrap();

    // Insert various combinations
    let mut tx = pool.begin().await.unwrap();
    sqlx::query(&format!(
        r#"INSERT INTO "{}"."{}" (status, deleted) VALUES ('active', false)"#,
        profile_name, table_name
    ))
    .execute(&mut *tx)
    .await
    .unwrap();

    sqlx::query(&format!(
        r#"INSERT INTO "{}"."{}" (status, deleted) VALUES ('active', true)"#,
        profile_name, table_name
    ))
    .execute(&mut *tx)
    .await
    .unwrap();

    sqlx::query(&format!(
        r#"INSERT INTO "{}"."{}" (status, deleted) VALUES ('inactive', false)"#,
        profile_name, table_name
    ))
    .execute(&mut *tx)
    .await
    .unwrap();

    sqlx::query(&format!(
        r#"INSERT INTO "{}"."{}" (status, deleted) VALUES ('inactive', true)"#,
        profile_name, table_name
    ))
    .execute(&mut *tx)
    .await
    .unwrap();
    tx.commit().await.unwrap();

    let request = GetTableDataCountRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
    };

    let response = get_table_data_count(&pool, request).await.unwrap();
    assert_eq!(response.count, 2); // Only non-deleted records

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    cleanup_test_environment(&pool, schema_id, &profile_name).await;
}

#[rstest]
#[tokio::test]
async fn test_case_sensitivity_in_names(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = "case_test_schema";
    let table_name = "case_test_table";

    // Use table definition handler to create the table
    let request = PostTableDefinitionRequest {
        profile_name: profile_name.to_string(),
        table_name: table_name.to_string(),
        columns: vec![
            ColumnDefinition {
                name: "data".to_string(),
                field_type: "text".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(&pool, request).await.unwrap();

    let mut tx = pool.begin().await.unwrap();
    sqlx::query(&format!(
        r#"INSERT INTO "{}"."{}" (data) VALUES ('test data')"#,
        profile_name, table_name
    ))
    .execute(&mut *tx)
    .await
    .unwrap();
    tx.commit().await.unwrap();

    // Test exact case
    let request = GetTableDataCountRequest {
        profile_name: profile_name.to_string(),
        table_name: table_name.to_string(),
    };
    let response = get_table_data_count(&pool, request).await.unwrap();
    assert_eq!(response.count, 1);

    // Test wrong case should fail
    let wrong_case_request = GetTableDataCountRequest {
        profile_name: "CASE_TEST_SCHEMA".to_string(),
        table_name: table_name.to_string(),
    };
    let wrong_case_result = get_table_data_count(&pool, wrong_case_request).await;
    assert!(wrong_case_result.is_err());
    assert_eq!(wrong_case_result.unwrap_err().code(), tonic::Code::NotFound);

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    cleanup_test_environment(&pool, schema_id, profile_name).await;
}

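// Illustrative note: the wrong-case failure above follows from PostgreSQL
// identifier rules: quoted identifiers are case-sensitive, so the two names below
// refer to different schemas:
//     SELECT COUNT(*) FROM "case_test_schema"."case_test_table";  -- resolves
//     SELECT COUNT(*) FROM "CASE_TEST_SCHEMA"."case_test_table";  -- unknown schema
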
#[rstest]
#[tokio::test]
async fn test_concurrent_count_requests(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = format!("concurrent_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
    let table_name = "concurrent_table";

    // Use table definition handler to create the table
    let request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
        columns: vec![
            ColumnDefinition {
                name: "counter".to_string(),
                field_type: "integer".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(&pool, request).await.unwrap();

    // Insert initial data
    let mut tx = pool.begin().await.unwrap();
    for i in 1..=50 {
        sqlx::query(&format!(
            r#"INSERT INTO "{}"."{}" (counter) VALUES ({})"#,
            profile_name, table_name, i
        ))
        .execute(&mut *tx)
        .await
        .unwrap();
    }
    tx.commit().await.unwrap();

    // Run multiple concurrent count requests
    let mut handles = vec![];
    for _ in 0..10 {
        let pool_clone = pool.clone();
        let profile_name_clone = profile_name.clone();
        let table_name_clone = table_name.to_string();

        let handle = tokio::spawn(async move {
            let request = GetTableDataCountRequest {
                profile_name: profile_name_clone,
                table_name: table_name_clone,
            };
            get_table_data_count(&pool_clone, request).await
        });
        handles.push(handle);
    }

    // Wait for all requests to complete
    for handle in handles {
        let response = handle.await.unwrap().unwrap();
        assert_eq!(response.count, 50);
    }

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    cleanup_test_environment(&pool, schema_id, &profile_name).await;
}

#[rstest]
#[tokio::test]
async fn test_table_without_physical_existence(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = format!("missing_table_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
    let table_name = "missing_physical_table";

    // Create table definition but then manually drop the physical table
    let request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
        columns: vec![
            ColumnDefinition {
                name: "data".to_string(),
                field_type: "text".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(&pool, request).await.unwrap();

    // Manually drop the physical table while keeping the definition
    let mut tx = pool.begin().await.unwrap();
    sqlx::query(&format!(
        r#"DROP TABLE "{}"."{}" CASCADE"#,
        profile_name, table_name
    ))
    .execute(&mut *tx)
    .await
    .unwrap();
    tx.commit().await.unwrap();

    let request = GetTableDataCountRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
    };

    let result = get_table_data_count(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    cleanup_test_environment(&pool, schema_id, &profile_name).await;
}

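// Illustrative sketch (an assumed check, matching the Internal error asserted
// above): the definition/physical mismatch can be detected with to_regclass,
// which returns NULL when the relation does not exist:
//
//     let exists: Option<String> = sqlx::query_scalar("SELECT to_regclass($1)::text")
//         .bind(format!(r#""{}"."{}""#, profile_name, table_name))
//         .fetch_one(pool)
//         .await
//         .map_err(|e| tonic::Status::internal(e.to_string()))?;
//     if exists.is_none() {
//         return Err(tonic::Status::internal(format!(
//             "Table {} is defined but does not physically exist",
//             table_name
//         )));
//     }
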
#[rstest]
#[tokio::test]
async fn test_numeric_column_types_count(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = format!("numeric_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
    let table_name = "numeric_types_table";

    // Use table definition handler to create the table with various numeric types
    let request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
        columns: vec![
            ColumnDefinition {
                name: "big_number".to_string(),
                field_type: "bigint".to_string(),
            },
            ColumnDefinition {
                name: "small_number".to_string(),
                field_type: "integer".to_string(),
            },
            ColumnDefinition {
                name: "decimal_number".to_string(),
                field_type: "decimal(10,2)".to_string(),
            },
            ColumnDefinition {
                name: "timestamp_col".to_string(),
                field_type: "timestamptz".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(&pool, request).await.unwrap();

    let mut tx = pool.begin().await.unwrap();
    sqlx::query(&format!(
        r#"INSERT INTO "{}"."{}" (big_number, small_number, decimal_number, timestamp_col)
           VALUES (9223372036854775807, 2147483647, 99999999.99, NOW())"#,
        profile_name, table_name
    ))
    .execute(&mut *tx)
    .await
    .unwrap();

    sqlx::query(&format!(
        r#"INSERT INTO "{}"."{}" (big_number, small_number, decimal_number, timestamp_col, deleted)
           VALUES (1, 1, 1.00, NOW(), true)"#,
        profile_name, table_name
    ))
    .execute(&mut *tx)
    .await
    .unwrap();
    tx.commit().await.unwrap();

    let request = GetTableDataCountRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
    };

    let response = get_table_data_count(&pool, request).await.unwrap();
    assert_eq!(response.count, 1); // Only the non-deleted record

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    cleanup_test_environment(&pool, schema_id, &profile_name).await;
}

#[rstest]
#[tokio::test]
async fn test_indexed_columns_count(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = format!("index_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
    let table_name = "indexed_table";

    // Use table definition handler to create the table with indexes
    let request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
        columns: vec![
            ColumnDefinition {
                name: "searchable_field".to_string(),
                field_type: "text".to_string(),
            },
            ColumnDefinition {
                name: "category".to_string(),
                field_type: "text".to_string(),
            }
        ],
        indexes: vec!["searchable_field".to_string(), "category".to_string()],
        links: vec![],
    };

    post_table_definition(&pool, request).await.unwrap();

    let mut tx = pool.begin().await.unwrap();
    for i in 1..=20 {
        sqlx::query(&format!(
            r#"INSERT INTO "{}"."{}" (searchable_field, category) VALUES ('data_{}', 'cat_{}')"#,
            profile_name, table_name, i, i % 3
        ))
        .execute(&mut *tx)
        .await
        .unwrap();
    }
    tx.commit().await.unwrap();

    let request = GetTableDataCountRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
    };

    let response = get_table_data_count(&pool, request).await.unwrap();
    assert_eq!(response.count, 20);

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
715
server/tests/tables_data/get/get_table_data_count_test3.rs
Normal file
@@ -0,0 +1,715 @@
// tests/tables_data/get/get_table_data_count_test3.rs

#[rstest]
#[tokio::test]
async fn test_table_with_foreign_keys(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = format!("fk_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
    let parent_table = "parent_table";
    let child_table = "child_table";

    // Create parent table first
    let parent_request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: parent_table.to_string(),
        columns: vec![
            ColumnDefinition {
                name: "name".to_string(),
                field_type: "text".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(&pool, parent_request).await.unwrap();

    // Create child table with link to parent
    let child_request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: child_table.to_string(),
        columns: vec![
            ColumnDefinition {
                name: "description".to_string(),
                field_type: "text".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![
            TableLink {
                linked_table_name: parent_table.to_string(),
                required: false,
            }
        ],
    };

    post_table_definition(&pool, child_request).await.unwrap();

    // Insert test data
    let mut tx = pool.begin().await.unwrap();
    let parent_id: i64 = sqlx::query_scalar(&format!(
        r#"INSERT INTO "{}"."{}" (name) VALUES ('Parent 1') RETURNING id"#,
        profile_name, parent_table
    ))
    .fetch_one(&mut *tx)
    .await
    .unwrap();

    sqlx::query(&format!(
        r#"INSERT INTO "{}"."{}" (parent_table_id, description) VALUES ({}, 'Child 1')"#,
        profile_name, child_table, parent_id
    ))
    .execute(&mut *tx)
    .await
    .unwrap();

    sqlx::query(&format!(
        r#"INSERT INTO "{}"."{}" (parent_table_id, description) VALUES ({}, 'Child 2')"#,
        profile_name, child_table, parent_id
    ))
    .execute(&mut *tx)
    .await
    .unwrap();
    tx.commit().await.unwrap();

    // Test parent table count
    let parent_request = GetTableDataCountRequest {
        profile_name: profile_name.clone(),
        table_name: parent_table.to_string(),
    };
    let parent_response = get_table_data_count(&pool, parent_request).await.unwrap();
    assert_eq!(parent_response.count, 1);

    // Test child table count
    let child_request = GetTableDataCountRequest {
        profile_name: profile_name.clone(),
        table_name: child_table.to_string(),
    };
    let child_response = get_table_data_count(&pool, child_request).await.unwrap();
    assert_eq!(child_response.count, 2);

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    cleanup_test_environment(&pool, schema_id, &profile_name).await;
}

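// Illustrative note (inferred from the raw INSERTs above): a TableLink to
// "parent_table" materializes as a "parent_table_id" column on the child table,
// which is why the fixture can address it directly:
//     INSERT INTO "<profile>"."child_table" (parent_table_id, description)
//     VALUES ($1, 'Child 1');
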
#[rstest]
#[tokio::test]
async fn test_multiple_foreign_keys(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = format!("multi_fk_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));

    // Create three parent tables
    for table_name in ["users", "categories", "tags"] {
        let request = PostTableDefinitionRequest {
            profile_name: profile_name.clone(),
            table_name: table_name.to_string(),
            columns: vec![
                ColumnDefinition {
                    name: "name".to_string(),
                    field_type: "text".to_string(),
                }
            ],
            indexes: vec![],
            links: vec![],
        };
        post_table_definition(&pool, request).await.unwrap();
    }

    // Create child table with links to all three parents
    let child_request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: "posts".to_string(),
        columns: vec![
            ColumnDefinition {
                name: "title".to_string(),
                field_type: "text".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![
            TableLink {
                linked_table_name: "users".to_string(),
                required: true,
            },
            TableLink {
                linked_table_name: "categories".to_string(),
                required: true,
            },
            TableLink {
                linked_table_name: "tags".to_string(),
                required: false,
            }
        ],
    };

    post_table_definition(&pool, child_request).await.unwrap();

    // Insert test data
    let mut tx = pool.begin().await.unwrap();

    let user_id: i64 = sqlx::query_scalar(&format!(
        r#"INSERT INTO "{}"."{}" (name) VALUES ('User1') RETURNING id"#,
        profile_name, "users"
    )).fetch_one(&mut *tx).await.unwrap();

    let category_id: i64 = sqlx::query_scalar(&format!(
        r#"INSERT INTO "{}"."{}" (name) VALUES ('Tech') RETURNING id"#,
        profile_name, "categories"
    )).fetch_one(&mut *tx).await.unwrap();

    let tag_id: i64 = sqlx::query_scalar(&format!(
        r#"INSERT INTO "{}"."{}" (name) VALUES ('Important') RETURNING id"#,
        profile_name, "tags"
    )).fetch_one(&mut *tx).await.unwrap();

    // Insert posts with foreign keys
    sqlx::query(&format!(
        r#"INSERT INTO "{}"."{}" (title, users_id, categories_id, tags_id) VALUES ('Post 1', {}, {}, {})"#,
        profile_name, "posts", user_id, category_id, tag_id
    )).execute(&mut *tx).await.unwrap();

    sqlx::query(&format!(
        r#"INSERT INTO "{}"."{}" (title, users_id, categories_id) VALUES ('Post 2', {}, {})"#,
        profile_name, "posts", user_id, category_id
    )).execute(&mut *tx).await.unwrap();

    tx.commit().await.unwrap();

    // Test counts
    let posts_count = get_table_data_count(&pool, GetTableDataCountRequest {
        profile_name: profile_name.clone(),
        table_name: "posts".to_string(),
    }).await.unwrap();
    assert_eq!(posts_count.count, 2);

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    ).fetch_one(&pool).await.unwrap();

    cleanup_test_environment(&pool, schema_id, &profile_name).await;
}

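// The inserts above assume `required` maps to column nullability, roughly
// (a sketch, not confirmed against the generator):
//
//   users_id      BIGINT NOT NULL REFERENCES "<schema>"."users"(id),
//   categories_id BIGINT NOT NULL REFERENCES "<schema>"."categories"(id),
//   tags_id       BIGINT          REFERENCES "<schema>"."tags"(id)
//
// which is why 'Post 2' can omit tags_id but not users_id or categories_id.
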
#[rstest]
#[tokio::test]
async fn test_required_vs_optional_foreign_keys(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = format!("req_opt_fk_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));

    // Create parent table
    let parent_request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: "companies".to_string(),
        columns: vec![
            ColumnDefinition {
                name: "name".to_string(),
                field_type: "text".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(&pool, parent_request).await.unwrap();

    // Create child table with a required link
    let child_request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: "employees".to_string(),
        columns: vec![
            ColumnDefinition {
                name: "name".to_string(),
                field_type: "text".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![
            TableLink {
                linked_table_name: "companies".to_string(),
                required: true, // Required foreign key
            }
        ],
    };
    post_table_definition(&pool, child_request).await.unwrap();

    // Insert test data
    let mut tx = pool.begin().await.unwrap();

    let company_id: i64 = sqlx::query_scalar(&format!(
        r#"INSERT INTO "{}"."{}" (name) VALUES ('TechCorp') RETURNING id"#,
        profile_name, "companies"
    )).fetch_one(&mut *tx).await.unwrap();

    // Insert employees with the required foreign key populated
    for i in 1..=5 {
        sqlx::query(&format!(
            r#"INSERT INTO "{}"."{}" (name, companies_id) VALUES ('Employee {}', {})"#,
            profile_name, "employees", i, company_id
        )).execute(&mut *tx).await.unwrap();
    }

    tx.commit().await.unwrap();

    // Test counts
    let companies_count = get_table_data_count(&pool, GetTableDataCountRequest {
        profile_name: profile_name.clone(),
        table_name: "companies".to_string(),
    }).await.unwrap();
    assert_eq!(companies_count.count, 1);

    let employees_count = get_table_data_count(&pool, GetTableDataCountRequest {
        profile_name: profile_name.clone(),
        table_name: "employees".to_string(),
    }).await.unwrap();
    assert_eq!(employees_count.count, 5);

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    ).fetch_one(&pool).await.unwrap();

    cleanup_test_environment(&pool, schema_id, &profile_name).await;
}

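// A natural negative-path follow-up (not asserted above): if `required: true`
// really produces NOT NULL, an employee without companies_id must fail.
// Hedged sketch -- the exact error depends on the generated DDL:
//
//   let result = sqlx::query(&format!(
//       r#"INSERT INTO "{}"."{}" (name) VALUES ('No Company')"#,
//       profile_name, "employees"
//   )).execute(&pool).await;
//   assert!(result.is_err());
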
#[rstest]
#[tokio::test]
async fn test_performance_stress_large_dataset(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = format!("stress_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
    let table_name = "stress_table";

    // Create table
    let request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
        columns: vec![
            ColumnDefinition {
                name: "data".to_string(),
                field_type: "text".to_string(),
            },
            ColumnDefinition {
                name: "number".to_string(),
                field_type: "integer".to_string(),
            }
        ],
        indexes: vec!["number".to_string()], // Add index for better performance
        links: vec![],
    };
    post_table_definition(&pool, request).await.unwrap();

    // Insert 10,000 records in batches of 100
    let mut tx = pool.begin().await.unwrap();
    for batch in 0..100 {
        let mut values = Vec::new();
        for i in 1..=100 {
            let record_num = batch * 100 + i;
            values.push(format!("('Data {}', {})", record_num, record_num));
        }

        let sql = format!(
            r#"INSERT INTO "{}"."{}" (data, number) VALUES {}"#,
            profile_name, table_name, values.join(", ")
        );
        sqlx::query(&sql).execute(&mut *tx).await.unwrap();
    }

    // Mark some as deleted
    sqlx::query(&format!(
        r#"UPDATE "{}"."{}" SET deleted = true WHERE number <= 1000"#,
        profile_name, table_name
    )).execute(&mut *tx).await.unwrap();

    tx.commit().await.unwrap();

    // Test count performance
    let start = std::time::Instant::now();
    let response = get_table_data_count(&pool, GetTableDataCountRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
    }).await.unwrap();
    let duration = start.elapsed();

    assert_eq!(response.count, 9000); // 10000 - 1000 deleted

    // Performance assertion - should complete within reasonable time
    assert!(duration.as_secs() < 5, "Count operation took too long: {:?}", duration);

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    ).fetch_one(&pool).await.unwrap();

    cleanup_test_environment(&pool, schema_id, &profile_name).await;
}

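// If this count ever regresses, a partial index over live rows is the usual
// remedy; a sketch, assuming the handler counts WHERE deleted = FALSE:
//
//   CREATE INDEX stress_table_live_idx
//       ON "<schema>"."stress_table" (id)
//       WHERE deleted = FALSE;
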
#[rstest]
#[tokio::test]
async fn test_maximum_identifier_lengths(#[future] pool: PgPool) {
    let pool = pool.await;

    // Test with maximum length names (63 characters - PostgreSQL limit)
    let max_profile_name = "a".repeat(63);
    let max_table_name = "b".repeat(63);

    let request = PostTableDefinitionRequest {
        profile_name: max_profile_name.clone(),
        table_name: max_table_name.clone(),
        columns: vec![
            ColumnDefinition {
                name: "c".repeat(63), // Max column name
                field_type: "text".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(&pool, request).await.unwrap();

    // Insert test data
    let mut tx = pool.begin().await.unwrap();
    sqlx::query(&format!(
        r#"INSERT INTO "{}"."{}" ("{}") VALUES ('test')"#,
        max_profile_name, max_table_name, "c".repeat(63)
    )).execute(&mut *tx).await.unwrap();
    tx.commit().await.unwrap();

    // Test count
    let response = get_table_data_count(&pool, GetTableDataCountRequest {
        profile_name: max_profile_name.clone(),
        table_name: max_table_name.clone(),
    }).await.unwrap();
    assert_eq!(response.count, 1);

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        max_profile_name
    ).fetch_one(&pool).await.unwrap();

    cleanup_test_environment(&pool, schema_id, &max_profile_name).await;
}

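// PostgreSQL does not reject identifiers longer than 63 bytes; it truncates
// them to NAMEDATALEN - 1 with a NOTICE. So a 64-character name would
// silently collide with the 63-character one used above, e.g. in psql:
//
//   CREATE SCHEMA aaaa...a;  -- 64 a's: NOTICE: identifier will be truncated
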
#[rstest]
#[tokio::test]
async fn test_complex_schema_hierarchy(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = format!("hierarchy_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));

    // Create A -> B -> C -> D hierarchy
    let tables = ["table_a", "table_b", "table_c", "table_d"];

    // Create first table (no dependencies)
    let request_a = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: tables[0].to_string(),
        columns: vec![
            ColumnDefinition {
                name: "name".to_string(),
                field_type: "text".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(&pool, request_a).await.unwrap();

    // Create subsequent tables, each linked to the previous one
    for i in 1..tables.len() {
        let request = PostTableDefinitionRequest {
            profile_name: profile_name.clone(),
            table_name: tables[i].to_string(),
            columns: vec![
                ColumnDefinition {
                    name: "data".to_string(),
                    field_type: "text".to_string(),
                }
            ],
            indexes: vec![],
            links: vec![
                TableLink {
                    linked_table_name: tables[i-1].to_string(),
                    required: true,
                }
            ],
        };
        post_table_definition(&pool, request).await.unwrap();
    }

    // Insert hierarchical data
    let mut tx = pool.begin().await.unwrap();

    let a_id: i64 = sqlx::query_scalar(&format!(
        r#"INSERT INTO "{}"."{}" (name) VALUES ('Root') RETURNING id"#,
        profile_name, tables[0]
    )).fetch_one(&mut *tx).await.unwrap();

    let b_id: i64 = sqlx::query_scalar(&format!(
        r#"INSERT INTO "{}"."{}" (data, table_a_id) VALUES ('Level B', {}) RETURNING id"#,
        profile_name, tables[1], a_id
    )).fetch_one(&mut *tx).await.unwrap();

    let c_id: i64 = sqlx::query_scalar(&format!(
        r#"INSERT INTO "{}"."{}" (data, table_b_id) VALUES ('Level C', {}) RETURNING id"#,
        profile_name, tables[2], b_id
    )).fetch_one(&mut *tx).await.unwrap();

    sqlx::query(&format!(
        r#"INSERT INTO "{}"."{}" (data, table_c_id) VALUES ('Level D', {})"#,
        profile_name, tables[3], c_id
    )).execute(&mut *tx).await.unwrap();

    tx.commit().await.unwrap();

    // Test counts for each level
    for table_name in tables.iter() {
        let response = get_table_data_count(&pool, GetTableDataCountRequest {
            profile_name: profile_name.clone(),
            table_name: table_name.to_string(),
        }).await.unwrap();
        assert_eq!(response.count, 1, "Table {} should have count 1", table_name);
    }

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    ).fetch_one(&pool).await.unwrap();

    cleanup_test_environment(&pool, schema_id, &profile_name).await;
}

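// Counting per level verifies each table exists and holds one live row; a
// join across the assumed FK columns would verify the chain itself (sketch):
//
//   SELECT d.data
//   FROM "<schema>"."table_d" d
//   JOIN "<schema>"."table_c" c ON d.table_c_id = c.id
//   JOIN "<schema>"."table_b" b ON c.table_b_id = b.id
//   JOIN "<schema>"."table_a" a ON b.table_a_id = a.id;
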
#[rstest]
#[tokio::test]
async fn test_concurrent_insert_and_count(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = format!("concurrent_insert_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
    let table_name = "concurrent_ops_table";

    // Create table
    let request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
        columns: vec![
            ColumnDefinition {
                name: "value".to_string(),
                field_type: "integer".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(&pool, request).await.unwrap();

    // Insert initial data
    let mut tx = pool.begin().await.unwrap();
    for i in 1..=100 {
        sqlx::query(&format!(
            r#"INSERT INTO "{}"."{}" (value) VALUES ({})"#,
            profile_name, table_name, i
        )).execute(&mut *tx).await.unwrap();
    }
    tx.commit().await.unwrap();

    // Run concurrent operations
    let mut count_handles = vec![];
    let mut insert_handles = vec![];

    // Spawn count operations
    for _ in 0..5 {
        let pool_clone = pool.clone();
        let profile_name_clone = profile_name.clone();
        let table_name_clone = table_name.to_string();

        let handle = tokio::spawn(async move {
            let mut counts = Vec::new();
            for _ in 0..10 {
                let response = get_table_data_count(&pool_clone, GetTableDataCountRequest {
                    profile_name: profile_name_clone.clone(),
                    table_name: table_name_clone.clone(),
                }).await.unwrap();
                counts.push(response.count);
                tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
            }
            counts
        });
        count_handles.push(handle);
    }

    // Spawn insert operations
    for i in 0..3 {
        let pool_clone = pool.clone();
        let profile_name_clone = profile_name.clone();
        let table_name_clone = table_name.to_string();

        let handle = tokio::spawn(async move {
            for j in 1..=20 {
                let value = (i * 100) + j + 1000; // Ensure unique values
                sqlx::query(&format!(
                    r#"INSERT INTO "{}"."{}" (value) VALUES ({})"#,
                    profile_name_clone, table_name_clone, value
                )).execute(&pool_clone).await.unwrap();
                tokio::time::sleep(tokio::time::Duration::from_millis(5)).await;
            }
        });
        insert_handles.push(handle);
    }

    // Wait for all operations to complete
    for handle in count_handles {
        handle.await.unwrap();
    }
    for handle in insert_handles {
        handle.await.unwrap();
    }

    // Final count should be 100 + (3 * 20) = 160
    let final_response = get_table_data_count(&pool, GetTableDataCountRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
    }).await.unwrap();
    assert_eq!(final_response.count, 160);

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    ).fetch_one(&pool).await.unwrap();

    cleanup_test_environment(&pool, schema_id, &profile_name).await;
}

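// The joins above discard each counter's Vec<i64>. With only committed
// inserts happening, every COUNT is non-decreasing over time, so if the
// vectors were kept one could additionally assert (a sketch):
//
//   for counts in count_results {
//       assert!(counts.windows(2).all(|w| w[0] <= w[1]));
//       assert!(counts.iter().all(|&c| (100..=160).contains(&c)));
//   }
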
#[rstest]
#[tokio::test]
async fn test_edge_case_all_records_deleted(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile_name = format!("all_deleted_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
    let table_name = "all_deleted_table";

    // Create table
    let request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
        columns: vec![
            ColumnDefinition {
                name: "data".to_string(),
                field_type: "text".to_string(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(&pool, request).await.unwrap();

    // Insert and then delete all records
    let mut tx = pool.begin().await.unwrap();
    for i in 1..=50 {
        sqlx::query(&format!(
            r#"INSERT INTO "{}"."{}" (data) VALUES ('Record {}')"#,
            profile_name, table_name, i
        )).execute(&mut *tx).await.unwrap();
    }

    // Mark all as deleted
    sqlx::query(&format!(
        r#"UPDATE "{}"."{}" SET deleted = true"#,
        profile_name, table_name
    )).execute(&mut *tx).await.unwrap();

    tx.commit().await.unwrap();

    // Count should be 0
    let response = get_table_data_count(&pool, GetTableDataCountRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.to_string(),
    }).await.unwrap();
    assert_eq!(response.count, 0);

    // Cleanup
    let schema_id = sqlx::query_scalar!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    ).fetch_one(&pool).await.unwrap();

    cleanup_test_environment(&pool, schema_id, &profile_name).await;
}

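// This pins down the soft-delete filter: we assume the handler issues
// something shaped like (a sketch, not taken from the handler):
//
//   SELECT COUNT(*) FROM "<schema>"."all_deleted_table" WHERE deleted = FALSE;
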
#[rstest]
#[tokio::test]
async fn test_cross_schema_isolation(#[future] pool: PgPool) {
    let pool = pool.await;
    let profile1 = format!("schema1_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
    let profile2 = format!("schema2_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
    let table_name = "isolation_test_table";

    // Create identical tables in two different schemas
    for profile_name in [&profile1, &profile2] {
        let request = PostTableDefinitionRequest {
            profile_name: profile_name.clone(),
            table_name: table_name.to_string(),
            columns: vec![
                ColumnDefinition {
                    name: "data".to_string(),
                    field_type: "text".to_string(),
                }
            ],
            indexes: vec![],
            links: vec![],
        };
        post_table_definition(&pool, request).await.unwrap();
    }

    // Insert different amounts of data in each schema
    let mut tx = pool.begin().await.unwrap();

    // Schema 1: 10 records
    for i in 1..=10 {
        sqlx::query(&format!(
            r#"INSERT INTO "{}"."{}" (data) VALUES ('Schema1 Record {}')"#,
            profile1, table_name, i
        )).execute(&mut *tx).await.unwrap();
    }

    // Schema 2: 25 records
    for i in 1..=25 {
        sqlx::query(&format!(
            r#"INSERT INTO "{}"."{}" (data) VALUES ('Schema2 Record {}')"#,
            profile2, table_name, i
        )).execute(&mut *tx).await.unwrap();
    }

    tx.commit().await.unwrap();

    // Test counts are isolated
    let count1 = get_table_data_count(&pool, GetTableDataCountRequest {
        profile_name: profile1.clone(),
        table_name: table_name.to_string(),
    }).await.unwrap();
    assert_eq!(count1.count, 10);

    let count2 = get_table_data_count(&pool, GetTableDataCountRequest {
        profile_name: profile2.clone(),
        table_name: table_name.to_string(),
    }).await.unwrap();
    assert_eq!(count2.count, 25);

    // Cleanup both schemas
    for profile_name in [&profile1, &profile2] {
        let schema_id = sqlx::query_scalar!(
            "SELECT id FROM schemas WHERE name = $1",
            profile_name
        ).fetch_one(&pool).await.unwrap();

        cleanup_test_environment(&pool, schema_id, profile_name).await;
    }
}

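// The isolation above comes from schema-qualified name resolution: both
// schemas own an isolation_test_table, and the handler is assumed to expand
// profile_name into the schema part of the identifier (a sketch):
//
//   SELECT COUNT(*) FROM "schema1_<ts>"."isolation_test_table" WHERE deleted = FALSE;
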
@@ -1,13 +1,28 @@
 // tests/tables_data/handlers/get_table_data_test.rs
 use rstest::{fixture, rstest};
 use server::tables_data::handlers::get_table_data;
-use common::proto::multieko2::tables_data::{GetTableDataRequest, GetTableDataResponse};
+use common::proto::multieko2::tables_data::GetTableDataRequest;
 use crate::common::setup_test_db;
 use sqlx::{PgPool, Row};
 use tonic;
-use chrono::Utc;
+use chrono::{DateTime, Utc};
 use serde_json::json;
 use std::collections::HashMap;
+use futures::future::join_all;
+use rand::distr::Alphanumeric;
+use rand::Rng;
+use rust_decimal::Decimal;
+use rust_decimal_macros::dec;
+use server::table_definition::handlers::post_table_definition;
+use server::tables_data::handlers::post_table_data;
+use common::proto::multieko2::table_definition::{
+    PostTableDefinitionRequest, ColumnDefinition as TableColumnDefinition, TableLink
+};
+use common::proto::multieko2::tables_data::PostTableDataRequest;
+use prost_types::Value;
+use prost_types::value::Kind;
+use tokio::sync::mpsc;
+use server::indexer::IndexCommand;

 #[fixture]
 async fn pool() -> PgPool {
@@ -22,60 +37,70 @@ async fn closed_pool(#[future] pool: PgPool) -> PgPool {
 }

 #[fixture]
-async fn profile(#[future] pool: PgPool) -> (PgPool, String, i64) {
+async fn schema(#[future] pool: PgPool) -> (PgPool, String, i64) {
     let pool = pool.await;
-    let profile_name = format!("TestProfile_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
-
-    let profile = sqlx::query!(
-        "INSERT INTO profiles (name) VALUES ($1) RETURNING id",
-        profile_name
+    let schema_name = format!("testschema_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
+
+    // Insert into schemas table instead of profiles
+    let schema = sqlx::query!(
+        "INSERT INTO schemas (name) VALUES ($1) RETURNING id",
+        schema_name
     )
     .fetch_one(&pool)
     .await
     .unwrap();

-    (pool, profile_name, profile.id)
+
+    // Create the actual PostgreSQL schema
+    let create_schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", schema_name);
+    sqlx::query(&create_schema_sql)
+        .execute(&pool)
+        .await
+        .unwrap();
+
+    (pool, schema_name, schema.id)
 }

 #[fixture]
-async fn table_definition(#[future] profile: (PgPool, String, i64)) -> (PgPool, String, String, i64) {
-    let (pool, profile_name, profile_id) = profile.await;
+async fn table_definition(#[future] schema: (PgPool, String, i64)) -> (PgPool, String, String, i64) {
+    let (pool, schema_name, schema_id) = schema.await;
     let table_name = format!("test_table_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());

     // Define columns and indexes for the table
     let columns = json!([
-        "\"name\" VARCHAR(255)",
+        "\"name\" TEXT",
         "\"age\" INTEGER",
-        "\"email\" VARCHAR(100)",
+        "\"email\" TEXT",
         "\"is_active\" BOOLEAN"
     ]);
-    let indexes = json!([]); // Add empty indexes array
+    let indexes = json!([]);

+    // Use schema_id instead of profile_id
     let table_def = sqlx::query!(
-        "INSERT INTO table_definitions (profile_id, table_name, columns, indexes) VALUES ($1, $2, $3, $4) RETURNING id",
-        profile_id,
+        "INSERT INTO table_definitions (schema_id, table_name, columns, indexes) VALUES ($1, $2, $3, $4) RETURNING id",
+        schema_id,
         table_name,
         columns,
-        indexes // Add indexes to the insert
+        indexes
     )
     .fetch_one(&pool)
     .await
     .unwrap();

-    // Create actual table
+    // Create actual table in the schema
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let create_table = format!(
         r#"
-        CREATE TABLE "{}" (
+        CREATE TABLE {} (
             id BIGSERIAL PRIMARY KEY,
-            deleted BOOLEAN NOT NULL DEFAULT false,
-            firma TEXT NOT NULL,
-            name VARCHAR(255),
+            deleted BOOLEAN NOT NULL DEFAULT FALSE,
+            name TEXT,
             age INTEGER,
-            email VARCHAR(100),
-            is_active BOOLEAN
+            email TEXT,
+            is_active BOOLEAN,
+            created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
         )
         "#,
-        table_name
+        qualified_table
     );

     sqlx::query(&create_table)
@@ -83,23 +108,23 @@ async fn table_definition(#[future] profile: (PgPool, String, i64)) -> (PgPool,
         .await
         .unwrap();

-    (pool, profile_name, table_name, table_def.id)
+    (pool, schema_name, table_name, table_def.id)
 }

 #[fixture]
 async fn regular_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;

     // Insert a record with all fields
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma, name, age, email, is_active)
-        VALUES ($1, $2, $3, $4, $5)
+        r#"INSERT INTO {} (name, age, email, is_active)
+        VALUES ($1, $2, $3, $4)
         RETURNING id"#,
-        table_name
+        qualified_table
     );

     let record = sqlx::query(&query)
-        .bind("Test Company")
         .bind("John Doe")
         .bind(30)
         .bind("john@example.com")
@@ -109,59 +134,58 @@ async fn regular_record(#[future] table_definition: (PgPool, String, String, i64
     .unwrap();

     let id: i64 = record.get("id");
-    (pool, profile_name, table_name, id)
+    (pool, schema_name, table_name, id)
 }

 #[fixture]
 async fn null_fields_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;

-    // Insert a record with only required fields
+    // Insert a record with only basic fields (all others will be NULL)
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma)
-        VALUES ($1)
+        r#"INSERT INTO {} DEFAULT VALUES
         RETURNING id"#,
-        table_name
+        qualified_table
     );

     let record = sqlx::query(&query)
-        .bind("Null Fields Company")
         .fetch_one(&pool)
         .await
         .unwrap();

     let id: i64 = record.get("id");
-    (pool, profile_name, table_name, id)
+    (pool, schema_name, table_name, id)
 }

 #[fixture]
 async fn deleted_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;

     // Insert a deleted record
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma, deleted)
-        VALUES ($1, true)
+        r#"INSERT INTO {} (deleted)
+        VALUES (true)
         RETURNING id"#,
-        table_name
+        qualified_table
     );

     let record = sqlx::query(&query)
-        .bind("Deleted Company")
-        .bind(true)
         .fetch_one(&pool)
         .await
         .unwrap();

     let id: i64 = record.get("id");
-    (pool, profile_name, table_name, id)
+    (pool, schema_name, table_name, id)
 }

-async fn assert_response_matches(pool: &PgPool, table_name: &str, id: i64, response: &HashMap<String, String>) {
-    let columns = format!(
-        "id, deleted, firma, name, age, email, is_active"
-    );
-    let query = format!(r#"SELECT {} FROM "{}" WHERE id = $1"#, columns, table_name);
-
+async fn assert_response_matches(pool: &PgPool, schema_name: &str, table_name: &str, id: i64, response: &HashMap<String, String>) {
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
+    let columns = "id, deleted, name, age, email, is_active";
+    let query = format!(r#"SELECT {} FROM {} WHERE id = $1"#, columns, qualified_table);
+
     let row = sqlx::query(&query)
         .bind(id)
         .fetch_one(pool)
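Several of the migrated fixtures above and below rebuild the same schema-qualified name by hand; a small shared helper would remove the repetition. A sketch (not part of the diff; the name and location are hypothetical):

fn qualified_table(schema: &str, table: &str) -> String {
    // Quote both parts so mixed-case and maximum-length identifiers survive.
    format!("\"{}\".\"{}\"", schema, table)
}
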
@@ -170,30 +194,38 @@ async fn assert_response_matches(pool: &PgPool, table_name: &str, id: i64, respo

     assert_eq!(row.get::<i64, _>("id").to_string(), response["id"]);
     assert_eq!(row.get::<bool, _>("deleted").to_string(), response["deleted"]);
-    assert_eq!(row.get::<String, _>("firma"), response["firma"]);

     // Check optional fields
     let name: Option<String> = row.try_get("name").unwrap_or(None);
     assert_eq!(name.unwrap_or_default(), response["name"]);

     let age: Option<i32> = row.try_get("age").unwrap_or(None);
     assert_eq!(age.map(|v| v.to_string()).unwrap_or_default(), response["age"]);

     let email: Option<String> = row.try_get("email").unwrap_or(None);
     assert_eq!(email.unwrap_or_default(), response["email"]);

     let is_active: Option<bool> = row.try_get("is_active").unwrap_or(None);
     assert_eq!(is_active.map(|v| v.to_string()).unwrap_or_default(), response["is_active"]);
 }

-async fn cleanup_test_data(pool: &PgPool, table_name: &str) {
-    let _ = sqlx::query(&format!(r#"DROP TABLE IF EXISTS "{}" CASCADE"#, table_name))
+async fn cleanup_test_data(pool: &PgPool, schema_name: &str, table_name: &str) {
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
+    let _ = sqlx::query(&format!(r#"DROP TABLE IF EXISTS {} CASCADE"#, qualified_table))
         .execute(pool)
         .await;

     let _ = sqlx::query!("DELETE FROM table_definitions WHERE table_name = $1", table_name)
         .execute(pool)
         .await;
+
+    let _ = sqlx::query(&format!(r#"DROP SCHEMA IF EXISTS "{}" CASCADE"#, schema_name))
+        .execute(pool)
+        .await;
+
+    let _ = sqlx::query!("DELETE FROM schemas WHERE name = $1", schema_name)
+        .execute(pool)
+        .await;
 }

 #[rstest]
@@ -201,27 +233,26 @@ async fn cleanup_test_data(pool: &PgPool, table_name: &str) {
 async fn test_get_table_data_success(
     #[future] regular_record: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, id) = regular_record.await;
+    let (pool, schema_name, table_name, id) = regular_record.await;

     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };

     let response = get_table_data(&pool, request).await.unwrap();

     assert_eq!(response.data["id"], id.to_string());
-    assert_eq!(response.data["firma"], "Test Company");
     assert_eq!(response.data["name"], "John Doe");
     assert_eq!(response.data["age"], "30");
     assert_eq!(response.data["email"], "john@example.com");
     assert_eq!(response.data["is_active"], "true");
     assert_eq!(response.data["deleted"], "false");

-    assert_response_matches(&pool, &table_name, id, &response.data).await;
-
-    cleanup_test_data(&pool, &table_name).await;
+    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }

 #[rstest]
@@ -229,10 +260,10 @@ async fn test_get_table_data_success(
 async fn test_get_optional_fields_null(
     #[future] null_fields_record: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, id) = null_fields_record.await;
+    let (pool, schema_name, table_name, id) = null_fields_record.await;

     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
@@ -244,10 +275,10 @@ async fn test_get_optional_fields_null(
     assert_eq!(response.data["email"], "");
     assert_eq!(response.data["is_active"], "");
     assert_eq!(response.data["deleted"], "false");

-    assert_response_matches(&pool, &table_name, id, &response.data).await;
-
-    cleanup_test_data(&pool, &table_name).await;
+    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }

 #[rstest]
@@ -255,20 +286,20 @@ async fn test_get_optional_fields_null(
 async fn test_get_nonexistent_id(
     #[future] table_definition: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;

     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id: 9999,
     };

     let result = get_table_data(&pool, request).await;

     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);

-    cleanup_test_data(&pool, &table_name).await;
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }

 #[rstest]
@@ -276,20 +307,20 @@ async fn test_get_nonexistent_id(
 async fn test_get_deleted_record(
     #[future] deleted_record: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, id) = deleted_record.await;
+    let (pool, schema_name, table_name, id) = deleted_record.await;

     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };

     let result = get_table_data(&pool, request).await;

     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);

-    cleanup_test_data(&pool, &table_name).await;
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }

 #[rstest]
@@ -298,7 +329,7 @@ async fn test_get_database_error(
     #[future] closed_pool: PgPool,
 ) {
     let closed_pool = closed_pool.await;
-
+
     let request = GetTableDataRequest {
         profile_name: "test".into(),
         table_name: "test".into(),
@@ -306,7 +337,7 @@ async fn test_get_database_error(
     };

     let result = get_table_data(&closed_pool, request).await;
-
+
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
 }
@@ -316,17 +347,17 @@
 async fn test_get_special_characters(
     #[future] table_definition: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;

+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma, name, email)
-        VALUES ($1, $2, $3)
+        r#"INSERT INTO {} (name, email)
+        VALUES ($1, $2)
         RETURNING id"#,
-        table_name
+        qualified_table
     );

     let record = sqlx::query(&query)
-        .bind("Test Company")
         .bind("Náměstí ČR")
         .bind("čšěř@example.com")
         .fetch_one(&pool)
@@ -336,7 +367,7 @@ async fn test_get_special_characters(
     let id: i64 = record.get("id");

     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
@@ -345,10 +376,10 @@ async fn test_get_special_characters(

     assert_eq!(response.data["name"], "Náměstí ČR");
     assert_eq!(response.data["email"], "čšěř@example.com");

-    assert_response_matches(&pool, &table_name, id, &response.data).await;
-
-    cleanup_test_data(&pool, &table_name).await;
+    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }

 #[rstest]
@@ -356,18 +387,18 @@ async fn test_get_special_characters(
 async fn test_get_max_length_fields(
     #[future] table_definition: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;

     let long_name = "a".repeat(255);
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma, name)
-        VALUES ($1, $2)
+        r#"INSERT INTO {} (name)
+        VALUES ($1)
         RETURNING id"#,
-        table_name
+        qualified_table
     );

     let record = sqlx::query(&query)
-        .bind("Test Company")
         .bind(&long_name)
         .fetch_one(&pool)
         .await
@@ -376,7 +407,7 @@ async fn test_get_max_length_fields(
     let id: i64 = record.get("id");

     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
@@ -385,10 +416,10 @@ async fn test_get_max_length_fields(

     assert_eq!(response.data["name"], long_name);
     assert_eq!(response.data["name"].len(), 255);

-    assert_response_matches(&pool, &table_name, id, &response.data).await;
-
-    cleanup_test_data(&pool, &table_name).await;
+    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }

 #[rstest]
@@ -397,7 +428,7 @@ async fn test_get_invalid_profile(
     #[future] pool: PgPool,
 ) {
     let pool = pool.await;
-
+
     let request = GetTableDataRequest {
         profile_name: "non_existent_profile".into(),
         table_name: "test_table".into(),
@@ -405,7 +436,7 @@ async fn test_get_invalid_profile(
     };

     let result = get_table_data(&pool, request).await;
-
+
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
 }
@@ -413,20 +444,22 @@ async fn test_get_invalid_profile(
 #[rstest]
 #[tokio::test]
 async fn test_get_invalid_table(
-    #[future] profile: (PgPool, String, i64),
+    #[future] schema: (PgPool, String, i64),
 ) {
-    let (pool, profile_name, _) = profile.await;
+    let (pool, schema_name, _) = schema.await;

     let request = GetTableDataRequest {
-        profile_name,
+        profile_name: schema_name.clone(),
         table_name: "non_existent_table".into(),
         id: 1,
     };

     let result = get_table_data(&pool, request).await;

     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
+
+    cleanup_test_data(&pool, &schema_name, "non_existent_table").await;
 }

 #[rstest]
@@ -434,17 +467,19 @@ async fn test_get_invalid_table(
 async fn test_get_invalid_column(
     #[future] regular_record: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, id) = regular_record.await;
+    let (pool, schema_name, table_name, id) = regular_record.await;

     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };

     let result = get_table_data(&pool, request).await;

     assert!(result.is_ok()); // Should still succeed as we're not filtering columns

-    cleanup_test_data(&pool, &table_name).await;
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
+
+include!("get_table_data_test2.rs");
server/tests/tables_data/get/get_table_data_test2.rs (new file, 1112 lines)
File diff suppressed because it is too large

server/tests/tables_data/get/mod.rs (new file, 4 lines)
@@ -0,0 +1,4 @@
// tests/tables_data/get/mod.rs

pub mod get_table_data_count_test;
pub mod get_table_data_test;
@@ -1,266 +0,0 @@
// tests/tables_data/handlers/delete_table_data_test.rs
use rstest::{fixture, rstest};
use server::tables_data::handlers::delete_table_data;
use common::proto::multieko2::tables_data::DeleteTableDataRequest;
use crate::common::setup_test_db;
use sqlx::{PgPool, Row};
use tonic;
use std::sync::Arc;
use tokio::sync::Mutex;
use chrono::Utc;
use serde_json::json;

lazy_static::lazy_static! {
    static ref TEST_MUTEX: Arc<Mutex<()>> = Arc::new(Mutex::new(()));
}

#[fixture]
async fn pool() -> PgPool {
    setup_test_db().await
}

#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
    let pool = pool.await;
    pool.close().await;
    pool
}

#[fixture]
async fn existing_profile(#[future] pool: PgPool) -> (PgPool, String, i64) {
    let pool = pool.await;
    let profile_name = format!("TestProfile_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
    let profile = sqlx::query!(
        "INSERT INTO profiles (name) VALUES ($1) RETURNING id",
        profile_name
    )
    .fetch_one(&pool)
    .await
    .unwrap();
    (pool, profile_name, profile.id)
}

#[fixture]
async fn existing_table(
    #[future] existing_profile: (PgPool, String, i64),
) -> (PgPool, String, i64, String) {
    let (pool, profile_name, profile_id) = existing_profile.await;
    let table_name = format!("test_table_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());

    // Define columns for the table
    let columns = json!([
        {
            "name": "id",
            "type": "BIGSERIAL",
            "primary_key": true
        },
        {
            "name": "deleted",
            "type": "BOOLEAN",
            "default": false
        }
    ]);

    // Add indexes definition - this is what's missing
    let indexes = json!([]); // Empty array if no indexes, but not null

    sqlx::query!(
        "INSERT INTO table_definitions (profile_id, table_name, columns, indexes) VALUES ($1, $2, $3, $4)",
        profile_id,
        table_name,
        columns,
        indexes
    )
    .execute(&pool)
    .await
    .unwrap();

    let create_table = format!(
        r#"
        CREATE TABLE "{}" (
            id BIGSERIAL PRIMARY KEY,
            deleted BOOLEAN NOT NULL DEFAULT false
        )
        "#,
        table_name
    );

    sqlx::query(&create_table)
        .execute(&pool)
        .await
        .unwrap();

    (pool, profile_name, profile_id, table_name)
}

#[fixture]
async fn existing_record(
    #[future] existing_table: (PgPool, String, i64, String),
) -> (PgPool, String, String, i64) {
    let (pool, profile_name, _profile_id, table_name) = existing_table.await;
    let query = format!(
        "INSERT INTO \"{}\" (deleted) VALUES (false) RETURNING id",
        table_name
    );

    let row = sqlx::query(&query)
        .fetch_one(&pool)
        .await
        .unwrap();

    let id: i64 = row.get("id");
    (pool, profile_name, table_name, id)
}

#[fixture]
async fn existing_deleted_record(
    #[future] existing_table: (PgPool, String, i64, String),
) -> (PgPool, String, String, i64) {
    let (pool, profile_name, _profile_id, table_name) = existing_table.await;
    let query = format!(
        "INSERT INTO \"{}\" (deleted) VALUES (true) RETURNING id",
        table_name
    );

    let row = sqlx::query(&query)
        .fetch_one(&pool)
        .await
        .unwrap();

    let id: i64 = row.get("id");
    (pool, profile_name, table_name, id)
}

async fn cleanup_test_data(pool: &PgPool, table_name: &str) {
    // Clean up table definition
    sqlx::query!(
        "DELETE FROM table_definitions WHERE table_name = $1",
        table_name
    )
    .execute(pool)
    .await
    .unwrap();

    // Clean up physical table
    let drop_table = format!(r#"DROP TABLE IF EXISTS "{}" CASCADE"#, table_name);
    sqlx::query(&drop_table)
        .execute(pool)
        .await
        .unwrap();
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_success(
    #[future] existing_record: (PgPool, String, String, i64),
) {
    let _guard = TEST_MUTEX.lock().await;
    let (pool, profile_name, table_name, record_id) = existing_record.await;
    let request = DeleteTableDataRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        record_id,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(response.success);

    let query = format!(
        "SELECT deleted FROM \"{}\" WHERE id = $1",
        table_name
    );
    let row = sqlx::query(&query)
        .bind(record_id)
        .fetch_one(&pool)
        .await
        .unwrap();

    assert!(row.get::<bool, _>("deleted"));

    cleanup_test_data(&pool, &table_name).await;
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_profile_not_found(
    #[future] pool: PgPool,
) {
    let _guard = TEST_MUTEX.lock().await;
    let pool = pool.await;
    let request = DeleteTableDataRequest {
        profile_name: "NonExistentProfile".to_string(),
        table_name: "test_table".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_table_not_found(
    #[future] existing_profile: (PgPool, String, i64),
) {
    let _guard = TEST_MUTEX.lock().await;
    let (pool, profile_name, _) = existing_profile.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: "non_existent_table".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_record_not_found(
    #[future] existing_table: (PgPool, String, i64, String),
) {
    let _guard = TEST_MUTEX.lock().await;
    let (pool, profile_name, _, table_name) = existing_table.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: table_name.clone(), // Clone here
        record_id: 9999,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(!response.success);

    cleanup_test_data(&pool, &table_name).await;
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_already_deleted(
    #[future] existing_deleted_record: (PgPool, String, String, i64),
) {
    let _guard = TEST_MUTEX.lock().await;
    let (pool, profile_name, table_name, record_id) = existing_deleted_record.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: table_name.clone(), // Clone here
        record_id,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(!response.success);

    cleanup_test_data(&pool, &table_name).await;
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_database_error(
    #[future] closed_pool: PgPool,
) {
    let closed_pool = closed_pool.await;
    let request = DeleteTableDataRequest {
        profile_name: "test".to_string(),
        table_name: "test".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&closed_pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}
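
The deleted file above serialized its tests behind a lazy_static Mutex; the replacement tests instead isolate by generating a fresh name per test from a nanosecond timestamp. If timestamp collisions ever become a concern, a process-wide counter is a cheap hardening (a sketch, not in the diff):

use std::sync::atomic::{AtomicU64, Ordering};

static TEST_SEQ: AtomicU64 = AtomicU64::new(0);

fn unique_name(prefix: &str) -> String {
    // An atomic counter cannot collide, unlike near-simultaneous timestamps.
    format!("{}_{}", prefix, TEST_SEQ.fetch_add(1, Ordering::Relaxed))
}
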
@@ -3,6 +3,8 @@ use rstest::{fixture, rstest};
|
||||
use tonic;
|
||||
use sqlx::PgPool;
|
||||
use common::proto::multieko2::tables_data::GetTableDataByPositionRequest;
|
||||
use common::proto::multieko2::table_definition::{PostTableDefinitionRequest, ColumnDefinition};
|
||||
use server::table_definition::handlers::post_table_definition;
|
||||
use server::tables_data::handlers::get_table_data_by_position;
|
||||
use crate::common::setup_test_db;
|
||||
use chrono::Utc;
|
||||
@@ -21,73 +23,53 @@ async fn closed_pool(#[future] pool: PgPool) -> PgPool {
|
||||
}
|
||||
|
||||
async fn setup_test_environment(pool: &PgPool) -> (String, String, i64) {
|
||||
let now = Utc::now();
|
||||
let profile_name = format!("test_profile_{}", now.timestamp_nanos_opt().unwrap());
|
||||
let table_name = format!("test_table_{}", now.timestamp_nanos_opt().unwrap());
|
||||
|
||||
// Start transaction
|
||||
let mut tx = pool.begin().await.unwrap();
|
||||
// Create unique profile and table names
|
||||
let profile_name = format!("test_profile_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
|
||||
let table_name = format!("test_table_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
|
||||
|
||||
// Create profile
|
||||
let profile_id = sqlx::query_scalar!(
|
||||
"INSERT INTO profiles (name) VALUES ($1) RETURNING id",
|
||||
// Use the table definition handler to create the table properly
|
||||
let request = PostTableDefinitionRequest {
|
||||
profile_name: profile_name.clone(),
|
||||
table_name: table_name.clone(),
|
||||
columns: vec![
|
||||
ColumnDefinition {
|
||||
name: "firma".to_string(),
|
||||
field_type: "text".to_string(),
|
||||
}
|
||||
],
|
||||
indexes: vec![],
|
||||
links: vec![],
|
||||
};
|
||||
|
||||
post_table_definition(pool, request).await.unwrap();
|
||||
|
||||
// Get the schema_id for cleanup
|
||||
let schema_id = sqlx::query_scalar!(
|
||||
"SELECT id FROM schemas WHERE name = $1",
|
||||
profile_name
|
||||
)
|
||||
.fetch_one(&mut *tx)
|
||||
.fetch_one(pool)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Create table definition with proper columns
|
||||
let columns_json = serde_json::json!([
|
||||
r#""id" BIGSERIAL PRIMARY KEY"#,
|
||||
r#""deleted" BOOLEAN NOT NULL DEFAULT FALSE"#,
|
||||
r#""firma" TEXT NOT NULL"#
|
||||
]);
|
||||
|
||||
sqlx::query!(
|
||||
r#"INSERT INTO table_definitions (profile_id, table_name, columns, indexes)
|
||||
VALUES ($1, $2, $3, $4)"#,
|
||||
profile_id,
|
||||
table_name,
|
||||
columns_json, // Use proper columns array
|
||||
json!([])
|
||||
)
|
||||
.execute(&mut *tx)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Create actual table
|
||||
sqlx::query(&format!(
|
||||
r#"CREATE TABLE "{}" (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
deleted BOOLEAN NOT NULL DEFAULT false,
|
||||
firma TEXT NOT NULL
|
||||
)"#,
|
||||
table_name
|
||||
))
|
||||
.execute(&mut *tx)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tx.commit().await.unwrap();
|
||||
(profile_name, table_name, profile_id)
|
||||
(profile_name, table_name, schema_id)
|
||||
}
|
||||
|
||||
async fn cleanup_test_environment(pool: &PgPool, profile_id: i64, table_name: &str) {
|
||||
async fn cleanup_test_environment(pool: &PgPool, schema_id: i64, profile_name: &str) {
|
||||
let mut tx = pool.begin().await.unwrap();
|
||||
|
||||
// Cleanup order matters!
|
||||
sqlx::query(&format!(r#"DROP TABLE IF EXISTS "{}" CASCADE"#, table_name))
|
||||
sqlx::query(&format!(r#"DROP SCHEMA IF EXISTS "{}" CASCADE"#, profile_name))
|
||||
.execute(&mut *tx)
|
||||
.await
|
||||
.unwrap();
|
||||
|
-    sqlx::query!("DELETE FROM table_definitions WHERE profile_id = $1", profile_id)
+    sqlx::query!("DELETE FROM table_definitions WHERE schema_id = $1", schema_id)
        .execute(&mut *tx)
        .await
        .unwrap();

-    sqlx::query!("DELETE FROM profiles WHERE id = $1", profile_id)
+    sqlx::query!("DELETE FROM schemas WHERE id = $1", schema_id)
        .execute(&mut *tx)
        .await
        .unwrap();
@@ -101,12 +83,13 @@ async fn test_retrieves_correct_record_by_position(
     #[future] pool: PgPool,
 ) {
     let pool = pool.await;
-    let (profile_name, table_name, profile_id) = setup_test_environment(&pool).await;
+    let (profile_name, table_name, schema_id) = setup_test_environment(&pool).await;

     // Insert test data
     let mut tx = pool.begin().await.unwrap();
     let id1: i64 = sqlx::query_scalar(&format!(
-        r#"INSERT INTO "{}" (firma) VALUES ('Test 1') RETURNING id"#,
+        r#"INSERT INTO "{}"."{}" (firma) VALUES ('Test 1') RETURNING id"#,
         profile_name,
         table_name
     ))
     .fetch_one(&mut *tx)
@@ -114,7 +97,8 @@ async fn test_retrieves_correct_record_by_position(
     .unwrap();

     let id2: i64 = sqlx::query_scalar(&format!(
-        r#"INSERT INTO "{}" (firma) VALUES ('Test 2') RETURNING id"#,
+        r#"INSERT INTO "{}"."{}" (firma) VALUES ('Test 2') RETURNING id"#,
         profile_name,
         table_name
     ))
     .fetch_one(&mut *tx)
@@ -140,7 +124,7 @@ async fn test_retrieves_correct_record_by_position(
     let response = get_table_data_by_position(&pool, request).await.unwrap();
     assert_eq!(response.data["id"], id2.to_string());

-    cleanup_test_environment(&pool, profile_id, &table_name).await;
+    cleanup_test_environment(&pool, schema_id, &profile_name).await;
 }

 #[rstest]
@@ -149,12 +133,13 @@ async fn test_excludes_deleted_records(
     #[future] pool: PgPool,
 ) {
     let pool = pool.await;
-    let (profile_name, table_name, profile_id) = setup_test_environment(&pool).await;
+    let (profile_name, table_name, schema_id) = setup_test_environment(&pool).await;

     // Insert test data
     let mut tx = pool.begin().await.unwrap();
     let id1: i64 = sqlx::query_scalar(&format!(
-        r#"INSERT INTO "{}" (firma) VALUES ('Test 1') RETURNING id"#,
+        r#"INSERT INTO "{}"."{}" (firma) VALUES ('Test 1') RETURNING id"#,
         profile_name,
         table_name
     ))
     .fetch_one(&mut *tx)
@@ -163,7 +148,8 @@ async fn test_excludes_deleted_records(

     // Insert and delete a record
     let deleted_id: i64 = sqlx::query_scalar(&format!(
-        r#"INSERT INTO "{}" (firma) VALUES ('Deleted') RETURNING id"#,
+        r#"INSERT INTO "{}"."{}" (firma) VALUES ('Deleted') RETURNING id"#,
         profile_name,
         table_name
     ))
     .fetch_one(&mut *tx)
@@ -171,7 +157,8 @@ async fn test_excludes_deleted_records(
     .unwrap();

     let id2: i64 = sqlx::query_scalar(&format!(
-        r#"INSERT INTO "{}" (firma) VALUES ('Test 2') RETURNING id"#,
+        r#"INSERT INTO "{}"."{}" (firma) VALUES ('Test 2') RETURNING id"#,
         profile_name,
         table_name
     ))
     .fetch_one(&mut *tx)
@@ -179,7 +166,8 @@ async fn test_excludes_deleted_records(
     .unwrap();

     sqlx::query(&format!(
-        r#"UPDATE "{}" SET deleted = true WHERE id = $1"#,
+        r#"UPDATE "{}"."{}" SET deleted = true WHERE id = $1"#,
         profile_name,
         table_name
     ))
     .bind(deleted_id)
@@ -206,7 +194,7 @@ async fn test_excludes_deleted_records(
     let response = get_table_data_by_position(&pool, request).await.unwrap();
     assert_eq!(response.data["id"], id2.to_string());

-    cleanup_test_environment(&pool, profile_id, &table_name).await;
+    cleanup_test_environment(&pool, schema_id, &profile_name).await;
 }

 #[rstest]
@@ -215,7 +203,7 @@ async fn test_invalid_position(
     #[future] pool: PgPool,
 ) {
     let pool = pool.await;
-    let (profile_name, table_name, profile_id) = setup_test_environment(&pool).await;
+    let (profile_name, table_name, schema_id) = setup_test_environment(&pool).await;

     // Test position 0
     let request = GetTableDataByPositionRequest {
@@ -237,7 +225,7 @@ async fn test_invalid_position(
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);

-    cleanup_test_environment(&pool, profile_id, &table_name).await;
+    cleanup_test_environment(&pool, schema_id, &profile_name).await;
 }

 #[rstest]
@@ -246,12 +234,13 @@ async fn test_position_out_of_bounds(
     #[future] pool: PgPool,
 ) {
     let pool = pool.await;
-    let (profile_name, table_name, profile_id) = setup_test_environment(&pool).await;
+    let (profile_name, table_name, schema_id) = setup_test_environment(&pool).await;

     // Insert one record
     let mut tx = pool.begin().await.unwrap();
     sqlx::query(&format!(
-        r#"INSERT INTO "{}" (firma) VALUES ('Test 1')"#,
+        r#"INSERT INTO "{}"."{}" (firma) VALUES ('Test 1')"#,
         profile_name,
         table_name
     ))
     .execute(&mut *tx)
@@ -269,7 +258,7 @@ async fn test_position_out_of_bounds(
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);

-    cleanup_test_environment(&pool, profile_id, &table_name).await;
+    cleanup_test_environment(&pool, schema_id, &profile_name).await;
 }

 #[rstest]
@@ -278,7 +267,7 @@ async fn test_table_not_in_profile(
     #[future] pool: PgPool,
 ) {
     let pool = pool.await;
-    let (profile_name, _, profile_id) = setup_test_environment(&pool).await;
+    let (profile_name, _, schema_id) = setup_test_environment(&pool).await;

     // Test with non-existent table
     let request = GetTableDataByPositionRequest {
@@ -290,7 +279,7 @@ async fn test_table_not_in_profile(
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);

-    cleanup_test_environment(&pool, profile_id, "dummy_table").await;
+    cleanup_test_environment(&pool, schema_id, "dummy_table").await;
 }

 #[rstest]
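Editor's note on the recurring change above: PostgreSQL cannot accept identifiers (schema or table names) as bind parameters, which is why these tests interpolate the schema-qualified name with format! while still passing row values through $1-style binds. A minimal sketch of that split, assuming a hypothetical qualified_table helper that is not part of this diff:

use sqlx::PgPool;

// Hypothetical helper: quote each identifier part; values still go through binds.
// Doubling embedded double quotes is the standard escape inside a quoted identifier.
fn qualified_table(schema: &str, table: &str) -> String {
    format!(
        r#""{}"."{}""#,
        schema.replace('"', "\"\""),
        table.replace('"', "\"\"")
    )
}

async fn mark_deleted(pool: &PgPool, schema: &str, table: &str, id: i64) -> sqlx::Result<()> {
    // Identifier is interpolated; the row id is bound, never interpolated.
    let sql = format!(
        "UPDATE {} SET deleted = true WHERE id = $1",
        qualified_table(schema, table)
    );
    sqlx::query(&sql).bind(id).execute(pool).await?;
    Ok(())
}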
@@ -1,8 +1,3 @@
|
||||
// tests/tables_data/mod.rs
|
||||
pub mod post_table_data_test;
|
||||
pub mod put_table_data_test;
|
||||
pub mod delete_table_data_test;
|
||||
pub mod get_table_data_test;
|
||||
pub mod get_table_data_count_test;
|
||||
pub mod get_table_data_by_position_test;
|
||||
|
||||
pub mod get_table_data_by_position_test;
|
||||
|
||||
@@ -1,299 +0,0 @@
// tests/tables_data/handlers/post_table_data_test.rs
use rstest::{fixture, rstest};
use sqlx::PgPool;
use std::collections::HashMap;
use common::proto::multieko2::tables_data::{PostTableDataRequest, PostTableDataResponse};
use server::tables_data::handlers::post_table_data;
use crate::common::setup_test_db;
use tonic;
use chrono::Utc;

// Fixtures
#[fixture]
async fn pool() -> PgPool {
    setup_test_db().await
}

#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
    let pool = pool.await;
    pool.close().await;
    pool
}

#[fixture]
fn valid_request() -> HashMap<String, String> {
    let mut map = HashMap::new();
    map.insert("firma".into(), "Test Company".into());
    map.insert("kz".into(), "KZ123".into());
    map.insert("drc".into(), "DRC456".into());
    map.insert("ulica".into(), "Test Street".into());
    map.insert("psc".into(), "12345".into());
    map.insert("mesto".into(), "Test City".into());
    map.insert("stat".into(), "Test Country".into());
    map.insert("banka".into(), "Test Bank".into());
    map.insert("ucet".into(), "123456789".into());
    map.insert("skladm".into(), "Warehouse M".into());
    map.insert("ico".into(), "12345678".into());
    map.insert("kontakt".into(), "John Doe".into());
    map.insert("telefon".into(), "+421123456789".into());
    map.insert("skladu".into(), "Warehouse U".into());
    map.insert("fax".into(), "+421123456700".into());
    map
}

#[fixture]
fn minimal_request() -> HashMap<String, String> {
    let mut map = HashMap::new();
    map.insert("firma".into(), "Required Only".into());
    map
}

fn create_table_request(data: HashMap<String, String>) -> PostTableDataRequest {
    PostTableDataRequest {
        profile_name: "default".into(),
        table_name: "2025_adresar".into(),
        data,
    }
}

async fn assert_table_response(pool: &PgPool, response: &PostTableDataResponse, expected: &HashMap<String, String>) {
    let row = sqlx::query!(r#"SELECT * FROM "2025_adresar" WHERE id = $1"#, response.inserted_id)
        .fetch_one(pool)
        .await
        .unwrap();

    assert_eq!(row.firma, expected["firma"]);
    assert!(!row.deleted);

    // Check optional fields using direct struct access
    let check_field = |field: &str, value: &str| {
        let db_value = match field {
            "kz" => row.kz.as_deref().unwrap_or_default(),
            "drc" => row.drc.as_deref().unwrap_or_default(),
            "ulica" => row.ulica.as_deref().unwrap_or_default(),
            "psc" => row.psc.as_deref().unwrap_or_default(),
            "mesto" => row.mesto.as_deref().unwrap_or_default(),
            "stat" => row.stat.as_deref().unwrap_or_default(),
            "banka" => row.banka.as_deref().unwrap_or_default(),
            "ucet" => row.ucet.as_deref().unwrap_or_default(),
            "skladm" => row.skladm.as_deref().unwrap_or_default(),
            "ico" => row.ico.as_deref().unwrap_or_default(),
            "kontakt" => row.kontakt.as_deref().unwrap_or_default(),
            "telefon" => row.telefon.as_deref().unwrap_or_default(),
            "skladu" => row.skladu.as_deref().unwrap_or_default(),
            "fax" => row.fax.as_deref().unwrap_or_default(),
            _ => panic!("Unexpected field: {}", field),
        };
        assert_eq!(db_value, value);
    };

    check_field("kz", expected.get("kz").unwrap_or(&String::new()));
    check_field("drc", expected.get("drc").unwrap_or(&String::new()));
    check_field("ulica", expected.get("ulica").unwrap_or(&String::new()));
    check_field("psc", expected.get("psc").unwrap_or(&String::new()));
    check_field("mesto", expected.get("mesto").unwrap_or(&String::new()));
    check_field("stat", expected.get("stat").unwrap_or(&String::new()));
    check_field("banka", expected.get("banka").unwrap_or(&String::new()));
    check_field("ucet", expected.get("ucet").unwrap_or(&String::new()));
    check_field("skladm", expected.get("skladm").unwrap_or(&String::new()));
    check_field("ico", expected.get("ico").unwrap_or(&String::new()));
    check_field("kontakt", expected.get("kontakt").unwrap_or(&String::new()));
    check_field("telefon", expected.get("telefon").unwrap_or(&String::new()));
    check_field("skladu", expected.get("skladu").unwrap_or(&String::new()));
    check_field("fax", expected.get("fax").unwrap_or(&String::new()));

    // Handle timestamp conversion
    let odt = row.created_at.unwrap();
    let created_at = chrono::DateTime::from_timestamp(odt.unix_timestamp(), odt.nanosecond())
        .expect("Invalid timestamp");
    assert!(created_at <= Utc::now());
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_success(
    #[future] pool: PgPool,
    valid_request: HashMap<String, String>,
) {
    let pool = pool.await;
    let request = create_table_request(valid_request.clone());
    let response = post_table_data(&pool, request).await.unwrap();

    assert!(response.inserted_id > 0);
    assert!(response.success);
    assert_eq!(response.message, "Data inserted successfully");
    assert_table_response(&pool, &response, &valid_request).await;
}

// Remaining tests follow the same pattern with fixed parameter declarations
#[rstest]
#[tokio::test]
async fn test_create_table_data_whitespace_trimming(
    #[future] pool: PgPool,
    valid_request: HashMap<String, String>,
) {
    let pool = pool.await;
    let mut request = valid_request;
    request.insert("firma".into(), " Test Company ".into());
    request.insert("telefon".into(), " +421123456789 ".into());

    let response = post_table_data(&pool, create_table_request(request)).await.unwrap();

    let row = sqlx::query!(r#"SELECT firma, telefon FROM "2025_adresar" WHERE id = $1"#, response.inserted_id)
        .fetch_one(&pool)
        .await
        .unwrap();

    assert_eq!(row.firma, "Test Company");
    assert_eq!(row.telefon.unwrap(), "+421123456789");
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_empty_optional_fields(
    #[future] pool: PgPool,
    valid_request: HashMap<String, String>,
) {
    let pool = pool.await;
    let mut request = valid_request;
    request.insert("telefon".into(), " ".into());

    let response = post_table_data(&pool, create_table_request(request)).await.unwrap();
    let telefon: Option<String> = sqlx::query_scalar!(r#"SELECT telefon FROM "2025_adresar" WHERE id = $1"#, response.inserted_id)
        .fetch_one(&pool)
        .await
        .unwrap();

    assert!(telefon.is_none());
}

// Fixed parameter declarations for remaining tests
#[rstest]
#[tokio::test]
async fn test_create_table_data_invalid_firma(
    #[future] pool: PgPool,
    valid_request: HashMap<String, String>,
) {
    let pool = pool.await;
    let mut request = valid_request;
    request.insert("firma".into(), " ".into());

    let result = post_table_data(&pool, create_table_request(request)).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_minimal_request(
    #[future] pool: PgPool,
    minimal_request: HashMap<String, String>,
) {
    let pool = pool.await;
    let response = post_table_data(&pool, create_table_request(minimal_request.clone())).await.unwrap();
    assert!(response.inserted_id > 0);
    assert_table_response(&pool, &response, &minimal_request).await;
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_telefon_length_limit(
    #[future] pool: PgPool,
    valid_request: HashMap<String, String>,
) {
    let pool = pool.await;
    let mut request = valid_request;
    request.insert("telefon".into(), "1".repeat(16));

    let result = post_table_data(&pool, create_table_request(request)).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_special_characters(
    #[future] pool: PgPool,
    valid_request: HashMap<String, String>,
) {
    let pool = pool.await;
    let mut request = valid_request;
    request.insert("ulica".into(), "Náměstí 28. října 123/456".into());

    let response = post_table_data(&pool, create_table_request(request)).await.unwrap();
    let row = sqlx::query!(r#"SELECT ulica FROM "2025_adresar" WHERE id = $1"#, response.inserted_id)
        .fetch_one(&pool)
        .await
        .unwrap();

    assert_eq!(row.ulica.unwrap(), "Náměstí 28. října 123/456");
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_database_error(
    #[future] closed_pool: PgPool,
    minimal_request: HashMap<String, String>,
) {
    let closed_pool = closed_pool.await;
    let result = post_table_data(&closed_pool, create_table_request(minimal_request)).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}


#[rstest]
#[tokio::test]
async fn test_create_table_data_empty_firma(
    #[future] pool: PgPool,
    minimal_request: HashMap<String, String>,
) {
    let pool = pool.await;
    let mut request = minimal_request;
    request.insert("firma".into(), "".into());

    let result = post_table_data(&pool, create_table_request(request)).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_optional_fields_null_vs_empty(
    #[future] pool: PgPool,
    valid_request: HashMap<String, String>,
) {
    let pool = pool.await;
    let mut request = valid_request;
    request.insert("telefon".into(), "".into());

    let response = post_table_data(&pool, create_table_request(request)).await.unwrap();
    let telefon: Option<String> = sqlx::query_scalar!(r#"SELECT telefon FROM "2025_adresar" WHERE id = $1"#, response.inserted_id)
        .fetch_one(&pool)
        .await
        .unwrap();

    assert!(telefon.is_none());
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_field_length_limits(
    #[future] pool: PgPool,
    valid_request: HashMap<String, String>,
) {
    let pool = pool.await;
    let mut request = valid_request;
    request.insert("firma".into(), "a".repeat(255));
    request.insert("telefon".into(), "1".repeat(15)); // Within limits

    let response = post_table_data(&pool, create_table_request(request)).await.unwrap();
    let row = sqlx::query!(r#"SELECT firma, telefon FROM "2025_adresar" WHERE id = $1"#, response.inserted_id)
        .fetch_one(&pool)
        .await
        .unwrap();

    assert_eq!(row.firma.len(), 255);
    assert_eq!(row.telefon.unwrap().len(), 15);
}
@@ -1,254 +0,0 @@
// tests/tables_data/handlers/put_table_data_test.rs
use rstest::{fixture, rstest};
use sqlx::PgPool;
use std::collections::HashMap;
use common::proto::multieko2::tables_data::{PutTableDataRequest, PutTableDataResponse};
use server::tables_data::handlers::put_table_data;
use crate::common::setup_test_db;
use tonic;
use chrono::Utc;

// Fixtures
#[fixture]
async fn pool() -> PgPool {
    setup_test_db().await
}

#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
    let pool = pool.await;
    pool.close().await;
    pool
}

#[fixture]
async fn existing_record(#[future] pool: PgPool) -> (PgPool, i64) {
    let pool = pool.await;

    // Create a test record in the database
    let record = sqlx::query!(
        r#"
        INSERT INTO "2025_adresar" (
            firma, kz, drc, ulica, psc, mesto, stat, banka, ucet,
            skladm, ico, kontakt, telefon, skladu, fax, deleted
        )
        VALUES (
            'Original Company', 'Original KZ', 'Original DRC', 'Original Street',
            '12345', 'Original City', 'Original Country', 'Original Bank',
            'Original Account', 'Original SkladM', 'Original ICO',
            'Original Contact', '+421123456789', 'Original SkladU', 'Original Fax',
            false
        )
        RETURNING id
        "#
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    (pool, record.id)
}

#[fixture]
fn valid_request_template() -> HashMap<String, String> {
    let mut map = HashMap::new();
    map.insert("firma".into(), "Updated Company".into());
    map.insert("kz".into(), "Updated KZ".into());
    map.insert("drc".into(), "Updated DRC".into());
    map.insert("ulica".into(), "Updated Street".into());
    map.insert("psc".into(), "67890".into());
    map.insert("mesto".into(), "Updated City".into());
    map.insert("stat".into(), "Updated Country".into());
    map.insert("banka".into(), "Updated Bank".into());
    map.insert("ucet".into(), "987654321".into());
    map.insert("skladm".into(), "Updated SkladM".into());
    map.insert("ico".into(), "87654321".into());
    map.insert("kontakt".into(), "Jane Doe".into());
    map.insert("telefon".into(), "+421987654321".into());
    map.insert("skladu".into(), "Updated SkladU".into());
    map.insert("fax".into(), "+421987654300".into());
    map
}

// Helper to check database state
async fn assert_response_matches(pool: &PgPool, id: i64, response: &PutTableDataResponse) {
    let db_record = sqlx::query!(r#"SELECT * FROM "2025_adresar" WHERE id = $1"#, id)
        .fetch_one(pool)
        .await
        .unwrap();

    assert_eq!(db_record.firma, "Updated Company");
    assert_eq!(db_record.kz.unwrap_or_default(), "Updated KZ");
    assert_eq!(db_record.drc.unwrap_or_default(), "Updated DRC");
    assert_eq!(db_record.ulica.unwrap_or_default(), "Updated Street");
    assert_eq!(db_record.psc.unwrap_or_default(), "67890");
    assert_eq!(db_record.mesto.unwrap_or_default(), "Updated City");
    assert_eq!(db_record.stat.unwrap_or_default(), "Updated Country");
    assert_eq!(db_record.banka.unwrap_or_default(), "Updated Bank");
    assert_eq!(db_record.ucet.unwrap_or_default(), "987654321");
    assert_eq!(db_record.skladm.unwrap_or_default(), "Updated SkladM");
    assert_eq!(db_record.ico.unwrap_or_default(), "87654321");
    assert_eq!(db_record.kontakt.unwrap_or_default(), "Jane Doe");
    assert_eq!(db_record.telefon.unwrap_or_default(), "+421987654321");
    assert_eq!(db_record.skladu.unwrap_or_default(), "Updated SkladU");
    assert_eq!(db_record.fax.unwrap_or_default(), "+421987654300");
    assert!(!db_record.deleted, "Record should not be deleted");
}

// Tests
#[rstest]
#[tokio::test]
async fn test_update_table_data_success(
    #[future] existing_record: (PgPool, i64),
    valid_request_template: HashMap<String, String>,
) {
    let (pool, id) = existing_record.await;

    let request = PutTableDataRequest {
        profile_name: "default".into(),
        table_name: "2025_adresar".into(),
        id,
        data: valid_request_template,
    };

    let response = put_table_data(&pool, request).await.unwrap();

    assert!(response.success);
    assert_eq!(response.message, "Data updated successfully");
    assert_eq!(response.updated_id, id);
    assert_response_matches(&pool, id, &response).await;
}

#[rstest]
#[tokio::test]
async fn test_update_table_data_whitespace_trimming(
    #[future] existing_record: (PgPool, i64),
    valid_request_template: HashMap<String, String>,
) {
    let (pool, id) = existing_record.await;

    let mut data = valid_request_template;
    data.insert("firma".into(), " Updated Company ".into());
    data.insert("telefon".into(), " +421987654321 ".into());

    let request = PutTableDataRequest {
        profile_name: "default".into(),
        table_name: "2025_adresar".into(),
        id,
        data,
    };

    let response = put_table_data(&pool, request).await.unwrap();

    // Verify trimmed values in response
    assert_eq!(response.message, "Data updated successfully");

    // Verify raw values in database
    let db_record = sqlx::query!(r#"SELECT firma, telefon FROM "2025_adresar" WHERE id = $1"#, id)
        .fetch_one(&pool)
        .await
        .unwrap();

    assert_eq!(db_record.firma, "Updated Company"); // Trimmed
    assert_eq!(db_record.telefon.unwrap(), "+421987654321"); // Trimmed
}

#[rstest]
#[tokio::test]
async fn test_update_table_data_empty_required_field(
    #[future] existing_record: (PgPool, i64),
    valid_request_template: HashMap<String, String>,
) {
    let (pool, id) = existing_record.await;

    let mut data = valid_request_template;
    data.insert("firma".into(), "".into());

    let request = PutTableDataRequest {
        profile_name: "default".into(),
        table_name: "2025_adresar".into(),
        id,
        data,
    };

    let result = put_table_data(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::InvalidArgument);
}

#[rstest]
#[tokio::test]
async fn test_update_table_data_nonexistent_id(
    #[future] pool: PgPool,
    valid_request_template: HashMap<String, String>,
) {
    let pool = pool.await;

    let request = PutTableDataRequest {
        profile_name: "default".into(),
        table_name: "2025_adresar".into(),
        id: 9999, // Non-existent ID
        data: valid_request_template,
    };

    let result = put_table_data(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_update_table_data_deleted_record(
    #[future] existing_record: (PgPool, i64),
    valid_request_template: HashMap<String, String>,
) {
    let (pool, id) = existing_record.await;

    // Mark the record as deleted
    sqlx::query!(r#"UPDATE "2025_adresar" SET deleted = true WHERE id = $1"#, id)
        .execute(&pool)
        .await
        .unwrap();

    let request = PutTableDataRequest {
        profile_name: "default".into(),
        table_name: "2025_adresar".into(),
        id,
        data: valid_request_template,
    };

    let result = put_table_data(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_update_table_data_clear_optional_fields(
    #[future] existing_record: (PgPool, i64),
    valid_request_template: HashMap<String, String>,
) {
    let (pool, id) = existing_record.await;

    let mut data = valid_request_template;
    data.insert("telefon".into(), String::new());
    data.insert("ulica".into(), String::new());

    let request = PutTableDataRequest {
        profile_name: "default".into(),
        table_name: "2025_adresar".into(),
        id,
        data,
    };

    let response = put_table_data(&pool, request).await.unwrap();

    // Check database contains NULL for cleared fields
    let db_record = sqlx::query!(r#"SELECT telefon, ulica FROM "2025_adresar" WHERE id = $1"#, id)
        .fetch_one(&pool)
        .await
        .unwrap();

    assert!(db_record.telefon.is_none());
    assert!(db_record.ulica.is_none());
}
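Editor's note: both deleted files above (and their replacements below) rely on rstest's async-fixture pattern, in which an async #[fixture] returns the resource and a test receives it as a #[future] parameter that must be awaited before use. A minimal self-contained sketch of the pattern; the connection string is a placeholder assumption, real tests read it from the environment:

use rstest::{fixture, rstest};
use sqlx::PgPool;

#[fixture]
async fn pool() -> PgPool {
    // Placeholder URL for illustration only.
    PgPool::connect("postgres://localhost/test_db").await.unwrap()
}

#[rstest]
#[tokio::test]
async fn uses_the_fixture(#[future] pool: PgPool) {
    // The fixture arrives as a future; await it to get the actual PgPool.
    let pool = pool.await;
    assert!(!pool.is_closed());
}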
@@ -1,2 +1,7 @@
 // tests/tables_data/mod.rs
+
 pub mod handlers;
+// pub mod get;
+// pub mod delete;
+// pub mod post;
+// pub mod put;
3 server/tests/tables_data/post/mod.rs Normal file
@@ -0,0 +1,3 @@
// tests/tables_data/post/mod.rs

pub mod post_table_data_test;

484 server/tests/tables_data/post/post_table_data_test.rs Normal file
@@ -0,0 +1,484 @@
// tests/tables_data/handlers/post_table_data_test.rs
use rstest::{fixture, rstest};
use sqlx::PgPool;
use std::collections::HashMap;
use prost_types::Value;
use prost_types::value::Kind;
use common::proto::multieko2::tables_data::{PostTableDataRequest, PostTableDataResponse};
use common::proto::multieko2::table_definition::TableLink;
use common::proto::multieko2::table_definition::{
    PostTableDefinitionRequest, ColumnDefinition as TableColumnDefinition
};
use server::tables_data::handlers::post_table_data;
use server::table_definition::handlers::post_table_definition;
use crate::common::setup_test_db;
use tonic;
use chrono::Utc;
use sqlx::types::chrono::DateTime;
use tokio::sync::mpsc;
use server::indexer::IndexCommand;
use sqlx::Row;
use rand::distr::Alphanumeric;
use rand::Rng;
use rust_decimal::prelude::FromPrimitive;

// Helper function to generate unique identifiers for test isolation
fn generate_unique_id() -> String {
    rand::rng()
        .sample_iter(&Alphanumeric)
        .take(8)
        .map(char::from)
        .collect::<String>()
        .to_lowercase()
}

// Helper function to convert string to protobuf Value
fn string_to_proto_value(s: String) -> Value {
    Value {
        kind: Some(Kind::StringValue(s)),
    }
}

// Helper function to convert HashMap<String, String> to HashMap<String, Value>
fn convert_to_proto_values(data: HashMap<String, String>) -> HashMap<String, Value> {
    data.into_iter()
        .map(|(k, v)| (k, string_to_proto_value(v)))
        .collect()
}

// Create the table definition for adresar test with unique name
async fn create_adresar_table(pool: &PgPool, table_name: &str, profile_name: &str) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition {
                name: "firma".into(),
                field_type: "text".into(),
            },
            TableColumnDefinition {
                name: "kz".into(),
                field_type: "text".into(),
            },
            TableColumnDefinition {
                name: "drc".into(),
                field_type: "text".into(),
            },
            TableColumnDefinition {
                name: "ulica".into(),
                field_type: "text".into(),
            },
            TableColumnDefinition {
                name: "psc".into(),
                field_type: "text".into(),
            },
            TableColumnDefinition {
                name: "mesto".into(),
                field_type: "text".into(),
            },
            TableColumnDefinition {
                name: "stat".into(),
                field_type: "text".into(),
            },
            TableColumnDefinition {
                name: "banka".into(),
                field_type: "text".into(),
            },
            TableColumnDefinition {
                name: "ucet".into(),
                field_type: "text".into(),
            },
            TableColumnDefinition {
                name: "skladm".into(),
                field_type: "text".into(),
            },
            TableColumnDefinition {
                name: "ico".into(),
                field_type: "text".into(),
            },
            TableColumnDefinition {
                name: "kontakt".into(),
                field_type: "text".into(),
            },
            TableColumnDefinition {
                name: "telefon".into(),
                field_type: "text".into(),
            },
            TableColumnDefinition {
                name: "skladu".into(),
                field_type: "text".into(),
            },
            TableColumnDefinition {
                name: "fax".into(),
                field_type: "text".into(),
            },
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

// Test context structure to hold unique identifiers
#[derive(Clone)]
struct TestContext {
    pool: PgPool,
    profile_name: String,
    table_name: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
}

// Fixtures
#[fixture]
async fn test_context() -> TestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("test_profile_{}", unique_id);
    let table_name = format!("adresar_test_{}", unique_id);

    // Create the table for this specific test
    create_adresar_table(&pool, &table_name, &profile_name).await
        .expect("Failed to create test table");

    let (tx, _rx) = mpsc::channel(100);

    TestContext {
        pool,
        profile_name,
        table_name,
        indexer_tx: tx,
    }
}

#[fixture]
async fn closed_test_context() -> TestContext {
    let mut context = test_context().await;
    context.pool.close().await;
    context
}

#[fixture]
fn valid_request() -> HashMap<String, String> {
    let mut map = HashMap::new();
    map.insert("firma".into(), "Test Company".into());
    map.insert("kz".into(), "KZ123".into());
    map.insert("drc".into(), "DRC456".into());
    map.insert("ulica".into(), "Test Street".into());
    map.insert("psc".into(), "12345".into());
    map.insert("mesto".into(), "Test City".into());
    map.insert("stat".into(), "Test Country".into());
    map.insert("banka".into(), "Test Bank".into());
    map.insert("ucet".into(), "123456789".into());
    map.insert("skladm".into(), "Warehouse M".into());
    map.insert("ico".into(), "12345678".into());
    map.insert("kontakt".into(), "John Doe".into());
    map.insert("telefon".into(), "+421123456789".into());
    map.insert("skladu".into(), "Warehouse U".into());
    map.insert("fax".into(), "+421123456700".into());
    map
}

#[fixture]
fn minimal_request() -> HashMap<String, String> {
    let mut map = HashMap::new();
    map.insert("firma".into(), "Required Only".into());
    map
}

fn create_table_request(context: &TestContext, data: HashMap<String, String>) -> PostTableDataRequest {
    PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data: convert_to_proto_values(data),
    }
}

async fn assert_table_response(context: &TestContext, response: &PostTableDataResponse, expected: &HashMap<String, String>) {
    // Use dynamic query since table is created at runtime with unique names
    let query = format!(
        r#"SELECT * FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    // Get values from Row dynamically
    let firma: String = row.get("firma");
    let deleted: bool = row.get("deleted");

    assert_eq!(firma, expected["firma"]);
    assert!(!deleted);

    // Check optional fields
    let check_field = |field: &str, expected_value: &str| {
        let db_value: Option<String> = row.get(field);
        assert_eq!(db_value.as_deref().unwrap_or(""), expected_value);
    };

    check_field("kz", expected.get("kz").unwrap_or(&String::new()));
    check_field("drc", expected.get("drc").unwrap_or(&String::new()));
    check_field("ulica", expected.get("ulica").unwrap_or(&String::new()));
    check_field("psc", expected.get("psc").unwrap_or(&String::new()));
    check_field("mesto", expected.get("mesto").unwrap_or(&String::new()));
    check_field("stat", expected.get("stat").unwrap_or(&String::new()));
    check_field("banka", expected.get("banka").unwrap_or(&String::new()));
    check_field("ucet", expected.get("ucet").unwrap_or(&String::new()));
    check_field("skladm", expected.get("skladm").unwrap_or(&String::new()));
    check_field("ico", expected.get("ico").unwrap_or(&String::new()));
    check_field("kontakt", expected.get("kontakt").unwrap_or(&String::new()));
    check_field("telefon", expected.get("telefon").unwrap_or(&String::new()));
    check_field("skladu", expected.get("skladu").unwrap_or(&String::new()));
    check_field("fax", expected.get("fax").unwrap_or(&String::new()));

    // Handle timestamp conversion
    let created_at: Option<chrono::DateTime<Utc>> = row.get("created_at");
    assert!(created_at.unwrap() <= Utc::now());
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_success(
    #[future] test_context: TestContext,
    valid_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let request = create_table_request(&context, valid_request.clone());
    let response = post_table_data(&context.pool, request, &context.indexer_tx).await.unwrap();

    assert!(response.inserted_id > 0);
    assert!(response.success);
    assert_eq!(response.message, "Data inserted successfully");
    assert_table_response(&context, &response, &valid_request).await;
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_whitespace_trimming(
    #[future] test_context: TestContext,
    valid_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let mut request = valid_request;
    request.insert("firma".into(), " Test Company ".into());
    request.insert("telefon".into(), " +421123456789 ".into());

    let response = post_table_data(&context.pool, create_table_request(&context, request), &context.indexer_tx).await.unwrap();

    let query = format!(
        r#"SELECT firma, telefon FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let firma: String = row.get("firma");
    let telefon: Option<String> = row.get("telefon");

    assert_eq!(firma, "Test Company");
    assert_eq!(telefon.unwrap(), "+421123456789");
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_empty_optional_fields(
    #[future] test_context: TestContext,
    valid_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let mut request = valid_request;
    request.insert("telefon".into(), " ".into());

    let response = post_table_data(&context.pool, create_table_request(&context, request), &context.indexer_tx).await.unwrap();

    let query = format!(
        r#"SELECT telefon FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let telefon: Option<String> = sqlx::query_scalar(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    assert!(telefon.is_none());
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_minimal_request(
    #[future] test_context: TestContext,
    minimal_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let response = post_table_data(&context.pool, create_table_request(&context, minimal_request.clone()), &context.indexer_tx).await.unwrap();
    assert!(response.inserted_id > 0);
    assert_table_response(&context, &response, &minimal_request).await;
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_telefon_length_limit(
    #[future] test_context: TestContext,
    valid_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let mut request = valid_request;
    request.insert("telefon".into(), "1".repeat(16));

    let result = post_table_data(&context.pool, create_table_request(&context, request), &context.indexer_tx).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_special_characters(
    #[future] test_context: TestContext,
    valid_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let mut request = valid_request;
    request.insert("ulica".into(), "Náměstí 28. října 123/456".into());

    let response = post_table_data(&context.pool, create_table_request(&context, request), &context.indexer_tx).await.unwrap();

    let query = format!(
        r#"SELECT ulica FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let ulica: Option<String> = row.get("ulica");
    assert_eq!(ulica.unwrap(), "Náměstí 28. října 123/456");
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_database_error(
    #[future] closed_test_context: TestContext,
    minimal_request: HashMap<String, String>,
) {
    let context = closed_test_context.await;
    let result = post_table_data(&context.pool, create_table_request(&context, minimal_request), &context.indexer_tx).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_optional_fields_null_vs_empty(
    #[future] test_context: TestContext,
    valid_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let mut request = valid_request;
    request.insert("telefon".into(), "".into());

    let response = post_table_data(&context.pool, create_table_request(&context, request), &context.indexer_tx).await.unwrap();

    let query = format!(
        r#"SELECT telefon FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let telefon: Option<String> = sqlx::query_scalar(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    assert!(telefon.is_none());
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_field_length_limits(
    #[future] test_context: TestContext,
    valid_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let mut request = valid_request;
    request.insert("firma".into(), "a".repeat(255));
    request.insert("telefon".into(), "1".repeat(15)); // Within limits

    let response = post_table_data(&context.pool, create_table_request(&context, request), &context.indexer_tx).await.unwrap();

    let query = format!(
        r#"SELECT firma, telefon FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let firma: String = row.get("firma");
    let telefon: Option<String> = row.get("telefon");

    assert_eq!(firma.len(), 255);
    assert_eq!(telefon.unwrap().len(), 15);
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_with_null_values(
    #[future] test_context: TestContext,
) {
    let context = test_context.await;

    // Create a request with some null values
    let mut data = HashMap::new();
    data.insert("firma".into(), string_to_proto_value("Test Company".into()));
    data.insert("telefon".into(), Value { kind: Some(Kind::NullValue(0)) }); // Explicit null
    data.insert("ulica".into(), Value { kind: None }); // Another way to represent null

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let response = post_table_data(&context.pool, request, &context.indexer_tx).await.unwrap();

    let query = format!(
        r#"SELECT firma, telefon, ulica FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let firma: String = row.get("firma");
    let telefon: Option<String> = row.get("telefon");
    let ulica: Option<String> = row.get("ulica");

    assert_eq!(firma, "Test Company");
    assert!(telefon.is_none());
    assert!(ulica.is_none());
}

include!("post_table_data_test2.rs");
include!("post_table_data_test3.rs");
include!("post_table_data_test4.rs");
include!("post_table_data_test5.rs");
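Editor's note on the include! calls that close the file above: they are how this suite splits one logical test module across several files without declaring extra modules. The included files are textual continuations, so they share the parent file's use statements, fixtures, and helpers. A minimal sketch of the mechanism, with hypothetical file names:

// tests/sketch_main.rs (hypothetical)
fn double(x: i32) -> i32 { x * 2 }

// Splices the other file's text here at compile time; its items join this
// module and can call `double` without any `use` or `mod` declarations.
include!("sketch_extra.rs");

#[test]
fn included_items_share_scope() {
    assert_eq!(quadruple(3), 12);
}

// tests/sketch_extra.rs (hypothetical, contents shown for reference):
// fn quadruple(x: i32) -> i32 { double(double(x)) }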
484 server/tests/tables_data/post/post_table_data_test2.rs Normal file
@@ -0,0 +1,484 @@
|
||||
// tests/tables_data/handlers/post_table_data_test2.rs
|
||||
|
||||
// ========= Additional helper functions for test2 =========
|
||||
|
||||
async fn create_test_indexer_channel() -> mpsc::Sender<IndexCommand> {
|
||||
let (tx, mut rx) = mpsc::channel(100);
|
||||
|
||||
// Spawn a task to consume indexer messages to prevent blocking
|
||||
tokio::spawn(async move {
|
||||
while let Some(_) = rx.recv().await {
|
||||
// Just consume the messages
|
||||
}
|
||||
});
|
||||
|
||||
tx
|
||||
}
|
||||
|
||||
// ========= Extended Data Type Validation Tests =========
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_boolean_system_column_validation(#[future] test_context: TestContext) {
|
||||
let context = test_context.await;
|
||||
let indexer_tx = create_test_indexer_channel().await;
|
||||
|
||||
// Test setting the deleted flag with string (should fail)
|
||||
let mut data = HashMap::new();
|
||||
data.insert("firma".into(), "System Test Company".to_string());
|
||||
data.insert("deleted".into(), "true".to_string()); // String instead of boolean
|
||||
|
||||
let result = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await;
|
||||
assert!(result.is_err());
|
||||
|
||||
if let Err(err) = result {
|
||||
assert_eq!(err.code(), tonic::Code::InvalidArgument);
|
||||
assert!(err.message().contains("Expected boolean for column 'deleted'"));
|
||||
}
|
||||
}
|
||||
|
||||
// ========= String Processing and Edge Cases =========
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_unicode_special_characters_comprehensive(#[future] test_context: TestContext) {
|
||||
let context = test_context.await;
|
||||
let indexer_tx = create_test_indexer_channel().await;
|
||||
|
||||
let special_strings = vec![
|
||||
"José María González", // Accented characters
|
||||
"Москва", // Cyrillic
|
||||
"北京市", // Chinese
|
||||
"🚀 Tech Company 🌟", // Emoji
|
||||
"Line\nBreak\tTab", // Control characters
|
||||
"Quote\"Test'Apostrophe", // Quotes
|
||||
"SQL'; DROP TABLE test; --", // SQL injection attempt
|
||||
"Price: $1,000.50 (50% off!)", // Special symbols
|
||||
];
|
||||
|
||||
for (i, test_string) in special_strings.into_iter().enumerate() {
|
||||
let mut data = HashMap::new();
|
||||
data.insert("firma".into(), test_string.to_string());
|
||||
data.insert("kz".into(), format!("TEST{}", i));
|
||||
|
||||
let response = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await.unwrap();
|
||||
assert!(response.success, "Failed for string: '{}'", test_string);
|
||||
|
||||
// Verify the data was stored correctly
|
||||
let query = format!(
|
||||
r#"SELECT firma FROM "{}"."{}" WHERE id = $1"#,
|
||||
context.profile_name, context.table_name
|
||||
);
|
||||
|
||||
let stored_firma: Option<String> = sqlx::query_scalar::<_, Option<String>>(&query)
|
||||
.bind(response.inserted_id)
|
||||
.fetch_one(&context.pool)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(stored_firma.unwrap(), test_string.trim());
|
||||
}
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_field_length_boundaries(#[future] test_context: TestContext) {
|
||||
let context = test_context.await;
|
||||
let indexer_tx = create_test_indexer_channel().await;
|
||||
|
||||
// Test telefon field length validation (should reject >15 chars)
|
||||
let length_test_cases = vec![
|
||||
("1234567890123456", true), // 16 chars - should fail
|
||||
("123456789012345", false), // 15 chars - should pass
|
||||
("", false), // Empty - should pass (becomes NULL)
|
||||
("1", false), // Single char - should pass
|
||||
];
|
||||
|
||||
for (test_string, should_fail) in length_test_cases {
|
||||
let mut data = HashMap::new();
|
||||
data.insert("firma".into(), "Length Test Company".to_string());
|
||||
data.insert("telefon".into(), test_string.to_string());
|
||||
|
||||
let result = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await;
|
||||
|
||||
if should_fail {
|
||||
assert!(result.is_err(), "Should fail for telefon length: {}", test_string.len());
|
||||
if let Err(err) = result {
|
||||
assert_eq!(err.code(), tonic::Code::Internal);
|
||||
assert!(err.message().contains("Value too long for telefon"));
|
||||
}
|
||||
} else {
|
||||
assert!(result.is_ok(), "Should succeed for telefon length: {}", test_string.len());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ========= NULL vs Empty String Handling =========
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_empty_strings_become_null(#[future] test_context: TestContext) {
|
||||
let context = test_context.await;
|
||||
let indexer_tx = create_test_indexer_channel().await;
|
||||
|
||||
let test_cases = vec![
|
||||
("", "empty_string"),
|
||||
(" ", "whitespace_only"),
|
||||
("\t\n", "tabs_newlines"),
|
||||
(" Normal Value ", "padded_value"),
|
||||
];
|
||||
|
||||
for (input, test_name) in test_cases {
|
||||
let mut data = HashMap::new();
|
||||
data.insert("firma".into(), format!("Test {}", test_name));
|
||||
data.insert("ulica".into(), input.to_string());
|
||||
|
||||
let response = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await.unwrap();
|
||||
assert!(response.success, "Failed for test case: {}", test_name);
|
||||
|
||||
// Check what was actually stored
|
||||
let query = format!(
|
||||
r#"SELECT ulica FROM "{}"."{}" WHERE id = $1"#,
|
||||
context.profile_name, context.table_name
|
||||
);
|
||||
|
||||
let stored_ulica: Option<String> = sqlx::query_scalar::<_, Option<String>>(&query)
|
||||
.bind(response.inserted_id)
|
||||
.fetch_one(&context.pool)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let trimmed = input.trim();
|
||||
if trimmed.is_empty() {
|
||||
assert!(stored_ulica.is_none(), "Empty/whitespace string should be NULL for: {}", test_name);
|
||||
} else {
|
||||
assert_eq!(stored_ulica.unwrap(), trimmed, "String should be trimmed for: {}", test_name);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ========= Concurrent Operations Testing =========
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_concurrent_inserts_same_table(#[future] test_context: TestContext) {
|
||||
let context = test_context.await;
|
||||
|
||||
use futures::future::join_all;
|
||||
|
||||
// Create multiple concurrent insert operations
|
||||
let futures = (0..10).map(|i| {
|
||||
let context = context.clone();
|
||||
async move {
|
||||
let indexer_tx = create_test_indexer_channel().await;
|
||||
let mut data = HashMap::new();
|
||||
data.insert("firma".into(), format!("Concurrent Company {}", i));
|
||||
data.insert("kz".into(), format!("CONC{}", i));
|
||||
data.insert("mesto".into(), format!("City {}", i));
|
||||
|
||||
post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await
|
||||
}
|
||||
});
|
||||
|
||||
let results = join_all(futures).await;
|
||||
|
||||
// All inserts should succeed
|
||||
for (i, result) in results.into_iter().enumerate() {
|
||||
assert!(result.is_ok(), "Concurrent insert {} should succeed", i);
|
||||
}
|
||||
|
||||
// Verify all records were inserted
|
||||
let query = format!(
|
||||
r#"SELECT COUNT(*) FROM "{}"."{}" WHERE firma LIKE 'Concurrent Company%'"#,
|
||||
context.profile_name, context.table_name
|
||||
);
|
||||
|
||||
let count: i64 = sqlx::query_scalar::<_, i64>(&query)
|
||||
.fetch_one(&context.pool)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(count, 10);
|
||||
}
|
||||
|
||||
// ========= Error Scenarios =========
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_invalid_column_names(#[future] test_context: TestContext) {
|
||||
let context = test_context.await;
|
||||
let indexer_tx = create_test_indexer_channel().await;
|
||||
|
||||
let mut data = HashMap::new();
|
||||
data.insert("firma".into(), "Valid Company".to_string());
|
||||
data.insert("nonexistent_column".into(), "Invalid".to_string());
|
||||
|
||||
let result = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await;
|
||||
assert!(result.is_err());
|
||||
|
||||
if let Err(err) = result {
|
||||
assert_eq!(err.code(), tonic::Code::InvalidArgument);
|
||||
assert!(err.message().contains("Invalid column: nonexistent_column"));
|
||||
}
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_empty_data_request(#[future] test_context: TestContext) {
|
||||
let context = test_context.await;
|
||||
let indexer_tx = create_test_indexer_channel().await;
|
||||
|
||||
// Try to insert completely empty data
|
||||
let data = HashMap::new();
|
||||
|
||||
let result = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await;
|
||||
assert!(result.is_err());
|
||||
|
||||
if let Err(err) = result {
|
||||
assert_eq!(err.code(), tonic::Code::InvalidArgument);
|
||||
assert!(err.message().contains("No valid columns to insert"));
|
||||
}
|
||||
}
|
||||
|
||||
// ========= Performance and Stress Testing =========
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_rapid_sequential_inserts(#[future] test_context: TestContext) {
|
||||
let context = test_context.await;
|
||||
let indexer_tx = create_test_indexer_channel().await;
|
||||
|
||||
let start_time = std::time::Instant::now();
|
||||
|
||||
// Perform rapid sequential inserts
|
||||
for i in 0..50 {
|
||||
let mut data = HashMap::new();
|
||||
data.insert("firma".into(), format!("Rapid Company {}", i));
|
||||
data.insert("kz".into(), format!("RAP{}", i));
|
||||
data.insert("telefon".into(), format!("+421{:09}", i));
|
||||
|
||||
let response = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await.unwrap();
|
||||
assert!(response.success, "Rapid insert {} should succeed", i);
|
||||
}
|
||||
|
||||
let duration = start_time.elapsed();
|
||||
println!("50 rapid inserts took: {:?}", duration);
|
||||
|
||||
// Verify all records were inserted
|
||||
let query = format!(
|
||||
r#"SELECT COUNT(*) FROM "{}"."{}" WHERE firma LIKE 'Rapid Company%'"#,
|
||||
context.profile_name, context.table_name
|
||||
);
|
||||
|
||||
let count: i64 = sqlx::query_scalar::<_, i64>(&query)
|
||||
.fetch_one(&context.pool)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(count, 50);
|
||||
}
|
||||
|
||||
// ========= SQL Injection Protection =========
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_sql_injection_protection(#[future] test_context: TestContext) {
|
||||
let context = test_context.await;
|
||||
let indexer_tx = create_test_indexer_channel().await;
|
||||
|
||||
let injection_attempts = vec![
|
||||
"'; DROP TABLE users; --",
|
||||
"1; DELETE FROM adresar; --",
|
||||
"admin'; UPDATE adresar SET firma='hacked' WHERE '1'='1",
|
||||
"' OR '1'='1",
|
||||
"'; INSERT INTO adresar (firma) VALUES ('injected'); --",
|
||||
"Robert'); DROP TABLE students; --", // Classic Bobby Tables
|
||||
];
|
||||
|
||||
let injection_count = injection_attempts.len();
|
||||
|
||||
for (i, injection) in injection_attempts.into_iter().enumerate() {
|
||||
let mut data = HashMap::new();
|
||||
data.insert("firma".into(), injection.to_string());
|
||||
data.insert("kz".into(), format!("INJ{}", i));
|
||||
|
||||
// These should all succeed because values are properly parameterized
|
||||
let response = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await.unwrap();
|
||||
assert!(response.success, "SQL injection attempt should be safely handled: {}", injection);
|
||||
|
||||
// Verify the injection attempt was stored as literal text
|
||||
let query = format!(
|
||||
r#"SELECT firma FROM "{}"."{}" WHERE id = $1"#,
|
||||
context.profile_name, context.table_name
|
||||
);
|
||||
|
||||
let stored_firma: String = sqlx::query_scalar::<_, String>(&query)
|
||||
.bind(response.inserted_id)
|
||||
.fetch_one(&context.pool)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(stored_firma, injection);
|
||||
}
|
||||
|
||||
// Verify the table still exists and has the expected number of records
|
||||
let query = format!(
|
||||
r#"SELECT COUNT(*) FROM "{}"."{}" WHERE kz LIKE 'INJ%'"#,
|
||||
context.profile_name, context.table_name
|
||||
);
|
||||
|
||||
let count: i64 = sqlx::query_scalar::<_, i64>(&query)
|
||||
.fetch_one(&context.pool)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(count, injection_count as i64);
|
||||
}
|
||||
|
||||
// ========= Large Data Testing =========
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_large_text_fields(#[future] test_context: TestContext) {
|
||||
let context = test_context.await;
|
||||
let indexer_tx = create_test_indexer_channel().await;
|
||||
|
||||
// Test various large text sizes (except telefon which has length limits)
|
||||
let sizes = vec![1000, 5000, 10000];
|
||||
|
||||
for size in sizes {
|
||||
let large_text = "A".repeat(size);
|
||||
|
||||
let mut data = HashMap::new();
|
||||
data.insert("firma".into(), large_text.clone());
|
||||
data.insert("ulica".into(), format!("Street with {} chars", size));
|
||||
|
||||
let response = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await.unwrap();
|
||||
assert!(response.success, "Failed for size: {}", size);
|
||||
|
||||
// Verify the large text was stored correctly
|
||||
let query = format!(
|
||||
r#"SELECT firma FROM "{}"."{}" WHERE id = $1"#,
|
||||
context.profile_name, context.table_name
|
||||
);
|
||||
|
||||
let stored_firma: String = sqlx::query_scalar::<_, String>(&query)
|
||||
.bind(response.inserted_id)
|
||||
.fetch_one(&context.pool)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(stored_firma.len(), size);
|
||||
assert_eq!(stored_firma, large_text);
|
||||
}
|
||||
}
|
||||
|
||||

// ========= Indexer Integration Testing =========

#[rstest]
#[tokio::test]
async fn test_indexer_command_generation(#[future] test_context: TestContext) {
    let context = test_context.await;
    let (indexer_tx, mut indexer_rx) = mpsc::channel(100);

    let mut data = HashMap::new();
    data.insert("firma".into(), "Indexer Test Company".to_string());
    data.insert("kz".into(), "IDX123".to_string());

    let response = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await.unwrap();
    assert!(response.success);

    // Check that an indexer command was sent
    let indexer_command = tokio::time::timeout(
        tokio::time::Duration::from_millis(100),
        indexer_rx.recv()
    ).await;

    assert!(indexer_command.is_ok());
    let command = indexer_command.unwrap().unwrap();

    match command {
        IndexCommand::AddOrUpdate(data) => {
            assert_eq!(data.table_name, context.table_name);
            assert_eq!(data.row_id, response.inserted_id);
        },
        IndexCommand::Delete(_) => panic!("Expected AddOrUpdate command, got Delete"),
    }
}

#[rstest]
#[tokio::test]
async fn test_indexer_failure_resilience(#[future] test_context: TestContext) {
    let context = test_context.await;

    // Create a closed channel to simulate indexer failure
    let (indexer_tx, indexer_rx) = mpsc::channel(1);
    drop(indexer_rx); // Close receiver to simulate failure

    let mut data = HashMap::new();
    data.insert("firma".into(), "Resilience Test Company".to_string());
    data.insert("kz".into(), "RES123".to_string());

    // Insert should still succeed even if the indexer fails
    let response = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await.unwrap();
    assert!(response.success);

    // Verify data was inserted despite the indexer failure
    let query = format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE kz = 'RES123'"#,
        context.profile_name, context.table_name
    );

    let count: i64 = sqlx::query_scalar::<_, i64>(&query)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert_eq!(count, 1);
}
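
// Hedged sketch of the channel behavior the resilience test relies on: sending
// on a tokio mpsc channel whose receiver was dropped returns an Err instead of
// panicking, so a handler that logs-and-continues keeps the insert alive. This
// standalone test is illustrative, not part of the original suite.
#[tokio::test]
async fn closed_channel_send_returns_err_sketch() {
    let (tx, rx) = tokio::sync::mpsc::channel::<u32>(1);
    drop(rx); // simulate a dead indexer task
    assert!(tx.send(1).await.is_err()); // error surfaces as a value, not a panic
}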

// ========= Profile and Table Validation =========

#[rstest]
#[tokio::test]
async fn test_nonexistent_profile_error(#[future] test_context: TestContext) {
    let context = test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("firma".into(), "Test Company".to_string());

    let invalid_request = PostTableDataRequest {
        profile_name: "nonexistent_profile".into(),
        table_name: context.table_name.clone(),
        data: convert_to_proto_values(data),
    };

    let result = post_table_data(&context.pool, invalid_request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::NotFound);
        assert!(err.message().contains("Profile not found"));
    }
}

#[rstest]
#[tokio::test]
async fn test_nonexistent_table_error(#[future] test_context: TestContext) {
    let context = test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("firma".into(), "Test Company".to_string());

    let invalid_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: "nonexistent_table".into(),
        data: convert_to_proto_values(data),
    };

    let result = post_table_data(&context.pool, invalid_request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::NotFound);
        assert!(err.message().contains("Table not found"));
    }
}

847
server/tests/tables_data/post/post_table_data_test3.rs
Normal file
@@ -0,0 +1,847 @@

// tests/tables_data/handlers/post_table_data_test3.rs

// ========================================================================
// ADDITIONAL HELPER FUNCTIONS FOR TEST3
// ========================================================================

// Helpers to create the different protobuf Value kinds
fn create_string_value(s: &str) -> Value {
    Value { kind: Some(Kind::StringValue(s.to_string())) }
}

fn create_number_value(n: f64) -> Value {
    Value { kind: Some(Kind::NumberValue(n)) }
}

fn create_bool_value(b: bool) -> Value {
    Value { kind: Some(Kind::BoolValue(b)) }
}

fn create_null_value() -> Value {
    Value { kind: Some(Kind::NullValue(0)) }
}

// ========================================================================
// FIXTURES AND CONTEXT SETUP FOR ADVANCED TESTS
// ========================================================================

#[derive(Clone)]
struct DataTypeTestContext {
    pool: PgPool,
    profile_name: String,
    table_name: String,
}

#[derive(Clone)]
struct ForeignKeyTestContext {
    pool: PgPool,
    profile_name: String,
    category_table: String,
    product_table: String,
    order_table: String,
}

// Create a table with various data types for comprehensive testing
async fn create_data_type_test_table(pool: &PgPool, table_name: &str, profile_name: &str) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition { name: "my_text".into(), field_type: "text".into() },
            TableColumnDefinition { name: "my_bool".into(), field_type: "boolean".into() },
            TableColumnDefinition { name: "my_timestamp".into(), field_type: "timestamp".into() },
            // NB: despite the name, my_bigint is created as a 32-bit "integer"
            // column; reads below must decode it as i32.
            TableColumnDefinition { name: "my_bigint".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "my_money".into(), field_type: "decimal(19,4)".into() },
            TableColumnDefinition { name: "my_date".into(), field_type: "date".into() },
            TableColumnDefinition { name: "my_decimal".into(), field_type: "decimal(10,2)".into() },
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

// Create foreign key test tables (category -> product -> order)
async fn create_foreign_key_test_tables(pool: &PgPool, profile_name: &str, category_table: &str, product_table: &str, order_table: &str) -> Result<(), tonic::Status> {
    // Create category table first (no dependencies)
    let category_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: category_table.into(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "description".into(), field_type: "text".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, category_def).await?;

    // Create product table with a required link to category
    let product_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: product_table.into(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "price".into(), field_type: "decimal(10,2)".into() },
        ],
        indexes: vec![],
        links: vec![
            TableLink { linked_table_name: category_table.into(), required: true },
        ],
    };
    post_table_definition(pool, product_def).await?;

    // Create order table with a required link to product and an optional link to category
    let order_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: order_table.into(),
        columns: vec![
            TableColumnDefinition { name: "quantity".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "notes".into(), field_type: "text".into() },
        ],
        indexes: vec![],
        links: vec![
            TableLink { linked_table_name: product_table.into(), required: true },
            TableLink { linked_table_name: category_table.into(), required: false }, // Optional link
        ],
    };
    post_table_definition(pool, order_def).await?;

    Ok(())
}
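
// Naming convention assumed by the tests below (inferred from the link
// definitions above, not stated elsewhere in this diff): each TableLink adds a
// "<linked_table>_id" column to the owning table, with `required: true`
// mapping to a NOT NULL foreign key and `required: false` to a nullable one.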

#[fixture]
async fn data_type_test_context() -> DataTypeTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("dtype_profile_{}", unique_id);
    let table_name = format!("dtype_table_{}", unique_id);

    create_data_type_test_table(&pool, &table_name, &profile_name).await
        .expect("Failed to create data type test table");

    DataTypeTestContext { pool, profile_name, table_name }
}

#[fixture]
async fn foreign_key_test_context() -> ForeignKeyTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("fk_profile_{}", unique_id);
    let category_table = format!("category_{}", unique_id);
    let product_table = format!("product_{}", unique_id);
    let order_table = format!("order_{}", unique_id);

    create_foreign_key_test_tables(&pool, &profile_name, &category_table, &product_table, &order_table).await
        .expect("Failed to create foreign key test tables");

    ForeignKeyTestContext { pool, profile_name, category_table, product_table, order_table }
}

// ========================================================================
// DATA TYPE VALIDATION TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_correct_data_types_success(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;
    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Test String"));
    data.insert("my_bool".into(), create_bool_value(true));
    data.insert("my_timestamp".into(), create_string_value("2024-01-15T10:30:00Z"));
    data.insert("my_bigint".into(), create_number_value(42.0));
    data.insert("my_money".into(), create_string_value("123.45")); // Decimal columns take string values
    data.insert("my_decimal".into(), create_string_value("999.99")); // Decimal columns take string values
    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };
    let response = post_table_data(&context.pool, request, &indexer_tx).await.unwrap();
    assert!(response.success);
    assert!(response.inserted_id > 0);

    // Verify data was stored correctly
    let query = format!(
        r#"SELECT my_text, my_bool, my_timestamp, my_bigint FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );
    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let stored_text: String = row.get("my_text");
    let stored_bool: bool = row.get("my_bool");
    // my_bigint is defined as "integer" in this table definition, so decode as i32;
    // a column defined as "biginteger"/"bigint" would decode as i64 instead.
    let stored_bigint: i32 = row.get("my_bigint");

    assert_eq!(stored_text, "Test String");
    assert_eq!(stored_bool, true);
    assert_eq!(stored_bigint, 42);
}

#[rstest]
#[tokio::test]
async fn test_type_mismatch_string_for_boolean(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Required field"));
    data.insert("my_bool".into(), create_string_value("true")); // String instead of boolean

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Expected boolean for column 'my_bool'"));
    }
}

#[rstest]
#[tokio::test]
async fn test_type_mismatch_string_for_integer(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Required field"));
    data.insert("my_bigint".into(), create_string_value("42")); // String instead of number

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Expected number for column 'my_bigint'"));
    }
}

#[rstest]
#[tokio::test]
async fn test_type_mismatch_number_for_boolean(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Required field"));
    data.insert("my_bool".into(), create_number_value(1.0)); // Number instead of boolean

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Expected boolean for column 'my_bool'"));
    }
}

#[rstest]
#[tokio::test]
async fn test_decimal_requires_string_not_number(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Required field"));
    data.insert("my_money".into(), create_number_value(123.45)); // Number instead of string for a decimal column

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Expected a string representation for decimal column 'my_money'"));
    }
}
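
// Sketch of why decimal columns insist on string values (illustrative, not part
// of the original suite): protobuf NumberValue is an f64, and common monetary
// amounts have no exact binary representation, so string transport avoids
// silent drift.
#[test]
fn f64_is_lossy_for_money_sketch() {
    // The classic drift: 0.1 + 0.2 lands on 0.30000000000000004 in f64.
    assert_ne!(0.1_f64 + 0.2_f64, 0.3_f64);
}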

#[rstest]
#[tokio::test]
async fn test_invalid_timestamp_format(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Required field"));
    data.insert("my_timestamp".into(), create_string_value("not-a-date"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Invalid timestamp for my_timestamp"));
    }
}

#[rstest]
#[tokio::test]
async fn test_float_for_integer_field(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Required field"));
    data.insert("my_bigint".into(), create_number_value(123.45)); // Float for an integer field

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Expected integer for column 'my_bigint', but got a float"));
    }
}

#[rstest]
#[tokio::test]
async fn test_valid_timestamp_formats(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let valid_timestamps = vec![
        "2024-01-15T10:30:00Z",
        "2024-01-15T10:30:00+00:00",
        "2024-01-15T10:30:00.123Z",
        "2024-12-31T23:59:59Z",
        "1970-01-01T00:00:00Z", // Unix epoch
    ];

    for (i, timestamp) in valid_timestamps.into_iter().enumerate() {
        let mut data = HashMap::new();
        data.insert("my_text".into(), create_string_value(&format!("Test {}", i)));
        data.insert("my_timestamp".into(), create_string_value(timestamp));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Failed for timestamp: {}", timestamp);
    }
}
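
// Hedged sketch (assumption: the handler parses timestamps as RFC 3339 via
// chrono, which is consistent with the formats accepted above; illustrative,
// not part of the original suite):
#[test]
fn rfc3339_parse_sketch() {
    use chrono::DateTime;
    assert!(DateTime::parse_from_rfc3339("2024-01-15T10:30:00Z").is_ok());
    assert!(DateTime::parse_from_rfc3339("2024-01-15T10:30:00.123Z").is_ok());
    assert!(DateTime::parse_from_rfc3339("not-a-date").is_err());
}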

#[rstest]
#[tokio::test]
async fn test_boundary_integer_values(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Use safer boundary values that don't have f64 precision issues
    // (see the sketch after this test)
    let boundary_values = vec![
        0.0,
        1.0,
        -1.0,
        2147483647.0,  // i32::MAX (for INTEGER columns)
        -2147483648.0, // i32::MIN (for INTEGER columns)
    ];

    for (i, value) in boundary_values.into_iter().enumerate() {
        let mut data = HashMap::new();
        data.insert("my_text".into(), create_string_value(&format!("Boundary test {}", i)));
        data.insert("my_bigint".into(), create_number_value(value));
        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            data,
        };
        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Failed for boundary value: {}", value);
    }
}
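
// Why the list above avoids i64-scale numbers (illustrative, not part of the
// original suite): f64 has a 53-bit mantissa, so integers beyond 2^53 do not
// all survive an i64 -> f64 -> i64 round trip.
#[test]
fn f64_integer_precision_sketch() {
    let exact = 2_147_483_647_i64; // i32::MAX, comfortably inside 2^53
    assert_eq!((exact as f64) as i64, exact);

    let inexact = (1_i64 << 53) + 1; // 2^53 + 1, the first integer f64 cannot hold
    assert_ne!((inexact as f64) as i64, inexact); // rounds to 2^53, round trip fails
}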

#[rstest]
#[tokio::test]
async fn test_null_values_for_all_types(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Required for test"));
    data.insert("my_bool".into(), create_null_value());
    data.insert("my_timestamp".into(), create_null_value());
    data.insert("my_bigint".into(), create_null_value());
    data.insert("my_money".into(), create_null_value());

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let response = post_table_data(&context.pool, request, &indexer_tx).await.unwrap();
    assert!(response.success);

    // Verify nulls were stored correctly
    let query = format!(
        r#"SELECT my_bool, my_timestamp, my_bigint FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let stored_bool: Option<bool> = row.get("my_bool");
    let stored_timestamp: Option<chrono::DateTime<Utc>> = row.get("my_timestamp");
    // my_bigint is an "integer" (i32) column, so decode as Option<i32>;
    // decoding as Option<i64> would be a type mismatch at runtime.
    let stored_bigint: Option<i32> = row.get("my_bigint");

    assert!(stored_bool.is_none());
    assert!(stored_timestamp.is_none());
    assert!(stored_bigint.is_none());
}

// ========================================================================
// FOREIGN KEY CONSTRAINT TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_insert_with_valid_foreign_key(#[future] foreign_key_test_context: ForeignKeyTestContext) {
    let context = foreign_key_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // First, insert a category
    let mut category_data = HashMap::new();
    category_data.insert("name".into(), create_string_value("Electronics"));
    category_data.insert("description".into(), create_string_value("Electronic devices"));

    let category_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };

    let category_response = post_table_data(&context.pool, category_request, &indexer_tx).await.unwrap();
    let category_id = category_response.inserted_id;

    // Now insert a product with the valid category_id
    let mut product_data = HashMap::new();
    product_data.insert("name".into(), create_string_value("Laptop"));
    product_data.insert("price".into(), create_string_value("999.99")); // Decimal columns take string values
    product_data.insert(format!("{}_id", context.category_table), create_number_value(category_id as f64));

    let product_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };

    let result = post_table_data(&context.pool, product_request, &indexer_tx).await;
    assert!(result.is_ok(), "Insert with valid foreign key should succeed");
}

#[rstest]
#[tokio::test]
async fn test_insert_with_nonexistent_foreign_key(#[future] foreign_key_test_context: ForeignKeyTestContext) {
    let context = foreign_key_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Try to insert a product with a non-existent category_id
    let mut product_data = HashMap::new();
    product_data.insert("name".into(), create_string_value("Laptop"));
    product_data.insert("price".into(), create_string_value("999.99"));
    product_data.insert(format!("{}_id", context.category_table), create_number_value(99999.0)); // Non-existent ID

    let product_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };

    let result = post_table_data(&context.pool, product_request, &indexer_tx).await;
    assert!(result.is_err(), "Insert with non-existent foreign key should fail");

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::Internal);
        assert!(err.message().contains("Insert failed"));
    }
}

#[rstest]
#[tokio::test]
async fn test_insert_with_null_required_foreign_key(#[future] foreign_key_test_context: ForeignKeyTestContext) {
    let context = foreign_key_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Try to insert a product without category_id (a required foreign key)
    let mut product_data = HashMap::new();
    product_data.insert("name".into(), create_string_value("Laptop"));
    product_data.insert("price".into(), create_string_value("999.99"));
    // Intentionally omit category_id

    let product_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };

    let result = post_table_data(&context.pool, product_request, &indexer_tx).await;
    assert!(result.is_err(), "Insert without required foreign key should fail");

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::Internal);
        assert!(err.message().contains("Insert failed"));
    }
}

#[rstest]
#[tokio::test]
async fn test_insert_with_null_optional_foreign_key(#[future] foreign_key_test_context: ForeignKeyTestContext) {
    let context = foreign_key_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // First create a category and a product for the required FK
    let mut category_data = HashMap::new();
    category_data.insert("name".into(), create_string_value("Electronics"));

    let category_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };

    let category_response = post_table_data(&context.pool, category_request, &indexer_tx).await.unwrap();

    let mut product_data = HashMap::new();
    product_data.insert("name".into(), create_string_value("Laptop"));
    product_data.insert("price".into(), create_string_value("999.99"));
    product_data.insert(format!("{}_id", context.category_table), create_number_value(category_response.inserted_id as f64));

    let product_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };

    let product_response = post_table_data(&context.pool, product_request, &indexer_tx).await.unwrap();

    // Now insert an order with the required product_id but without the optional category_id
    let mut order_data = HashMap::new();
    order_data.insert("quantity".into(), create_number_value(2.0));
    order_data.insert("notes".into(), create_string_value("Test order"));
    order_data.insert(format!("{}_id", context.product_table), create_number_value(product_response.inserted_id as f64));
    // Intentionally omit the optional category_id

    let order_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.order_table.clone(),
        data: order_data,
    };

    let result = post_table_data(&context.pool, order_request, &indexer_tx).await;
    assert!(result.is_ok(), "Insert with NULL optional foreign key should succeed");
}

#[rstest]
#[tokio::test]
async fn test_multiple_foreign_keys_scenario(#[future] foreign_key_test_context: ForeignKeyTestContext) {
    let context = foreign_key_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Create category
    let mut category_data = HashMap::new();
    category_data.insert("name".into(), create_string_value("Books"));

    let category_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };

    let category_response = post_table_data(&context.pool, category_request, &indexer_tx).await.unwrap();

    // Create product
    let mut product_data = HashMap::new();
    product_data.insert("name".into(), create_string_value("Programming Book"));
    product_data.insert("price".into(), create_string_value("49.99"));
    product_data.insert(format!("{}_id", context.category_table), create_number_value(category_response.inserted_id as f64));

    let product_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };

    let product_response = post_table_data(&context.pool, product_request, &indexer_tx).await.unwrap();

    // Create order with both foreign keys
    let mut order_data = HashMap::new();
    order_data.insert("quantity".into(), create_number_value(3.0));
    order_data.insert("notes".into(), create_string_value("Bulk order"));
    order_data.insert(format!("{}_id", context.product_table), create_number_value(product_response.inserted_id as f64));
    order_data.insert(format!("{}_id", context.category_table), create_number_value(category_response.inserted_id as f64));

    let order_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.order_table.clone(),
        data: order_data,
    };

    let result = post_table_data(&context.pool, order_request, &indexer_tx).await;
    assert!(result.is_ok(), "Insert with multiple valid foreign keys should succeed");

    // Verify the data was inserted correctly
    let product_id_col = format!("{}_id", context.product_table);
    let category_id_col = format!("{}_id", context.category_table);

    let query = format!(
        r#"SELECT quantity, "{}", "{}" FROM "{}"."{}" WHERE id = $1"#,
        product_id_col, category_id_col, context.profile_name, context.order_table
    );

    let row = sqlx::query(&query)
        .bind(result.unwrap().inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    // quantity is defined as "integer" in the foreign key test context, so decode it as i32
    let quantity: i32 = row.get("quantity");
    let stored_product_id: i64 = row.get(product_id_col.as_str());
    let stored_category_id: Option<i64> = row.get(category_id_col.as_str());

    assert_eq!(quantity, 3);
    assert_eq!(stored_product_id, product_response.inserted_id);
    assert_eq!(stored_category_id.unwrap(), category_response.inserted_id);
}

// ========================================================================
// ADDITIONAL EDGE CASE TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_extremely_large_decimal_numbers(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let large_decimals = vec![
        "1000000000.0000",
        "999999999999.99",
        "-999999999999.99",
        "0.0001",
    ];

    for (i, decimal_str) in large_decimals.into_iter().enumerate() {
        let mut data = HashMap::new();
        data.insert("my_text".into(), create_string_value(&format!("Large decimal test {}", i)));
        data.insert("my_money".into(), create_string_value(decimal_str));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Failed for large decimal: {}", decimal_str);
    }
}

#[rstest]
#[tokio::test]
async fn test_boolean_edge_cases(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let boolean_values = vec![true, false];

    for (i, bool_val) in boolean_values.into_iter().enumerate() {
        let mut data = HashMap::new();
        data.insert("my_text".into(), create_string_value(&format!("Boolean test {}", i)));
        data.insert("my_bool".into(), create_bool_value(bool_val));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            data,
        };

        let response = post_table_data(&context.pool, request, &indexer_tx).await.unwrap();

        // Verify the boolean was stored correctly
        let query = format!(
            r#"SELECT my_bool FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.table_name
        );

        let stored_bool: bool = sqlx::query_scalar::<_, bool>(&query)
            .bind(response.inserted_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        assert_eq!(stored_bool, bool_val);
    }
}

#[rstest]
#[tokio::test]
async fn test_decimal_precision_handling(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let decimal_values = vec![
        "0.01",
        "99.99",
        "123.45",
        "999.99",
        "-123.45",
    ];

    for (i, decimal_val) in decimal_values.into_iter().enumerate() {
        let mut data = HashMap::new();
        data.insert("my_text".into(), create_string_value(&format!("Decimal test {}", i)));
        data.insert("my_decimal".into(), create_string_value(decimal_val));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Failed for decimal value: {}", decimal_val);
    }
}

#[rstest]
#[tokio::test]
async fn test_invalid_decimal_string_formats(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let invalid_decimals = vec![
        "not-a-number",
        "123.45.67",
        "abc123",
        "",
        " ",
    ];

    for (i, invalid_decimal) in invalid_decimals.into_iter().enumerate() {
        let mut data = HashMap::new();
        data.insert("my_text".into(), create_string_value(&format!("Invalid decimal test {}", i)));
        data.insert("my_decimal".into(), create_string_value(invalid_decimal));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;

        if invalid_decimal.trim().is_empty() {
            // Empty strings should be treated as NULL and succeed
            assert!(result.is_ok(), "Empty string should be treated as NULL for: {}", invalid_decimal);
        } else {
            // Invalid decimal strings should fail
            assert!(result.is_err(), "Should fail for invalid decimal: {}", invalid_decimal);
            if let Err(err) = result {
                assert_eq!(err.code(), tonic::Code::InvalidArgument);
                assert!(err.message().contains("Invalid decimal string format"));
            }
        }
    }
}
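
// Hedged sketch of the parse rule the branches above imply (an assumption about
// the handler's behavior, not its verbatim code): trim first, treat an empty
// result as SQL NULL, and otherwise require a clean decimal parse.
fn parse_decimal_field_sketch(raw: &str) -> Result<Option<rust_decimal::Decimal>, String> {
    let trimmed = raw.trim();
    if trimmed.is_empty() {
        return Ok(None); // "" and " " become NULL rather than errors
    }
    rust_decimal::Decimal::from_str_exact(trimmed)
        .map(Some)
        .map_err(|_| "Invalid decimal string format".to_string())
}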

#[rstest]
#[tokio::test]
async fn test_mixed_null_and_valid_data(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;
    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Mixed data test"));
    data.insert("my_bool".into(), create_bool_value(true));
    data.insert("my_timestamp".into(), create_null_value());
    data.insert("my_bigint".into(), create_number_value(42.0));
    data.insert("my_money".into(), create_null_value());
    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };
    let response = post_table_data(&context.pool, request, &indexer_tx).await.unwrap();
    assert!(response.success);

    // Verify the mix of null and valid data was stored correctly
    let query = format!(
        r#"SELECT my_text, my_bool, my_timestamp, my_bigint, my_money FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );
    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let stored_text: String = row.get("my_text");
    let stored_bool: bool = row.get("my_bool");
    let stored_timestamp: Option<DateTime<Utc>> = row.get("my_timestamp");
    // my_bigint is defined as "integer" in this table definition, so decode as i32;
    // a "biginteger"/"bigint" column would decode as i64 instead.
    let stored_bigint: i32 = row.get("my_bigint");
    let stored_money: Option<Decimal> = row.get("my_money");

    assert_eq!(stored_text, "Mixed data test");
    assert_eq!(stored_bool, true);
    assert!(stored_timestamp.is_none());
    assert_eq!(stored_bigint, 42);
    assert!(stored_money.is_none());
}

264
server/tests/tables_data/post/post_table_data_test4.rs
Normal file
@@ -0,0 +1,264 @@

// tests/tables_data/handlers/post_table_data_test4.rs

use rust_decimal::Decimal;
use rust_decimal_macros::dec;

// Helper to create a protobuf Value from a string
fn proto_string(s: &str) -> Value {
    Value {
        kind: Some(Kind::StringValue(s.to_string())),
    }
}

// Helper to create a protobuf Value from a number
fn proto_number(n: f64) -> Value {
    Value {
        kind: Some(Kind::NumberValue(n)),
    }
}

// Helper to create a protobuf null Value
fn proto_null() -> Value {
    Value {
        kind: Some(Kind::NullValue(0)),
    }
}

// Helper function to create a table with various decimal types for testing
async fn create_financial_table(
    pool: &PgPool,
    table_name: &str,
    profile_name: &str,
) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition {
                name: "product_name".into(),
                field_type: "text".into(),
            },
            // Standard money column
            TableColumnDefinition {
                name: "price".into(),
                field_type: "decimal(19, 4)".into(),
            },
            // Column for things like exchange rates or precise factors
            TableColumnDefinition {
                name: "rate".into(),
                field_type: "decimal(10, 5)".into(),
            },
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

// A test context fixture for the financial table
#[fixture]
async fn decimal_test_context() -> TestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("decimal_profile_{}", unique_id);
    let table_name = format!("invoices_{}", unique_id);

    create_financial_table(&pool, &table_name, &profile_name)
        .await
        .expect("Failed to create decimal test table");

    let (tx, _rx) = mpsc::channel(100);

    TestContext {
        pool,
        profile_name,
        table_name,
        indexer_tx: tx,
    }
}

// ========= DECIMAL/NUMERIC DATA TYPE TESTS =========

#[rstest]
#[tokio::test]
async fn test_insert_valid_decimal_string(#[future] decimal_test_context: TestContext) {
    let context = decimal_test_context.await;
    let mut data = HashMap::new();
    data.insert("product_name".into(), proto_string("Laptop"));
    data.insert("price".into(), proto_string("1499.99"));
    data.insert("rate".into(), proto_string("-0.12345"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();
    assert!(response.success);

    let query = format!(
        r#"SELECT price, rate FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );
    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let price: Decimal = row.get("price");
    let rate: Decimal = row.get("rate");

    assert_eq!(price, dec!(1499.99));
    assert_eq!(rate, dec!(-0.12345));
}

#[rstest]
#[tokio::test]
async fn test_insert_decimal_from_number_fails(#[future] decimal_test_context: TestContext) {
    let context = decimal_test_context.await;
    let mut data = HashMap::new();
    data.insert("product_name".into(), proto_string("Mouse"));
    // THIS IS THE INVALID PART: using a number for a decimal field.
    data.insert("price".into(), proto_number(75.50));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    // The operation should fail.
    let result = post_table_data(&context.pool, request, &context.indexer_tx).await;
    assert!(result.is_err());

    // Verify the error is correct.
    let status = result.unwrap_err();
    assert_eq!(status.code(), tonic::Code::InvalidArgument);
    assert!(status
        .message()
        .contains("Expected a string representation for decimal column 'price'"));
}

#[rstest]
#[tokio::test]
async fn test_decimal_rounding_behavior(#[future] decimal_test_context: TestContext) {
    let context = decimal_test_context.await;
    let mut data = HashMap::new();
    data.insert("product_name".into(), proto_string("Keyboard"));
    // price is NUMERIC(19, 4), so the fifth fractional digit should be rounded by the database
    data.insert("price".into(), proto_string("99.12345"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();
    assert!(response.success);

    let price: Decimal = sqlx::query_scalar(&format!(
        r#"SELECT price FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(response.inserted_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();

    // PostgreSQL rounds half away from zero, so 99.12345 becomes 99.1235
    assert_eq!(price, dec!(99.1235));
}
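
// Client-side mirror of the rounding asserted above (assumption: rust_decimal's
// MidpointAwayFromZero strategy matches PostgreSQL's NUMERIC rounding; this
// sketch is illustrative, not part of the original suite).
#[test]
fn numeric_half_away_from_zero_sketch() {
    use rust_decimal::RoundingStrategy;
    let rounded = dec!(99.12345).round_dp_with_strategy(4, RoundingStrategy::MidpointAwayFromZero);
    assert_eq!(rounded, dec!(99.1235));
    // Away from zero also holds on the negative side:
    let negative = dec!(-99.12345).round_dp_with_strategy(4, RoundingStrategy::MidpointAwayFromZero);
    assert_eq!(negative, dec!(-99.1235));
}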

#[rstest]
#[tokio::test]
async fn test_insert_null_and_empty_string_for_decimal(
    #[future] decimal_test_context: TestContext,
) {
    let context = decimal_test_context.await;
    let mut data = HashMap::new();
    data.insert("product_name".into(), proto_string("Monitor"));
    data.insert("price".into(), proto_string(" ")); // Whitespace-only string should become NULL
    data.insert("rate".into(), proto_null()); // Explicit NULL

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();
    assert!(response.success);

    let row = sqlx::query(&format!(
        r#"SELECT price, rate FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(response.inserted_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();

    let price: Option<Decimal> = row.get("price");
    let rate: Option<Decimal> = row.get("rate");

    assert!(price.is_none());
    assert!(rate.is_none());
}

#[rstest]
#[tokio::test]
async fn test_invalid_decimal_string_fails(#[future] decimal_test_context: TestContext) {
    let context = decimal_test_context.await;
    let mut data = HashMap::new();
    data.insert("product_name".into(), proto_string("Bad Data"));
    data.insert("price".into(), proto_string("not-a-number"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &context.indexer_tx).await;
    assert!(result.is_err());
    let status = result.unwrap_err();
    assert_eq!(status.code(), tonic::Code::InvalidArgument);
    assert!(status
        .message()
        .contains("Invalid decimal string format for column 'price'"));
}

#[rstest]
#[tokio::test]
async fn test_decimal_precision_overflow_fails(#[future] decimal_test_context: TestContext) {
    let context = decimal_test_context.await;
    let mut data = HashMap::new();
    data.insert("product_name".into(), proto_string("Too Expensive"));
    // rate is NUMERIC(10, 5), so it allows 5 digits before the decimal point.
    // 123456.1 has 6, so it should fail at the database level.
    data.insert("rate".into(), proto_string("123456.1"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &context.indexer_tx).await;
    assert!(result.is_err());
    let status = result.unwrap_err();
    // This error comes from the database itself.
    assert_eq!(status.code(), tonic::Code::InvalidArgument);
    assert!(status.message().contains("Numeric field overflow"));
}

588
server/tests/tables_data/post/post_table_data_test5.rs
Normal file
@@ -0,0 +1,588 @@

// ========================================================================
// COMPREHENSIVE INTEGER ROBUSTNESS TESTS - ADD TO TEST FILE 5
// ========================================================================

#[derive(Clone)]
struct IntegerRobustnessTestContext {
    pool: PgPool,
    profile_name: String,
    mixed_integer_table: String,
    bigint_only_table: String,
    integer_only_table: String,
}

// Create tables with different integer type combinations
async fn create_integer_robustness_tables(pool: &PgPool, profile_name: &str) -> Result<IntegerRobustnessTestContext, tonic::Status> {
    let unique_id = generate_unique_id();
    let mixed_table = format!("mixed_int_table_{}", unique_id);
    let bigint_table = format!("bigint_table_{}", unique_id);
    let integer_table = format!("integer_table_{}", unique_id);

    // Table with both INTEGER and BIGINT columns
    let mixed_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: mixed_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "small_int".into(), field_type: "integer".into() },       // i32
            TableColumnDefinition { name: "big_int".into(), field_type: "biginteger".into() },      // i64
            TableColumnDefinition { name: "another_int".into(), field_type: "int".into() },         // i32 (alias)
            TableColumnDefinition { name: "another_bigint".into(), field_type: "bigint".into() },   // i64 (alias)
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, mixed_def).await?;

    // Table with only BIGINT columns
    let bigint_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: bigint_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "value1".into(), field_type: "biginteger".into() },
            TableColumnDefinition { name: "value2".into(), field_type: "bigint".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, bigint_def).await?;

    // Table with only INTEGER columns
    let integer_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: integer_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "value1".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "value2".into(), field_type: "int".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, integer_def).await?;

    Ok(IntegerRobustnessTestContext {
        pool: pool.clone(),
        profile_name: profile_name.to_string(),
        mixed_integer_table: mixed_table,
        bigint_only_table: bigint_table,
        integer_only_table: integer_table,
    })
}

#[fixture]
async fn integer_robustness_context() -> IntegerRobustnessTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("int_robust_profile_{}", unique_id);

    create_integer_robustness_tables(&pool, &profile_name).await
        .expect("Failed to create integer robustness test tables")
}

// ========================================================================
// BOUNDARY AND OVERFLOW TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_integer_boundary_values_comprehensive(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Test i32 boundaries on INTEGER columns
    let i32_boundary_tests = vec![
        (2147483647.0, "i32::MAX"),
        (-2147483648.0, "i32::MIN"),
        (2147483646.0, "i32::MAX - 1"),
        (-2147483647.0, "i32::MIN + 1"),
        (0.0, "zero"),
        (1.0, "one"),
        (-1.0, "negative one"),
    ];

    for (value, description) in i32_boundary_tests {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("i32 test: {}", description)));
        data.insert("value1".into(), create_number_value(value));
        data.insert("value2".into(), create_number_value(value));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.integer_only_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Failed for i32 value {}: {}", value, description);

        // Verify correct storage
        let response = result.unwrap();
        let query = format!(
            r#"SELECT value1, value2 FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.integer_only_table
        );
        let row = sqlx::query(&query)
            .bind(response.inserted_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        let stored_val1: i32 = row.get("value1");
        let stored_val2: i32 = row.get("value2");
        assert_eq!(stored_val1, value as i32);
        assert_eq!(stored_val2, value as i32);
    }
}

#[rstest]
#[tokio::test]
async fn test_bigint_boundary_values_comprehensive(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Test i64 boundaries that can be precisely represented in f64
    let i64_boundary_tests = vec![
        (9223372036854774784.0, "Close to i64::MAX (precisely representable)"),
        (-9223372036854774784.0, "Close to i64::MIN (precisely representable)"),
        (4611686018427387904.0, "i64::MAX / 2"),
        (-4611686018427387904.0, "i64::MIN / 2"),
        (2147483647.0, "i32::MAX in i64 column"),
        (-2147483648.0, "i32::MIN in i64 column"),
        (1000000000000.0, "One trillion"),
        (-1000000000000.0, "Negative one trillion"),
    ];

    for (value, description) in i64_boundary_tests {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("i64 test: {}", description)));
        data.insert("value1".into(), create_number_value(value));
        data.insert("value2".into(), create_number_value(value));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.bigint_only_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Failed for i64 value {}: {}", value, description);

        // Verify correct storage
        let response = result.unwrap();
        let query = format!(
            r#"SELECT value1, value2 FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.bigint_only_table
        );
        let row = sqlx::query(&query)
            .bind(response.inserted_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        let stored_val1: i64 = row.get("value1");
        let stored_val2: i64 = row.get("value2");
        assert_eq!(stored_val1, value as i64);
        assert_eq!(stored_val2, value as i64);
    }
}
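
// Where 9223372036854774784 comes from (illustrative, not part of the original
// suite): near 2^63 the f64 grid spacing is 2^10, so 2^63 - 1024 is the largest
// f64 strictly below 2^63 and the biggest value that round-trips into i64.
#[test]
fn largest_f64_below_two_pow_63_sketch() {
    let v = 9_223_372_036_854_774_784_f64; // 2^63 - 1024
    assert_eq!(v as i64, 9_223_372_036_854_774_784_i64);
    assert_eq!((v as i64) as f64, v); // exact round trip
}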

#[rstest]
#[tokio::test]
async fn test_integer_overflow_rejection_i32(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Values that should be rejected for INTEGER columns
    let overflow_values = vec![
        (2147483648.0, "i32::MAX + 1"),
        (-2147483649.0, "i32::MIN - 1"),
        (3000000000.0, "3 billion"),
        (-3000000000.0, "negative 3 billion"),
        (4294967296.0, "2^32"),
        (9223372036854775807.0, "i64::MAX (should fail on i32)"),
    ];

    for (value, description) in overflow_values {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("Overflow test: {}", description)));
        data.insert("value1".into(), create_number_value(value));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.integer_only_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_err(), "Should have failed for i32 overflow value {}: {}", value, description);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            assert!(err.message().contains("Integer value out of range for INTEGER column"));
        }
    }
}
|
||||
|
||||

#[rstest]
#[tokio::test]
async fn test_bigint_overflow_rejection_i64(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Values that should be rejected for BIGINT columns.
    // Only include values that actually DON'T round-trip correctly
    // (see the round-trip sketch after this list).
    let overflow_values = vec![
        (f64::INFINITY, "Positive infinity"),
        (f64::NEG_INFINITY, "Negative infinity"),
        (1e20, "Very large number (100,000,000,000,000,000,000)"),
        (-1e20, "Very large negative number"),
        (1e25, "Extremely large number"),
        (-1e25, "Extremely large negative number"),
        (f64::MAX, "f64::MAX"),
        (f64::MIN, "f64::MIN"),
        // Excluded: these values actually round-trip correctly, so they are
        // accepted; they are exercised in test_bigint_successful_roundtrip_values:
        // (9223372036854775808.0, "Just above i64 safe range"),  // round-trips!
        // (-9223372036854775808.0, "Just below i64 safe range"), // round-trips!
    ];
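
    // The "robust check" referred to below can be sketched as a round-trip
    // test (an assumed shape, shown for clarity, not the handler's code):
    //
    //     fn round_trips_as_i64(v: f64) -> bool {
    //         v.is_finite() && v.fract() == 0.0 && (v as i64) as f64 == v
    //     }
    //
    // 1e20 fails because the saturating cast clamps it to i64::MAX, which
    // converts back to 2^63 as f64, not 1e20; 2^63 itself passes, because it
    // clamps to i64::MAX and i64::MAX rounds back to exactly 2^63.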

    for (value, description) in overflow_values {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("i64 Overflow test: {}", description)));
        data.insert("value1".into(), create_number_value(value));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.bigint_only_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;

        assert!(result.is_err(), "Should have failed for i64 overflow value {}: {}", value, description);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            // Check for either message format (the new robust check should catch these)
            let message = err.message();
            assert!(
                message.contains("Integer value out of range for BIGINT column") ||
                message.contains("Expected integer for column") ||
                message.contains("but got a float"),
                "Unexpected error message for {}: {}",
                description,
                message
            );
        }
    }
}

#[rstest]
#[tokio::test]
async fn test_bigint_successful_roundtrip_values(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Values that SHOULD successfully round-trip and be accepted
    let successful_values = vec![
        (9223372036854775808.0, "Exactly i64::MAX as f64 (legitimate value)"),
        (-9223372036854775808.0, "Exactly i64::MIN as f64 (legitimate value)"),
        (9223372036854774784.0, "Large but precisely representable in f64"),
        (-9223372036854774784.0, "Large negative but precisely representable in f64"),
        (0.0, "Zero"),
        (1.0, "One"),
        (-1.0, "Negative one"),
        (2147483647.0, "i32::MAX as f64"),
        (-2147483648.0, "i32::MIN as f64"),
    ];
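
    // Aside (not an assertion about the handler): 9223372036854775808.0 is
    // 2^63, the f64 value that i64::MAX rounds to; the saturating cast maps
    // it back to i64::MAX, so it round-trips. -9223372036854775808.0 is
    // i64::MIN exactly.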

    for (value, description) in successful_values {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("Successful test: {}", description)));
        data.insert("value1".into(), create_number_value(value));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.bigint_only_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Should have succeeded for legitimate i64 value {}: {}", value, description);

        // Verify it was stored correctly
        if let Ok(response) = result {
            let query = format!(
                r#"SELECT value1 FROM "{}"."{}" WHERE id = $1"#,
                context.profile_name, context.bigint_only_table
            );
            let stored_value: i64 = sqlx::query_scalar(&query)
                .bind(response.inserted_id)
                .fetch_one(&context.pool)
                .await
                .unwrap();

            assert_eq!(stored_value, value as i64, "Stored value should match for {}", description);
        }
    }
}

#[rstest]
#[tokio::test]
async fn test_mixed_integer_types_in_same_table(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Test inserting different values into different integer types in the same table
    let test_cases = vec![
        (42.0, 1000000000000.0, "Small i32, large i64"),
        (2147483647.0, 9223372036854774784.0, "i32::MAX, near i64::MAX"),
        (-2147483648.0, -9223372036854774784.0, "i32::MIN, near i64::MIN"),
        (0.0, 0.0, "Both zero"),
        (-1.0, -1.0, "Both negative one"),
    ];

    for (i32_val, i64_val, description) in test_cases {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("Mixed test: {}", description)));
        data.insert("small_int".into(), create_number_value(i32_val));
        data.insert("big_int".into(), create_number_value(i64_val));
        data.insert("another_int".into(), create_number_value(i32_val));
        data.insert("another_bigint".into(), create_number_value(i64_val));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.mixed_integer_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Failed for mixed integer test: {}", description);

        // Verify correct storage with correct types
        let response = result.unwrap();
        let query = format!(
            r#"SELECT small_int, big_int, another_int, another_bigint FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.mixed_integer_table
        );
        let row = sqlx::query(&query)
            .bind(response.inserted_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        let stored_small_int: i32 = row.get("small_int");
        let stored_big_int: i64 = row.get("big_int");
        let stored_another_int: i32 = row.get("another_int");
        let stored_another_bigint: i64 = row.get("another_bigint");

        assert_eq!(stored_small_int, i32_val as i32);
        assert_eq!(stored_big_int, i64_val as i64);
        assert_eq!(stored_another_int, i32_val as i32);
        assert_eq!(stored_another_bigint, i64_val as i64);
    }
}

#[rstest]
#[tokio::test]
async fn test_wrong_type_for_mixed_integer_columns(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Try to put an i64-sized value into an i32 column
    let mut data = HashMap::new();
    data.insert("name".into(), create_string_value("Wrong type test"));
    data.insert("small_int".into(), create_number_value(3000000000.0)); // Too big for i32
    data.insert("big_int".into(), create_number_value(42.0)); // This should be fine

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.mixed_integer_table.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_err(), "Should fail when putting an i64-sized value in an i32 column");

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Integer value out of range for INTEGER column"));
    }
}

#[rstest]
#[tokio::test]
async fn test_float_precision_edge_cases(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Test values that have fractional parts (should be rejected;
    // see the sketch after this list)
    let fractional_values = vec![
        (42.1, "42.1"),
        (42.9, "42.9"),
        (42.000001, "42.000001"),
        (-42.5, "-42.5"),
        (0.1, "0.1"),
        (2147483646.5, "Near i32::MAX with fraction"),
    ];
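
    // A minimal sketch of the fractional-part check implied by the expected
    // error message (an assumed shape, not the handler's actual code):
    //
    //     let has_fraction = |v: f64| v.is_finite() && v.fract() != 0.0;
    //     debug_assert!(has_fraction(42.000001) && has_fraction(2147483646.5));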

    for (value, description) in fractional_values {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("Float test: {}", description)));
        data.insert("value1".into(), create_number_value(value));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.integer_only_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_err(), "Should fail for fractional value {}: {}", value, description);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            assert!(err.message().contains("Expected integer for column") && err.message().contains("but got a float"));
        }
    }
}

#[rstest]
#[tokio::test]
async fn test_null_integer_handling_comprehensive(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Test null values in the mixed integer table
    let mut data = HashMap::new();
    data.insert("name".into(), create_string_value("Null integer test"));
    data.insert("small_int".into(), create_null_value());
    data.insert("big_int".into(), create_null_value());
    data.insert("another_int".into(), create_number_value(42.0));
    data.insert("another_bigint".into(), create_number_value(1000000000000.0));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.mixed_integer_table.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_ok(), "Should succeed with null integer values");

    // Verify null storage
    let response = result.unwrap();
    let query = format!(
        r#"SELECT small_int, big_int, another_int, another_bigint FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.mixed_integer_table
    );
    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let stored_small_int: Option<i32> = row.get("small_int");
    let stored_big_int: Option<i64> = row.get("big_int");
    let stored_another_int: i32 = row.get("another_int");
    let stored_another_bigint: i64 = row.get("another_bigint");

    assert!(stored_small_int.is_none());
    assert!(stored_big_int.is_none());
    assert_eq!(stored_another_int, 42);
    assert_eq!(stored_another_bigint, 1000000000000);
}

#[rstest]
#[tokio::test]
async fn test_concurrent_mixed_integer_inserts(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Test concurrent inserts with different integer types
    let tasks: Vec<_> = (0..10).map(|i| {
        let context = context.clone();
        let indexer_tx = indexer_tx.clone();

        tokio::spawn(async move {
            let mut data = HashMap::new();
            data.insert("name".into(), create_string_value(&format!("Concurrent test {}", i)));
            data.insert("small_int".into(), create_number_value((i * 1000) as f64));
            data.insert("big_int".into(), create_number_value((i as i64 * 1000000000000) as f64));
            data.insert("another_int".into(), create_number_value((i * -100) as f64));
            data.insert("another_bigint".into(), create_number_value((i as i64 * -1000000000000) as f64));

            let request = PostTableDataRequest {
                profile_name: context.profile_name.clone(),
                table_name: context.mixed_integer_table.clone(),
                data,
            };

            post_table_data(&context.pool, request, &indexer_tx).await
        })
    }).collect();

    // Wait for all tasks to complete
    let results = futures::future::join_all(tasks).await;

    // All should succeed
    for (i, result) in results.into_iter().enumerate() {
        let task_result = result.expect("Task should not panic");
        assert!(task_result.is_ok(), "Concurrent insert {} should succeed", i);
    }
}

// ========================================================================
// PERFORMANCE AND STRESS TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_rapid_integer_inserts_stress(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Rapid sequential inserts with alternating integer types
    let start = std::time::Instant::now();

    for i in 0..100 {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("Stress test {}", i)));

        // Alternate between different boundary values
        let small_val = match i % 4 {
            0 => 2147483647.0,  // i32::MAX
            1 => -2147483648.0, // i32::MIN
            2 => 0.0,
            _ => (i as f64) * 1000.0,
        };

        let big_val = match i % 4 {
            0 => 9223372036854774784.0,  // Near i64::MAX
            1 => -9223372036854774784.0, // Near i64::MIN
            2 => 0.0,
            _ => (i as f64) * 1000000000000.0,
        };

        data.insert("small_int".into(), create_number_value(small_val));
        data.insert("big_int".into(), create_number_value(big_val));
        data.insert("another_int".into(), create_number_value(small_val));
        data.insert("another_bigint".into(), create_number_value(big_val));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.mixed_integer_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Rapid insert {} should succeed", i);
    }

    let duration = start.elapsed();
    println!("100 mixed integer inserts took: {:?}", duration);

    // Should complete in reasonable time (adjust threshold as needed)
    assert!(duration.as_secs() < 10, "Stress test took too long: {:?}", duration);
}

server/tests/tables_data/put/mod.rs (new file, +3)
@@ -0,0 +1,3 @@
// tests/tables_data/put/mod.rs

pub mod put_table_data_test;

server/tests/tables_data/put/put_table_data_test.rs (new file, +544)
@@ -0,0 +1,544 @@
// tests/tables_data/handlers/put_table_data_test.rs

use rstest::{fixture, rstest};
use sqlx::{PgPool, Row};
use std::collections::HashMap;
use prost_types::{value::Kind, Value};
use common::proto::multieko2::table_definition::{
    PostTableDefinitionRequest, ColumnDefinition as TableColumnDefinition, TableLink,
};
use common::proto::multieko2::tables_data::{
    PostTableDataRequest, PutTableDataRequest,
};
use server::table_definition::handlers::post_table_definition;
// The post_table_data handler is used in the "Arrange" step of each test to create initial data.
use server::tables_data::handlers::post_table_data;
// The put_table_data handler is the function we are testing.
use server::tables_data::handlers::put_table_data;
use rust_decimal_macros::dec;
use crate::common::setup_test_db;
use tokio::sync::mpsc;
use server::indexer::IndexCommand;
use rand::Rng;
use rand::distr::Alphanumeric;
use futures;

// ========= Test Helpers =========

fn generate_unique_id() -> String {
    rand::rng()
        .sample_iter(&Alphanumeric)
        .take(8)
        .map(char::from)
        .collect::<String>()
        .to_lowercase()
}

fn string_to_proto_value(s: &str) -> Value {
    Value {
        kind: Some(Kind::StringValue(s.to_string())),
    }
}

fn bool_to_proto_value(b: bool) -> Value {
    Value {
        kind: Some(Kind::BoolValue(b)),
    }
}

async fn create_adresar_table(
    pool: &PgPool,
    table_name: &str,
    profile_name: &str,
) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition { name: "firma".into(), field_type: "text".into() },
            TableColumnDefinition { name: "kz".into(), field_type: "text".into() },
            TableColumnDefinition { name: "ulica".into(), field_type: "text".into() },
            TableColumnDefinition { name: "mesto".into(), field_type: "text".into() },
            TableColumnDefinition { name: "telefon".into(), field_type: "text".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

// Helper to create a record and return its ID for tests
async fn create_initial_record(
    context: &TestContext,
    initial_data: HashMap<String, Value>,
) -> i64 {
    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data: initial_data,
    };
    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .expect("Setup: Failed to create initial record");
    response.inserted_id
}

// ========= Fixtures =========

#[derive(Clone)]
struct TestContext {
    pool: PgPool,
    profile_name: String,
    table_name: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
}

#[fixture]
async fn test_context() -> TestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("test_profile_{}", unique_id);
    let table_name = format!("adresar_test_{}", unique_id);
    create_adresar_table(&pool, &table_name, &profile_name)
        .await
        .expect("Failed to create test table");
    let (tx, mut rx) = mpsc::channel(100);
    // Drain the receiver so sends from the handlers never block the tests
    tokio::spawn(async move { while rx.recv().await.is_some() {} });
    TestContext { pool, profile_name, table_name, indexer_tx: tx }
}

// ========= Update Tests (Converted from Post Tests) =========

#[rstest]
#[tokio::test]
async fn test_update_table_data_success(#[future] test_context: TestContext) {
    // Arrange
    let context = test_context.await;
    let mut initial_data = HashMap::new();
    initial_data.insert("firma".to_string(), string_to_proto_value("Original Company"));
    initial_data.insert("ulica".to_string(), string_to_proto_value("Original Street"));
    let record_id = create_initial_record(&context, initial_data).await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("firma".to_string(), string_to_proto_value("Updated Company"));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    let response = put_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();

    // Assert
    assert!(response.success);
    assert_eq!(response.updated_id, record_id);

    let row = sqlx::query(&format!(
        r#"SELECT firma, ulica FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();

    let firma: String = row.get("firma");
    let ulica: String = row.get("ulica");
    assert_eq!(firma, "Updated Company");
    assert_eq!(ulica, "Original Street"); // Should be unchanged
}

#[rstest]
#[tokio::test]
async fn test_update_table_data_whitespace_trimming(
    #[future] test_context: TestContext,
) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "firma".to_string(),
            string_to_proto_value("Original"),
        )]),
    )
    .await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("firma".to_string(), string_to_proto_value(" Trimmed Co. "));
    update_data.insert("telefon".to_string(), string_to_proto_value(" 12345 "));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    put_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();

    // Assert
    let row = sqlx::query(&format!(
        r#"SELECT firma, telefon FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();
    let firma: String = row.get("firma");
    let telefon: Option<String> = row.get("telefon");
    assert_eq!(firma, "Trimmed Co.");
    assert_eq!(telefon.unwrap(), "12345");
}

#[rstest]
#[tokio::test]
async fn test_update_field_to_null_with_empty_string(
    #[future] test_context: TestContext,
) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "telefon".to_string(),
            string_to_proto_value("555-1234"),
        )]),
    )
    .await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("telefon".to_string(), string_to_proto_value(" ")); // Update to whitespace-only, which should be stored as NULL
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    put_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();

    // Assert
    let telefon: Option<String> =
        sqlx::query_scalar(&format!(
            r#"SELECT telefon FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.table_name
        ))
        .bind(record_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert!(telefon.is_none());
}

#[rstest]
#[tokio::test]
async fn test_update_telefon_length_limit_error(
    #[future] test_context: TestContext,
) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "telefon".to_string(),
            string_to_proto_value("valid-number"),
        )]),
    )
    .await;

    // Act: a 16-character value should exceed the telefon length limit
    let mut update_data = HashMap::new();
    update_data.insert("telefon".to_string(), string_to_proto_value("1".repeat(16).as_str()));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);

    // Verify original data is untouched
    let telefon: String = sqlx::query_scalar(&format!(
        r#"SELECT telefon FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();
    assert_eq!(telefon, "valid-number");
}

#[rstest]
#[tokio::test]
async fn test_update_with_invalid_column_name(#[future] test_context: TestContext) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "firma".to_string(),
            string_to_proto_value("Original"),
        )]),
    )
    .await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("nonexistent_col".to_string(), string_to_proto_value("invalid"));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    let err = result.unwrap_err();
    assert_eq!(err.code(), tonic::Code::InvalidArgument);
    assert!(err.message().contains("Invalid column: nonexistent_col"));
}

#[rstest]
#[tokio::test]
async fn test_update_with_empty_data_request(#[future] test_context: TestContext) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "firma".to_string(),
            string_to_proto_value("Original"),
        )]),
    )
    .await;

    // Act
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: HashMap::new(), // Empty data map
    };
    let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert: An update with no fields should be a no-op and succeed.
    assert!(result.is_ok());
    let response = result.unwrap();
    assert!(response.success);
    assert_eq!(response.updated_id, record_id);

    // Verify original data is untouched
    let firma: String = sqlx::query_scalar(&format!(
        r#"SELECT firma FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();
    assert_eq!(firma, "Original");
}

#[rstest]
#[tokio::test]
async fn test_update_sql_injection_protection(#[future] test_context: TestContext) {
    // Arrange
    let context = test_context.await;
    let injection_attempt = "admin'; UPDATE adresar SET firma='hacked' WHERE '1'='1";
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "firma".to_string(),
            string_to_proto_value("Safe Company"),
        )]),
    )
    .await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("firma".to_string(), string_to_proto_value(injection_attempt));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    put_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();

    // Assert
    let firma: String = sqlx::query_scalar(&format!(
        r#"SELECT firma FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();
    assert_eq!(firma, injection_attempt); // Should be stored as a literal string
}

#[rstest]
#[tokio::test]
async fn test_update_nonexistent_record_error(#[future] test_context: TestContext) {
    // Arrange
    let context = test_context.await;
    let nonexistent_id = 999999;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("firma".to_string(), string_to_proto_value("No one to update"));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: nonexistent_id,
        data: update_data,
    };
    let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    let err = result.unwrap_err();
    assert_eq!(err.code(), tonic::Code::NotFound);
    assert!(err.message().contains("Record not found"));
}

#[rstest]
#[tokio::test]
async fn test_concurrent_updates_different_records(
    #[future] test_context: TestContext,
) {
    // Arrange
    let context = test_context.await;
    let mut record_ids = Vec::new();
    for i in 0..10 {
        let record_id = create_initial_record(
            &context,
            HashMap::from([(
                "firma".to_string(),
                string_to_proto_value(&format!("Concurrent-{}", i)),
            )]),
        )
        .await;
        record_ids.push(record_id);
    }

    // Act
    let mut tasks = Vec::new();
    for (i, record_id) in record_ids.iter().enumerate() {
        let context = context.clone();
        let record_id = *record_id;
        let task = tokio::spawn(async move {
            let mut update_data = HashMap::new();
            update_data.insert(
                "mesto".to_string(),
                string_to_proto_value(&format!("City-{}", i)),
            );
            let request = PutTableDataRequest {
                profile_name: context.profile_name.clone(),
                table_name: context.table_name.clone(),
                id: record_id,
                data: update_data,
            };
            put_table_data(&context.pool, request, &context.indexer_tx).await
        });
        tasks.push(task);
    }
    let results = futures::future::join_all(tasks).await;

    // Assert
    for result in results {
        assert!(result.unwrap().is_ok());
    }

    let count: i64 = sqlx::query_scalar(&format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE mesto LIKE 'City-%'"#,
        context.profile_name, context.table_name
    ))
    .fetch_one(&context.pool)
    .await
    .unwrap();
    assert_eq!(count, 10);
}

#[rstest]
#[tokio::test]
async fn test_update_boolean_system_column_validation(
    #[future] test_context: TestContext,
) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "firma".to_string(),
            string_to_proto_value("To be deleted"),
        )]),
    )
    .await;

    // Act: Try to update 'deleted' with a string, which is invalid
    let mut invalid_data = HashMap::new();
    invalid_data.insert("deleted".to_string(), string_to_proto_value("true"));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: invalid_data,
    };
    let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert: The operation must fail
    assert!(result.is_err());
    let err = result.unwrap_err();
    assert_eq!(err.code(), tonic::Code::InvalidArgument);
    assert!(err.message().contains("Expected boolean for column 'deleted'"));

    // Act: Try to update 'deleted' with a proper boolean
    let mut valid_data = HashMap::new();
    valid_data.insert("deleted".to_string(), bool_to_proto_value(true));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: valid_data,
    };
    let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert: The operation must succeed
    assert!(result.is_ok());
    let deleted: bool = sqlx::query_scalar(&format!(
        r#"SELECT deleted FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();
    assert!(deleted);
}
include!("put_table_data_test2.rs");
|
||||
include!("put_table_data_test3.rs");
|
||||
include!("put_table_data_test4.rs");
|
||||
include!("put_table_data_test5.rs");

server/tests/tables_data/put/put_table_data_test2.rs (new file, +1293)
(diff suppressed because the file is too large)

server/tests/tables_data/put/put_table_data_test3.rs (new file, +1001)
(diff suppressed because the file is too large)

server/tests/tables_data/put/put_table_data_test4.rs (new file, +907)
@@ -0,0 +1,907 @@
// tests/tables_data/handlers/put_table_data_test4.rs

#[derive(Clone)]
struct ComprehensiveIntegerTestContext {
    pool: PgPool,
    profile_name: String,
    mixed_integer_table: String,
    bigint_only_table: String,
    integer_only_table: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
}

#[derive(Clone)]
struct AdvancedDecimalTestContext {
    pool: PgPool,
    profile_name: String,
    table_name: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
}

#[derive(Clone)]
struct PerformanceTestContext {
    pool: PgPool,
    profile_name: String,
    stress_table: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
}

// ========================================================================
// TABLE CREATION HELPERS FOR COMPREHENSIVE TESTING
// ========================================================================

async fn create_comprehensive_integer_tables(
    pool: &PgPool,
    profile_name: &str,
) -> Result<ComprehensiveIntegerTestContext, tonic::Status> {
    let unique_id = generate_unique_id();
    let mixed_table = format!("comprehensive_mixed_table_{}", unique_id);
    let bigint_table = format!("comprehensive_bigint_table_{}", unique_id);
    let integer_table = format!("comprehensive_integer_table_{}", unique_id);

    // Table with both INTEGER and BIGINT columns for comprehensive testing
    let mixed_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: mixed_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "small_int".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "big_int".into(), field_type: "biginteger".into() },
            TableColumnDefinition { name: "another_int".into(), field_type: "int".into() },
            TableColumnDefinition { name: "another_bigint".into(), field_type: "bigint".into() },
            TableColumnDefinition { name: "nullable_int".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "nullable_bigint".into(), field_type: "biginteger".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, mixed_def).await?;

    // Table with only BIGINT columns for edge case testing
    let bigint_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: bigint_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "value1".into(), field_type: "biginteger".into() },
            TableColumnDefinition { name: "value2".into(), field_type: "bigint".into() },
            TableColumnDefinition { name: "extreme_value".into(), field_type: "biginteger".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, bigint_def).await?;

    // Table with only INTEGER columns for boundary testing
    let integer_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: integer_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "value1".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "value2".into(), field_type: "int".into() },
            TableColumnDefinition { name: "boundary_test".into(), field_type: "integer".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, integer_def).await?;

    let (tx, mut rx) = mpsc::channel(100);
    tokio::spawn(async move { while rx.recv().await.is_some() {} });

    Ok(ComprehensiveIntegerTestContext {
        pool: pool.clone(),
        profile_name: profile_name.to_string(),
        mixed_integer_table: mixed_table,
        bigint_only_table: bigint_table,
        integer_only_table: integer_table,
        indexer_tx: tx,
    })
}

async fn create_advanced_decimal_table(
    pool: &PgPool,
    table_name: &str,
    profile_name: &str,
) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition { name: "product_name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "price".into(), field_type: "decimal(19, 4)".into() },
            TableColumnDefinition { name: "rate".into(), field_type: "decimal(10, 5)".into() },
            TableColumnDefinition { name: "discount".into(), field_type: "decimal(5, 3)".into() },
            TableColumnDefinition { name: "ultra_precise".into(), field_type: "decimal(28, 10)".into() },
            TableColumnDefinition { name: "percentage".into(), field_type: "decimal(5, 4)".into() },
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

async fn create_performance_stress_table(
    pool: &PgPool,
    table_name: &str,
    profile_name: &str,
) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition { name: "test_name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "int_val1".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "int_val2".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "bigint_val1".into(), field_type: "biginteger".into() },
            TableColumnDefinition { name: "bigint_val2".into(), field_type: "biginteger".into() },
            TableColumnDefinition { name: "decimal_val".into(), field_type: "decimal(10, 2)".into() },
            TableColumnDefinition { name: "bool_val".into(), field_type: "boolean".into() },
            TableColumnDefinition { name: "timestamp_val".into(), field_type: "timestamptz".into() },
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

// ========================================================================
// FIXTURES
// ========================================================================

#[fixture]
async fn comprehensive_integer_test_context() -> ComprehensiveIntegerTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("comp_int_profile_{}", unique_id);

    create_comprehensive_integer_tables(&pool, &profile_name).await
        .expect("Failed to create comprehensive integer test tables")
}

#[fixture]
async fn advanced_decimal_test_context() -> AdvancedDecimalTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("adv_decimal_profile_{}", unique_id);
    let table_name = format!("advanced_decimals_{}", unique_id);

    create_advanced_decimal_table(&pool, &table_name, &profile_name).await
        .expect("Failed to create advanced decimal test table");

    let (tx, mut rx) = mpsc::channel(100);
    tokio::spawn(async move { while rx.recv().await.is_some() {} });

    AdvancedDecimalTestContext { pool, profile_name, table_name, indexer_tx: tx }
}

#[fixture]
async fn performance_test_context() -> PerformanceTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("perf_profile_{}", unique_id);
    let stress_table = format!("stress_table_{}", unique_id);

    create_performance_stress_table(&pool, &stress_table, &profile_name).await
        .expect("Failed to create performance stress test table");

    let (tx, mut rx) = mpsc::channel(100);
    tokio::spawn(async move { while rx.recv().await.is_some() {} });

    PerformanceTestContext { pool, profile_name, stress_table, indexer_tx: tx }
}

// ========================================================================
// HELPER FUNCTIONS FOR CREATING INITIAL RECORDS
// ========================================================================

async fn create_initial_comprehensive_integer_record(
    context: &ComprehensiveIntegerTestContext,
    table_name: &str
) -> i64 {
    let mut initial_data = HashMap::new();
    initial_data.insert("name".to_string(), string_to_proto_value("Initial Record"));

    match table_name {
        table if table.contains("mixed") => {
            initial_data.insert("small_int".to_string(), Value { kind: Some(Kind::NumberValue(100.0)) });
            initial_data.insert("big_int".to_string(), Value { kind: Some(Kind::NumberValue(1000000000000.0)) });
            initial_data.insert("another_int".to_string(), Value { kind: Some(Kind::NumberValue(200.0)) });
            initial_data.insert("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue(2000000000000.0)) });
            initial_data.insert("nullable_int".to_string(), Value { kind: Some(Kind::NumberValue(300.0)) });
            initial_data.insert("nullable_bigint".to_string(), Value { kind: Some(Kind::NumberValue(3000000000000.0)) });
        },
        table if table.contains("bigint") => {
            initial_data.insert("value1".to_string(), Value { kind: Some(Kind::NumberValue(1000000000000.0)) });
            initial_data.insert("value2".to_string(), Value { kind: Some(Kind::NumberValue(2000000000000.0)) });
            initial_data.insert("extreme_value".to_string(), Value { kind: Some(Kind::NumberValue(9223372036854774784.0)) });
        },
        table if table.contains("integer") => {
            initial_data.insert("value1".to_string(), Value { kind: Some(Kind::NumberValue(100.0)) });
            initial_data.insert("value2".to_string(), Value { kind: Some(Kind::NumberValue(200.0)) });
            initial_data.insert("boundary_test".to_string(), Value { kind: Some(Kind::NumberValue(300.0)) });
        },
        _ => {}
    }

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: table_name.to_string(),
        data: initial_data,
    };
    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .expect("Setup: Failed to create initial integer record");
    response.inserted_id
}

async fn create_initial_advanced_decimal_record(context: &AdvancedDecimalTestContext) -> i64 {
    let mut initial_data = HashMap::new();
    initial_data.insert("product_name".to_string(), string_to_proto_value("Initial Product"));
    initial_data.insert("price".to_string(), string_to_proto_value("100.0000"));
    initial_data.insert("rate".to_string(), string_to_proto_value("1.00000"));
    initial_data.insert("discount".to_string(), string_to_proto_value("0.100"));
    initial_data.insert("ultra_precise".to_string(), string_to_proto_value("123.4567890123"));
    initial_data.insert("percentage".to_string(), string_to_proto_value("0.9999"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data: initial_data,
    };
    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .expect("Setup: Failed to create initial decimal record");
    response.inserted_id
}

async fn create_initial_performance_record(context: &PerformanceTestContext) -> i64 {
    let mut initial_data = HashMap::new();
    initial_data.insert("test_name".to_string(), string_to_proto_value("Initial Performance Test"));
    initial_data.insert("int_val1".to_string(), Value { kind: Some(Kind::NumberValue(1.0)) });
    initial_data.insert("int_val2".to_string(), Value { kind: Some(Kind::NumberValue(2.0)) });
    initial_data.insert("bigint_val1".to_string(), Value { kind: Some(Kind::NumberValue(1000000000000.0)) });
    initial_data.insert("bigint_val2".to_string(), Value { kind: Some(Kind::NumberValue(2000000000000.0)) });
    initial_data.insert("decimal_val".to_string(), string_to_proto_value("123.45"));
    initial_data.insert("bool_val".to_string(), Value { kind: Some(Kind::BoolValue(false)) });
    initial_data.insert("timestamp_val".to_string(), string_to_proto_value("2024-01-01T00:00:00Z"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.stress_table.clone(),
        data: initial_data,
    };
    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .expect("Setup: Failed to create initial performance record");
    response.inserted_id
}

// ========================================================================
// BIGINT SUCCESSFUL ROUNDTRIP VALUE TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_bigint_successful_roundtrip_values(
    #[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
) {
    let context = comprehensive_integer_test_context.await;
    let record_id = create_initial_comprehensive_integer_record(&context, &context.bigint_only_table).await;

    // Values that SHOULD successfully round-trip and be accepted for updates
    let successful_values = vec![
        (9223372036854775808.0, "Exactly i64::MAX as f64 (legitimate value)"),
        (-9223372036854775808.0, "Exactly i64::MIN as f64 (legitimate value)"),
        (9223372036854774784.0, "Large but precisely representable in f64"),
        (-9223372036854774784.0, "Large negative but precisely representable in f64"),
        (0.0, "Zero"),
        (1.0, "One"),
        (-1.0, "Negative one"),
        (2147483647.0, "i32::MAX as f64"),
        (-2147483648.0, "i32::MIN as f64"),
        (4611686018427387904.0, "i64::MAX / 2"),
        (-4611686018427387904.0, "i64::MIN / 2"),
        (1000000000000.0, "One trillion"),
        (-1000000000000.0, "Negative one trillion"),
    ];

    for (value, description) in successful_values {
        let mut update_data = HashMap::new();
        update_data.insert("name".to_string(), string_to_proto_value(&format!("Roundtrip test: {}", description)));
        update_data.insert("value1".to_string(), Value { kind: Some(Kind::NumberValue(value)) });
        update_data.insert("extreme_value".to_string(), Value { kind: Some(Kind::NumberValue(value)) });

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.bigint_only_table.clone(),
            id: record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
        assert!(result.is_ok(), "Should have succeeded for legitimate i64 update value {}: {}", value, description);

        // Verify it was stored correctly (the response itself is unused here;
        // the update targets an existing record, so we query by record_id)
        if let Ok(_response) = result {
            let query = format!(
                r#"SELECT value1, extreme_value FROM "{}"."{}" WHERE id = $1"#,
                context.profile_name, context.bigint_only_table
            );
            let row = sqlx::query(&query)
                .bind(record_id)
                .fetch_one(&context.pool)
                .await
                .unwrap();

            let stored_value1: i64 = row.get("value1");
            let stored_extreme_value: i64 = row.get("extreme_value");

            assert_eq!(stored_value1, value as i64, "Value1 should match for {}", description);
            assert_eq!(stored_extreme_value, value as i64, "Extreme value should match for {}", description);
        }
    }
}

#[rstest]
#[tokio::test]
async fn test_update_bigint_overflow_rejection_comprehensive(
    #[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
) {
    let context = comprehensive_integer_test_context.await;
    let record_id = create_initial_comprehensive_integer_record(&context, &context.bigint_only_table).await;

    // Values that should be rejected for BIGINT columns due to precision loss
    // or overflow (NaN is discussed after this list)
    let overflow_values = vec![
        (f64::INFINITY, "Positive infinity"),
        (f64::NEG_INFINITY, "Negative infinity"),
        (1e20, "Very large number (100,000,000,000,000,000,000)"),
        (-1e20, "Very large negative number"),
        (1e25, "Extremely large number"),
        (-1e25, "Extremely large negative number"),
        (f64::MAX, "f64::MAX"),
        (f64::MIN, "f64::MIN"),
        (f64::NAN, "NaN"),
    ];
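
    // NaN deserves a note: Rust's saturating float-to-int cast maps NaN to 0,
    // and 0.0 != NaN, so NaN also fails a round-trip check; the handler may
    // reject it even earlier, which is presumably why "Invalid number" is an
    // accepted message in the assertions below.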

    for (value, description) in overflow_values {
        let mut update_data = HashMap::new();
        update_data.insert("name".to_string(), string_to_proto_value(&format!("i64 Overflow update test: {}", description)));
        update_data.insert("extreme_value".to_string(), Value { kind: Some(Kind::NumberValue(value)) });

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.bigint_only_table.clone(),
            id: record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

        assert!(result.is_err(), "Should have failed for i64 overflow update value {}: {}", value, description);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            let message = err.message();
            assert!(
                message.contains("Integer value out of range for BIGINT column") ||
                message.contains("Expected integer for column") ||
                message.contains("but got a float") ||
                message.contains("Invalid number"),
                "Unexpected error message for {}: {}",
                description,
                message
            );
        }
    }
}

// ========================================================================
// WRONG TYPE FOR MIXED INTEGER COLUMNS TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_wrong_type_for_mixed_integer_columns(
    #[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
) {
    let context = comprehensive_integer_test_context.await;
    let record_id = create_initial_comprehensive_integer_record(&context, &context.mixed_integer_table).await;

    // Try to put i64-sized values into i32 columns (should fail)
    let wrong_type_tests = vec![
        ("small_int", 3000000000.0, "3 billion in i32 column"),
        ("another_int", -3000000000.0, "negative 3 billion in i32 column"),
        ("nullable_int", 2147483648.0, "i32::MAX + 1 in i32 column"),
        ("small_int", 9223372036854775807.0, "i64::MAX in i32 column"),
    ];

    for (column_name, value, description) in wrong_type_tests {
        let mut update_data = HashMap::new();
        update_data.insert("name".to_string(), string_to_proto_value(&format!("Wrong type test: {}", description)));
        update_data.insert(column_name.to_string(), Value { kind: Some(Kind::NumberValue(value)) });

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.mixed_integer_table.clone(),
            id: record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
        assert!(result.is_err(), "Should fail when putting i64-sized value {} in i32 column {}", value, column_name);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            assert!(err.message().contains("Integer value out of range for INTEGER column"));
        }
    }

    // Try fractional values in integer columns (should fail)
    let fractional_tests = vec![
        ("small_int", 42.5, "fractional in i32 column"),
        ("big_int", 1000000000000.1, "fractional in i64 column"),
        ("another_int", -42.9, "negative fractional in i32 column"),
        ("another_bigint", -1000000000000.9, "negative fractional in i64 column"),
    ];

    for (column_name, value, description) in fractional_tests {
        let mut update_data = HashMap::new();
        update_data.insert("name".to_string(), string_to_proto_value(&format!("Fractional test: {}", description)));
        update_data.insert(column_name.to_string(), Value { kind: Some(Kind::NumberValue(value)) });

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.mixed_integer_table.clone(),
            id: record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
        assert!(result.is_err(), "Should fail for fractional value {} in column {}", value, column_name);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            assert!(err.message().contains("Expected integer for column") && err.message().contains("but got a float"));
        }
    }
}
// ========================================================================
|
||||
// CONCURRENT MIXED INTEGER UPDATES TESTS
|
||||
// ========================================================================
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_update_concurrent_mixed_integer_updates(
|
||||
#[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
|
||||
) {
|
||||
let context = comprehensive_integer_test_context.await;
|
||||
// Create multiple records for concurrent updating
|
||||
let mut record_ids = Vec::new();
|
||||
for i in 0..10 {
|
||||
let record_id = create_initial_comprehensive_integer_record(&context, &context.mixed_integer_table).await;
|
||||
record_ids.push(record_id);
|
||||
}
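
    // Each spawned task below gets its own clone of the test context; assuming
    // the context holds an sqlx Pool (a cheap, shareable handle), cloning it
    // per task is the idiomatic way to fan work out across tokio::spawn.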

    // Test concurrent updates with different integer types
    let tasks: Vec<_> = record_ids.into_iter().enumerate().map(|(i, record_id)| {
        let context = context.clone();
        tokio::spawn(async move {
            let mut update_data = HashMap::new();
            update_data.insert("name".to_string(), string_to_proto_value(&format!("Concurrent update test {}", i)));
            update_data.insert("small_int".to_string(), Value { kind: Some(Kind::NumberValue((i * 1000) as f64)) });
            update_data.insert("big_int".to_string(), Value { kind: Some(Kind::NumberValue((i as i64 * 1000000000000) as f64)) });
            // `i` is a usize from enumerate() and cannot go negative, so cast
            // it to i32 before multiplying by a negative constant.
            update_data.insert("another_int".to_string(), Value { kind: Some(Kind::NumberValue(((i as i32) * -100) as f64)) });
            update_data.insert("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue((i as i64 * -1000000000000) as f64)) });
            // Alternate between null and values for nullable columns
            if i % 2 == 0 {
                update_data.insert("nullable_int".to_string(), Value { kind: Some(Kind::NumberValue((i * 42) as f64)) });
                update_data.insert("nullable_bigint".to_string(), Value { kind: Some(Kind::NullValue(0)) });
            } else {
                update_data.insert("nullable_int".to_string(), Value { kind: Some(Kind::NullValue(0)) });
                update_data.insert("nullable_bigint".to_string(), Value { kind: Some(Kind::NumberValue((i as i64 * 9999999999) as f64)) });
            }

            let request = PutTableDataRequest {
                profile_name: context.profile_name.clone(),
                table_name: context.mixed_integer_table.clone(),
                id: record_id,
                data: update_data,
            };

            put_table_data(&context.pool, request, &context.indexer_tx).await
        })
    }).collect();

    // Wait for all tasks to complete
    let results = futures::future::join_all(tasks).await;
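    // join_all yields results in the order the tasks were supplied, so the
    // index below still identifies the task (and record) each result came from.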
    // All should succeed
    for (i, result) in results.into_iter().enumerate() {
        let task_result = result.expect("Task should not panic");
        assert!(task_result.is_ok(), "Concurrent integer update {} should succeed", i);
    }

    // Verify all records were updated correctly
    let query = format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE name LIKE 'Concurrent update test%'"#,
        context.profile_name, context.mixed_integer_table
    );
    let count: i64 = sqlx::query_scalar(&query)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert_eq!(count, 10);
}

// ========================================================================
// ADVANCED DECIMAL PRECISION EDGE CASES
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_ultra_high_precision_decimals(
    #[future] advanced_decimal_test_context: AdvancedDecimalTestContext,
) {
    let context = advanced_decimal_test_context.await;
    let record_id = create_initial_advanced_decimal_record(&context).await;

    let ultra_precision_tests = vec![
        ("ultra_precise", "123456789.1234567890", dec!(123456789.1234567890)),
        ("ultra_precise", "-999999999.9999999999", dec!(-999999999.9999999999)),
        ("ultra_precise", "0.0000000001", dec!(0.0000000001)),
        ("percentage", "0.9999", dec!(0.9999)), // decimal(5,4) - 0.9999 is max
        ("percentage", "0.0001", dec!(0.0001)), // decimal(5,4) - minimum precision
    ];
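
    // Decimal inputs are sent as strings rather than NumberValue so the full
    // scale survives the trip: an f64 cannot hold ten fractional digits of
    // 123456789.1234567890 exactly, while a string parses losslessly into a
    // rust_decimal on the server side.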

    for (field, value_str, expected_decimal) in ultra_precision_tests {
        let mut update_data = HashMap::new();
        update_data.insert("product_name".to_string(), string_to_proto_value("Ultra precision test"));
        update_data.insert(field.to_string(), string_to_proto_value(value_str));

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            id: record_id,
            data: update_data,
        };

        let response = put_table_data(&context.pool, request, &context.indexer_tx)
            .await
            .unwrap();
        assert!(response.success);

        // Verify ultra high precision was preserved
        let query = format!(
            r#"SELECT {} FROM "{}"."{}" WHERE id = $1"#,
            field, context.profile_name, context.table_name
        );
        let stored_value: rust_decimal::Decimal = sqlx::query_scalar(&query)
            .bind(record_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        assert_eq!(stored_value, expected_decimal, "Ultra precision mismatch for field {}", field);
    }
}

#[rstest]
#[tokio::test]
async fn test_update_decimal_edge_case_rounding(
    #[future] advanced_decimal_test_context: AdvancedDecimalTestContext,
) {
    let context = advanced_decimal_test_context.await;
    let record_id = create_initial_advanced_decimal_record(&context).await;

    // Test edge cases where rounding behavior is critical
    let edge_rounding_tests = vec![
        ("price", "12345.99995", dec!(12346.0000)),              // Should round up at 5
        ("rate", "1.999995", dec!(2.00000)),                     // Should round up
        ("discount", "0.9995", dec!(1.000)),                     // Should round up to 1.000
        ("percentage", "0.99995", dec!(1.0000)),                 // decimal(5,4) rounds to 1.0000
        ("ultra_precise", "1.99999999995", dec!(2.0000000000)),  // Ultra precision rounding
    ];
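
    // Every midpoint above sits after a run of 9s, so half-up and banker's
    // (half-even) rounding agree on these cases; the expectations hold whether
    // the scale cut happens in PostgreSQL (NUMERIC rounds ties away from zero)
    // or in rust_decimal (round_dp defaults to banker's rounding).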

    for (field, input_value, expected_rounded) in edge_rounding_tests {
        let mut update_data = HashMap::new();
        update_data.insert("product_name".to_string(), string_to_proto_value("Edge rounding test"));
        update_data.insert(field.to_string(), string_to_proto_value(input_value));

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            id: record_id,
            data: update_data,
        };

        let response = put_table_data(&context.pool, request, &context.indexer_tx)
            .await
            .unwrap();
        assert!(response.success);

        // Verify edge case rounding was applied correctly
        let query = format!(
            r#"SELECT {} FROM "{}"."{}" WHERE id = $1"#,
            field, context.profile_name, context.table_name
        );
        let stored_value: rust_decimal::Decimal = sqlx::query_scalar(&query)
            .bind(record_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        assert_eq!(stored_value, expected_rounded, "Edge rounding mismatch for field {}", field);
    }
}

// ========================================================================
// PERFORMANCE AND STRESS TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_rapid_integer_updates_stress(
    #[future] performance_test_context: PerformanceTestContext,
) {
    let context = performance_test_context.await;

    // Create initial records for stress testing
    let mut record_ids = Vec::new();
    for _ in 0..100 {
        let record_id = create_initial_performance_record(&context).await;
        record_ids.push(record_id);
    }

    // Rapid sequential updates with alternating integer types and complex data
    let start = std::time::Instant::now();

    for (i, record_id) in record_ids.iter().enumerate() {
        let mut update_data = HashMap::new();
        update_data.insert("test_name".to_string(), string_to_proto_value(&format!("Stress update test {}", i)));

        // Alternate between different boundary values for stress testing
        let small_val = match i % 4 {
            0 => 2147483647.0,  // i32::MAX
            1 => -2147483648.0, // i32::MIN
            2 => 0.0,
            _ => (i as f64) * 1000.0,
        };

        let big_val = match i % 4 {
            0 => 9223372036854774784.0,  // Near i64::MAX
            1 => -9223372036854774784.0, // Near i64::MIN
            2 => 0.0,
            _ => (i as f64) * 1000000000000.0,
        };
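
        // 9223372036854774784 is 2^63 - 1024: i64::MAX itself (2^63 - 1) is
        // not exactly representable as an f64 (it would round up to 2^63 and
        // overflow), so this is the largest f64 that still fits in an i64.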

        update_data.insert("int_val1".to_string(), Value { kind: Some(Kind::NumberValue(small_val)) });
        update_data.insert("int_val2".to_string(), Value { kind: Some(Kind::NumberValue(small_val)) });
        update_data.insert("bigint_val1".to_string(), Value { kind: Some(Kind::NumberValue(big_val)) });
        update_data.insert("bigint_val2".to_string(), Value { kind: Some(Kind::NumberValue(big_val)) });

        // Add some decimal and other type updates for a comprehensive stress test
        update_data.insert("decimal_val".to_string(), string_to_proto_value(&format!("{}.{:02}", i * 10, i % 100)));
        update_data.insert("bool_val".to_string(), Value { kind: Some(Kind::BoolValue(i % 2 == 0)) });
        update_data.insert("timestamp_val".to_string(), string_to_proto_value(&format!("2024-01-{:02}T{:02}:00:00Z", (i % 28) + 1, i % 24)));

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.stress_table.clone(),
            id: *record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
        assert!(result.is_ok(), "Rapid stress update {} should succeed", i);
    }

    let duration = start.elapsed();
    println!("100 mixed data type stress updates took: {:?}", duration);

    // Should complete in reasonable time (adjust threshold as needed)
    assert!(duration.as_secs() < 15, "Stress test took too long: {:?}", duration);

    // Verify all records were updated correctly
    let query = format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE test_name LIKE 'Stress update test%'"#,
        context.profile_name, context.stress_table
    );

    let count: i64 = sqlx::query_scalar(&query)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert_eq!(count, 100);
}

#[rstest]
#[tokio::test]
async fn test_update_concurrent_stress_mixed_data_types(
    #[future] performance_test_context: PerformanceTestContext,
) {
    let context = performance_test_context.await;

    // Create initial records
    let mut record_ids = Vec::new();
    for _ in 0..20 {
        let record_id = create_initial_performance_record(&context).await;
        record_ids.push(record_id);
    }

    // Concurrent stress test with mixed data types
    let tasks: Vec<_> = record_ids.into_iter().enumerate().map(|(i, record_id)| {
        let context = context.clone();

        tokio::spawn(async move {
            let mut update_data = HashMap::new();
            update_data.insert("test_name".to_string(), string_to_proto_value(&format!("Concurrent stress {}", i)));

            // Use complex values that stress different validation paths
            let complex_int = match i % 3 {
                0 => 2147483647.0 - (i as f64),  // Near i32::MAX
                1 => -2147483648.0 + (i as f64), // Near i32::MIN
                _ => (i as f64) * 12345.0,
            };

            let complex_bigint = match i % 3 {
                0 => 9223372036854774784.0 - (i as f64 * 1000000000.0),
                1 => -9223372036854774784.0 + (i as f64 * 1000000000.0),
                _ => (i as f64) * 987654321012345.0,
            };

            update_data.insert("int_val1".to_string(), Value { kind: Some(Kind::NumberValue(complex_int)) });
            update_data.insert("int_val2".to_string(), Value { kind: Some(Kind::NumberValue(complex_int)) });
            update_data.insert("bigint_val1".to_string(), Value { kind: Some(Kind::NumberValue(complex_bigint)) });
            update_data.insert("bigint_val2".to_string(), Value { kind: Some(Kind::NumberValue(complex_bigint)) });
            update_data.insert("decimal_val".to_string(), string_to_proto_value(&format!("{}.{:02}", i * 33, (i * 7) % 100)));
            update_data.insert("bool_val".to_string(), Value { kind: Some(Kind::BoolValue((i * 3) % 2 == 0)) });

            let request = PutTableDataRequest {
                profile_name: context.profile_name.clone(),
                table_name: context.stress_table.clone(),
                id: record_id,
                data: update_data,
            };

            put_table_data(&context.pool, request, &context.indexer_tx).await
        })
    }).collect();

    // Wait for all concurrent updates to complete
    let results = futures::future::join_all(tasks).await;

    // All should succeed
    for (i, result) in results.into_iter().enumerate() {
        let task_result = result.expect("Task should not panic");
        assert!(task_result.is_ok(), "Concurrent stress update {} should succeed", i);
    }

    // Verify all records were updated
    let query = format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE test_name LIKE 'Concurrent stress%'"#,
        context.profile_name, context.stress_table
    );

    let count: i64 = sqlx::query_scalar(&query)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert_eq!(count, 20);
}

// ========================================================================
// EDGE CASE COMBINATION TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_complex_mixed_data_type_combinations(
    #[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
) {
    let context = comprehensive_integer_test_context.await;
    let record_id = create_initial_comprehensive_integer_record(&context, &context.mixed_integer_table).await;

    // Test complex combinations of data type updates that stress multiple validation paths
    let complex_combinations = vec![
        (
            "All boundary values",
            HashMap::from([
                ("small_int".to_string(), Value { kind: Some(Kind::NumberValue(2147483647.0)) }),
                ("big_int".to_string(), Value { kind: Some(Kind::NumberValue(9223372036854774784.0)) }),
                ("another_int".to_string(), Value { kind: Some(Kind::NumberValue(-2147483648.0)) }),
                ("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue(-9223372036854774784.0)) }),
                ("nullable_int".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
                ("nullable_bigint".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
            ])
        ),
        (
            "Mixed nulls and values",
            HashMap::from([
                ("small_int".to_string(), Value { kind: Some(Kind::NumberValue(42.0)) }),
                ("big_int".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
                ("another_int".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
                ("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue(1000000000000.0)) }),
                ("nullable_int".to_string(), Value { kind: Some(Kind::NumberValue(123.0)) }),
                ("nullable_bigint".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
            ])
        ),
        (
            "Zero and near-zero values",
            HashMap::from([
                ("small_int".to_string(), Value { kind: Some(Kind::NumberValue(0.0)) }),
                ("big_int".to_string(), Value { kind: Some(Kind::NumberValue(1.0)) }),
                ("another_int".to_string(), Value { kind: Some(Kind::NumberValue(-1.0)) }),
                ("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue(0.0)) }),
                ("nullable_int".to_string(), Value { kind: Some(Kind::NumberValue(1.0)) }),
                ("nullable_bigint".to_string(), Value { kind: Some(Kind::NumberValue(-1.0)) }),
            ])
        ),
    ];
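
    // NullValue(0) is the protobuf well-known NullValue enum; its single
    // variant NULL_VALUE is 0, so 0 is the only valid discriminant.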

    for (description, mut update_data) in complex_combinations {
        update_data.insert("name".to_string(), string_to_proto_value(&format!("Complex combo: {}", description)));

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.mixed_integer_table.clone(),
            id: record_id,
            data: update_data.clone(),
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
        assert!(result.is_ok(), "Complex combination should succeed: {}", description);

        // Verify the complex combination was stored correctly
        let query = format!(
            r#"SELECT small_int, big_int, another_int, another_bigint, nullable_int, nullable_bigint FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.mixed_integer_table
        );
        let row = sqlx::query(&query)
            .bind(record_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        // Verify each field based on what was set in update_data
        for (field_name, expected_value) in update_data.iter() {
            if field_name == "name" { continue; } // Skip text field

            match expected_value.kind.as_ref().unwrap() {
                Kind::NumberValue(num) => {
                    match field_name.as_str() {
                        "small_int" | "another_int" | "nullable_int" => {
                            let stored: Option<i32> = row.get(field_name.as_str());
                            if let Some(stored_val) = stored {
                                assert_eq!(stored_val, *num as i32, "Field {} mismatch in {}", field_name, description);
                            }
                        },
                        "big_int" | "another_bigint" | "nullable_bigint" => {
                            let stored: Option<i64> = row.get(field_name.as_str());
                            if let Some(stored_val) = stored {
                                assert_eq!(stored_val, *num as i64, "Field {} mismatch in {}", field_name, description);
                            }
                        },
                        _ => {}
                    }
                },
                Kind::NullValue(_) => {
                    match field_name.as_str() {
                        "small_int" | "another_int" | "nullable_int" => {
                            let stored: Option<i32> = row.get(field_name.as_str());
                            assert!(stored.is_none(), "Field {} should be null in {}", field_name, description);
                        },
                        "big_int" | "another_bigint" | "nullable_bigint" => {
                            let stored: Option<i64> = row.get(field_name.as_str());
                            assert!(stored.is_none(), "Field {} should be null in {}", field_name, description);
                        },
                        _ => {}
                    }
                },
                _ => {}
            }
        }
    }
}

259 server/tests/tables_data/put/put_table_data_test5.rs Normal file
@@ -0,0 +1,259 @@
// tests/tables_data/handlers/put_table_data_test5.rs

// ========================================================================
// MISSING TEST SCENARIOS REPLICATED FROM POST TESTS
// ========================================================================

// Fixture to provide a closed database pool, simulating a connection error.
// This is needed for the database error test.
#[fixture]
async fn closed_test_context() -> TestContext {
    let context = test_context().await;
    context.pool.close().await;
    context
}
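
// Closing an sqlx pool makes every subsequent acquire fail fast (PoolClosed),
// so the fixture simulates an unreachable database without tearing anything
// down for real.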

// Test 1: Ensure that an update fails gracefully when the database is unavailable.
#[rstest]
#[tokio::test]
async fn test_update_table_data_database_error(
    #[future] closed_test_context: TestContext,
) {
    // Arrange
    let context = closed_test_context.await;
    // The record ID doesn't matter as the connection is already closed.
    let record_id = 1;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert(
        "firma".to_string(),
        string_to_proto_value("This will fail"),
    );
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    let result =
        put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

// Test 2: Ensure that updating a required foreign key to NULL is not allowed.
// This uses the `foreign_key_update_test_context` from `put_table_data_test3.rs`.
#[rstest]
#[tokio::test]
async fn test_update_required_foreign_key_to_null_fails(
    #[future]
    foreign_key_update_test_context: ForeignKeyUpdateTestContext,
) {
    let context = foreign_key_update_test_context.await;

    // Arrange: Create a category and a product linked to it.
    let mut category_data = HashMap::new();
    category_data
        .insert("name".to_string(), string_to_proto_value("Test Category"));
    let category_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_response = post_table_data(
        &context.pool,
        category_request,
        &context.indexer_tx,
    )
    .await
    .unwrap();
    let category_id = category_response.inserted_id;

    let mut product_data = HashMap::new();
    product_data
        .insert("name".to_string(), string_to_proto_value("Test Product"));
    product_data.insert(
        format!("{}_id", context.category_table),
        Value { kind: Some(Kind::NumberValue(category_id as f64)) },
    );
    let product_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };
    let product_response =
        post_table_data(&context.pool, product_request, &context.indexer_tx)
            .await
            .unwrap();
    let product_id = product_response.inserted_id;

    // Act: Attempt to update the product's required foreign key to NULL.
    let mut update_data = HashMap::new();
    update_data.insert(
        format!("{}_id", context.category_table),
        Value { kind: Some(Kind::NullValue(0)) },
    );

    let update_request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        id: product_id,
        data: update_data,
    };

    let result =
        put_table_data(&context.pool, update_request, &context.indexer_tx)
            .await;

    // Assert: The operation should fail due to a database constraint.
    assert!(
        result.is_err(),
        "Update of required foreign key to NULL should fail"
    );
    let err = result.unwrap_err();
    // The database will likely return a NOT NULL violation, which our handler
    // wraps as an Internal error.
    assert_eq!(err.code(), tonic::Code::Internal);
    assert!(err.message().contains("Update failed"));
}
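
// For this to fail at the database layer, the product table's FK column must
// be declared NOT NULL. The fixture's schema is assumed to look roughly like:
//
//     CREATE TABLE product (
//         id BIGSERIAL PRIMARY KEY,
//         name TEXT,
//         category_id BIGINT NOT NULL REFERENCES category(id)
//     );
//
// (Illustrative only; the real table and column names come from the context.)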

// tests/tables_data/handlers/put_table_data_test6.rs

// ========================================================================
// MISSING DATA TYPE VALIDATION TESTS FOR PUT HANDLER
// ========================================================================

// Note: These tests are replicated from post_table_data_test3.rs to ensure
// the PUT handler has the same level of type validation coverage as the POST handler.

#[rstest]
#[tokio::test]
async fn test_update_type_mismatch_string_for_integer(
    #[future] data_type_test_context: DataTypeTestContext,
) {
    // Arrange
    let context = data_type_test_context.await;
    let record_id = create_initial_data_type_record(&context).await;

    // Act: Attempt to update an integer column with a string value.
    let mut update_data = HashMap::new();
    update_data.insert(
        "my_bigint".to_string(),
        create_string_value("not-an-integer"),
    );

    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };

    let result =
        put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err
            .message()
            .contains("Expected number for column 'my_bigint'"));
    }
}

#[rstest]
#[tokio::test]
async fn test_update_type_mismatch_number_for_boolean(
    #[future] data_type_test_context: DataTypeTestContext,
) {
    // Arrange
    let context = data_type_test_context.await;
    let record_id = create_initial_data_type_record(&context).await;

    // Act: Attempt to update a boolean column with a number value.
    let mut update_data = HashMap::new();
    update_data.insert("my_bool".to_string(), create_number_value(1.0));

    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };

    let result =
        put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err
            .message()
            .contains("Expected boolean for column 'my_bool'"));
    }
}

#[rstest]
#[tokio::test]
async fn test_update_with_various_valid_timestamp_formats(
    #[future] data_type_test_context: DataTypeTestContext,
) {
    // Arrange
    let context = data_type_test_context.await;
    let record_id = create_initial_data_type_record(&context).await;

    let valid_timestamps = vec![
        "2025-06-24T18:30:00Z",
        "2023-01-01T00:00:00+00:00",
        "2024-02-29T12:00:00.123456Z",
        "1999-12-31T23:59:59.999Z",
    ];
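
    // All four are RFC 3339 variants: "Z" and "+00:00" offsets, optional
    // fractional seconds, and a leap-day date (2024-02-29); chrono's
    // parse_from_rfc3339, used in the verification below, accepts each of them.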

    for timestamp_str in valid_timestamps {
        // Act
        let mut update_data = HashMap::new();
        update_data.insert(
            "my_timestamp".to_string(),
            create_string_value(timestamp_str),
        );

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            id: record_id,
            data: update_data,
        };

        let result =
            put_table_data(&context.pool, request, &context.indexer_tx).await;

        // Assert
        assert!(
            result.is_ok(),
            "Update should succeed for valid timestamp format: {}",
            timestamp_str
        );

        // Verify the value was stored correctly
        let stored_timestamp: chrono::DateTime<chrono::Utc> =
            sqlx::query_scalar(&format!(
                r#"SELECT my_timestamp FROM "{}"."{}" WHERE id = $1"#,
                context.profile_name, context.table_name
            ))
            .bind(record_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        let expected_timestamp =
            chrono::DateTime::parse_from_rfc3339(timestamp_str)
                .unwrap()
                .with_timezone(&chrono::Utc);
        assert_eq!(stored_timestamp, expected_timestamp);
    }
}