Compare commits

...

21 Commits

Author SHA1 Message Date
Priec
2a811b1f8c rename the column aliases 2026-05-02 00:38:54 +02:00
Priec
1f9c29411e multiple requests to the structure of a tables at once(batching) 2026-04-30 11:48:03 +02:00
Priec
b928004c76 search with multiquery redesigned 2026-04-29 19:56:17 +02:00
Priec
fb4769301c column name indexing 2026-04-29 01:33:48 +02:00
Priec
036e12f345 indexing done by the profile and not table 2026-04-29 01:08:59 +02:00
Priec
1ceab57f3b exact search endpoint 2026-04-29 00:40:36 +02:00
Priec
5de1cd7623 refactoring search based on the profile 2026-04-29 00:38:42 +02:00
Priec
1867de513d get profile details with scripts and tables columns is now working 2026-04-27 22:01:17 +02:00
Priec
42181499fe canvas has more advanced suggestions for the textarea now 2026-04-25 22:18:51 +02:00
Priec
36249739d3 sync 2026-04-23 00:10:58 +02:00
Priec
bbd7c29681 frontend using steel engine 2026-04-22 18:20:14 +02:00
Priec
2818a5f280 updates on the client with suggestions and obeying new project structure, addtable and addlogic still not fixed 2026-04-15 23:34:47 +02:00
Priec
946ee9677c v0.5.10 is here, we removed the manualf debug and default in the canvas library and cleaned client from it 2026-01-05 17:21:28 +01:00
Priec
ec16930569 completely redesigned core of the tui, not migrated everything yet, but very close 2026-01-03 09:06:35 +01:00
Priec
accfb4f346 experimental client 2025-12-24 23:32:21 +01:00
Priec
e5ce96e210 rust fmt and server clippy changes 2025-12-21 00:15:26 +01:00
Priec
a506cd8f08 linted protobufs via buf 2025-12-20 20:57:11 +01:00
Priec
1cedd58708 serialization and gitignore in canvas 2025-12-18 20:04:03 +01:00
Priec
7f7ebd3ad6 Update client submodule pointer 2025-11-22 23:31:42 +01:00
Priec
9f6d480aee added validation2, NEXT DO GRPCURL TESTING OF THE VALIDATIONS 2025-11-21 23:17:16 +01:00
Priec
339d06ce7e table structure docs are made 2025-10-26 22:02:44 +01:00
34 changed files with 2567 additions and 646 deletions

1
.gitignore vendored
View File

@@ -6,3 +6,4 @@ steel_decimal/tests/property_tests.proptest-regressions
.direnv/
canvas/*.toml
.aider*
.codex

148
Cargo.lock generated
View File

@@ -493,11 +493,12 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
[[package]]
name = "canvas"
version = "0.5.0"
version = "0.6.3"
dependencies = [
"anyhow",
"async-trait",
"crossterm",
"derivative",
"once_cell",
"ratatui",
"regex",
@@ -584,7 +585,7 @@ dependencies = [
[[package]]
name = "client"
version = "0.5.0"
version = "0.6.3"
dependencies = [
"anyhow",
"async-trait",
@@ -595,12 +596,18 @@ dependencies = [
"dotenvy",
"futures",
"lazy_static",
"nucleo",
"prost 0.13.5",
"prost-types 0.13.5",
"ratatui",
"regex",
"rstest",
"serde",
"serde_json",
"steel-core",
"steel-decimal",
"strum 0.27.2",
"strum_macros 0.27.2",
"time",
"tokio",
"tokio-test",
@@ -608,7 +615,6 @@ dependencies = [
"tonic",
"tracing",
"tracing-subscriber",
"tui-textarea",
"unicode-segmentation",
"unicode-width 0.2.0",
"uuid",
@@ -635,7 +641,7 @@ dependencies = [
[[package]]
name = "common"
version = "0.5.0"
version = "0.6.3"
dependencies = [
"prost 0.13.5",
"prost-build 0.14.1",
@@ -936,6 +942,17 @@ dependencies = [
"serde",
]
[[package]]
name = "derivative"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "digest"
version = "0.10.7"
@@ -1959,11 +1976,11 @@ checksum = "08ab2867e3eeeca90e844d1940eab391c9dc5228783db2ed999acbc0a9ed375a"
[[package]]
name = "matchers"
version = "0.1.0"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9"
dependencies = [
"regex-automata 0.1.10",
"regex-automata",
]
[[package]]
@@ -2080,12 +2097,32 @@ dependencies = [
[[package]]
name = "nu-ansi-term"
version = "0.46.0"
version = "0.50.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
dependencies = [
"overload",
"winapi",
"windows-sys 0.60.2",
]
[[package]]
name = "nucleo"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5262af4c94921c2646c5ac6ff7900c2af9cbb08dc26a797e18130a7019c039d4"
dependencies = [
"nucleo-matcher",
"parking_lot",
"rayon",
]
[[package]]
name = "nucleo-matcher"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf33f538733d1a5a3494b836ba913207f14d9d4a1d3cd67030c5061bdd2cac85"
dependencies = [
"memchr",
"unicode-segmentation",
]
[[package]]
@@ -2243,12 +2280,6 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
[[package]]
name = "overload"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
[[package]]
name = "ownedbytes"
version = "0.9.0"
@@ -2756,7 +2787,7 @@ dependencies = [
"itertools 0.13.0",
"lru",
"paste",
"strum",
"strum 0.26.3",
"unicode-segmentation",
"unicode-truncate",
"unicode-width 0.2.0",
@@ -2810,17 +2841,8 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata 0.4.9",
"regex-syntax 0.8.5",
]
[[package]]
name = "regex-automata"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
dependencies = [
"regex-syntax 0.6.29",
"regex-automata",
"regex-syntax",
]
[[package]]
@@ -2831,15 +2853,9 @@ checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax 0.8.5",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "regex-syntax"
version = "0.8.5"
@@ -3100,7 +3116,7 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b"
[[package]]
name = "search"
version = "0.5.0"
version = "0.6.3"
dependencies = [
"anyhow",
"common",
@@ -3199,7 +3215,7 @@ dependencies = [
[[package]]
name = "server"
version = "0.5.0"
version = "0.6.3"
dependencies = [
"anyhow",
"bcrypt",
@@ -3210,6 +3226,7 @@ dependencies = [
"futures",
"jsonwebtoken",
"lazy_static",
"once_cell",
"prost 0.13.5",
"prost-build 0.14.1",
"prost-types 0.13.5",
@@ -3234,6 +3251,7 @@ dependencies = [
"tonic",
"tonic-reflection",
"tracing",
"tracing-subscriber",
"uuid",
"validator",
]
@@ -3756,9 +3774,15 @@ version = "0.26.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06"
dependencies = [
"strum_macros",
"strum_macros 0.26.4",
]
[[package]]
name = "strum"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf"
[[package]]
name = "strum_macros"
version = "0.26.4"
@@ -3772,6 +3796,18 @@ dependencies = [
"syn 2.0.104",
]
[[package]]
name = "strum_macros"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 2.0.104",
]
[[package]]
name = "subtle"
version = "2.6.1"
@@ -3830,7 +3866,7 @@ dependencies = [
"fnv",
"once_cell",
"plist",
"regex-syntax 0.8.5",
"regex-syntax",
"serde",
"serde_derive",
"serde_json",
@@ -3936,7 +3972,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d60769b80ad7953d8a7b2c70cdfe722bbcdcac6bccc8ac934c40c034d866fc18"
dependencies = [
"byteorder",
"regex-syntax 0.8.5",
"regex-syntax",
"utf8-ranges",
]
@@ -4318,9 +4354,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3"
[[package]]
name = "tracing"
version = "0.1.41"
version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100"
dependencies = [
"log",
"pin-project-lite",
@@ -4330,9 +4366,9 @@ dependencies = [
[[package]]
name = "tracing-attributes"
version = "0.1.30"
version = "0.1.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903"
checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da"
dependencies = [
"proc-macro2",
"quote",
@@ -4341,9 +4377,9 @@ dependencies = [
[[package]]
name = "tracing-core"
version = "0.1.34"
version = "0.1.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678"
checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a"
dependencies = [
"once_cell",
"valuable",
@@ -4362,14 +4398,14 @@ dependencies = [
[[package]]
name = "tracing-subscriber"
version = "0.3.19"
version = "0.3.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008"
checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e"
dependencies = [
"matchers",
"nu-ansi-term",
"once_cell",
"regex",
"regex-automata",
"sharded-slab",
"smallvec",
"thread_local",
@@ -4399,18 +4435,6 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e78122066b0cb818b8afd08f7ed22f7fdbc3e90815035726f0840d0d26c0747a"
[[package]]
name = "tui-textarea"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a5318dd619ed73c52a9417ad19046724effc1287fb75cdcc4eca1d6ac1acbae"
dependencies = [
"crossterm",
"ratatui",
"regex",
"unicode-width 0.2.0",
]
[[package]]
name = "typed-arena"
version = "2.0.2"

View File

@@ -5,7 +5,7 @@ resolver = "2"
[workspace.package]
# TODO: idk how to do the name, fix later
# name = "komp_ac"
version = "0.5.0"
version = "0.6.3"
edition = "2021"
license = "GPL-3.0-or-later"
authors = ["Filip Priečinský <filippriec@gmail.com>"]

View File

@@ -16,10 +16,5 @@ cargo watch -x 'run --package client -- client'
Client with tracing:
```
ENABLE_TRACING=1 RUST_LOG=client=debug cargo watch -x 'run --package client -- client'
```
Client with debug that cant be traced
```
cargo run --package client --features ui-debug -- client
```

2
canvas

Submodule canvas updated: 29fdc5a6c7...abbda5b7a9

2
client

Submodule client updated: c1839bd960...ab990ac128

1
common/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
docs/

11
common/Makefile Normal file
View File

@@ -0,0 +1,11 @@
DOC_OUT := docs/grpc_reference.html
.PHONY: docs
docs:
@echo "Generating gRPC documentation..."
mkdir -p $(dir $(DOC_OUT))
protoc \
--doc_out=html,index.html:$(dir $(DOC_OUT)) \
--proto_path=proto proto/*.proto
@echo "✅ Docs written to $(DOC_OUT)"

View File

@@ -20,6 +20,18 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
".komp_ac.table_validation.TableValidationResponse",
"#[derive(serde::Serialize, serde::Deserialize)]",
)
.type_attribute(
".komp_ac.table_validation.PatternRule",
"#[derive(serde::Serialize, serde::Deserialize)]",
)
.type_attribute(
".komp_ac.table_validation.PatternRules",
"#[derive(serde::Serialize, serde::Deserialize)]",
)
.type_attribute(
".komp_ac.table_validation.CustomFormatter",
"#[derive(serde::Serialize, serde::Deserialize)]",
)
.type_attribute(
".komp_ac.table_validation.UpdateFieldValidationRequest",
"#[derive(serde::Serialize, serde::Deserialize)]",
@@ -49,6 +61,14 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
".komp_ac.table_definition.TableDefinitionResponse",
"#[derive(serde::Serialize, serde::Deserialize)]"
)
.type_attribute(
".komp_ac.table_definition.RenameColumnAliasRequest",
"#[derive(serde::Serialize, serde::Deserialize)]"
)
.type_attribute(
".komp_ac.table_definition.RenameColumnAliasResponse",
"#[derive(serde::Serialize, serde::Deserialize)]"
)
.type_attribute(
".komp_ac.table_script.PostTableScriptRequest",
"#[derive(serde::Serialize, serde::Deserialize)]",

View File

@@ -6,78 +6,78 @@ import "common.proto";
// import "table_structure.proto";
service Adresar {
rpc PostAdresar (PostAdresarRequest) returns (AdresarResponse);
rpc GetAdresar (GetAdresarRequest) returns (AdresarResponse);
rpc PutAdresar (PutAdresarRequest) returns (AdresarResponse);
rpc DeleteAdresar (DeleteAdresarRequest) returns (DeleteAdresarResponse);
rpc GetAdresarCount (common.Empty) returns (common.CountResponse);
rpc GetAdresarByPosition (common.PositionRequest) returns (AdresarResponse);
rpc PostAdresar(PostAdresarRequest) returns (AdresarResponse);
rpc GetAdresar(GetAdresarRequest) returns (AdresarResponse);
rpc PutAdresar(PutAdresarRequest) returns (AdresarResponse);
rpc DeleteAdresar(DeleteAdresarRequest) returns (DeleteAdresarResponse);
rpc GetAdresarCount(common.Empty) returns (common.CountResponse);
rpc GetAdresarByPosition(common.PositionRequest) returns (AdresarResponse);
}
message GetAdresarRequest {
int64 id = 1;
int64 id = 1;
}
message DeleteAdresarRequest {
int64 id = 1;
int64 id = 1;
}
message PostAdresarRequest {
string firma = 1;
string kz = 2;
string drc = 3;
string ulica = 4;
string psc = 5;
string mesto = 6;
string stat = 7;
string banka = 8;
string ucet = 9;
string skladm = 10;
string ico = 11;
string kontakt = 12;
string telefon = 13;
string skladu = 14;
string fax = 15;
string firma = 1;
string kz = 2;
string drc = 3;
string ulica = 4;
string psc = 5;
string mesto = 6;
string stat = 7;
string banka = 8;
string ucet = 9;
string skladm = 10;
string ico = 11;
string kontakt = 12;
string telefon = 13;
string skladu = 14;
string fax = 15;
}
message AdresarResponse {
int64 id = 1;
string firma = 2;
string kz = 3;
string drc = 4;
string ulica = 5;
string psc = 6;
string mesto = 7;
string stat = 8;
string banka = 9;
string ucet = 10;
string skladm = 11;
string ico = 12;
string kontakt = 13;
string telefon = 14;
string skladu = 15;
string fax = 16;
int64 id = 1;
string firma = 2;
string kz = 3;
string drc = 4;
string ulica = 5;
string psc = 6;
string mesto = 7;
string stat = 8;
string banka = 9;
string ucet = 10;
string skladm = 11;
string ico = 12;
string kontakt = 13;
string telefon = 14;
string skladu = 15;
string fax = 16;
}
message PutAdresarRequest {
int64 id = 1;
string firma = 2;
string kz = 3;
string drc = 4;
string ulica = 5;
string psc = 6;
string mesto = 7;
string stat = 8;
string banka = 9;
string ucet = 10;
string skladm = 11;
string ico = 12;
string kontakt = 13;
string telefon = 14;
string skladu = 15;
string fax = 16;
int64 id = 1;
string firma = 2;
string kz = 3;
string drc = 4;
string ulica = 5;
string psc = 6;
string mesto = 7;
string stat = 8;
string banka = 9;
string ucet = 10;
string skladm = 11;
string ico = 12;
string kontakt = 13;
string telefon = 14;
string skladu = 15;
string fax = 16;
}
message DeleteAdresarResponse {
bool success = 1;
bool success = 1;
}

View File

@@ -5,35 +5,35 @@ package komp_ac.auth;
import "common.proto";
service AuthService {
rpc Register(RegisterRequest) returns (AuthResponse);
rpc Login(LoginRequest) returns (LoginResponse);
rpc Register(RegisterRequest) returns (AuthResponse);
rpc Login(LoginRequest) returns (LoginResponse);
}
message RegisterRequest {
string username = 1;
string email = 2;
string password = 3;
string password_confirmation = 4;
string role = 5;
string username = 1;
string email = 2;
string password = 3;
string password_confirmation = 4;
string role = 5;
}
message AuthResponse {
string id = 1; // UUID in string format
string username = 2; // Registered username
string email = 3; // Registered email (if provided)
string role = 4; // Default role: 'accountant'
string id = 1; // UUID in string format
string username = 2; // Registered username
string email = 3; // Registered email (if provided)
string role = 4; // Default role: 'accountant'
}
message LoginRequest {
string identifier = 1; // Can be username or email
string password = 2;
string identifier = 1; // Can be username or email
string password = 2;
}
message LoginResponse {
string access_token = 1; // JWT token
string token_type = 2; // Usually "Bearer"
int32 expires_in = 3; // Expiration in seconds (86400 for 24 hours)
string user_id = 4; // User's UUID in string format
string role = 5; // User's role
string username = 6;
string access_token = 1; // JWT token
string token_type = 2; // Usually "Bearer"
int32 expires_in = 3; // Expiration in seconds (86400 for 24 hours)
string user_id = 4; // User's UUID in string format
string role = 5; // User's role
string username = 6;
}

View File

@@ -3,5 +3,9 @@ syntax = "proto3";
package komp_ac.common;
message Empty {}
message CountResponse { int64 count = 1; }
message PositionRequest { int64 position = 1; }
message CountResponse {
int64 count = 1;
}
message PositionRequest {
int64 position = 1;
}

View File

@@ -3,18 +3,34 @@ syntax = "proto3";
package komp_ac.search;
service Searcher {
rpc SearchTable(SearchRequest) returns (SearchResponse);
rpc Search(SearchRequest) returns (SearchResponse);
}
enum MatchMode {
MATCH_MODE_UNSPECIFIED = 0;
MATCH_MODE_FUZZY = 1;
MATCH_MODE_EXACT = 2;
}
message ColumnConstraint {
string column = 1;
string query = 2;
MatchMode mode = 3;
}
message SearchRequest {
string table_name = 1;
string query = 2;
string profile_name = 1;
optional string table_name = 2;
string free_query = 3;
repeated ColumnConstraint must = 4;
optional uint32 limit = 5;
}
message SearchResponse {
message Hit {
int64 id = 1; // PostgreSQL row ID
float score = 2;
string content_json = 3;
}
repeated Hit hits = 1;
message Hit {
int64 id = 1; // PostgreSQL row ID
float score = 2;
string content_json = 3;
string table_name = 4;
}
repeated Hit hits = 1;
}

View File

@@ -3,44 +3,44 @@ syntax = "proto3";
package komp_ac.search2;
service Search2 {
rpc SearchTable(Search2Request) returns (Search2Response);
rpc SearchTable(Search2Request) returns (Search2Response);
}
message Search2Request {
string profile_name = 1;
string table_name = 2;
repeated ColumnFilter column_filters = 3;
optional string text_query = 4; // Optional fallback text search
optional string text_query = 4; // Optional fallback text search
optional int32 limit = 5;
optional string order_by = 6;
optional bool order_desc = 7;
}
message ColumnFilter {
string column_name = 1;
FilterType filter_type = 2;
string value = 3;
optional string value2 = 4; // For range queries
string column_name = 1;
FilterType filter_type = 2;
string value = 3;
optional string value2 = 4; // For range queries
}
enum FilterType {
EQUALS = 0;
CONTAINS = 1;
STARTS_WITH = 2;
ENDS_WITH = 3;
RANGE = 4;
GREATER_THAN = 5;
LESS_THAN = 6;
IS_NULL = 7;
IS_NOT_NULL = 8;
EQUALS = 0;
CONTAINS = 1;
STARTS_WITH = 2;
ENDS_WITH = 3;
RANGE = 4;
GREATER_THAN = 5;
LESS_THAN = 6;
IS_NULL = 7;
IS_NOT_NULL = 8;
}
message Search2Response {
message Hit {
int64 id = 1;
string content_json = 2; // No score - this is SQL-based
optional string match_info = 3; // Info about which columns matched
}
repeated Hit hits = 1;
int32 total_count = 2; // Total matching records (for pagination)
message Hit {
int64 id = 1;
string content_json = 2; // No score - this is SQL-based
optional string match_info = 3; // Info about which columns matched
}
repeated Hit hits = 1;
int32 total_count = 2; // Total matching records (for pagination)
}

View File

@@ -4,56 +4,186 @@ package komp_ac.table_definition;
import "common.proto";
// The TableDefinition service manages the entire lifecycle of user-defined
// tables (stored as both metadata and physical PostgreSQL tables) inside
// logical "profiles" (schemas). Each table has stored structure, links, and
// validation rules.
service TableDefinition {
rpc PostTableDefinition (PostTableDefinitionRequest) returns (TableDefinitionResponse);
rpc GetProfileTree (komp_ac.common.Empty) returns (ProfileTreeResponse);
rpc DeleteTable (DeleteTableRequest) returns (DeleteTableResponse);
// Creates a new table (and schema if missing) with system columns,
// linked-table foreign keys, user-defined columns, and optional indexes.
// Also inserts metadata and default validation rules. Entirely transactional.
rpc PostTableDefinition(PostTableDefinitionRequest) returns (TableDefinitionResponse);
// Lists all profiles (schemas) and their tables with declared dependencies.
// This provides a tree-like overview of table relationships.
rpc GetProfileTree(komp_ac.common.Empty) returns (ProfileTreeResponse);
// Fetches all tables with their columns and scripts for a specific profile.
// Pure data retrieval - no business logic.
rpc GetProfileDetails(GetProfileDetailsRequest) returns (GetProfileDetailsResponse);
// Renames a user-visible column alias while keeping the physical column unchanged.
rpc RenameColumnAlias(RenameColumnAliasRequest) returns (RenameColumnAliasResponse);
// Drops a table and its metadata, then deletes the profile if it becomes empty.
rpc DeleteTable(DeleteTableRequest) returns (DeleteTableResponse);
}
// A single link to another table within the same profile (schema).
message TableLink {
string linked_table_name = 1;
bool required = 2;
// Name of an existing table within the same profile to link to.
// For each link, a "<linked>_id" column is created on the new table.
// That column references "<linked>"(id) and adds an index automatically.
string linked_table_name = 1;
// If true, the generated foreign key column is NOT NULL.
// Otherwise the column allows NULL.
// Duplicate links to the same target table in one request are rejected.
bool required = 2;
}
// Defines the input for creating a new table definition.
message PostTableDefinitionRequest {
string table_name = 1;
repeated TableLink links = 2;
repeated ColumnDefinition columns = 3;
repeated string indexes = 4;
string profile_name = 5;
// Table name to create inside the target profile.
// Must be lowercase, alphanumeric with underscores,
// start with a letter, and be <= 63 chars.
// Forbidden names: "id", "deleted", "created_at", or ending in "_id".
string table_name = 1;
// List of links (foreign keys) to existing tables in the same profile.
// Each will automatically get a "<linked>_id" column and an index.
repeated TableLink links = 2;
// List of user-defined columns (adds to system/id/fk columns).
repeated ColumnDefinition columns = 3;
// List of column names to be indexed (must match existing user-defined columns).
// Indexes can target only user-defined columns; system columns ("id", "deleted",
// "created_at") and automatically generated foreign key ("*_id") columns already
// have indexes. Requests trying to index those columns are rejected.
repeated string indexes = 4;
// Name of profile (Postgres schema) where the table will be created.
// Same naming rules as table_name; cannot collide with reserved schemas
// like "public", "information_schema", or ones starting with "pg_".
string profile_name = 5;
}
// Describes one user-defined column for a table.
message ColumnDefinition {
string name = 1;
string field_type = 2;
// Column name that follows the same validation rules as table_name.
// Must be lowercase, start with a letter, no uppercase characters,
// and cannot be "id", "deleted", "created_at", or end with "_id".
string name = 1;
// Logical column type. Supported values (case-insensitive):
// TEXT / STRING
// BOOLEAN
// TIMESTAMP / TIMESTAMPTZ / TIME
// MONEY (= NUMERIC(14,4))
// INTEGER / INT
// BIGINTEGER / BIGINT
// DATE
// DECIMAL(p,s) → NUMERIC(p,s)
// DECIMAL args must be integers (no sign, no dot, no leading zeros);
// s ≤ p and p ≥ 1.
string field_type = 2;
}
// Response after table creation (success + DDL preview).
message TableDefinitionResponse {
bool success = 1;
string sql = 2;
// True if all DB changes and metadata inserts succeeded.
bool success = 1;
// The actual SQL executed: CREATE TABLE + CREATE INDEX statements.
string sql = 2;
}
// Describes the tree of all profiles and their tables.
message ProfileTreeResponse {
message Table {
int64 id = 1;
string name = 2;
repeated string depends_on = 3;
}
// Table entry in a profile.
message Table {
// Internal ID from table_definitions.id (metadata record).
int64 id = 1;
message Profile {
string name = 1;
repeated Table tables = 2;
}
// Table name within the profile (schema).
string name = 2;
repeated Profile profiles = 1;
// Other tables this one references (based on link definitions only).
repeated string depends_on = 3;
}
// Profile (schema) entry.
message Profile {
// Name of the schema/profile (as stored in `schemas.name`).
string name = 1;
// All tables in that schema and their dependencies.
repeated Table tables = 2;
}
// All profiles in the system.
repeated Profile profiles = 1;
}
// Request to fetch all tables, columns and scripts for a profile.
message GetProfileDetailsRequest {
// Profile (schema) name to fetch details for.
string profile_name = 1;
}
// Response with all tables, columns and scripts for a profile.
message GetProfileDetailsResponse {
string profile_name = 1;
repeated TableDetail tables = 2;
}
// Describes a table with its columns and associated scripts.
message TableDetail {
string name = 1;
int64 id = 2;
repeated ColumnDefinition columns = 3;
repeated ScriptInfo scripts = 4;
}
// A script that targets a specific column in a table.
message ScriptInfo {
int64 script_id = 1;
string target_column = 2;
string target_column_type = 3;
string script = 4;
string description = 5;
}
// Request to rename one user-visible column alias in a table.
message RenameColumnAliasRequest {
string profile_name = 1;
string table_name = 2;
string old_column_name = 3;
string new_column_name = 4;
}
// Response after renaming one column alias.
message RenameColumnAliasResponse {
bool success = 1;
string message = 2;
}
// Request to delete one table definition entirely.
message DeleteTableRequest {
string profile_name = 1;
string table_name = 2;
// Profile (schema) name owning the table (must exist).
string profile_name = 1;
// Table to drop (must exist in the profile).
// Executes DROP TABLE "profile"."table" CASCADE and then removes metadata.
string table_name = 2;
}
// Response after table deletion.
message DeleteTableResponse {
bool success = 1;
string message = 2;
// True if table and metadata were successfully deleted in one transaction.
bool success = 1;
// Human-readable summary of what was removed.
string message = 2;
}

View File

@@ -1,18 +1,139 @@
// common/proto/table_script.proto
syntax = "proto3";
package komp_ac.table_script;
// Manages column-computation scripts for user-defined tables.
// Each script belongs to a single table (table_definition_id) and populates
// exactly one target column in that table. The server:
// - Validates script syntax (non-empty, balanced parentheses, starts with '(')
// - Validates the target column (exists, not a system column, allowed type)
// - Validates column/type usage inside math expressions
// - Validates referenced tables/columns against the schema
// - Enforces link constraints for structured access (see notes below)
// - Analyzes dependencies and prevents cycles across the schema
// - Transforms the script to decimal-safe math (steel_decimal)
// - Upserts into table_scripts and records dependencies in script_dependencies
// The whole operation is transactional.
service TableScript {
rpc PostTableScript(PostTableScriptRequest) returns (TableScriptResponse);
// Create or update a script for a specific table and target column.
//
// Behavior:
// - Fetches the table by table_definition_id (must exist)
// - Validates "script" (syntax), "target_column" (exists and type rules),
// and all referenced tables/columns (must exist in same schema)
// - Validates math operations: prohibits using certain data types in math
// - Enforces link constraints for structured table access:
// • Allowed always: self-references (same table)
// • Structured access via steel_get_column / steel_get_column_with_index
// requires an explicit link in table_definition_links
// • Raw SQL access via steel_query_sql is permitted (still validated)
// - Detects and rejects circular dependencies across all scripts in the schema
// (self-references are allowed and not treated as cycles)
// - Transforms the script to decimal-safe operations (steel_decimal)
// - UPSERTS into table_scripts on (table_definitions_id, target_column)
// and saves a normalized dependency list into script_dependencies
rpc PostTableScript(PostTableScriptRequest) returns (TableScriptResponse);
// Fetch all stored scripts for a specific table.
//
// Behavior:
// - Resolves the table from (profile_name, table_name)
// - Returns the stored, transformed script from table_scripts
// - Includes normalized dependency metadata from script_dependencies
// - Returns an empty scripts list when the table has no scripts
rpc GetTableScripts(GetTableScriptsRequest) returns (GetTableScriptsResponse);
}
// Request to create or update a script bound to a specific table and column.
message PostTableScriptRequest {
int64 table_definition_id = 1;
string target_column = 2;
string script = 3;
string description = 4;
// Required. The metadata ID from table_definitions.id that identifies the
// table this script belongs to. The table must exist; its schema determines
// where referenced tables/columns are validated and where dependencies are stored.
int64 table_definition_id = 1;
// Required. The target column in the target table that this script computes.
// Must be an existing user-defined column in that table (not a system column).
// System columns are reserved: "id", "deleted", "created_at".
// The column's data type must NOT be one of the prohibited target types:
// BIGINT, DATE, TIMESTAMPTZ
// Note: BOOLEAN targets are allowed (values are converted to Steel #true/#false).
string target_column = 2;
// Required. The script in the Steel DSL (S-expression style).
// Syntax requirements:
// - Non-empty, must start with '('
// - Balanced parentheses
//
// Referencing data:
// - Structured table/column access (enforces link constraints):
// (steel_get_column "table_name" "column_name")
// (steel_get_column_with_index "table_name" index "column_name")
// • index must be a non-negative integer literal
// • self-references are allowed without links
// • other tables require an explicit link from the source table
// (table_definition_links) or the request fails
// - Raw SQL access (no link required, but still validated):
// (steel_query_sql "SELECT ...")
// • Basic checks disallow operations that imply prohibited types,
// e.g., EXTRACT(…), DATE_PART(…), ::DATE, ::TIMESTAMPTZ, ::BIGINT, CAST(…)
// - Self variable access in transformed scripts:
// (get-var "column_name") is treated as referencing the current table
//
// Math operations:
// - The script is transformed by steel_decimal; supported math forms include:
// +, -, *, /, ^, **, pow, sqrt, >, <, =, >=, <=, min, max, abs, round,
// ln, log, log10, exp, sin, cos, tan
// - Columns of the following types CANNOT be used inside math expressions:
// BIGINT, TEXT, BOOLEAN, DATE, TIMESTAMPTZ
//
// Dependency tracking and cycles:
// - Dependencies are extracted from steel_get_column(_with_index), get-var,
// and steel_query_sql and stored in script_dependencies with context
// - Cycles across tables are rejected (self-dependency is allowed)
string script = 3;
// Optional. Free-text description stored alongside the script (no functional effect).
string description = 4;
}
// Response after creating or updating a script.
message TableScriptResponse {
int64 id = 1;
string warnings = 2;
// The ID of the script record in table_scripts (new or existing on upsert).
int64 id = 1;
// Human-readable warnings concatenated into a single string. Possible messages:
// - Warning if the script references itself (may affect first population)
// - Count of raw SQL queries present
// - Info about number of structured linked-table accesses
// - Warning if many dependencies may affect performance
string warnings = 2;
}
message GetTableScriptsRequest {
// Required. Profile (schema) name.
string profile_name = 1;
// Required. Table name within the profile.
string table_name = 2;
}
message GetTableScriptsResponse {
repeated StoredTableScript scripts = 1;
}
message StoredTableScript {
int64 id = 1;
string target_column = 2;
string target_column_type = 3;
string script = 4;
string description = 5;
repeated ScriptDependency dependencies = 6;
}
message ScriptDependency {
string target_table = 1;
string dependency_type = 2;
string column = 3;
int64 index = 4;
string query_fragment = 5;
}

View File

@@ -1,25 +1,74 @@
// proto/table_structure.proto
// common/proto/table_structure.proto
syntax = "proto3";
package komp_ac.table_structure;
import "common.proto";
message GetTableStructureRequest {
string profile_name = 1; // e.g., "default"
string table_name = 2; // e.g., "2025_adresar6"
// Introspects the physical PostgreSQL tables for one or more logical tables
// (defined in table_definitions) and returns their column structures.
// The server validates that:
// - The profile (schema) exists in `schemas`
// - Every table is defined for that profile in `table_definitions`
// It then queries information_schema for the physical tables and returns
// normalized column metadata.
service TableStructureService {
// Return the physical column list (name, normalized data_type,
// nullability, primary key flag) for one or more tables in a profile.
//
// Behavior:
// - NOT_FOUND if profile doesn't exist in `schemas`
// - NOT_FOUND if any table is not defined for that profile in `table_definitions`
// - Queries information_schema.columns ordered by ordinal position
// - Normalizes data_type text (details under TableColumn.data_type)
// - Returns an error if any validated table has no visible columns in
// information_schema (e.g., physical table missing)
rpc GetTableStructure(GetTableStructureRequest) returns (GetTableStructureResponse);
}
// Request identifying the profile (schema) and tables to inspect.
message GetTableStructureRequest {
// Required. Profile (PostgreSQL schema) name. Must exist in `schemas`.
string profile_name = 1;
// Required. Table names within the profile. Each must exist in
// `table_definitions` for the given profile. The physical tables are then
// introspected via information_schema.
repeated string table_names = 2;
}
// Batched response keyed by table name.
message GetTableStructureResponse {
// Per-table physical column lists keyed by requested table name.
map<string, TableStructureResponse> table_structures = 1;
}
// Response with the ordered list of columns (by ordinal position) for one table.
message TableStructureResponse {
// Columns of the physical table, including system columns (id, deleted,
// created_at), user-defined columns, and any foreign-key columns such as
// "<linked_table>_id". May be empty if the physical table is missing.
repeated TableColumn columns = 1;
}
// One physical column entry as reported by information_schema.
message TableColumn {
// Column name exactly as defined in PostgreSQL.
string name = 1;
string data_type = 2; // e.g., "TEXT", "BIGINT", "VARCHAR(255)", "TIMESTAMPTZ"
// Normalized data type string derived from information_schema:
// - VARCHAR(n) when udt_name='varchar' with character_maximum_length
// - CHAR(n) when udt_name='bpchar' with character_maximum_length
// - NUMERIC(p,s) when udt_name='numeric' with precision and scale
// - NUMERIC(p) when udt_name='numeric' with precision only
// - <TYPE>[] for array types (udt_name starting with '_', e.g., INT[] )
// - Otherwise UPPER(udt_name), e.g., TEXT, BIGINT, TIMESTAMPTZ
// Examples: "TEXT", "BIGINT", "VARCHAR(255)", "TIMESTAMPTZ", "NUMERIC(14,4)"
string data_type = 2;
// True if information_schema reports the column as nullable.
bool is_nullable = 3;
// True if the column is part of the table's PRIMARY KEY.
// Typically true for the "id" column created by the system.
bool is_primary_key = 4;
}
service TableStructureService {
rpc GetTableStructure (GetTableStructureRequest) returns (TableStructureResponse);
}

View File

@@ -22,7 +22,8 @@ message FieldValidation {
// Current: only CharacterLimits. More rules can be added later.
CharacterLimits limits = 10;
// Future expansion:
// PatternRules pattern = 11;
PatternRules pattern = 11; // Validation 2
optional CustomFormatter formatter = 14; // Validation 4 custom formatting logic
DisplayMask mask = 3;
// ExternalValidation external = 13;
// CustomFormatter formatter = 14;
@@ -52,18 +53,50 @@ message CharacterLimits {
// Mask for pretty display
message DisplayMask {
string pattern = 1; // e.g., "(###) ###-####" or "####-##-##"
string input_char = 2; // e.g., "#"
string pattern = 1; // e.g., "(###) ###-####" or "####-##-##"
string input_char = 2; // e.g., "#"
optional string template_char = 3; // e.g., "_"
}
// One positionbased validation rule, similar to CharacterFilter + PositionRange
message PatternRule {
// Range descriptor: how far the rule applies
// Examples:
// - "0" → Single position 0
// - "0-3" → Range 0..3 inclusive
// - "from:5" → From position 5 onward
// - "0,2,5" → Multiple discrete positions
string range = 1;
// Character filter type, caseinsensitive keywords:
// "ALPHABETIC", "NUMERIC", "ALPHANUMERIC",
// "ONEOF(<chars>)", "EXACT(:)", "CUSTOM(<name>)"
string filter = 2;
}
message CustomFormatter {
// Formatter type identifier; handled clientside.
// Examples: "PSCFormatter", "PhoneFormatter", "CreditCardFormatter", "DateFormatter"
string type = 1;
// Optional freetext note or parameters (e.g. locale, pattern)
optional string description = 2;
}
// Collection of pattern rules for one field
message PatternRules {
// All rules that make up the validation logic
repeated PatternRule rules = 1;
// Optional humanreadable description for UI/debug purposes
optional string description = 2;
}
// Service to fetch validations for a table
service TableValidationService {
rpc GetTableValidation(GetTableValidationRequest)
returns (TableValidationResponse);
rpc GetTableValidation(GetTableValidationRequest) returns (TableValidationResponse);
rpc UpdateFieldValidation(UpdateFieldValidationRequest)
returns (UpdateFieldValidationResponse);
rpc UpdateFieldValidation(UpdateFieldValidationRequest) returns (UpdateFieldValidationResponse);
}
message UpdateFieldValidationRequest {

View File

@@ -5,67 +5,220 @@ package komp_ac.tables_data;
import "common.proto";
import "google/protobuf/struct.proto";
// Read and write row data for user-defined tables inside profiles (schemas).
// Operations are performed against the physical PostgreSQL table that
// corresponds to the logical table definition and are scoped by profile
// (schema). Deletions are soft (set deleted = true). Typed binding and
// script-based validation are enforced consistently.
service TablesData {
rpc PostTableData (PostTableDataRequest) returns (PostTableDataResponse);
rpc PutTableData (PutTableDataRequest) returns (PutTableDataResponse);
rpc DeleteTableData (DeleteTableDataRequest) returns (DeleteTableDataResponse);
// Insert a new row into a table with strict type binding and script validation.
//
// Behavior:
// - Validates that profile (schema) exists and table is defined for it
// - Validates provided columns exist (user-defined or allowed system/FK columns)
// - For columns targeted by scripts in this table, the client MUST provide the
// value, and it MUST equal the scripts calculated value (compared type-safely)
// - Binds values with correct SQL types, rejects invalid formats/ranges
// - Inserts the row and returns the new id; queues search indexing (best effort)
// - If the physical table is missing but the definition exists, returns INTERNAL
rpc PostTableData(PostTableDataRequest) returns (PostTableDataResponse);
// Update existing row data with strict type binding and script validation.
//
// Behavior:
// - Validates profile and table, and that the record exists
// - If request data is empty, returns success without changing the row
// - For columns targeted by scripts:
// • If included in update, provided value must equal the script result
// • If not included, update must not cause the script result to differ
// from the current stored value; otherwise FAILED_PRECONDITION is returned
// - Binds values with correct SQL types; rejects invalid formats/ranges
// - Updates the row and returns the id; queues search indexing (best effort)
rpc PutTableData(PutTableDataRequest) returns (PutTableDataResponse);
// Soft-delete a single record (sets deleted = true) if it exists and is not already deleted.
//
// Behavior:
// - Validates profile and table definition
// - Updates only rows with deleted = false
// - success = true means a row was actually changed; false means nothing to delete
// - If the physical table is missing but the definition exists, returns INTERNAL
rpc DeleteTableData(DeleteTableDataRequest) returns (DeleteTableDataResponse);
// Fetch a single non-deleted row by id as textified values.
//
// Behavior:
// - Validates profile and table definition
// - Returns all columns as strings (COALESCE(col::TEXT, '') AS col)
// including: id, deleted, all user-defined columns, and FK columns
// named "<linked_table>_id" for each table link
// - Fails with NOT_FOUND if record does not exist or is soft-deleted
// - If the physical table is missing but the definition exists, returns INTERNAL
rpc GetTableData(GetTableDataRequest) returns (GetTableDataResponse);
// Count non-deleted rows in a table.
//
// Behavior:
// - Validates profile and table definition
// - Returns komp_ac.common.CountResponse.count with rows where deleted = FALSE
// - If the physical table is missing but the definition exists, returns INTERNAL
rpc GetTableDataCount(GetTableDataCountRequest) returns (komp_ac.common.CountResponse);
// Fetch the N-th non-deleted row by id order (1-based), then return its full data.
//
// Behavior:
// - position is 1-based (position = 1 → first row by id ASC with deleted = FALSE)
// - Returns NOT_FOUND if position is out of bounds
// - Otherwise identical to GetTableData for the selected id
rpc GetTableDataByPosition(GetTableDataByPositionRequest) returns (GetTableDataResponse);
}
// Insert a new row.
message PostTableDataRequest {
// Required. Profile (PostgreSQL schema) name that owns the table.
// Must exist in the schemas table.
string profile_name = 1;
// Required. Logical table (definition) name within the profile.
// Must exist in table_definitions for the given profile.
string table_name = 2;
// Required. Key-value data for columns to insert.
//
// Allowed keys:
// - User-defined columns from the table definition
// - System/FK columns:
// • "deleted" (BOOLEAN), optional; default FALSE if not provided
// • "<linked_table>_id" (BIGINT) for each table link
//
// Type expectations by SQL type:
// - TEXT: string value; empty string is treated as NULL
// - BOOLEAN: bool value
// - TIMESTAMPTZ: ISO 8601/RFC 3339 string (parsed to TIMESTAMPTZ)
// - INTEGER: number with no fractional part and within i32 range
// - BIGINT: number with no fractional part and within i64 range
// - NUMERIC(p,s): string representation only; empty string becomes NULL
// (numbers for NUMERIC are rejected to avoid precision loss)
//
// Script validation rules:
// - If a script exists for a target column, that column MUST be present here,
// and its provided value MUST equal the scripts computed value (type-aware
// comparison, e.g., decimals are compared numerically).
//
// Notes:
// - Unknown/invalid column names are rejected
// - Some application-specific validations may apply (e.g., max length for
// certain fields like "telefon")
map<string, google.protobuf.Value> data = 3;
}
// Insert response.
message PostTableDataResponse {
// True if the insert succeeded.
bool success = 1;
// Human-readable message.
string message = 2;
// The id of the inserted row.
int64 inserted_id = 3;
}
// Update an existing row.
message PutTableDataRequest {
// Required. Profile (schema) name.
string profile_name = 1;
// Required. Table name within the profile.
string table_name = 2;
// Required. Id of the row to update.
int64 id = 3;
// Required. Columns to update (same typing rules as PostTableDataRequest.data).
//
// Special script rules:
// - If a script targets column X and X is included here, the value for X must
// equal the scripts result (type-aware).
// - If X is not included here but the update would cause the scripts result
// to change compared to the current stored value, the update is rejected with
// FAILED_PRECONDITION, instructing the caller to include X explicitly.
//
// Passing an empty map results in a no-op success response.
map<string, google.protobuf.Value> data = 4;
}
// Update response.
message PutTableDataResponse {
// True if the update succeeded (or no-op on empty data).
bool success = 1;
// Human-readable message.
string message = 2;
// The id of the updated row.
int64 updated_id = 3;
}
// Soft-delete a single row.
message DeleteTableDataRequest {
// Required. Profile (schema) name.
string profile_name = 1;
// Required. Table name within the profile.
string table_name = 2;
// Required. Row id to soft-delete.
int64 record_id = 3;
}
// Soft-delete response.
message DeleteTableDataResponse {
// True if a row was marked deleted (id existed and was not already deleted).
bool success = 1;
}
// Fetch a single non-deleted row by id.
message GetTableDataRequest {
// Required. Profile (schema) name.
string profile_name = 1;
// Required. Table name within the profile.
string table_name = 2;
// Required. Id of the row to fetch.
int64 id = 3;
}
// Row payload: all columns returned as strings.
message GetTableDataResponse {
// Map of column_name → stringified value for:
// - id, deleted
// - all user-defined columns from the table definition
// - FK columns named "<linked_table>_id" for each table link
//
// All values are returned as TEXT via col::TEXT and COALESCEed to empty string
// (NULL becomes ""). The row is returned only if deleted = FALSE.
map<string, string> data = 1;
}
// Count non-deleted rows.
message GetTableDataCountRequest {
string profile_name = 1;
string table_name = 2;
// Required. Profile (schema) name.
string profile_name = 1;
// Required. Table name within the profile.
string table_name = 2;
}
// Fetch by ordinal position among non-deleted rows (1-based).
message GetTableDataByPositionRequest {
// Required. Profile (schema) name.
string profile_name = 1;
// Required. Table name within the profile.
string table_name = 2;
// Required. 1-based position by id ascending among rows with deleted = FALSE.
int32 position = 3;
}

View File

@@ -5,57 +5,57 @@ package komp_ac.uctovnictvo;
import "common.proto";
service Uctovnictvo {
rpc PostUctovnictvo (PostUctovnictvoRequest) returns (UctovnictvoResponse);
rpc GetUctovnictvo (GetUctovnictvoRequest) returns (UctovnictvoResponse);
rpc GetUctovnictvoCount (common.Empty) returns (common.CountResponse);
rpc GetUctovnictvoByPosition (common.PositionRequest) returns (UctovnictvoResponse);
rpc PutUctovnictvo (PutUctovnictvoRequest) returns (UctovnictvoResponse);
rpc PostUctovnictvo(PostUctovnictvoRequest) returns (UctovnictvoResponse);
rpc GetUctovnictvo(GetUctovnictvoRequest) returns (UctovnictvoResponse);
rpc GetUctovnictvoCount(common.Empty) returns (common.CountResponse);
rpc GetUctovnictvoByPosition(common.PositionRequest) returns (UctovnictvoResponse);
rpc PutUctovnictvo(PutUctovnictvoRequest) returns (UctovnictvoResponse);
}
message PostUctovnictvoRequest {
int64 adresar_id = 1;
string c_dokladu = 2;
string datum = 3; // Use string for simplicity, or use google.protobuf.Timestamp for better date handling
string c_faktury = 4;
string obsah = 5;
string stredisko = 6;
string c_uctu = 7;
string md = 8;
string identif = 9;
string poznanka = 10;
string firma = 11;
int64 adresar_id = 1;
string c_dokladu = 2;
string datum = 3; // Use string for simplicity, or use google.protobuf.Timestamp for better date handling
string c_faktury = 4;
string obsah = 5;
string stredisko = 6;
string c_uctu = 7;
string md = 8;
string identif = 9;
string poznanka = 10;
string firma = 11;
}
message UctovnictvoResponse {
int64 id = 1;
int64 adresar_id = 2;
string c_dokladu = 3;
string datum = 4;
string c_faktury = 5;
string obsah = 6;
string stredisko = 7;
string c_uctu = 8;
string md = 9;
string identif = 10;
string poznanka = 11;
string firma = 12;
int64 id = 1;
int64 adresar_id = 2;
string c_dokladu = 3;
string datum = 4;
string c_faktury = 5;
string obsah = 6;
string stredisko = 7;
string c_uctu = 8;
string md = 9;
string identif = 10;
string poznanka = 11;
string firma = 12;
}
message PutUctovnictvoRequest {
int64 id = 1;
int64 adresar_id = 2;
string c_dokladu = 3;
string datum = 4;
string c_faktury = 5;
string obsah = 6;
string stredisko = 7;
string c_uctu = 8;
string md = 9;
string identif = 10;
string poznanka = 11;
string firma = 12;
int64 id = 1;
int64 adresar_id = 2;
string c_dokladu = 3;
string datum = 4;
string c_faktury = 5;
string obsah = 6;
string stredisko = 7;
string c_uctu = 8;
string md = 9;
string identif = 10;
string poznanka = 11;
string firma = 12;
}
message GetUctovnictvoRequest {
int64 id = 1;
int64 id = 1;
}

View File

@@ -37,7 +37,6 @@ pub mod proto {
pub mod table_validation {
include!("proto/komp_ac.table_validation.rs");
}
pub const FILE_DESCRIPTOR_SET: &[u8] =
include_bytes!("proto/descriptor.bin");
pub const FILE_DESCRIPTOR_SET: &[u8] = include_bytes!("proto/descriptor.bin");
}
}

Binary file not shown.

View File

@@ -1,10 +1,25 @@
// This file is @generated by prost-build.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ColumnConstraint {
#[prost(string, tag = "1")]
pub column: ::prost::alloc::string::String,
#[prost(string, tag = "2")]
pub query: ::prost::alloc::string::String,
#[prost(enumeration = "MatchMode", tag = "3")]
pub mode: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SearchRequest {
#[prost(string, tag = "1")]
pub table_name: ::prost::alloc::string::String,
#[prost(string, tag = "2")]
pub query: ::prost::alloc::string::String,
pub profile_name: ::prost::alloc::string::String,
#[prost(string, optional, tag = "2")]
pub table_name: ::core::option::Option<::prost::alloc::string::String>,
#[prost(string, tag = "3")]
pub free_query: ::prost::alloc::string::String,
#[prost(message, repeated, tag = "4")]
pub must: ::prost::alloc::vec::Vec<ColumnConstraint>,
#[prost(uint32, optional, tag = "5")]
pub limit: ::core::option::Option<u32>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SearchResponse {
@@ -22,6 +37,37 @@ pub mod search_response {
pub score: f32,
#[prost(string, tag = "3")]
pub content_json: ::prost::alloc::string::String,
#[prost(string, tag = "4")]
pub table_name: ::prost::alloc::string::String,
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum MatchMode {
Unspecified = 0,
Fuzzy = 1,
Exact = 2,
}
impl MatchMode {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Self::Unspecified => "MATCH_MODE_UNSPECIFIED",
Self::Fuzzy => "MATCH_MODE_FUZZY",
Self::Exact => "MATCH_MODE_EXACT",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"MATCH_MODE_UNSPECIFIED" => Some(Self::Unspecified),
"MATCH_MODE_FUZZY" => Some(Self::Fuzzy),
"MATCH_MODE_EXACT" => Some(Self::Exact),
_ => None,
}
}
}
/// Generated client implementations.
@@ -115,7 +161,7 @@ pub mod searcher_client {
self.inner = self.inner.max_encoding_message_size(limit);
self
}
pub async fn search_table(
pub async fn search(
&mut self,
request: impl tonic::IntoRequest<super::SearchRequest>,
) -> std::result::Result<tonic::Response<super::SearchResponse>, tonic::Status> {
@@ -129,11 +175,11 @@ pub mod searcher_client {
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/komp_ac.search.Searcher/SearchTable",
"/komp_ac.search.Searcher/Search",
);
let mut req = request.into_request();
req.extensions_mut()
.insert(GrpcMethod::new("komp_ac.search.Searcher", "SearchTable"));
.insert(GrpcMethod::new("komp_ac.search.Searcher", "Search"));
self.inner.unary(req, path, codec).await
}
}
@@ -151,7 +197,7 @@ pub mod searcher_server {
/// Generated trait containing gRPC methods that should be implemented for use with SearcherServer.
#[async_trait]
pub trait Searcher: std::marker::Send + std::marker::Sync + 'static {
async fn search_table(
async fn search(
&self,
request: tonic::Request<super::SearchRequest>,
) -> std::result::Result<tonic::Response<super::SearchResponse>, tonic::Status>;
@@ -232,11 +278,11 @@ pub mod searcher_server {
}
fn call(&mut self, req: http::Request<B>) -> Self::Future {
match req.uri().path() {
"/komp_ac.search.Searcher/SearchTable" => {
"/komp_ac.search.Searcher/Search" => {
#[allow(non_camel_case_types)]
struct SearchTableSvc<T: Searcher>(pub Arc<T>);
struct SearchSvc<T: Searcher>(pub Arc<T>);
impl<T: Searcher> tonic::server::UnaryService<super::SearchRequest>
for SearchTableSvc<T> {
for SearchSvc<T> {
type Response = super::SearchResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
@@ -248,7 +294,7 @@ pub mod searcher_server {
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
<T as Searcher>::search_table(&inner, request).await
<T as Searcher>::search(&inner, request).await
};
Box::pin(fut)
}
@@ -259,7 +305,7 @@ pub mod searcher_server {
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let method = SearchTableSvc(inner);
let method = SearchSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(

View File

@@ -1,80 +1,199 @@
// This file is @generated by prost-build.
/// A single link to another table within the same profile (schema).
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TableLink {
/// Name of an existing table within the same profile to link to.
/// For each link, a "<linked>_id" column is created on the new table.
/// That column references "<linked>"(id) and adds an index automatically.
#[prost(string, tag = "1")]
pub linked_table_name: ::prost::alloc::string::String,
/// If true, the generated foreign key column is NOT NULL.
/// Otherwise the column allows NULL.
/// Duplicate links to the same target table in one request are rejected.
#[prost(bool, tag = "2")]
pub required: bool,
}
/// Defines the input for creating a new table definition.
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PostTableDefinitionRequest {
/// Table name to create inside the target profile.
/// Must be lowercase, alphanumeric with underscores,
/// start with a letter, and be <= 63 chars.
/// Forbidden names: "id", "deleted", "created_at", or ending in "_id".
#[prost(string, tag = "1")]
pub table_name: ::prost::alloc::string::String,
/// List of links (foreign keys) to existing tables in the same profile.
/// Each will automatically get a "<linked>_id" column and an index.
#[prost(message, repeated, tag = "2")]
pub links: ::prost::alloc::vec::Vec<TableLink>,
/// List of user-defined columns (adds to system/id/fk columns).
#[prost(message, repeated, tag = "3")]
pub columns: ::prost::alloc::vec::Vec<ColumnDefinition>,
/// List of column names to be indexed (must match existing user-defined columns).
/// Indexes can target only user-defined columns; system columns ("id", "deleted",
/// "created_at") and automatically generated foreign key ("*_id") columns already
/// have indexes. Requests trying to index those columns are rejected.
#[prost(string, repeated, tag = "4")]
pub indexes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// Name of profile (Postgres schema) where the table will be created.
/// Same naming rules as table_name; cannot collide with reserved schemas
/// like "public", "information_schema", or ones starting with "pg_".
#[prost(string, tag = "5")]
pub profile_name: ::prost::alloc::string::String,
}
/// Describes one user-defined column for a table.
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ColumnDefinition {
/// Column name that follows the same validation rules as table_name.
/// Must be lowercase, start with a letter, no uppercase characters,
/// and cannot be "id", "deleted", "created_at", or end with "_id".
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// Logical column type. Supported values (case-insensitive):
/// TEXT / STRING
/// BOOLEAN
/// TIMESTAMP / TIMESTAMPTZ / TIME
/// MONEY (= NUMERIC(14,4))
/// INTEGER / INT
/// BIGINTEGER / BIGINT
/// DATE
/// DECIMAL(p,s) → NUMERIC(p,s)
/// DECIMAL args must be integers (no sign, no dot, no leading zeros);
/// s ≤ p and p ≥ 1.
#[prost(string, tag = "2")]
pub field_type: ::prost::alloc::string::String,
}
/// Response after table creation (success + DDL preview).
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TableDefinitionResponse {
/// True if all DB changes and metadata inserts succeeded.
#[prost(bool, tag = "1")]
pub success: bool,
/// The actual SQL executed: CREATE TABLE + CREATE INDEX statements.
#[prost(string, tag = "2")]
pub sql: ::prost::alloc::string::String,
}
/// Describes the tree of all profiles and their tables.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ProfileTreeResponse {
/// All profiles in the system.
#[prost(message, repeated, tag = "1")]
pub profiles: ::prost::alloc::vec::Vec<profile_tree_response::Profile>,
}
/// Nested message and enum types in `ProfileTreeResponse`.
pub mod profile_tree_response {
/// Table entry in a profile.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Table {
/// Internal ID from table_definitions.id (metadata record).
#[prost(int64, tag = "1")]
pub id: i64,
/// Table name within the profile (schema).
#[prost(string, tag = "2")]
pub name: ::prost::alloc::string::String,
/// Other tables this one references (based on link definitions only).
#[prost(string, repeated, tag = "3")]
pub depends_on: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Profile (schema) entry.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Profile {
/// Name of the schema/profile (as stored in `schemas.name`).
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// All tables in that schema and their dependencies.
#[prost(message, repeated, tag = "2")]
pub tables: ::prost::alloc::vec::Vec<Table>,
}
}
/// Request to fetch all tables, columns and scripts for a profile.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteTableRequest {
pub struct GetProfileDetailsRequest {
/// Profile (schema) name to fetch details for.
#[prost(string, tag = "1")]
pub profile_name: ::prost::alloc::string::String,
}
/// Response with all tables, columns and scripts for a profile.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetProfileDetailsResponse {
#[prost(string, tag = "1")]
pub profile_name: ::prost::alloc::string::String,
#[prost(message, repeated, tag = "2")]
pub tables: ::prost::alloc::vec::Vec<TableDetail>,
}
/// Describes a table with its columns and associated scripts.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TableDetail {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
#[prost(int64, tag = "2")]
pub id: i64,
#[prost(message, repeated, tag = "3")]
pub columns: ::prost::alloc::vec::Vec<ColumnDefinition>,
#[prost(message, repeated, tag = "4")]
pub scripts: ::prost::alloc::vec::Vec<ScriptInfo>,
}
/// A script that targets a specific column in a table.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ScriptInfo {
#[prost(int64, tag = "1")]
pub script_id: i64,
#[prost(string, tag = "2")]
pub target_column: ::prost::alloc::string::String,
#[prost(string, tag = "3")]
pub target_column_type: ::prost::alloc::string::String,
#[prost(string, tag = "4")]
pub script: ::prost::alloc::string::String,
#[prost(string, tag = "5")]
pub description: ::prost::alloc::string::String,
}
/// Request to rename one user-visible column alias in a table.
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RenameColumnAliasRequest {
#[prost(string, tag = "1")]
pub profile_name: ::prost::alloc::string::String,
#[prost(string, tag = "2")]
pub table_name: ::prost::alloc::string::String,
#[prost(string, tag = "3")]
pub old_column_name: ::prost::alloc::string::String,
#[prost(string, tag = "4")]
pub new_column_name: ::prost::alloc::string::String,
}
/// Response after renaming one column alias.
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteTableResponse {
pub struct RenameColumnAliasResponse {
#[prost(bool, tag = "1")]
pub success: bool,
#[prost(string, tag = "2")]
pub message: ::prost::alloc::string::String,
}
/// Request to delete one table definition entirely.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteTableRequest {
/// Profile (schema) name owning the table (must exist).
#[prost(string, tag = "1")]
pub profile_name: ::prost::alloc::string::String,
/// Table to drop (must exist in the profile).
/// Executes DROP TABLE "profile"."table" CASCADE and then removes metadata.
#[prost(string, tag = "2")]
pub table_name: ::prost::alloc::string::String,
}
/// Response after table deletion.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteTableResponse {
/// True if table and metadata were successfully deleted in one transaction.
#[prost(bool, tag = "1")]
pub success: bool,
/// Human-readable summary of what was removed.
#[prost(string, tag = "2")]
pub message: ::prost::alloc::string::String,
}
/// Generated client implementations.
pub mod table_definition_client {
#![allow(
@@ -86,6 +205,10 @@ pub mod table_definition_client {
)]
use tonic::codegen::*;
use tonic::codegen::http::Uri;
/// The TableDefinition service manages the entire lifecycle of user-defined
/// tables (stored as both metadata and physical PostgreSQL tables) inside
/// logical "profiles" (schemas). Each table has stored structure, links, and
/// validation rules.
#[derive(Debug, Clone)]
pub struct TableDefinitionClient<T> {
inner: tonic::client::Grpc<T>,
@@ -166,6 +289,9 @@ pub mod table_definition_client {
self.inner = self.inner.max_encoding_message_size(limit);
self
}
/// Creates a new table (and schema if missing) with system columns,
/// linked-table foreign keys, user-defined columns, and optional indexes.
/// Also inserts metadata and default validation rules. Entirely transactional.
pub async fn post_table_definition(
&mut self,
request: impl tonic::IntoRequest<super::PostTableDefinitionRequest>,
@@ -195,6 +321,8 @@ pub mod table_definition_client {
);
self.inner.unary(req, path, codec).await
}
/// Lists all profiles (schemas) and their tables with declared dependencies.
/// This provides a tree-like overview of table relationships.
pub async fn get_profile_tree(
&mut self,
request: impl tonic::IntoRequest<super::super::common::Empty>,
@@ -224,6 +352,68 @@ pub mod table_definition_client {
);
self.inner.unary(req, path, codec).await
}
/// Fetches all tables with their columns and scripts for a specific profile.
/// Pure data retrieval - no business logic.
pub async fn get_profile_details(
    &mut self,
    request: impl tonic::IntoRequest<super::GetProfileDetailsRequest>,
) -> std::result::Result<
    tonic::Response<super::GetProfileDetailsResponse>,
    tonic::Status,
> {
    // Wait for the underlying channel to be ready before issuing the call.
    self.inner
        .ready()
        .await
        .map_err(|e| {
            tonic::Status::unknown(
                format!("Service was not ready: {}", e.into()),
            )
        })?;
    let codec = tonic::codec::ProstCodec::default();
    // Fully-qualified gRPC method path for this unary call.
    let path = http::uri::PathAndQuery::from_static(
        "/komp_ac.table_definition.TableDefinition/GetProfileDetails",
    );
    let mut req = request.into_request();
    // Attach service/method metadata so interceptors and tracing can see it.
    req.extensions_mut()
        .insert(
            GrpcMethod::new(
                "komp_ac.table_definition.TableDefinition",
                "GetProfileDetails",
            ),
        );
    self.inner.unary(req, path, codec).await
}
/// Renames a user-visible column alias while keeping the physical column unchanged.
pub async fn rename_column_alias(
    &mut self,
    request: impl tonic::IntoRequest<super::RenameColumnAliasRequest>,
) -> std::result::Result<
    tonic::Response<super::RenameColumnAliasResponse>,
    tonic::Status,
> {
    // Wait for the underlying channel to be ready before issuing the call.
    self.inner
        .ready()
        .await
        .map_err(|e| {
            tonic::Status::unknown(
                format!("Service was not ready: {}", e.into()),
            )
        })?;
    let codec = tonic::codec::ProstCodec::default();
    // Fully-qualified gRPC method path for this unary call.
    let path = http::uri::PathAndQuery::from_static(
        "/komp_ac.table_definition.TableDefinition/RenameColumnAlias",
    );
    let mut req = request.into_request();
    // Attach service/method metadata so interceptors and tracing can see it.
    req.extensions_mut()
        .insert(
            GrpcMethod::new(
                "komp_ac.table_definition.TableDefinition",
                "RenameColumnAlias",
            ),
        );
    self.inner.unary(req, path, codec).await
}
/// Drops a table and its metadata, then deletes the profile if it becomes empty.
pub async fn delete_table(
&mut self,
request: impl tonic::IntoRequest<super::DeleteTableRequest>,
@@ -268,6 +458,9 @@ pub mod table_definition_server {
/// Generated trait containing gRPC methods that should be implemented for use with TableDefinitionServer.
#[async_trait]
pub trait TableDefinition: std::marker::Send + std::marker::Sync + 'static {
/// Creates a new table (and schema if missing) with system columns,
/// linked-table foreign keys, user-defined columns, and optional indexes.
/// Also inserts metadata and default validation rules. Entirely transactional.
async fn post_table_definition(
&self,
request: tonic::Request<super::PostTableDefinitionRequest>,
@@ -275,6 +468,8 @@ pub mod table_definition_server {
tonic::Response<super::TableDefinitionResponse>,
tonic::Status,
>;
/// Lists all profiles (schemas) and their tables with declared dependencies.
/// This provides a tree-like overview of table relationships.
async fn get_profile_tree(
&self,
request: tonic::Request<super::super::common::Empty>,
@@ -282,6 +477,24 @@ pub mod table_definition_server {
tonic::Response<super::ProfileTreeResponse>,
tonic::Status,
>;
/// Fetches all tables with their columns and scripts for a specific profile.
/// Pure data retrieval - no business logic.
async fn get_profile_details(
&self,
request: tonic::Request<super::GetProfileDetailsRequest>,
) -> std::result::Result<
tonic::Response<super::GetProfileDetailsResponse>,
tonic::Status,
>;
/// Renames a user-visible column alias while keeping the physical column unchanged.
async fn rename_column_alias(
&self,
request: tonic::Request<super::RenameColumnAliasRequest>,
) -> std::result::Result<
tonic::Response<super::RenameColumnAliasResponse>,
tonic::Status,
>;
/// Drops a table and its metadata, then deletes the profile if it becomes empty.
async fn delete_table(
&self,
request: tonic::Request<super::DeleteTableRequest>,
@@ -290,6 +503,10 @@ pub mod table_definition_server {
tonic::Status,
>;
}
/// The TableDefinition service manages the entire lifecycle of user-defined
/// tables (stored as both metadata and physical PostgreSQL tables) inside
/// logical "profiles" (schemas). Each table has stored structure, links, and
/// validation rules.
#[derive(Debug)]
pub struct TableDefinitionServer<T> {
inner: Arc<T>,
@@ -461,6 +678,98 @@ pub mod table_definition_server {
};
Box::pin(fut)
}
"/komp_ac.table_definition.TableDefinition/GetProfileDetails" => {
#[allow(non_camel_case_types)]
struct GetProfileDetailsSvc<T: TableDefinition>(pub Arc<T>);
impl<
T: TableDefinition,
> tonic::server::UnaryService<super::GetProfileDetailsRequest>
for GetProfileDetailsSvc<T> {
type Response = super::GetProfileDetailsResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::GetProfileDetailsRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
<T as TableDefinition>::get_profile_details(&inner, request)
.await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let method = GetProfileDetailsSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/komp_ac.table_definition.TableDefinition/RenameColumnAlias" => {
#[allow(non_camel_case_types)]
struct RenameColumnAliasSvc<T: TableDefinition>(pub Arc<T>);
impl<
T: TableDefinition,
> tonic::server::UnaryService<super::RenameColumnAliasRequest>
for RenameColumnAliasSvc<T> {
type Response = super::RenameColumnAliasResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::RenameColumnAliasRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
<T as TableDefinition>::rename_column_alias(&inner, request)
.await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let method = RenameColumnAliasSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/komp_ac.table_definition.TableDefinition/DeleteTable" => {
#[allow(non_camel_case_types)]
struct DeleteTableSvc<T: TableDefinition>(pub Arc<T>);

View File

@@ -1,23 +1,114 @@
// This file is @generated by prost-build.
/// Request to create or update a script bound to a specific table and column.
/// See TableScript::post_table_script for the full validation behavior.
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PostTableScriptRequest {
    /// Required. The metadata ID from table_definitions.id that identifies the
    /// table this script belongs to. The table must exist; its schema determines
    /// where referenced tables/columns are validated and where dependencies are stored.
    #[prost(int64, tag = "1")]
    pub table_definition_id: i64,
    /// Required. The target column in the target table that this script computes.
    /// Must be an existing user-defined column in that table (not a system column).
    /// System columns are reserved: "id", "deleted", "created_at".
    /// The column's data type must NOT be one of the prohibited target types:
    /// BIGINT, DATE, TIMESTAMPTZ
    /// Note: BOOLEAN targets are allowed (values are converted to Steel #true/#false).
    #[prost(string, tag = "2")]
    pub target_column: ::prost::alloc::string::String,
    /// Required. The script in the Steel DSL (S-expression style).
    /// Syntax requirements:
    /// - Non-empty, must start with '('
    /// - Balanced parentheses
    ///
    /// Referencing data:
    /// - Structured table/column access (enforces link constraints):
    ///   (steel_get_column "table_name" "column_name")
    ///   (steel_get_column_with_index "table_name" index "column_name")
    ///   • index must be a non-negative integer literal
    ///   • self-references are allowed without links
    ///   • other tables require an explicit link from the source table
    ///     (table_definition_links) or the request fails
    /// - Raw SQL access (no link required, but still validated):
    ///   (steel_query_sql "SELECT ...")
    ///   • Basic checks disallow operations that imply prohibited types,
    ///     e.g., EXTRACT(…), DATE_PART(…), ::DATE, ::TIMESTAMPTZ, ::BIGINT, CAST(…)
    /// - Self variable access in transformed scripts:
    ///   (get-var "column_name") is treated as referencing the current table
    ///
    /// Math operations:
    /// - The script is transformed by steel_decimal; supported math forms include:
    ///   +, -, *, /, ^, **, pow, sqrt, >, <, =, >=, <=, min, max, abs, round,
    ///   ln, log, log10, exp, sin, cos, tan
    /// - Columns of the following types CANNOT be used inside math expressions:
    ///   BIGINT, TEXT, BOOLEAN, DATE, TIMESTAMPTZ
    ///
    /// Dependency tracking and cycles:
    /// - Dependencies are extracted from steel_get_column(_with_index), get-var,
    ///   and steel_query_sql and stored in script_dependencies with context
    /// - Cycles across tables are rejected (self-dependency is allowed)
    #[prost(string, tag = "3")]
    pub script: ::prost::alloc::string::String,
    /// Optional. Free-text description stored alongside the script (no functional effect).
    #[prost(string, tag = "4")]
    pub description: ::prost::alloc::string::String,
}
/// Response after creating or updating a script (returned by post_table_script).
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TableScriptResponse {
    /// The ID of the script record in table_scripts (new or existing on upsert).
    #[prost(int64, tag = "1")]
    pub id: i64,
    /// Human-readable warnings concatenated into a single string. Possible messages:
    /// - Warning if the script references itself (may affect first population)
    /// - Count of raw SQL queries present
    /// - Info about number of structured linked-table accesses
    /// - Warning if many dependencies may affect performance
    #[prost(string, tag = "2")]
    pub warnings: ::prost::alloc::string::String,
}
/// Request to list all stored scripts for one table (see get_table_scripts).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetTableScriptsRequest {
    /// Required. Profile (schema) name.
    #[prost(string, tag = "1")]
    pub profile_name: ::prost::alloc::string::String,
    /// Required. Table name within the profile.
    #[prost(string, tag = "2")]
    pub table_name: ::prost::alloc::string::String,
}
/// Response carrying every stored script of the requested table.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetTableScriptsResponse {
    /// All scripts of the table; empty when the table has no scripts.
    #[prost(message, repeated, tag = "1")]
    pub scripts: ::prost::alloc::vec::Vec<StoredTableScript>,
}
/// One stored (already transformed) script attached to a table, together with
/// its normalized dependency metadata, as returned by GetTableScripts.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StoredTableScript {
    /// ID of the script record in table_scripts.
    #[prost(int64, tag = "1")]
    pub id: i64,
    /// Target column in the owning table that this script computes.
    /// NOTE(review): the snippet carried a stray `warnings` field (duplicated
    /// from TableScriptResponse) on tag 2 and no prost attribute on this
    /// field, which prost-derive rejects; tag 2 is restored here to match the
    /// 1..6 sequence of the remaining fields — confirm against the .proto.
    #[prost(string, tag = "2")]
    pub target_column: ::prost::alloc::string::String,
    /// SQL data type of the target column — presumably the normalized type
    /// string (e.g., "NUMERIC(14,4)"); confirm against the server.
    #[prost(string, tag = "3")]
    pub target_column_type: ::prost::alloc::string::String,
    /// The stored, transformed Steel script text.
    #[prost(string, tag = "4")]
    pub script: ::prost::alloc::string::String,
    /// Free-text description saved with the script.
    #[prost(string, tag = "5")]
    pub description: ::prost::alloc::string::String,
    /// Normalized dependencies extracted from the script.
    #[prost(message, repeated, tag = "6")]
    pub dependencies: ::prost::alloc::vec::Vec<ScriptDependency>,
}
/// One normalized dependency extracted from a script (from
/// steel_get_column(_with_index), get-var, or steel_query_sql — see the
/// dependency-tracking notes on PostTableScriptRequest.script).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ScriptDependency {
    /// Table the script reads from.
    #[prost(string, tag = "1")]
    pub target_table: ::prost::alloc::string::String,
    /// Kind of access — presumably distinguishes structured column access,
    /// self-variable access, and raw SQL; confirm exact values against the server.
    #[prost(string, tag = "2")]
    pub dependency_type: ::prost::alloc::string::String,
    /// Referenced column name (likely empty for raw-SQL dependencies — TODO confirm).
    #[prost(string, tag = "3")]
    pub column: ::prost::alloc::string::String,
    /// Row index used by steel_get_column_with_index — TODO confirm the
    /// sentinel value used when no index applies.
    #[prost(int64, tag = "4")]
    pub index: i64,
    /// Raw SQL fragment for steel_query_sql dependencies — TODO confirm it is
    /// empty for structured accesses.
    #[prost(string, tag = "5")]
    pub query_fragment: ::prost::alloc::string::String,
}
/// Generated client implementations.
pub mod table_script_client {
@@ -30,6 +121,18 @@ pub mod table_script_client {
)]
use tonic::codegen::*;
use tonic::codegen::http::Uri;
/// Manages column-computation scripts for user-defined tables.
/// Each script belongs to a single table (table_definition_id) and populates
/// exactly one target column in that table. The server:
/// - Validates script syntax (non-empty, balanced parentheses, starts with '(')
/// - Validates the target column (exists, not a system column, allowed type)
/// - Validates column/type usage inside math expressions
/// - Validates referenced tables/columns against the schema
/// - Enforces link constraints for structured access (see notes below)
/// - Analyzes dependencies and prevents cycles across the schema
/// - Transforms the script to decimal-safe math (steel_decimal)
/// - Upserts into table_scripts and records dependencies in script_dependencies
/// The whole operation is transactional.
#[derive(Debug, Clone)]
pub struct TableScriptClient<T> {
inner: tonic::client::Grpc<T>,
@@ -110,6 +213,23 @@ pub mod table_script_client {
self.inner = self.inner.max_encoding_message_size(limit);
self
}
/// Create or update a script for a specific table and target column.
///
/// Behavior:
/// - Fetches the table by table_definition_id (must exist)
/// - Validates "script" (syntax), "target_column" (exists and type rules),
/// and all referenced tables/columns (must exist in same schema)
/// - Validates math operations: prohibits using certain data types in math
/// - Enforces link constraints for structured table access:
/// • Allowed always: self-references (same table)
/// • Structured access via steel_get_column / steel_get_column_with_index
/// requires an explicit link in table_definition_links
/// • Raw SQL access via steel_query_sql is permitted (still validated)
/// - Detects and rejects circular dependencies across all scripts in the schema
/// (self-references are allowed and not treated as cycles)
/// - Transforms the script to decimal-safe operations (steel_decimal)
/// - UPSERTS into table_scripts on (table_definitions_id, target_column)
/// and saves a normalized dependency list into script_dependencies
pub async fn post_table_script(
&mut self,
request: impl tonic::IntoRequest<super::PostTableScriptRequest>,
@@ -139,6 +259,42 @@ pub mod table_script_client {
);
self.inner.unary(req, path, codec).await
}
/// Fetch all stored scripts for a specific table.
///
/// Behavior:
/// - Resolves the table from (profile_name, table_name)
/// - Returns the stored, transformed script from table_scripts
/// - Includes normalized dependency metadata from script_dependencies
/// - Returns an empty scripts list when the table has no scripts
pub async fn get_table_scripts(
    &mut self,
    request: impl tonic::IntoRequest<super::GetTableScriptsRequest>,
) -> std::result::Result<
    tonic::Response<super::GetTableScriptsResponse>,
    tonic::Status,
> {
    // Wait for the underlying channel to be ready before issuing the call.
    self.inner
        .ready()
        .await
        .map_err(|e| {
            tonic::Status::unknown(
                format!("Service was not ready: {}", e.into()),
            )
        })?;
    let codec = tonic::codec::ProstCodec::default();
    // Fully-qualified gRPC method path for this unary call.
    let path = http::uri::PathAndQuery::from_static(
        "/komp_ac.table_script.TableScript/GetTableScripts",
    );
    let mut req = request.into_request();
    // Attach service/method metadata so interceptors and tracing can see it.
    req.extensions_mut()
        .insert(
            GrpcMethod::new(
                "komp_ac.table_script.TableScript",
                "GetTableScripts",
            ),
        );
    self.inner.unary(req, path, codec).await
}
}
}
/// Generated server implementations.
@@ -154,6 +310,23 @@ pub mod table_script_server {
/// Generated trait containing gRPC methods that should be implemented for use with TableScriptServer.
#[async_trait]
pub trait TableScript: std::marker::Send + std::marker::Sync + 'static {
/// Create or update a script for a specific table and target column.
///
/// Behavior:
/// - Fetches the table by table_definition_id (must exist)
/// - Validates "script" (syntax), "target_column" (exists and type rules),
/// and all referenced tables/columns (must exist in same schema)
/// - Validates math operations: prohibits using certain data types in math
/// - Enforces link constraints for structured table access:
/// • Allowed always: self-references (same table)
/// • Structured access via steel_get_column / steel_get_column_with_index
/// requires an explicit link in table_definition_links
/// • Raw SQL access via steel_query_sql is permitted (still validated)
/// - Detects and rejects circular dependencies across all scripts in the schema
/// (self-references are allowed and not treated as cycles)
/// - Transforms the script to decimal-safe operations (steel_decimal)
/// - UPSERTS into table_scripts on (table_definitions_id, target_column)
/// and saves a normalized dependency list into script_dependencies
async fn post_table_script(
&self,
request: tonic::Request<super::PostTableScriptRequest>,
@@ -161,7 +334,33 @@ pub mod table_script_server {
tonic::Response<super::TableScriptResponse>,
tonic::Status,
>;
/// Fetch all stored scripts for a specific table.
///
/// Behavior:
/// - Resolves the table from (profile_name, table_name)
/// - Returns the stored, transformed script from table_scripts
/// - Includes normalized dependency metadata from script_dependencies
/// - Returns an empty scripts list when the table has no scripts
async fn get_table_scripts(
&self,
request: tonic::Request<super::GetTableScriptsRequest>,
) -> std::result::Result<
tonic::Response<super::GetTableScriptsResponse>,
tonic::Status,
>;
}
/// Manages column-computation scripts for user-defined tables.
/// Each script belongs to a single table (table_definition_id) and populates
/// exactly one target column in that table. The server:
/// - Validates script syntax (non-empty, balanced parentheses, starts with '(')
/// - Validates the target column (exists, not a system column, allowed type)
/// - Validates column/type usage inside math expressions
/// - Validates referenced tables/columns against the schema
/// - Enforces link constraints for structured access (see notes below)
/// - Analyzes dependencies and prevents cycles across the schema
/// - Transforms the script to decimal-safe math (steel_decimal)
/// - Upserts into table_scripts and records dependencies in script_dependencies
/// The whole operation is transactional.
#[derive(Debug)]
pub struct TableScriptServer<T> {
inner: Arc<T>,
@@ -283,6 +482,51 @@ pub mod table_script_server {
};
Box::pin(fut)
}
"/komp_ac.table_script.TableScript/GetTableScripts" => {
#[allow(non_camel_case_types)]
struct GetTableScriptsSvc<T: TableScript>(pub Arc<T>);
impl<
T: TableScript,
> tonic::server::UnaryService<super::GetTableScriptsRequest>
for GetTableScriptsSvc<T> {
type Response = super::GetTableScriptsResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::GetTableScriptsRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
<T as TableScript>::get_table_scripts(&inner, request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let method = GetTableScriptsSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
_ => {
Box::pin(async move {
let mut response = http::Response::new(

View File

@@ -1,27 +1,56 @@
// This file is @generated by prost-build.
/// Request identifying the profile (schema) and the tables to inspect.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetTableStructureRequest {
    /// Required. Profile (PostgreSQL schema) name, e.g., "default".
    /// Must exist in `schemas`.
    #[prost(string, tag = "1")]
    pub profile_name: ::prost::alloc::string::String,
    /// Required. Table names within the profile, e.g., "2025_adresar6".
    /// Each must exist in `table_definitions` for the given profile. The
    /// physical tables are then introspected via information_schema.
    /// NOTE(review): the flattened diff also retained the pre-batching
    /// singular `table_name` field on the same tag 2, which prost-derive
    /// rejects as a duplicate tag; only the repeated form introduced by the
    /// batching change is kept — confirm against the .proto definition.
    #[prost(string, repeated, tag = "2")]
    pub table_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Batched response keyed by table name.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetTableStructureResponse {
    /// Per-table physical column lists keyed by requested table name.
    /// Iteration order of the map is unspecified; callers should look up the
    /// names they sent in the request.
    #[prost(map = "string, message", tag = "1")]
    pub table_structures: ::std::collections::HashMap<
        ::prost::alloc::string::String,
        TableStructureResponse,
    >,
}
/// Response with the ordered list of columns (by ordinal position) for one
/// table; used as the map value in GetTableStructureResponse.table_structures.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TableStructureResponse {
    /// Columns of the physical table, including system columns (id, deleted,
    /// created_at), user-defined columns, and any foreign-key columns such as
    /// "<linked_table>_id". May be empty if the physical table is missing.
    #[prost(message, repeated, tag = "1")]
    pub columns: ::prost::alloc::vec::Vec<TableColumn>,
}
/// One physical column entry as reported by information_schema.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TableColumn {
    /// Column name exactly as defined in PostgreSQL.
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// e.g., "TEXT", "BIGINT", "VARCHAR(255)", "TIMESTAMPTZ"
    /// Normalized data type string derived from information_schema:
    /// - VARCHAR(n) when udt_name='varchar' with character_maximum_length
    /// - CHAR(n) when udt_name='bpchar' with character_maximum_length
    /// - NUMERIC(p,s) when udt_name='numeric' with precision and scale
    /// - NUMERIC(p) when udt_name='numeric' with precision only
    /// - <TYPE>\[\] for array types (udt_name starting with '_', e.g., INT\[\] )
    /// - Otherwise UPPER(udt_name), e.g., TEXT, BIGINT, TIMESTAMPTZ
    /// Examples: "TEXT", "BIGINT", "VARCHAR(255)", "TIMESTAMPTZ", "NUMERIC(14,4)"
    #[prost(string, tag = "2")]
    pub data_type: ::prost::alloc::string::String,
    /// True if information_schema reports the column as nullable.
    #[prost(bool, tag = "3")]
    pub is_nullable: bool,
    /// True if the column is part of the table's PRIMARY KEY.
    /// Typically true for the "id" column created by the system.
    #[prost(bool, tag = "4")]
    pub is_primary_key: bool,
}
@@ -36,6 +65,13 @@ pub mod table_structure_service_client {
)]
use tonic::codegen::*;
use tonic::codegen::http::Uri;
/// Introspects the physical PostgreSQL tables for one or more logical tables
/// (defined in table_definitions) and returns their column structures.
/// The server validates that:
/// - The profile (schema) exists in `schemas`
/// - Every table is defined for that profile in `table_definitions`
/// It then queries information_schema for the physical tables and returns
/// normalized column metadata.
#[derive(Debug, Clone)]
pub struct TableStructureServiceClient<T> {
inner: tonic::client::Grpc<T>,
@@ -116,11 +152,21 @@ pub mod table_structure_service_client {
self.inner = self.inner.max_encoding_message_size(limit);
self
}
/// Return the physical column list (name, normalized data_type,
/// nullability, primary key flag) for one or more tables in a profile.
///
/// Behavior:
/// - NOT_FOUND if profile doesn't exist in `schemas`
/// - NOT_FOUND if any table is not defined for that profile in `table_definitions`
/// - Queries information_schema.columns ordered by ordinal position
/// - Normalizes data_type text (details under TableColumn.data_type)
/// - Returns an error if any validated table has no visible columns in
/// information_schema (e.g., physical table missing)
pub async fn get_table_structure(
&mut self,
request: impl tonic::IntoRequest<super::GetTableStructureRequest>,
) -> std::result::Result<
tonic::Response<super::TableStructureResponse>,
tonic::Response<super::GetTableStructureResponse>,
tonic::Status,
> {
self.inner
@@ -160,14 +206,31 @@ pub mod table_structure_service_server {
/// Generated trait containing gRPC methods that should be implemented for use with TableStructureServiceServer.
#[async_trait]
pub trait TableStructureService: std::marker::Send + std::marker::Sync + 'static {
/// Return the physical column list (name, normalized data_type,
/// nullability, primary key flag) for one or more tables in a profile.
///
/// Behavior:
/// - NOT_FOUND if profile doesn't exist in `schemas`
/// - NOT_FOUND if any table is not defined for that profile in `table_definitions`
/// - Queries information_schema.columns ordered by ordinal position
/// - Normalizes data_type text (details under TableColumn.data_type)
/// - Returns an error if any validated table has no visible columns in
/// information_schema (e.g., physical table missing)
async fn get_table_structure(
&self,
request: tonic::Request<super::GetTableStructureRequest>,
) -> std::result::Result<
tonic::Response<super::TableStructureResponse>,
tonic::Response<super::GetTableStructureResponse>,
tonic::Status,
>;
}
/// Introspects the physical PostgreSQL tables for one or more logical tables
/// (defined in table_definitions) and returns their column structures.
/// The server validates that:
/// - The profile (schema) exists in `schemas`
/// - Every table is defined for that profile in `table_definitions`
/// It then queries information_schema for the physical tables and returns
/// normalized column metadata.
#[derive(Debug)]
pub struct TableStructureServiceServer<T> {
inner: Arc<T>,
@@ -252,7 +315,7 @@ pub mod table_structure_service_server {
T: TableStructureService,
> tonic::server::UnaryService<super::GetTableStructureRequest>
for GetTableStructureSvc<T> {
type Response = super::TableStructureResponse;
type Response = super::GetTableStructureResponse;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,

View File

@@ -26,7 +26,13 @@ pub struct FieldValidation {
#[prost(message, optional, tag = "10")]
pub limits: ::core::option::Option<CharacterLimits>,
/// Future expansion:
/// PatternRules pattern = 11;
///
/// Validation 2
#[prost(message, optional, tag = "11")]
pub pattern: ::core::option::Option<PatternRules>,
/// Validation 4 custom formatting logic
#[prost(message, optional, tag = "14")]
pub formatter: ::core::option::Option<CustomFormatter>,
#[prost(message, optional, tag = "3")]
pub mask: ::core::option::Option<DisplayMask>,
/// ExternalValidation external = 13;
@@ -65,6 +71,46 @@ pub struct DisplayMask {
#[prost(string, optional, tag = "3")]
pub template_char: ::core::option::Option<::prost::alloc::string::String>,
}
/// One position-based validation rule, similar to CharacterFilter + PositionRange.
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PatternRule {
    /// Range descriptor: how far the rule applies.
    /// Examples:
    /// - "0"      → Single position 0
    /// - "0-3"    → Range 0..3 inclusive
    /// - "from:5" → From position 5 onward
    /// - "0,2,5"  → Multiple discrete positions
    #[prost(string, tag = "1")]
    pub range: ::prost::alloc::string::String,
    /// Character filter type, case-insensitive keywords:
    /// "ALPHABETIC", "NUMERIC", "ALPHANUMERIC",
    /// "ONEOF(<chars>)", "EXACT(:)", "CUSTOM(<name>)"
    #[prost(string, tag = "2")]
    pub filter: ::prost::alloc::string::String,
}
/// Client-side formatter selection for a field (Validation 4).
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CustomFormatter {
    /// Formatter type identifier; handled client-side.
    /// Examples: "PSCFormatter", "PhoneFormatter", "CreditCardFormatter", "DateFormatter"
    #[prost(string, tag = "1")]
    pub r#type: ::prost::alloc::string::String,
    /// Optional free-text note or parameters (e.g. locale, pattern).
    #[prost(string, optional, tag = "2")]
    pub description: ::core::option::Option<::prost::alloc::string::String>,
}
/// Collection of pattern rules for one field.
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PatternRules {
    /// All rules that make up the validation logic.
    #[prost(message, repeated, tag = "1")]
    pub rules: ::prost::alloc::vec::Vec<PatternRule>,
    /// Optional human-readable description for UI/debug purposes.
    #[prost(string, optional, tag = "2")]
    pub description: ::core::option::Option<::prost::alloc::string::String>,
}
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateFieldValidationRequest {

View File

@@ -1,92 +1,170 @@
// This file is @generated by prost-build.
/// Insert a new row.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PostTableDataRequest {
    /// Required. Profile (PostgreSQL schema) name that owns the table.
    /// Must exist in the schemas table.
    #[prost(string, tag = "1")]
    pub profile_name: ::prost::alloc::string::String,
    /// Required. Logical table (definition) name within the profile.
    /// Must exist in table_definitions for the given profile.
    #[prost(string, tag = "2")]
    pub table_name: ::prost::alloc::string::String,
    /// Required. Key-value data for columns to insert.
    ///
    /// Allowed keys:
    /// - User-defined columns from the table definition
    /// - System/FK columns:
    ///   • "deleted" (BOOLEAN), optional; default FALSE if not provided
    ///   • "<linked_table>_id" (BIGINT) for each table link
    ///
    /// Type expectations by SQL type:
    /// - TEXT: string value; empty string is treated as NULL
    /// - BOOLEAN: bool value
    /// - TIMESTAMPTZ: ISO 8601/RFC 3339 string (parsed to TIMESTAMPTZ)
    /// - INTEGER: number with no fractional part and within i32 range
    /// - BIGINT: number with no fractional part and within i64 range
    /// - NUMERIC(p,s): string representation only; empty string becomes NULL
    ///   (numbers for NUMERIC are rejected to avoid precision loss)
    ///
    /// Script validation rules:
    /// - If a script exists for a target column, that column MUST be present here,
    ///   and its provided value MUST equal the script's computed value (type-aware
    ///   comparison, e.g., decimals are compared numerically).
    ///
    /// Notes:
    /// - Unknown/invalid column names are rejected
    /// - Some application-specific validations may apply (e.g., max length for
    ///   certain fields like "telefon")
    #[prost(map = "string, message", tag = "3")]
    pub data: ::std::collections::HashMap<
        ::prost::alloc::string::String,
        ::prost_types::Value,
    >,
}
/// Insert response (returned for PostTableDataRequest).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PostTableDataResponse {
    /// True if the insert succeeded.
    #[prost(bool, tag = "1")]
    pub success: bool,
    /// Human-readable message.
    #[prost(string, tag = "2")]
    pub message: ::prost::alloc::string::String,
    /// The id of the inserted row.
    #[prost(int64, tag = "3")]
    pub inserted_id: i64,
}
/// Update an existing row.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PutTableDataRequest {
    /// Required. Profile (schema) name.
    #[prost(string, tag = "1")]
    pub profile_name: ::prost::alloc::string::String,
    /// Required. Table name within the profile.
    #[prost(string, tag = "2")]
    pub table_name: ::prost::alloc::string::String,
    /// Required. Id of the row to update.
    #[prost(int64, tag = "3")]
    pub id: i64,
    /// Required. Columns to update (same typing rules as PostTableDataRequest.data).
    ///
    /// Special script rules:
    /// - If a script targets column X and X is included here, the value for X must
    ///   equal the script's result (type-aware).
    /// - If X is not included here but the update would cause the script's result
    ///   to change compared to the current stored value, the update is rejected with
    ///   FAILED_PRECONDITION, instructing the caller to include X explicitly.
    ///
    /// Passing an empty map results in a no-op success response.
    #[prost(map = "string, message", tag = "4")]
    pub data: ::std::collections::HashMap<
        ::prost::alloc::string::String,
        ::prost_types::Value,
    >,
}
/// Update response.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PutTableDataResponse {
/// True if the update succeeded (or no-op on empty data).
#[prost(bool, tag = "1")]
pub success: bool,
/// Human-readable message.
#[prost(string, tag = "2")]
pub message: ::prost::alloc::string::String,
/// The id of the updated row.
#[prost(int64, tag = "3")]
pub updated_id: i64,
}
/// Soft-delete a single row.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteTableDataRequest {
/// Required. Profile (schema) name.
#[prost(string, tag = "1")]
pub profile_name: ::prost::alloc::string::String,
/// Required. Table name within the profile.
#[prost(string, tag = "2")]
pub table_name: ::prost::alloc::string::String,
/// Required. Row id to soft-delete.
#[prost(int64, tag = "3")]
pub record_id: i64,
}
/// Soft-delete response.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteTableDataResponse {
/// True if a row was marked deleted (id existed and was not already deleted).
#[prost(bool, tag = "1")]
pub success: bool,
}
/// Fetch a single non-deleted row by id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetTableDataRequest {
/// Required. Profile (schema) name.
#[prost(string, tag = "1")]
pub profile_name: ::prost::alloc::string::String,
/// Required. Table name within the profile.
#[prost(string, tag = "2")]
pub table_name: ::prost::alloc::string::String,
/// Required. Id of the row to fetch.
#[prost(int64, tag = "3")]
pub id: i64,
}
/// Row payload: all columns returned as strings.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetTableDataResponse {
/// Map of column_name → stringified value for:
/// - id, deleted
/// - all user-defined columns from the table definition
/// - FK columns named "<linked_table>_id" for each table link
///
/// All values are returned as TEXT via col::TEXT and COALESCEed to empty string
/// (NULL becomes ""). The row is returned only if deleted = FALSE.
#[prost(map = "string, string", tag = "1")]
pub data: ::std::collections::HashMap<
::prost::alloc::string::String,
::prost::alloc::string::String,
>,
}
/// Count non-deleted rows.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetTableDataCountRequest {
/// Required. Profile (schema) name.
#[prost(string, tag = "1")]
pub profile_name: ::prost::alloc::string::String,
/// Required. Table name within the profile.
#[prost(string, tag = "2")]
pub table_name: ::prost::alloc::string::String,
}
/// Fetch by ordinal position among non-deleted rows (1-based).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetTableDataByPositionRequest {
/// Required. Profile (schema) name.
#[prost(string, tag = "1")]
pub profile_name: ::prost::alloc::string::String,
/// Required. Table name within the profile.
#[prost(string, tag = "2")]
pub table_name: ::prost::alloc::string::String,
/// Required. 1-based position by id ascending among rows with deleted = FALSE.
#[prost(int32, tag = "3")]
pub position: i32,
}
@@ -101,6 +179,11 @@ pub mod tables_data_client {
)]
use tonic::codegen::*;
use tonic::codegen::http::Uri;
/// Read and write row data for user-defined tables inside profiles (schemas).
/// Operations are performed against the physical PostgreSQL table that
/// corresponds to the logical table definition and are scoped by profile
/// (schema). Deletions are soft (set deleted = true). Typed binding and
/// script-based validation are enforced consistently.
#[derive(Debug, Clone)]
pub struct TablesDataClient<T> {
inner: tonic::client::Grpc<T>,
@@ -181,6 +264,16 @@ pub mod tables_data_client {
self.inner = self.inner.max_encoding_message_size(limit);
self
}
/// Insert a new row into a table with strict type binding and script validation.
///
/// Behavior:
/// - Validates that profile (schema) exists and table is defined for it
/// - Validates provided columns exist (user-defined or allowed system/FK columns)
/// - For columns targeted by scripts in this table, the client MUST provide the
/// value, and it MUST equal the scripts calculated value (compared type-safely)
/// - Binds values with correct SQL types, rejects invalid formats/ranges
/// - Inserts the row and returns the new id; queues search indexing (best effort)
/// - If the physical table is missing but the definition exists, returns INTERNAL
pub async fn post_table_data(
&mut self,
request: impl tonic::IntoRequest<super::PostTableDataRequest>,
@@ -207,6 +300,17 @@ pub mod tables_data_client {
);
self.inner.unary(req, path, codec).await
}
/// Update existing row data with strict type binding and script validation.
///
/// Behavior:
/// - Validates profile and table, and that the record exists
/// - If request data is empty, returns success without changing the row
/// - For columns targeted by scripts:
/// • If included in update, provided value must equal the script result
/// • If not included, update must not cause the script result to differ
/// from the current stored value; otherwise FAILED_PRECONDITION is returned
/// - Binds values with correct SQL types; rejects invalid formats/ranges
/// - Updates the row and returns the id; queues search indexing (best effort)
pub async fn put_table_data(
&mut self,
request: impl tonic::IntoRequest<super::PutTableDataRequest>,
@@ -233,6 +337,13 @@ pub mod tables_data_client {
);
self.inner.unary(req, path, codec).await
}
/// Soft-delete a single record (sets deleted = true) if it exists and is not already deleted.
///
/// Behavior:
/// - Validates profile and table definition
/// - Updates only rows with deleted = false
/// - success = true means a row was actually changed; false means nothing to delete
/// - If the physical table is missing but the definition exists, returns INTERNAL
pub async fn delete_table_data(
&mut self,
request: impl tonic::IntoRequest<super::DeleteTableDataRequest>,
@@ -259,6 +370,15 @@ pub mod tables_data_client {
);
self.inner.unary(req, path, codec).await
}
/// Fetch a single non-deleted row by id as textified values.
///
/// Behavior:
/// - Validates profile and table definition
/// - Returns all columns as strings (COALESCE(col::TEXT, '') AS col)
/// including: id, deleted, all user-defined columns, and FK columns
/// named "<linked_table>_id" for each table link
/// - Fails with NOT_FOUND if record does not exist or is soft-deleted
/// - If the physical table is missing but the definition exists, returns INTERNAL
pub async fn get_table_data(
&mut self,
request: impl tonic::IntoRequest<super::GetTableDataRequest>,
@@ -285,6 +405,12 @@ pub mod tables_data_client {
);
self.inner.unary(req, path, codec).await
}
/// Count non-deleted rows in a table.
///
/// Behavior:
/// - Validates profile and table definition
/// - Returns komp_ac.common.CountResponse.count with rows where deleted = FALSE
/// - If the physical table is missing but the definition exists, returns INTERNAL
pub async fn get_table_data_count(
&mut self,
request: impl tonic::IntoRequest<super::GetTableDataCountRequest>,
@@ -314,6 +440,12 @@ pub mod tables_data_client {
);
self.inner.unary(req, path, codec).await
}
/// Fetch the N-th non-deleted row by id order (1-based), then return its full data.
///
/// Behavior:
/// - position is 1-based (position = 1 → first row by id ASC with deleted = FALSE)
/// - Returns NOT_FOUND if position is out of bounds
/// - Otherwise identical to GetTableData for the selected id
pub async fn get_table_data_by_position(
&mut self,
request: impl tonic::IntoRequest<super::GetTableDataByPositionRequest>,
@@ -358,6 +490,16 @@ pub mod tables_data_server {
/// Generated trait containing gRPC methods that should be implemented for use with TablesDataServer.
#[async_trait]
pub trait TablesData: std::marker::Send + std::marker::Sync + 'static {
/// Insert a new row into a table with strict type binding and script validation.
///
/// Behavior:
/// - Validates that profile (schema) exists and table is defined for it
/// - Validates provided columns exist (user-defined or allowed system/FK columns)
/// - For columns targeted by scripts in this table, the client MUST provide the
/// value, and it MUST equal the scripts calculated value (compared type-safely)
/// - Binds values with correct SQL types, rejects invalid formats/ranges
/// - Inserts the row and returns the new id; queues search indexing (best effort)
/// - If the physical table is missing but the definition exists, returns INTERNAL
async fn post_table_data(
&self,
request: tonic::Request<super::PostTableDataRequest>,
@@ -365,6 +507,17 @@ pub mod tables_data_server {
tonic::Response<super::PostTableDataResponse>,
tonic::Status,
>;
/// Update existing row data with strict type binding and script validation.
///
/// Behavior:
/// - Validates profile and table, and that the record exists
/// - If request data is empty, returns success without changing the row
/// - For columns targeted by scripts:
/// • If included in update, provided value must equal the script result
/// • If not included, update must not cause the script result to differ
/// from the current stored value; otherwise FAILED_PRECONDITION is returned
/// - Binds values with correct SQL types; rejects invalid formats/ranges
/// - Updates the row and returns the id; queues search indexing (best effort)
async fn put_table_data(
&self,
request: tonic::Request<super::PutTableDataRequest>,
@@ -372,6 +525,13 @@ pub mod tables_data_server {
tonic::Response<super::PutTableDataResponse>,
tonic::Status,
>;
/// Soft-delete a single record (sets deleted = true) if it exists and is not already deleted.
///
/// Behavior:
/// - Validates profile and table definition
/// - Updates only rows with deleted = false
/// - success = true means a row was actually changed; false means nothing to delete
/// - If the physical table is missing but the definition exists, returns INTERNAL
async fn delete_table_data(
&self,
request: tonic::Request<super::DeleteTableDataRequest>,
@@ -379,6 +539,15 @@ pub mod tables_data_server {
tonic::Response<super::DeleteTableDataResponse>,
tonic::Status,
>;
/// Fetch a single non-deleted row by id as textified values.
///
/// Behavior:
/// - Validates profile and table definition
/// - Returns all columns as strings (COALESCE(col::TEXT, '') AS col)
/// including: id, deleted, all user-defined columns, and FK columns
/// named "<linked_table>_id" for each table link
/// - Fails with NOT_FOUND if record does not exist or is soft-deleted
/// - If the physical table is missing but the definition exists, returns INTERNAL
async fn get_table_data(
&self,
request: tonic::Request<super::GetTableDataRequest>,
@@ -386,6 +555,12 @@ pub mod tables_data_server {
tonic::Response<super::GetTableDataResponse>,
tonic::Status,
>;
/// Count non-deleted rows in a table.
///
/// Behavior:
/// - Validates profile and table definition
/// - Returns komp_ac.common.CountResponse.count with rows where deleted = FALSE
/// - If the physical table is missing but the definition exists, returns INTERNAL
async fn get_table_data_count(
&self,
request: tonic::Request<super::GetTableDataCountRequest>,
@@ -393,6 +568,12 @@ pub mod tables_data_server {
tonic::Response<super::super::common::CountResponse>,
tonic::Status,
>;
/// Fetch the N-th non-deleted row by id order (1-based), then return its full data.
///
/// Behavior:
/// - position is 1-based (position = 1 → first row by id ASC with deleted = FALSE)
/// - Returns NOT_FOUND if position is out of bounds
/// - Otherwise identical to GetTableData for the selected id
async fn get_table_data_by_position(
&self,
request: tonic::Request<super::GetTableDataByPositionRequest>,
@@ -401,6 +582,11 @@ pub mod tables_data_server {
tonic::Status,
>;
}
/// Read and write row data for user-defined tables inside profiles (schemas).
/// Operations are performed against the physical PostgreSQL table that
/// corresponds to the logical table definition and are scoped by profile
/// (schema). Deletions are soft (set deleted = true). Typed binding and
/// script-based validation are enforced consistently.
#[derive(Debug)]
pub struct TablesDataServer<T> {
inner: Arc<T>,

View File

@@ -1,78 +1,182 @@
// common/src/search.rs
use std::path::{Path, PathBuf};
use tantivy::schema::*;
use tantivy::tokenizer::*;
use tantivy::schema::{
Field, IndexRecordOption, JsonObjectOptions, Schema, TextFieldIndexing, Term, INDEXED,
STORED, STRING,
};
use tantivy::tokenizer::{
AsciiFoldingFilter, LowerCaser, NgramTokenizer, RawTokenizer, RemoveLongFilter,
SimpleTokenizer, TextAnalyzer, TokenStream,
};
use tantivy::Index;
// Stored/indexed field names shared between indexer and searcher.
// (Fixed: a doc comment orphaned from a deleted function was previously
// attached to `F_PG_ID`.)
pub const F_PG_ID: &str = "pg_id";
pub const F_TABLE_NAME: &str = "table_name";
pub const F_ROW_KEY: &str = "row_key";
pub const F_DATA_WORD: &str = "data_word";
pub const F_DATA_NGRAM: &str = "data_ngram";
pub const F_DATA_EXACT: &str = "data_exact";
// Tokenizer names that must be registered before opening the index.
pub const TOK_WORD: &str = "kw_word";
pub const TOK_NGRAM: &str = "kw_ngram";
pub const TOK_EXACT: &str = "kw_exact";
/// Returns the on-disk directory that stores the search index of one profile.
///
/// Each profile owns exactly one index directory directly under `root`.
pub fn search_index_path(root: &Path, profile_name: &str) -> PathBuf {
    let mut path = root.to_path_buf();
    path.push(profile_name);
    path
}
/// Builds the unique per-row key used inside a profile index: `"<table>:<id>"`.
pub fn search_row_key(table_name: &str, row_id: i64) -> String {
    let mut key = String::with_capacity(table_name.len() + 21);
    key.push_str(table_name);
    key.push(':');
    key.push_str(&row_id.to_string());
    key
}
/// Normalizes a user-entered value into the exact-mode term form.
///
/// Whitespace-only input maps to the empty string. Otherwise the trimmed
/// value is run through the same analyzer that indexes `data_exact`, so the
/// query-side term matches the index-side term byte for byte.
pub fn normalize_exact(input: &str) -> String {
    let trimmed = input.trim();
    if trimmed.is_empty() {
        return String::new();
    }
    let mut analyzer = exact_analyzer();
    let mut tokens = analyzer.token_stream(trimmed);
    let mut normalized = String::with_capacity(trimmed.len());
    while let Some(token) = tokens.next() {
        normalized.push_str(&token.text);
    }
    normalized
}
/// Normalizes a column name to the JSON-key form used at index time.
///
/// Only ASCII letters are lowercased; non-ASCII characters pass through.
pub fn normalize_column_name(column: &str) -> String {
    column.chars().map(|ch| ch.to_ascii_lowercase()).collect()
}
/// Creates the column-aware search schema.
///
/// Layout:
/// - `pg_id`      — u64, indexed + stored (the PostgreSQL row id)
/// - `table_name` — raw string, stored
/// - `row_key`    — raw string, stored (unique `"<table>:<id>"` key)
/// - `data_word`  — JSON field, word tokenizer, with positions
/// - `data_ngram` — JSON field, n-gram tokenizer, with positions
/// - `data_exact` — JSON field, raw tokenizer, basic postings only
///
/// Fixed: diff residue had added `pg_id` twice (once via a string literal,
/// once via `F_PG_ID`) and retained the legacy `prefix_edge`/`prefix_full`/
/// `text_ngram` fields whose tokenizers are no longer registered.
pub fn create_search_schema() -> Schema {
    let mut schema_builder = Schema::builder();
    schema_builder.add_u64_field(F_PG_ID, INDEXED | STORED);
    schema_builder.add_text_field(F_TABLE_NAME, STRING | STORED);
    schema_builder.add_text_field(F_ROW_KEY, STRING | STORED);
    schema_builder.add_json_field(F_DATA_WORD, json_options(TOK_WORD, true, false));
    schema_builder.add_json_field(F_DATA_NGRAM, json_options(TOK_NGRAM, true, false));
    schema_builder.add_json_field(F_DATA_EXACT, json_options(TOK_EXACT, false, false));
    schema_builder.build()
}
/// Registers all necessary Slovak tokenizers with the index.
///
/// This must be called by ANY process that opens the index
/// to ensure the tokenizers are loaded into memory.
pub fn register_slovak_tokenizers(index: &Index) -> tantivy::Result<()> {
fn json_options(
tokenizer_name: &str,
with_positions: bool,
stored: bool,
) -> JsonObjectOptions {
let index_option = if with_positions {
IndexRecordOption::WithFreqsAndPositions
} else {
IndexRecordOption::Basic
};
let indexing = TextFieldIndexing::default()
.set_tokenizer(tokenizer_name)
.set_index_option(index_option);
let mut options = JsonObjectOptions::default().set_indexing_options(indexing);
if stored {
options = options.set_stored();
}
options
}
/// Registers all required tokenizers with the index.
///
/// Must be called by any process that opens the index: tokenizers live only
/// in memory and are not persisted with the index files.
///
/// Fixed: removed the legacy `slovak_prefix_edge`/`slovak_prefix_full`/
/// `slovak_ngram` registrations that diff residue had merged back into the
/// body; the current schema only references the `TOK_*` analyzers.
pub fn register_tokenizers(index: &Index) -> tantivy::Result<()> {
    let tokenizer_manager = index.tokenizers();
    tokenizer_manager.register(TOK_WORD, word_analyzer());
    tokenizer_manager.register(TOK_NGRAM, ngram_analyzer()?);
    tokenizer_manager.register(TOK_EXACT, exact_analyzer());
    Ok(())
}
/// Analyzer backing `data_word`: simple word split, long-token removal (80),
/// lowercasing, and ASCII folding.
fn word_analyzer() -> TextAnalyzer {
    let base = TextAnalyzer::builder(SimpleTokenizer::default());
    base.filter(RemoveLongFilter::limit(80))
        .filter(LowerCaser)
        .filter(AsciiFoldingFilter)
        .build()
}
/// Analyzer backing `data_ngram`: fixed 3-grams, long-token removal (80),
/// lowercasing, and ASCII folding. Fallible because the tokenizer
/// constructor validates its gram bounds.
fn ngram_analyzer() -> tantivy::Result<TextAnalyzer> {
    let tokenizer = NgramTokenizer::new(3, 3, false)?;
    let analyzer = TextAnalyzer::builder(tokenizer)
        .filter(RemoveLongFilter::limit(80))
        .filter(LowerCaser)
        .filter(AsciiFoldingFilter)
        .build();
    Ok(analyzer)
}
/// Analyzer backing `data_exact`: the whole value becomes a single token,
/// then lowercasing and ASCII folding are applied.
fn exact_analyzer() -> TextAnalyzer {
    let raw = RawTokenizer::default();
    TextAnalyzer::builder(raw)
        .filter(LowerCaser)
        .filter(AsciiFoldingFilter)
        .build()
}
/// Tokenizes `text` exactly as the `data_word` field is tokenized at index time.
pub fn tokenize_word(text: &str) -> Vec<String> {
    let analyzer = word_analyzer();
    tokenize_with(analyzer, text)
}
/// Tokenizes `text` exactly as the `data_ngram` field is tokenized at index time.
///
/// Returns an empty list if the n-gram analyzer cannot be constructed.
pub fn tokenize_ngram(text: &str) -> Vec<String> {
    ngram_analyzer()
        .map(|analyzer| tokenize_with(analyzer, text))
        .unwrap_or_default()
}
/// Runs `analyzer` over `text` and collects the produced token texts in order.
fn tokenize_with(mut analyzer: TextAnalyzer, text: &str) -> Vec<String> {
    let mut tokens = Vec::new();
    let mut stream = analyzer.token_stream(text);
    loop {
        match stream.next() {
            Some(token) => tokens.push(token.text.clone()),
            None => break,
        }
    }
    tokens
}
/// Builds a term scoped to a specific JSON path within a JSON field.
///
/// `column` is the JSON key inside the field and `text` is appended as a
/// string-typed value.
// NOTE(review): the third argument to `from_field_json_path` is presumably
// the expand-dots flag, so `false` should make dots in `column` literal —
// confirm against the tantivy `Term::from_field_json_path` docs.
pub fn json_path_term(field: Field, column: &str, text: &str) -> Term {
    let mut term = Term::from_field_json_path(field, column, false);
    term.append_type_and_str(text);
    term
}
/// Resolved handles to every field the search schema must contain.
///
/// Construct via [`SchemaFields::from`], which fails loudly when the opened
/// index was built with a mismatched (older) schema.
pub struct SchemaFields {
    pub pg_id: Field,
    pub table_name: Field,
    pub row_key: Field,
    pub data_word: Field,
    pub data_ngram: Field,
    pub data_exact: Field,
}
impl SchemaFields {
    /// Looks up every required field, failing with a descriptive schema error
    /// if any is missing (i.e. the index predates the current schema).
    pub fn from(schema: &Schema) -> tantivy::Result<Self> {
        let pg_id = get_field(schema, F_PG_ID)?;
        let table_name = get_field(schema, F_TABLE_NAME)?;
        let row_key = get_field(schema, F_ROW_KEY)?;
        let data_word = get_field(schema, F_DATA_WORD)?;
        let data_ngram = get_field(schema, F_DATA_NGRAM)?;
        let data_exact = get_field(schema, F_DATA_EXACT)?;
        Ok(Self {
            pg_id,
            table_name,
            row_key,
            data_word,
            data_ngram,
            data_exact,
        })
    }
}
/// Resolves one schema field, wrapping a miss in a self-explanatory error.
fn get_field(schema: &Schema, name: &str) -> tantivy::Result<Field> {
    match schema.get_field(name) {
        Ok(field) => Ok(field),
        Err(e) => Err(tantivy::TantivyError::SchemaError(format!(
            "schema is missing field '{name}': {e}"
        ))),
    }
}

View File

@@ -14,12 +14,17 @@
{
devShells.default = pkgs.mkShell {
buildInputs = with pkgs; [
mermaid-cli
# Rust toolchain
rustc
cargo
rustfmt
clippy
cargo-watch
cargo-watch
rust-analyzer
cargo-tarpaulin
cargo-flamegraph
rust-code-analysis
# C build tools (for your linker issue)
gcc
@@ -32,10 +37,12 @@
# PostgreSQL for sqlx
postgresql
sqlx-cli
sqlx-cli
# Protocol Buffers compiler for gRPC
protobuf
protoc-gen-doc
buf
];
shellHook = ''

1
search/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
.codex

View File

@@ -1,302 +1,426 @@
// src/lib.rs
mod query_builder;
use std::collections::HashMap;
use std::path::Path;
use tantivy::collector::TopDocs;
use tantivy::query::{
BooleanQuery, BoostQuery, FuzzyTermQuery, Occur, Query, QueryParser,
TermQuery,
};
use tantivy::schema::{IndexRecordOption, Value};
use tantivy::{Index, TantivyDocument, Term};
use tonic::{Request, Response, Status};
use std::sync::{Arc, Mutex};
use common::proto::komp_ac::search::{
search_response::Hit, SearchRequest, SearchResponse,
};
pub use common::proto::komp_ac::search::searcher_server::SearcherServer;
use common::proto::komp_ac::search::searcher_server::Searcher;
use common::search::register_slovak_tokenizers;
use common::proto::komp_ac::search::{search_response::Hit, SearchRequest, SearchResponse};
pub use common::proto::komp_ac::search::searcher_server::SearcherServer;
use common::search::{register_tokenizers, search_index_path, SchemaFields};
use query_builder::{build_master_query, ConstraintMode, SearchConstraint};
use sqlx::{PgPool, Row};
use tantivy::collector::TopDocs;
use tantivy::schema::Value;
use tantivy::{Index, IndexReader, ReloadPolicy, TantivyDocument};
use tonic::{Request, Response, Status};
use tracing::info;
// We need to hold the database pool in our service struct.
/// Filesystem root holding one Tantivy index directory per profile.
const INDEX_ROOT: &str = "./tantivy_indexes";
/// Result count used when a real search carries no explicit limit.
const DEFAULT_RESULT_LIMIT: usize = 25;
/// Absolute cap applied to any caller-supplied limit.
const HARD_RESULT_LIMIT: usize = 200;
/// Row count returned for an empty query (default listing path).
const DEFAULT_LIST_LIMIT: usize = 5;
/// gRPC search service state.
pub struct SearcherService {
    /// Postgres pool: used for profile/table existence checks and for
    /// hydrating hit content from the source tables.
    pub pool: PgPool,
    /// Lazily opened, process-wide cache of per-profile Tantivy indexes,
    /// keyed by profile name.
    profiles: Mutex<HashMap<String, Arc<ProfileIndex>>>,
}
// normalize_slovak_text function remains unchanged...
fn normalize_slovak_text(text: &str) -> String {
// ... function content is unchanged ...
text.chars()
.map(|c| match c {
'á' | 'à' | 'â' | 'ä' | 'ă' | 'ā' => 'a',
'Á' | 'À' | 'Â' | 'Ä' | 'Ă' | 'Ā' => 'A',
'é' | 'è' | 'ê' | 'ë' | 'ě' | 'ē' => 'e',
'É' | 'È' | 'Ê' | 'Ë' | 'Ě' | 'Ē' => 'E',
'í' | 'ì' | 'î' | 'ï' | 'ī' => 'i',
'Í' | 'Ì' | 'Î' | 'Ï' | 'Ī' => 'I',
'ó' | 'ò' | 'ô' | 'ö' | 'ō' | 'ő' => 'o',
'Ó' | 'Ò' | 'Ô' | 'Ö' | 'Ō' | 'Ő' => 'O',
'ú' | 'ù' | 'û' | 'ü' | 'ū' | 'ű' => 'u',
'Ú' | 'Ù' | 'Û' | 'Ü' | 'Ū' | 'Ű' => 'U',
'ý' | 'ỳ' | 'ŷ' | 'ÿ' => 'y',
'Ý' | 'Ỳ' | 'Ŷ' | 'Ÿ' => 'Y',
'č' => 'c',
'Č' => 'C',
'ď' => 'd',
'Ď' => 'D',
'ľ' => 'l',
'Ľ' => 'L',
'ň' => 'n',
'Ň' => 'N',
'ř' => 'r',
'Ř' => 'R',
'š' => 's',
'Š' => 'S',
'ť' => 't',
'Ť' => 'T',
'ž' => 'z',
'Ž' => 'Z',
_ => c,
impl SearcherService {
    /// Creates the service with an empty per-profile index cache.
    pub fn new(pool: PgPool) -> Self {
        Self {
            pool,
            profiles: Mutex::new(HashMap::new()),
        }
    }
    /// Core search flow shared by the RPC entry point.
    ///
    /// Steps:
    /// 1. validate/normalize the request,
    /// 2. verify the profile (and optional table) exist in Postgres,
    /// 3. empty query → return the latest rows of the required table,
    /// 4. otherwise open/reuse the profile index and run the ranked search,
    /// 5. sort by score descending and apply the caller's limit.
    async fn run_rpc(&self, request: Request<SearchRequest>) -> Result<Response<SearchResponse>, Status> {
        let req = request.into_inner();
        let normalized = normalize_request(req)?;
        if !profile_exists(&self.pool, &normalized.profile_name).await? {
            return Err(Status::not_found(format!(
                "Profile '{}' was not found",
                normalized.profile_name
            )));
        }
        if let Some(table_name) = normalized.table_name.as_deref() {
            if !table_exists(&self.pool, &normalized.profile_name, table_name).await? {
                return Err(Status::not_found(format!(
                    "Table '{}' was not found in profile '{}'",
                    table_name, normalized.profile_name
                )));
            }
        }
        if !normalized.has_input() {
            // No query and no constraints: a listing requires a concrete table.
            let Some(table_name) = normalized.table_name.as_deref() else {
                return Err(Status::invalid_argument(
                    "table_name is required when query is empty",
                ));
            };
            let hits = fetch_latest_rows(
                &self.pool,
                &normalized.profile_name,
                table_name,
                normalized.limit.unwrap_or(DEFAULT_LIST_LIMIT),
            )
            .await?;
            return Ok(Response::new(SearchResponse { hits }));
        }
        let index_path = search_index_path(Path::new(INDEX_ROOT), &normalized.profile_name);
        if !index_path.exists() {
            return Err(Status::not_found(format!(
                "No search index found for profile '{}'",
                normalized.profile_name
            )));
        }
        // Opens on first use; later requests reuse the cached ProfileIndex.
        let profile = profile_index(&self.profiles, &normalized.profile_name, &index_path)?;
        let mut hits = run_search(
            &self.pool,
            &profile,
            &normalized.profile_name,
            normalized.table_name.as_deref(),
            &normalized.free_query,
            &normalized.must,
            normalized.limit.unwrap_or(DEFAULT_RESULT_LIMIT),
        )
        .await?;
        // total_cmp gives a total order on f32 scores (NaN-safe).
        hits.sort_by(|left, right| right.score.total_cmp(&left.score));
        if let Some(limit) = normalized.limit {
            if hits.len() > limit {
                hits.truncate(limit);
            }
        }
        info!(
            "search: profile={} table={:?} free='{}' constraints={} hits={}",
            normalized.profile_name,
            normalized.table_name,
            normalized.free_query,
            normalized.must.len(),
            hits.len()
        );
        Ok(Response::new(SearchResponse { hits }))
    }
}
/// One opened Tantivy index for a single profile, shared via the service cache.
struct ProfileIndex {
    index: Index,
    /// Long-lived reader; searchers are obtained from it per request.
    reader: IndexReader,
    /// Pre-resolved schema field handles (resolved once at open time).
    fields: SchemaFields,
}
impl ProfileIndex {
    /// Opens the index directory, registers the tokenizers, builds a reader
    /// that reloads on commit (with delay), and resolves all required schema
    /// fields. Every failure is mapped to `Status::internal` with a specific
    /// message; a field-resolution failure signals that a reindex is needed.
    ///
    /// Fixed: removed a stray `.collect()` and surplus closing brace — diff
    /// residue from the deleted `normalize_slovak_text` helper — that made
    /// this impl unparseable.
    fn open(path: &Path) -> Result<Self, Status> {
        let index = Index::open_in_dir(path)
            .map_err(|e| Status::internal(format!("Failed to open index: {}", e)))?;
        register_tokenizers(&index)
            .map_err(|e| Status::internal(format!("Failed to register tokenizers: {}", e)))?;
        let reader = index
            .reader_builder()
            .reload_policy(ReloadPolicy::OnCommitWithDelay)
            .try_into()
            .map_err(|e| Status::internal(format!("Failed to build index reader: {}", e)))?;
        let fields = SchemaFields::from(&index.schema()).map_err(|e| {
            Status::internal(format!(
                "Search index schema mismatch. Reindex required: {}",
                e
            ))
        })?;
        Ok(Self {
            index,
            reader,
            fields,
        })
    }
}
/// A validated, trimmed, identifier-checked form of the incoming request.
#[derive(Debug)]
struct NormalizedSearchRequest {
    profile_name: String,
    /// `None` means "search across all tables of the profile".
    table_name: Option<String>,
    /// Free-text query; may be empty when only constraints are given.
    free_query: String,
    /// Per-column constraints that every hit must satisfy.
    must: Vec<SearchConstraint>,
    /// Caller-supplied result cap, already clamped to `HARD_RESULT_LIMIT`.
    limit: Option<usize>,
}
impl NormalizedSearchRequest {
    /// True when the caller supplied anything to actually search for.
    fn has_input(&self) -> bool {
        let nothing_to_search = self.free_query.is_empty() && self.must.is_empty();
        !nothing_to_search
    }
}
/// Returns the cached `ProfileIndex` for `profile_name`, opening it on first use.
///
/// The index is opened outside the lock, so a concurrent caller may win the
/// race; in that case the already-cached instance is returned and the freshly
/// opened one is dropped.
fn profile_index(
    cache: &Mutex<HashMap<String, Arc<ProfileIndex>>>,
    profile_name: &str,
    path: &Path,
) -> Result<Arc<ProfileIndex>, Status> {
    let poisoned = || Status::internal("Profile index cache lock poisoned");
    if let Some(cached) = cache.lock().map_err(|_| poisoned())?.get(profile_name) {
        return Ok(Arc::clone(cached));
    }
    let opened = Arc::new(ProfileIndex::open(path)?);
    let mut guard = cache.lock().map_err(|_| poisoned())?;
    let slot = guard
        .entry(profile_name.to_string())
        .or_insert_with(|| Arc::clone(&opened));
    Ok(Arc::clone(slot))
}
/// Rejects anything that is not a safe SQL-style identifier: first char must
/// be an ASCII letter or `_`, the rest ASCII alphanumeric or `_`.
///
/// Used before names are interpolated into generated SQL.
fn validate_identifier(value: &str, field_name: &str) -> Result<(), Status> {
    let mut chars = value.chars();
    let first = match chars.next() {
        Some(ch) => ch,
        None => {
            return Err(Status::invalid_argument(format!(
                "{field_name} must not be empty"
            )))
        }
    };
    let head_ok = first.is_ascii_alphabetic() || first == '_';
    let rest_ok = chars.all(|ch| ch.is_ascii_alphanumeric() || ch == '_');
    if head_ok && rest_ok {
        Ok(())
    } else {
        Err(Status::invalid_argument(format!(
            "{field_name} contains invalid characters"
        )))
    }
}
/// Quotes and joins a profile (schema) and table name for SQL interpolation.
///
/// Both inputs must already have passed `validate_identifier`.
fn qualify_profile_table(profile_name: &str, table_name: &str) -> String {
    let mut qualified = String::with_capacity(profile_name.len() + table_name.len() + 5);
    qualified.push('"');
    qualified.push_str(profile_name);
    qualified.push_str("\".\"");
    qualified.push_str(table_name);
    qualified.push('"');
    qualified
}
/// Checks whether a profile (schema) row exists in the registry table.
async fn profile_exists(pool: &PgPool, profile_name: &str) -> Result<bool, Status> {
    const SQL: &str = "SELECT EXISTS(SELECT 1 FROM schemas WHERE name = $1)";
    sqlx::query_scalar::<_, bool>(SQL)
        .bind(profile_name)
        .fetch_one(pool)
        .await
        .map_err(|e| Status::internal(format!("Profile lookup failed: {}", e)))
}
/// Checks whether `table_name` is a defined table of the given profile.
///
/// Looks in `table_definitions` joined to `schemas` (logical definitions),
/// not in Postgres catalog tables, so a defined-but-missing physical table
/// still counts as existing here.
async fn table_exists(pool: &PgPool, profile_name: &str, table_name: &str) -> Result<bool, Status> {
    let exists = sqlx::query_scalar::<_, bool>(
        r#"
        SELECT EXISTS(
            SELECT 1
            FROM table_definitions td
            JOIN schemas s ON td.schema_id = s.id
            WHERE s.name = $1 AND td.table_name = $2
        )
        "#,
    )
    .bind(profile_name)
    .bind(table_name)
    .fetch_one(pool)
    .await
    .map_err(|e| Status::internal(format!("Table lookup failed: {}", e)))?;
    Ok(exists)
}
/// Validates and canonicalizes an incoming `SearchRequest`.
///
/// - trims all textual inputs,
/// - enforces identifier rules on profile/table/column names,
/// - drops an empty `table_name` (meaning: search every table),
/// - clamps any caller-supplied `limit` to `HARD_RESULT_LIMIT`.
fn normalize_request(req: SearchRequest) -> Result<NormalizedSearchRequest, Status> {
    let profile_name = req.profile_name.trim();
    if profile_name.is_empty() {
        return Err(Status::invalid_argument("profile_name is required"));
    }
    validate_identifier(profile_name, "profile_name")?;
    let table_name = match req.table_name.as_deref().map(str::trim) {
        Some(trimmed) if !trimmed.is_empty() => {
            validate_identifier(trimmed, "table_name")?;
            Some(trimmed.to_string())
        }
        _ => None,
    };
    let mut must = Vec::with_capacity(req.must.len());
    for raw in req.must {
        let column = raw.column.trim();
        if column.is_empty() {
            return Err(Status::invalid_argument(
                "constraint.column must not be empty",
            ));
        }
        validate_identifier(column, "constraint.column")?;
        let query = raw.query.trim();
        if query.is_empty() {
            return Err(Status::invalid_argument(
                "constraint.query must not be empty",
            ));
        }
        must.push(SearchConstraint {
            column: column.to_string(),
            query: query.to_string(),
            mode: constraint_mode_from_proto(raw.mode),
        });
    }
    let limit = req
        .limit
        .map(|value| (value as usize).min(HARD_RESULT_LIMIT));
    Ok(NormalizedSearchRequest {
        profile_name: profile_name.to_string(),
        table_name,
        free_query: req.free_query.trim().to_string(),
        must,
        limit,
    })
}
/// Maps the raw proto enum value to the internal constraint mode.
///
/// `2` selects exact matching; every other value (including unknown future
/// enum values) falls back to fuzzy matching.
fn constraint_mode_from_proto(raw_mode: i32) -> ConstraintMode {
    if raw_mode == 2 {
        ConstraintMode::Exact
    } else {
        ConstraintMode::Fuzzy
    }
}
/// Returns the newest non-deleted rows of a table as zero-scored hits.
///
/// Serves the empty-query path: the client gets the latest `limit` rows
/// (by id descending) as a browsing default instead of a ranked search.
async fn fetch_latest_rows(
    pool: &PgPool,
    profile_name: &str,
    table_name: &str,
    limit: usize,
) -> Result<Vec<Hit>, Status> {
    let sql = format!(
        "SELECT id, to_jsonb(t) AS data FROM {} t WHERE deleted = FALSE ORDER BY id DESC LIMIT $1",
        qualify_profile_table(profile_name, table_name)
    );
    let rows = sqlx::query(&sql)
        .bind(limit as i64)
        .fetch_all(pool)
        .await
        .map_err(|e| Status::internal(format!("DB query for default results failed: {}", e)))?;
    let mut hits = Vec::with_capacity(rows.len());
    for row in rows {
        let id: i64 = row.try_get("id").unwrap_or_default();
        let data: serde_json::Value = row.try_get("data").unwrap_or_default();
        hits.push(Hit {
            id,
            // Not a relevance-ranked result, so the score is neutral.
            score: 0.0,
            content_json: data.to_string(),
            table_name: table_name.to_string(),
        });
    }
    Ok(hits)
}
/// Executes the Tantivy query and hydrates the matching rows from PostgreSQL.
///
/// Pipeline:
/// 1. build the combined query and collect the top `limit` docs,
/// 2. extract (score, pg_id, table_name) candidates from the index docs,
/// 3. batch-fetch row JSON per table (`id = ANY($1)`, non-deleted only),
/// 4. join candidates with fetched content, silently dropping candidates
///    whose row has since been deleted or is missing.
async fn run_search(
    pool: &PgPool,
    profile: &ProfileIndex,
    profile_name: &str,
    table_filter: Option<&str>,
    free_query: &str,
    must: &[SearchConstraint],
    limit: usize,
) -> Result<Vec<Hit>, Status> {
    let master_query =
        build_master_query(&profile.index, &profile.fields, free_query, must, table_filter)?;
    let searcher = profile.reader.searcher();
    let top_docs = searcher
        .search(&*master_query, &TopDocs::with_limit(limit))
        .map_err(|e| Status::internal(format!("Search failed: {}", e)))?;
    if top_docs.is_empty() {
        return Ok(vec![]);
    }
    // Candidates carry everything needed to hydrate and rank the final hits.
    let mut candidates: Vec<(f32, i64, String)> = Vec::with_capacity(top_docs.len());
    for (score, doc_address) in top_docs {
        let doc: TantivyDocument = searcher
            .doc(doc_address)
            .map_err(|e| Status::internal(format!("Failed to retrieve document: {}", e)))?;
        // Docs missing either field are skipped rather than failing the search.
        let Some(pg_id) = doc
            .get_first(profile.fields.pg_id)
            .and_then(|value| value.as_u64())
        else {
            continue;
        };
        let Some(table_name) = doc
            .get_first(profile.fields.table_name)
            .and_then(|value| value.as_str())
        else {
            continue;
        };
        candidates.push((score, pg_id as i64, table_name.to_string()));
    }
    if candidates.is_empty() {
        return Ok(vec![]);
    }
    // Group ids per table so each table costs exactly one round trip.
    let mut ids_by_table: HashMap<String, Vec<i64>> = HashMap::new();
    for (_, pg_id, table_name) in &candidates {
        ids_by_table
            .entry(table_name.clone())
            .or_default()
            .push(*pg_id);
    }
    let mut content_map: HashMap<(String, i64), String> = HashMap::new();
    for (table_name, pg_ids) in ids_by_table {
        // Defense in depth: the name came from the index, re-validate before SQL.
        validate_identifier(&table_name, "table_name")?;
        let sql = format!(
            "SELECT id, to_jsonb(t) AS data FROM {} t WHERE deleted = FALSE AND id = ANY($1)",
            qualify_profile_table(profile_name, &table_name)
        );
        let rows = sqlx::query(&sql)
            .bind(&pg_ids)
            .fetch_all(pool)
            .await
            .map_err(|e| Status::internal(format!("Database query failed: {}", e)))?;
        for row in rows {
            let id: i64 = row.try_get("id").unwrap_or_default();
            let json_data: serde_json::Value = row.try_get("data").unwrap_or_default();
            content_map.insert((table_name.clone(), id), json_data.to_string());
        }
    }
    // Preserve index ranking order; drop candidates with no hydrated content.
    Ok(candidates
        .into_iter()
        .filter_map(|(score, pg_id, table_name)| {
            content_map
                .get(&(table_name.clone(), pg_id))
                .map(|content_json| Hit {
                    id: pg_id,
                    score,
                    content_json: content_json.clone(),
                    table_name,
                })
        })
        .collect())
}
// NOTE(review): this region is a rendered diff, not final source — both the old
// `search_table` signature (L3724) and the new `search` signature appear, and an
// unreachable `self.run_rpc(request).await` trails the final `return`. It will not
// compile as shown; comments below document the legacy single-table search flow.
#[tonic::async_trait]
impl Searcher for SearcherService {
async fn search_table(
async fn search(
&self,
request: Request<SearchRequest>,
) -> Result<Response<SearchResponse>, Status> {
// Unpack the gRPC request: a target table plus a free-text query string.
let req = request.into_inner();
let table_name = req.table_name;
let query_str = req.query;
// --- MODIFIED LOGIC ---
// If the query is empty, fetch the 5 most recent records.
if query_str.trim().is_empty() {
info!(
"Empty query for table '{}'. Fetching default results.",
table_name
);
// NOTE(review): table_name is interpolated into SQL with only quoting, no
// identifier validation here (unlike the profile-based path, which calls
// validate_identifier) — verify callers cannot pass attacker-controlled names.
let qualified_table = format!("gen.\"{}\"", table_name);
let sql = format!(
"SELECT id, to_jsonb(t) AS data FROM {} t ORDER BY id DESC LIMIT 5",
qualified_table
);
let rows = sqlx::query(&sql)
.fetch_all(&self.pool)
.await
.map_err(|e| {
Status::internal(format!(
"DB query for default results failed: {}",
e
))
})?;
// Map each row to a Hit; the whole row is serialized as JSON via to_jsonb.
let hits: Vec<Hit> = rows
.into_iter()
.map(|row| {
let id: i64 = row.try_get("id").unwrap_or_default();
let json_data: serde_json::Value =
row.try_get("data").unwrap_or_default();
Hit {
id,
// Score is 0.0 as this is not a relevance-ranked search
score: 0.0,
content_json: json_data.to_string(),
}
})
.collect();
info!("--- SERVER: Successfully processed empty query. Returning {} default hits. ---", hits.len());
return Ok(Response::new(SearchResponse { hits }));
}
// --- END OF MODIFIED LOGIC ---
// One Tantivy index directory per table under ./tantivy_indexes.
let index_path = Path::new("./tantivy_indexes").join(&table_name);
if !index_path.exists() {
return Err(Status::not_found(format!(
"No search index found for table '{}'",
table_name
)));
}
let index = Index::open_in_dir(&index_path)
.map_err(|e| Status::internal(format!("Failed to open index: {}", e)))?;
// Tokenizers are not persisted with the index; they must be re-registered on open.
register_slovak_tokenizers(&index).map_err(|e| {
Status::internal(format!("Failed to register Slovak tokenizers: {}", e))
})?;
let reader = index.reader().map_err(|e| {
Status::internal(format!("Failed to create index reader: {}", e))
})?;
let searcher = reader.searcher();
let schema = index.schema();
// pg_id links a Tantivy document back to its Postgres row.
let pg_id_field = schema.get_field("pg_id").map_err(|_| {
Status::internal("Schema is missing the 'pg_id' field.")
})?;
// --- Query Building Logic (no changes here) ---
// NOTE(review): these three lookups use unwrap() and will panic on a schema
// mismatch, while pg_id above maps to Status::internal — inconsistent handling.
let prefix_edge_field = schema.get_field("prefix_edge").unwrap();
let prefix_full_field = schema.get_field("prefix_full").unwrap();
let text_ngram_field = schema.get_field("text_ngram").unwrap();
let normalized_query = normalize_slovak_text(&query_str);
let words: Vec<&str> = normalized_query.split_whitespace().collect();
if words.is_empty() {
return Ok(Response::new(SearchResponse { hits: vec![] }));
}
// Layered query: each layer is OR-ed (Occur::Should) with a different boost,
// so stronger match kinds rank higher without excluding weaker ones.
let mut query_layers: Vec<(Occur, Box<dyn Query>)> = Vec::new();
// ... all your query building layers remain exactly the same ...
// ===============================
// LAYER 1: PREFIX MATCHING (HIGHEST PRIORITY, Boost: 4.0)
// ===============================
{
// Every word must prefix-match either the edge-ngram or the full-prefix field.
let mut must_clauses: Vec<(Occur, Box<dyn Query>)> = Vec::new();
for word in &words {
let edge_term =
Term::from_field_text(prefix_edge_field, word);
let full_term =
Term::from_field_text(prefix_full_field, word);
let per_word_query = BooleanQuery::new(vec![
(
Occur::Should,
Box::new(TermQuery::new(
edge_term,
IndexRecordOption::Basic,
)),
),
(
Occur::Should,
Box::new(TermQuery::new(
full_term,
IndexRecordOption::Basic,
)),
),
]);
must_clauses.push((Occur::Must, Box::new(per_word_query) as Box<dyn Query>));
}
if !must_clauses.is_empty() {
let prefix_query = BooleanQuery::new(must_clauses);
let boosted_query =
BoostQuery::new(Box::new(prefix_query), 4.0);
query_layers.push((Occur::Should, Box::new(boosted_query)));
}
}
// ===============================
// LAYER 2: FUZZY MATCHING (HIGH PRIORITY, Boost: 3.0)
// ===============================
{
// Only the last word is fuzzed (edit distance 2, transpositions allowed) —
// presumably it is the word still being typed; earlier words are complete.
let last_word = words.last().unwrap();
let fuzzy_term =
Term::from_field_text(prefix_full_field, last_word);
let fuzzy_query = FuzzyTermQuery::new(fuzzy_term, 2, true);
let boosted_query = BoostQuery::new(Box::new(fuzzy_query), 3.0);
query_layers.push((Occur::Should, Box::new(boosted_query)));
}
// ===============================
// LAYER 3: PHRASE MATCHING WITH SLOP (MEDIUM PRIORITY, Boost: 2.0)
// ===============================
if words.len() > 1 {
let slop_parser =
QueryParser::for_index(&index, vec![prefix_full_field]);
// "..."~3 allows up to 3 positions of slack between the phrase words.
let slop_query_str = format!("\"{}\"~3", normalized_query);
if let Ok(slop_query) = slop_parser.parse_query(&slop_query_str) {
let boosted_query = BoostQuery::new(slop_query, 2.0);
query_layers.push((Occur::Should, Box::new(boosted_query)));
}
}
// ===============================
// LAYER 4: NGRAM SUBSTRING MATCHING (LOWEST PRIORITY, Boost: 1.0)
// ===============================
{
let ngram_parser =
QueryParser::for_index(&index, vec![text_ngram_field]);
if let Ok(ngram_query) =
ngram_parser.parse_query(&normalized_query)
{
let boosted_query = BoostQuery::new(ngram_query, 1.0);
query_layers.push((Occur::Should, Box::new(boosted_query)));
}
}
let master_query = BooleanQuery::new(query_layers);
// --- End of Query Building Logic ---
// Hard cap of 100 candidates from the full-text index.
let top_docs = searcher
.search(&master_query, &TopDocs::with_limit(100))
.map_err(|e| Status::internal(format!("Search failed: {}", e)))?;
if top_docs.is_empty() {
return Ok(Response::new(SearchResponse { hits: vec![] }));
}
// --- NEW LOGIC: Fetch from DB and combine results ---
// Step 1: Extract (score, pg_id) from Tantivy results.
let mut scored_ids: Vec<(f32, u64)> = Vec::new();
for (score, doc_address) in top_docs {
let doc: TantivyDocument = searcher.doc(doc_address).map_err(|e| {
Status::internal(format!("Failed to retrieve document: {}", e))
})?;
// Docs missing pg_id are silently dropped rather than failing the search.
if let Some(pg_id_value) = doc.get_first(pg_id_field) {
if let Some(pg_id) = pg_id_value.as_u64() {
scored_ids.push((score, pg_id));
}
}
}
// Step 2: Fetch all corresponding rows from Postgres in a single query.
let pg_ids: Vec<i64> =
scored_ids.iter().map(|(_, id)| *id as i64).collect();
let qualified_table = format!("gen.\"{}\"", table_name);
// NOTE(review): this shadows the earlier `query_str` (the user's search text)
// with the SQL string — legal Rust, but easy to misread.
let query_str = format!(
"SELECT id, to_jsonb(t) AS data FROM {} t WHERE id = ANY($1)",
qualified_table
);
let rows = sqlx::query(&query_str)
.bind(&pg_ids)
.fetch_all(&self.pool)
.await
.map_err(|e| {
Status::internal(format!("Database query failed: {}", e))
})?;
// Step 3: Map the database results by ID for quick lookup.
let mut content_map: HashMap<i64, String> = HashMap::new();
for row in rows {
let id: i64 = row.try_get("id").unwrap_or(0);
let json_data: serde_json::Value =
row.try_get("data").unwrap_or(serde_json::Value::Null);
content_map.insert(id, json_data.to_string());
}
// Step 4: Build the final response, combining Tantivy scores with PG content.
// Iterating scored_ids (not content_map) preserves Tantivy's relevance order;
// ids deleted from Postgres since indexing are filtered out here.
let hits: Vec<Hit> = scored_ids
.into_iter()
.filter_map(|(score, pg_id)| {
content_map
.get(&(pg_id as i64))
.map(|content_json| Hit {
id: pg_id as i64,
score,
content_json: content_json.clone(),
})
})
.collect();
info!("--- SERVER: Successfully processed search. Returning {} hits. ---", hits.len());
let response = SearchResponse { hits };
Ok(Response::new(response))
// NOTE(review): unreachable — diff artifact of the new delegating implementation.
self.run_rpc(request).await
}
}

234
search/src/query_builder.rs Normal file
View File

@@ -0,0 +1,234 @@
use common::search::{
json_path_term, normalize_exact, tokenize_ngram, tokenize_word, SchemaFields,
};
use tantivy::query::{
BooleanQuery, BoostQuery, EmptyQuery, FuzzyTermQuery, Occur, PhraseQuery, Query, QueryParser,
TermQuery,
};
use tantivy::schema::{IndexRecordOption, Term};
use tantivy::Index;
use tonic::Status;
/// How a single per-column search constraint is matched against the index.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ConstraintMode {
/// Layered tolerant matching: exact word, prefix, edit-distance fuzzy, and n-gram layers.
Fuzzy,
/// The column value must equal the query after exact normalization.
Exact,
}
/// A mandatory, column-scoped predicate supplied by the caller of the search RPC.
#[derive(Clone, Debug)]
pub struct SearchConstraint {
// JSON column (path) inside the indexed document the constraint applies to.
pub column: String,
// Raw user query text; tokenized/normalized according to `mode` at build time.
pub query: String,
// Whether this constraint matches exactly or fuzzily.
pub mode: ConstraintMode,
}
/// Assemble the top-level tantivy query for one search request.
///
/// Combines three ingredients into a single boolean query:
/// * `must` — column-scoped constraints; every one is mandatory (`Occur::Must`),
///   built as exact or fuzzy predicates according to its mode.
/// * `free_query` — unscoped free text; contributes an optional (`Occur::Should`)
///   relevance layer when it tokenizes to at least one word.
/// * `table_filter` — optional restriction to a single logical table.
///
/// A table filter alone is not considered a search: when there is neither a
/// constraint nor free text, `EmptyQuery` is returned (matches nothing).
///
/// # Errors
/// Propagates `Status::invalid_argument` from predicate construction when a
/// constraint's query normalizes/tokenizes to nothing.
pub fn build_master_query(
    index: &Index,
    fields: &SchemaFields,
    free_query: &str,
    must: &[SearchConstraint],
    table_filter: Option<&str>,
) -> Result<Box<dyn Query>, Status> {
    let mut subqueries: Vec<(Occur, Box<dyn Query>)> = Vec::new();

    // Mandatory, column-scoped predicates first.
    for constraint in must {
        let built = match constraint.mode {
            ConstraintMode::Exact => {
                exact_predicate(fields, &constraint.column, &constraint.query)?
            }
            ConstraintMode::Fuzzy => {
                fuzzy_predicate_scoped(fields, &constraint.column, &constraint.query)?
            }
        };
        subqueries.push((Occur::Must, built));
    }

    // Optional free-text relevance layer across all columns.
    let free_words = tokenize_word(free_query);
    let has_free_text = !free_words.is_empty();
    if has_free_text {
        subqueries.push((
            Occur::Should,
            fuzzy_predicate_unscoped(index, fields, &free_words)?,
        ));
    }

    // Optional hard filter on the document's table name.
    if let Some(table_name) = table_filter {
        let filter = TermQuery::new(
            Term::from_field_text(fields.table_name, table_name),
            IndexRecordOption::Basic,
        );
        subqueries.push((Occur::Must, Box::new(filter)));
    }

    // No constraints and no free text means nothing to search for.
    if must.is_empty() && !has_free_text {
        return Ok(Box::new(EmptyQuery));
    }
    Ok(Box::new(BooleanQuery::new(subqueries)))
}
/// Build a strict equality predicate on the exact-normalized JSON field for `column`.
///
/// # Errors
/// `Status::invalid_argument` when the query normalizes to an empty string.
fn exact_predicate(
    fields: &SchemaFields,
    column: &str,
    query: &str,
) -> Result<Box<dyn Query>, Status> {
    let normalized = normalize_exact(query);
    if normalized.is_empty() {
        Err(Status::invalid_argument(
            "exact query is empty after normalization",
        ))
    } else {
        // A plain term query: the whole normalized value must match at the JSON path.
        let term = json_path_term(fields.data_exact, column, &normalized);
        Ok(Box::new(TermQuery::new(term, IndexRecordOption::Basic)))
    }
}
/// Build the layered fuzzy predicate restricted to a single JSON column.
///
/// Three relevance layers are OR-ed together (each `Occur::Should`):
/// 1. every word must match its term exactly (boost 4.0), by prefix (3.0),
///    or — for words long enough — within an edit distance (2.0);
/// 2. for multi-word queries, the words as a phrase with slop 3 (boost 2.0);
/// 3. all n-grams of the raw query must be present (boost 1.0).
///
/// # Errors
/// `Status::invalid_argument` when the query tokenizes to no words.
fn fuzzy_predicate_scoped(
    fields: &SchemaFields,
    column: &str,
    query: &str,
) -> Result<Box<dyn Query>, Status> {
    let words = tokenize_word(query);
    if words.is_empty() {
        return Err(Status::invalid_argument(
            "fuzzy query has no searchable tokens",
        ));
    }

    // Layer 1: per-word alternatives, all words required.
    let word_clauses: Vec<(Occur, Box<dyn Query>)> = words
        .iter()
        .map(|word| {
            let term = json_path_term(fields.data_word, column, word);
            let mut options: Vec<(Occur, Box<dyn Query>)> = vec![
                (
                    Occur::Should,
                    Box::new(BoostQuery::new(
                        Box::new(TermQuery::new(term.clone(), IndexRecordOption::WithFreqs)),
                        4.0,
                    )) as Box<dyn Query>,
                ),
                (
                    Occur::Should,
                    Box::new(BoostQuery::new(
                        Box::new(FuzzyTermQuery::new_prefix(term.clone(), 0, false)),
                        3.0,
                    )),
                ),
            ];
            // Edit-distance matching only for words long enough to tolerate typos.
            if let Some(distance) = fuzzy_distance(word.chars().count()) {
                options.push((
                    Occur::Should,
                    Box::new(BoostQuery::new(
                        Box::new(FuzzyTermQuery::new(term, distance, true)),
                        2.0,
                    )),
                ));
            }
            (
                Occur::Must,
                Box::new(BooleanQuery::new(options)) as Box<dyn Query>,
            )
        })
        .collect();

    let mut layers: Vec<(Occur, Box<dyn Query>)> = vec![(
        Occur::Should,
        Box::new(BooleanQuery::new(word_clauses)) as Box<dyn Query>,
    )];

    // Layer 2: phrase proximity for multi-word queries.
    if words.len() > 1 {
        let positioned_terms: Vec<(usize, Term)> = words
            .iter()
            .enumerate()
            .map(|(position, word)| (position, json_path_term(fields.data_word, column, word)))
            .collect();
        layers.push((
            Occur::Should,
            Box::new(BoostQuery::new(
                Box::new(PhraseQuery::new_with_offset_and_slop(positioned_terms, 3)),
                2.0,
            )),
        ));
    }

    // Layer 3: substring matching — every n-gram of the query must be present.
    let grams = tokenize_ngram(query);
    if !grams.is_empty() {
        let gram_clauses: Vec<(Occur, Box<dyn Query>)> = grams
            .into_iter()
            .map(|gram| {
                let term = json_path_term(fields.data_ngram, column, &gram);
                (
                    Occur::Must,
                    Box::new(TermQuery::new(term, IndexRecordOption::Basic)) as Box<dyn Query>,
                )
            })
            .collect();
        layers.push((
            Occur::Should,
            Box::new(BoostQuery::new(
                Box::new(BooleanQuery::new(gram_clauses)),
                1.0,
            )),
        ));
    }

    Ok(Box::new(BooleanQuery::new(layers)))
}
/// Build the free-text predicate that is not scoped to any particular column.
///
/// Four parser-built layers are OR-ed together, each behind a boost:
/// prefix match on every word (4.0), edit-distance fuzzy per word (2.0),
/// phrase-with-slop for multi-word input (2.0), and n-gram substrings (1.0).
/// A layer whose query string fails to parse is silently skipped; if every
/// layer is skipped the result is `EmptyQuery`.
fn fuzzy_predicate_unscoped(
    index: &Index,
    fields: &SchemaFields,
    words: &[String],
) -> Result<Box<dyn Query>, Status> {
    let mut layers: Vec<(Occur, Box<dyn Query>)> = Vec::new();
    // One parser serves all word-field layers; construction is side-effect free.
    let word_parser = QueryParser::for_index(index, vec![fields.data_word]);

    // Prefix layer: every word required as a prefix.
    let prefix_expr = words
        .iter()
        .map(|word| format!("+{}*", word))
        .collect::<Vec<_>>()
        .join(" ");
    if let Ok(parsed) = word_parser.parse_query(&prefix_expr) {
        layers.push((Occur::Should, Box::new(BoostQuery::new(parsed, 4.0))));
    }

    // Fuzzy layer: each word required, with a length-dependent edit distance.
    let fuzzy_expr = words
        .iter()
        .map(|word| match fuzzy_distance(word.chars().count()) {
            Some(distance) => format!("+{}~{}", word, distance),
            None => format!("+{}", word),
        })
        .collect::<Vec<_>>()
        .join(" ");
    if let Ok(parsed) = word_parser.parse_query(&fuzzy_expr) {
        layers.push((Occur::Should, Box::new(BoostQuery::new(parsed, 2.0))));
    }

    // Phrase layer: whole query as a phrase with slop 3, multi-word input only.
    if words.len() > 1 {
        let phrase_expr = format!("\"{}\"~3", words.join(" "));
        if let Ok(parsed) = word_parser.parse_query(&phrase_expr) {
            layers.push((Occur::Should, Box::new(BoostQuery::new(parsed, 2.0))));
        }
    }

    // N-gram layer: substring matching against the n-gram field.
    let ngram_parser = QueryParser::for_index(index, vec![fields.data_ngram]);
    let ngram_expr = words
        .iter()
        .map(|word| format!("+{}", word))
        .collect::<Vec<_>>()
        .join(" ");
    if let Ok(parsed) = ngram_parser.parse_query(&ngram_expr) {
        layers.push((Occur::Should, Box::new(BoostQuery::new(parsed, 1.0))));
    }

    if layers.is_empty() {
        Ok(Box::new(EmptyQuery))
    } else {
        Ok(Box::new(BooleanQuery::new(layers)))
    }
}
/// Maximum Levenshtein edit distance allowed for a word of `word_len` characters:
/// short words (0–3 chars) get no fuzziness, medium words (4–6) one edit,
/// longer words two edits.
fn fuzzy_distance(word_len: usize) -> Option<u8> {
    if word_len <= 3 {
        None
    } else if word_len <= 6 {
        Some(1)
    } else {
        Some(2)
    }
}

2
server

Submodule server updated: e497676789...403785118a