diff --git a/common/proto/tables_data.proto b/common/proto/tables_data.proto
index 14511e3..ae314aa 100644
--- a/common/proto/tables_data.proto
+++ b/common/proto/tables_data.proto
@@ -8,6 +8,7 @@ service TablesData {
   rpc PostTableData (PostTableDataRequest) returns (PostTableDataResponse);
   rpc PutTableData (PutTableDataRequest) returns (PutTableDataResponse);
   rpc DeleteTableData (DeleteTableDataRequest) returns (DeleteTableDataResponse);
+  rpc GetTableData(GetTableDataRequest) returns (GetTableDataResponse);
 }
 
 message PostTableDataRequest {
@@ -44,3 +45,13 @@ message DeleteTableDataRequest {
 message DeleteTableDataResponse {
   bool success = 1;
 }
+
+message GetTableDataRequest {
+  string profile_name = 1;
+  string table_name = 2;
+  int64 id = 3;
+}
+
+message GetTableDataResponse {
+  map<string, string> data = 1;
+}
diff --git a/common/src/proto/descriptor.bin b/common/src/proto/descriptor.bin
index d47b5b0..2bcaa00 100644
Binary files a/common/src/proto/descriptor.bin and b/common/src/proto/descriptor.bin differ
diff --git a/common/src/proto/multieko2.tables_data.rs b/common/src/proto/multieko2.tables_data.rs
index 3daf748..8dc41ac 100644
--- a/common/src/proto/multieko2.tables_data.rs
+++ b/common/src/proto/multieko2.tables_data.rs
@@ -57,6 +57,23 @@ pub struct DeleteTableDataResponse {
     #[prost(bool, tag = "1")]
     pub success: bool,
 }
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetTableDataRequest {
+    #[prost(string, tag = "1")]
+    pub profile_name: ::prost::alloc::string::String,
+    #[prost(string, tag = "2")]
+    pub table_name: ::prost::alloc::string::String,
+    #[prost(int64, tag = "3")]
+    pub id: i64,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetTableDataResponse {
+    #[prost(map = "string, string", tag = "1")]
+    pub data: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        ::prost::alloc::string::String,
+    >,
+}
 /// Generated client implementations.
 pub mod tables_data_client {
     #![allow(
@@ -229,6 +246,32 @@ pub mod tables_data_client {
             );
             self.inner.unary(req, path, codec).await
         }
+        pub async fn get_table_data(
+            &mut self,
+            request: impl tonic::IntoRequest<super::GetTableDataRequest>,
+        ) -> std::result::Result<
+            tonic::Response<super::GetTableDataResponse>,
+            tonic::Status,
+        > {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::unknown(
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/multieko2.tables_data.TablesData/GetTableData",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(
+                    GrpcMethod::new("multieko2.tables_data.TablesData", "GetTableData"),
+                );
+            self.inner.unary(req, path, codec).await
+        }
     }
 }
 /// Generated server implementations.
@@ -265,6 +308,13 @@ pub mod tables_data_server {
             tonic::Response<super::DeleteTableDataResponse>,
             tonic::Status,
         >;
+        async fn get_table_data(
+            &self,
+            request: tonic::Request<super::GetTableDataRequest>,
+        ) -> std::result::Result<
+            tonic::Response<super::GetTableDataResponse>,
+            tonic::Status,
+        >;
     }
     #[derive(Debug)]
     pub struct TablesDataServer<T> {
@@ -477,6 +527,51 @@ pub mod tables_data_server {
                     };
                     Box::pin(fut)
                 }
+                "/multieko2.tables_data.TablesData/GetTableData" => {
+                    #[allow(non_camel_case_types)]
+                    struct GetTableDataSvc<T: TablesData>(pub Arc<T>);
+                    impl<
+                        T: TablesData,
+                    > tonic::server::UnaryService<super::GetTableDataRequest>
+                    for GetTableDataSvc<T> {
+                        type Response = super::GetTableDataResponse;
+                        type Future = BoxFuture<
+                            tonic::Response<Self::Response>,
+                            tonic::Status,
+                        >;
+                        fn call(
+                            &mut self,
+                            request: tonic::Request<super::GetTableDataRequest>,
+                        ) -> Self::Future {
+                            let inner = Arc::clone(&self.0);
+                            let fut = async move {
+                                <T as TablesData>::get_table_data(&inner, request).await
+                            };
+                            Box::pin(fut)
+                        }
+                    }
+                    let accept_compression_encodings = self.accept_compression_encodings;
+                    let send_compression_encodings = self.send_compression_encodings;
+                    let max_decoding_message_size = self.max_decoding_message_size;
+                    let max_encoding_message_size = self.max_encoding_message_size;
+                    let inner = self.inner.clone();
+                    let fut = async move {
+                        let method = GetTableDataSvc(inner);
+                        let codec = tonic::codec::ProstCodec::default();
+                        let mut grpc = tonic::server::Grpc::new(codec)
+                            .apply_compression_config(
+                                accept_compression_encodings,
+                                send_compression_encodings,
+                            )
+                            .apply_max_message_size_config(
+                                max_decoding_message_size,
+                                max_encoding_message_size,
+                            );
+                        let res = grpc.unary(method, req).await;
+                        Ok(res)
+                    };
+                    Box::pin(fut)
+                }
                 _ => {
                     Box::pin(async move {
                         let mut response = http::Response::new(empty_body());
diff --git a/server/src/server/services/tables_data_service.rs b/server/src/server/services/tables_data_service.rs
index 7cb0e34..d631867 100644
--- a/server/src/server/services/tables_data_service.rs
+++ b/server/src/server/services/tables_data_service.rs
@@ -5,8 +5,9 @@ use common::proto::multieko2::tables_data::{
     PostTableDataRequest, PostTableDataResponse,
     PutTableDataRequest, PutTableDataResponse,
     DeleteTableDataRequest, DeleteTableDataResponse,
+    GetTableDataRequest, GetTableDataResponse,
 };
-use crate::tables_data::handlers::{post_table_data, put_table_data, delete_table_data,};
+use crate::tables_data::handlers::{post_table_data, put_table_data, delete_table_data, get_table_data,};
 use sqlx::PgPool;
 #[derive(Debug)]
 pub struct TablesDataService {
@@ -43,4 +44,13 @@ impl TablesData for TablesDataService {
         let response = delete_table_data(&self.db_pool, request).await?;
         Ok(Response::new(response))
     }
+
+    async fn get_table_data(
+        &self,
+        request: Request<GetTableDataRequest>,
+    ) -> Result<Response<GetTableDataResponse>, Status> {
+        let request = request.into_inner();
+        let response = get_table_data(&self.db_pool, request).await?;
+        Ok(Response::new(response))
+    }
 }
diff --git a/server/src/tables_data/docs/get_data.txt b/server/src/tables_data/docs/get_data.txt
new file mode 100644
index 0000000..9644d84
--- /dev/null
+++ b/server/src/tables_data/docs/get_data.txt
@@ -0,0 +1,33 @@
+Valid get request:
+
+grpcurl -plaintext -d '{"profile_name": "default", "table_name": "2025_adresar", "id": 2}' localhost:50051 multieko2.tables_data.TablesData/GetTableData
+
+{
+  "data": {
+    "banka": "New Banka",
+    "deleted": "false",
+    "drc": "New DRC",
+    "fax": "New Fax",
+    "firma": "New Firma",
+    "ico": "New ICO",
+    "id": "2",
+    "kontakt": "New Kontakt",
+    "kz": "New KZ",
+    "mesto": "New Mesto",
+    "psc": "New PSC",
+    "skladm": "New Skladm",
+    "skladu": "New Skladu",
+    "stat": "New Stat",
+    "telefon": "New Telefon",
+    "ucet": "New Ucet",
+    "ulica": "New Ulica"
+  }
+}
+
+Request for a deleted record:
+
+grpcurl -plaintext -d '{"profile_name": "default", "table_name": "2025_adresar", "id": 1}' localhost:50051 multieko2.tables_data.TablesData/GetTableData
+
+ERROR:
+  Code: NotFound
+  Message: Record not found
diff --git a/server/src/tables_data/handlers.rs b/server/src/tables_data/handlers.rs
index eea735a..c3d6385 100644
--- a/server/src/tables_data/handlers.rs
+++ b/server/src/tables_data/handlers.rs
@@ -2,7 +2,9 @@
 pub mod post_table_data;
 pub mod put_table_data;
 pub mod delete_table_data;
+pub mod get_table_data;
 
 pub use post_table_data::post_table_data;
 pub use put_table_data::put_table_data;
 pub use delete_table_data::delete_table_data;
+pub use get_table_data::get_table_data;
diff --git a/server/src/tables_data/handlers/get_table_data.rs b/server/src/tables_data/handlers/get_table_data.rs
new file mode 100644
index 0000000..a180895
--- /dev/null
+++ b/server/src/tables_data/handlers/get_table_data.rs
@@ -0,0 +1,97 @@
+// src/tables_data/handlers/get_table_data.rs
+use tonic::Status;
+use sqlx::{PgPool, Row};
+use std::collections::HashMap;
+use common::proto::multieko2::tables_data::{GetTableDataRequest, GetTableDataResponse};
+
+pub async fn get_table_data(
+    db_pool: &PgPool,
+    request: GetTableDataRequest,
+) -> Result<GetTableDataResponse, Status> {
+    let profile_name = request.profile_name;
+    let table_name = request.table_name;
+    let record_id = request.id;
+
+    // Lookup profile
+    let profile = sqlx::query!(
+        "SELECT id FROM profiles WHERE name = $1",
+        profile_name
+    )
+    .fetch_optional(db_pool)
+    .await
+    .map_err(|e| Status::internal(format!("Profile lookup error: {}", e)))?;
+
+    let profile_id = profile.ok_or_else(|| Status::not_found("Profile not found"))?.id;
+
+    // Lookup table_definition
+    let table_def = sqlx::query!(
+        r#"SELECT id, columns FROM table_definitions
+           WHERE profile_id = $1 AND table_name = $2"#,
+        profile_id,
+        table_name
+    )
+    .fetch_optional(db_pool)
+    .await
+    .map_err(|e| Status::internal(format!("Table lookup error: {}", e)))?;
+
+    let table_def = table_def.ok_or_else(|| Status::not_found("Table not found"))?;
+
+    // Parse user-defined columns from JSON
+    let columns_json: Vec<String> = serde_json::from_value(table_def.columns.clone())
+        .map_err(|e| Status::internal(format!("Column parsing error: {}", e)))?;
+
+    let mut user_columns = Vec::new();
+    for col_def in columns_json {
+        let parts: Vec<&str> = col_def.splitn(2, ' ').collect();
+        if parts.len() != 2 {
+            return Err(Status::internal("Invalid column format"));
+        }
+        let name = parts[0].trim_matches('"').to_string();
+        let sql_type = parts[1].to_string();
+        user_columns.push((name, sql_type));
+    }
+
+    // Prepare all columns (system + user-defined)
+    let system_columns = vec![
+        ("id".to_string(), "BIGINT".to_string()),
+        ("deleted".to_string(), "BOOLEAN".to_string()),
+        ("firma".to_string(), "TEXT".to_string()),
+    ];
+    let all_columns: Vec<(String, String)> = system_columns
+        .into_iter()
+        .chain(user_columns.into_iter())
+        .collect();
+
+    // Build SELECT clause with COALESCE and type casting
+    let columns_clause = all_columns
+        .iter()
+        .map(|(name, _)| format!("COALESCE(\"{0}\"::TEXT, '') AS \"{0}\"", name))
+        .collect::<Vec<_>>()
+        .join(", ");
+
+    let sql = format!(
+        "SELECT {} FROM \"{}\" WHERE id = $1 AND deleted = false",
+        columns_clause, table_name
+    );
+
+    // Execute query
+    let row = sqlx::query(&sql)
+        .bind(record_id)
+        .fetch_one(db_pool)
+        .await
+        .map_err(|e| match e {
+            sqlx::Error::RowNotFound => Status::not_found("Record not found"),
+            _ => Status::internal(format!("Database error: {}", e)),
+        })?;
+
+    // Build response data
+    let mut data = HashMap::new();
+    for (column_name, _) in &all_columns {
+        let value: String = row
+            .try_get(column_name.as_str())
+            .map_err(|e| Status::internal(format!("Failed to get column {}: {}", column_name, e)))?;
+        data.insert(column_name.clone(), value);
+    }
+
+    Ok(GetTableDataResponse { data })
+}
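
Usage note (not part of the diff above): a minimal Rust client sketch for exercising the new GetTableData RPC, mirroring the grpcurl calls in docs/get_data.txt. It assumes the generated bindings are exposed as common::proto::multieko2.tables_data (as the server-side imports suggest), that tonic's transport feature is enabled so the generated connect() constructor exists, and that the binary runs under a tokio runtime; the file name is hypothetical.

// examples/get_table_data_client.rs (hypothetical example, not part of this change)
use common::proto::multieko2::tables_data::{
    tables_data_client::TablesDataClient, GetTableDataRequest,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Same endpoint and arguments as the first grpcurl example in docs/get_data.txt.
    let mut client = TablesDataClient::connect("http://localhost:50051").await?;

    let request = GetTableDataRequest {
        profile_name: "default".into(),
        table_name: "2025_adresar".into(),
        id: 2,
    };

    // GetTableDataResponse.data maps each selected column name to its value;
    // the handler casts every column to TEXT, so all values arrive as strings.
    let response = client.get_table_data(request).await?.into_inner();
    for (column, value) in &response.data {
        println!("{column} = {value}");
    }
    Ok(())
}

A deleted or missing row surfaces on the client as a tonic::Status with code NotFound, matching the second grpcurl example above.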