Post to the user-defined table (broken push)

This commit is contained in:
filipriec
2025-03-03 16:14:31 +01:00
parent 2ac988303e
commit d59e5b60cf
11 changed files with 533 additions and 8 deletions

View File

@@ -4,15 +4,17 @@ use tonic_reflection::server::Builder as ReflectionBuilder;
use common::proto::multieko2::FILE_DESCRIPTOR_SET;
use crate::server::services::{
AdresarService,
UctovnictvoService,
AdresarService,
UctovnictvoService,
TableStructureHandler,
TableDefinitionService
TableDefinitionService,
TablesDataService, // Add this
};
use common::proto::multieko2::adresar::adresar_server::AdresarServer;
use common::proto::multieko2::uctovnictvo::uctovnictvo_server::UctovnictvoServer;
use common::proto::multieko2::table_structure::table_structure_service_server::TableStructureServiceServer;
use common::proto::multieko2::table_definition::table_definition_server::TableDefinitionServer;
use common::proto::multieko2::tables_data::tables_data_server::TablesDataServer; // Add this
pub async fn run_server(db_pool: sqlx::PgPool) -> Result<(), Box<dyn std::error::Error>> {
let addr = "[::1]:50051".parse()?;
@@ -21,14 +23,16 @@ pub async fn run_server(db_pool: sqlx::PgPool) -> Result<(), Box<dyn std::error:
.register_encoded_file_descriptor_set(FILE_DESCRIPTOR_SET)
.build_v1()?;
// Initialize the TableDefinitionService
// Initialize services
let table_definition_service = TableDefinitionService { db_pool: db_pool.clone() };
let tables_data_service = TablesDataService { db_pool: db_pool.clone() }; // Add this
Server::builder()
.add_service(AdresarServer::new(AdresarService { db_pool: db_pool.clone() }))
.add_service(UctovnictvoServer::new(UctovnictvoService { db_pool: db_pool.clone() }))
.add_service(TableStructureServiceServer::new(TableStructureHandler { db_pool: db_pool.clone() }))
.add_service(TableDefinitionServer::new(table_definition_service)) // Add the service here
.add_service(TableDefinitionServer::new(table_definition_service))
.add_service(TablesDataServer::new(tables_data_service)) // Add this
.add_service(reflection_service)
.serve(addr)
.await?;

View File

@@ -4,8 +4,10 @@ pub mod adresar_service;
pub mod table_structure_service;
pub mod uctovnictvo_service;
pub mod table_definition_service;
pub mod tables_data_service;
pub use adresar_service::AdresarService;
pub use table_structure_service::TableStructureHandler;
pub use uctovnictvo_service::UctovnictvoService;
pub use table_definition_service::TableDefinitionService;
pub use tables_data_service::TablesDataService;

View File

@@ -0,0 +1,22 @@
// src/server/services/tables_data_service.rs
use tonic::{Request, Response, Status};
use common::proto::multieko2::tables_data::{TablesData, PostTableDataRequest, PostTableDataResponse};
use crate::tables_data::handlers::post_table_data;
use sqlx::PgPool;
#[derive(Debug)]
pub struct TablesDataService {
pub db_pool: PgPool,
}
#[tonic::async_trait]
impl TablesData for TablesDataService {
async fn post_table_data(
&self,
request: Request<PostTableDataRequest>,
) -> Result<Response<PostTableDataResponse>, Status> {
let request = request.into_inner();
let response = post_table_data(&self.db_pool, request).await?;
Ok(Response::new(response))
}
}

View File

@@ -1,4 +1,4 @@
// server/src/tables_data/handlers.rs
pub mod post_tables;
pub mod post_table_data;
pub use post_tables::post_tables;
pub use post_table_data::post_table_data;

View File

@@ -0,0 +1,151 @@
// src/tables_data/handlers/post_table_data.rs
use tonic::{Status, Response};
use sqlx::{PgPool, Postgres, Arguments, Row};
use sqlx::postgres::PgArguments;
use chrono::{DateTime, Utc};
use std::collections::HashMap;
use common::proto::multieko2::tables_data::{PostTableDataRequest, PostTableDataResponse};
pub async fn post_table_data(
db_pool: &PgPool,
request: PostTableDataRequest,
) -> Result<Response<PostTableDataResponse>, Status> {
let profile_name = request.profile_name;
let table_name = request.table_name;
let data = request.data;
// Lookup profile
let profile = sqlx::query!(
"SELECT id FROM profiles WHERE name = $1",
profile_name
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(format!("Profile lookup error: {}", e)))?;
let profile_id = profile.ok_or_else(|| Status::not_found("Profile not found"))?.id;
// Lookup table_definition
let table_def = sqlx::query!(
r#"SELECT id, columns, linked_table_id FROM table_definitions
WHERE profile_id = $1 AND table_name = $2"#,
profile_id,
table_name
)
.fetch_optional(db_pool)
.await
.map_err(|e| Status::internal(format!("Table lookup error: {}", e)))?;
let table_def = table_def.ok_or_else(|| Status::not_found("Table not found"))?;
// Parse columns from JSON
let columns_json: Vec<String> = serde_json::from_value(table_def.columns.clone())
.map_err(|e| Status::internal(format!("Column parsing error: {}", e)))?;
let mut columns = Vec::new();
for col_def in columns_json {
let parts: Vec<&str> = col_def.splitn(2, ' ').collect();
if parts.len() != 2 {
return Err(Status::internal("Invalid column format"));
}
let name = parts[0].trim_matches('"').to_string();
let sql_type = parts[1].to_string();
columns.push((name, sql_type));
}
// Check required system columns
let mut required_columns = vec!["firma".to_string()];
if let Some(linked_table_id) = table_def.linked_table_id {
let linked_table = sqlx::query!(
"SELECT table_name FROM table_definitions WHERE id = $1",
linked_table_id
)
.fetch_one(db_pool)
.await
.map_err(|e| Status::internal(format!("Linked table error: {}", e)))?;
let base_name = linked_table.table_name.splitn(2, '_').last().unwrap_or(&linked_table.table_name);
required_columns.push(format!("{}_id", base_name));
}
// Validate required columns
for col in &required_columns {
if !data.contains_key(col) {
return Err(Status::invalid_argument(format!("Missing required column: {}", col)));
}
}
// Validate all data columns
let system_columns = ["firma", "deleted"];
let user_columns: Vec<&String> = columns.iter().map(|(name, _)| name).collect();
for key in data.keys() {
if !system_columns.contains(&key.as_str()) && !user_columns.contains(&key) {
return Err(Status::invalid_argument(format!("Invalid column: {}", key)));
}
}
// Prepare SQL parameters
let mut params = PgArguments::default();
let mut columns_list = Vec::new();
let mut placeholders = Vec::new();
let mut param_idx = 1;
for (col, value) in data {
let sql_type = if system_columns.contains(&col.as_str()) {
match col.as_str() {
"firma" => "TEXT",
"deleted" => "BOOLEAN",
_ => return Err(Status::invalid_argument("Invalid system column")),
}
} else {
columns.iter()
.find(|(name, _)| name == &col)
.map(|(_, sql_type)| sql_type.as_str())
.ok_or_else(|| Status::invalid_argument(format!("Column not found: {}", col)))?
};
match sql_type {
"TEXT" | "VARCHAR(15)" | "VARCHAR(255)" => {
if let Some(max_len) = sql_type.strip_prefix("VARCHAR(").and_then(|s| s.strip_suffix(')')).and_then(|s| s.parse::<usize>().ok()) {
if value.len() > max_len {
return Err(Status::invalid_argument(format!("Value too long for {}", col)));
}
}
params.add(value);
},
"BOOLEAN" => {
let val = value.parse::<bool>()
.map_err(|_| Status::invalid_argument(format!("Invalid boolean for {}", col)))?;
params.add(val);
},
"TIMESTAMPTZ" => {
let dt = DateTime::parse_from_rfc3339(&value)
.map_err(|_| Status::invalid_argument(format!("Invalid timestamp for {}", col)))?;
params.add(dt.with_timezone(&Utc));
},
_ => return Err(Status::invalid_argument(format!("Unsupported type {}", sql_type))),
}
columns_list.push(format!("\"{}\"", col));
placeholders.push(format!("${}", param_idx));
param_idx += 1;
}
let sql = format!(
"INSERT INTO \"{}\" ({}) VALUES ({}) RETURNING id",
table_name,
columns_list.join(", "),
placeholders.join(", ")
);
let inserted_id: i64 = sqlx::query_scalar_with(&sql, params)
.fetch_one(db_pool)
.await
.map_err(|e| Status::internal(format!("Insert failed: {}", e)))?;
Ok(Response::new(PostTableDataResponse {
success: true,
message: "Data inserted successfully".into(),
inserted_id,
}))
}