From 462b1f14e23eb053c337c1ec5fdb8bb4b65a9a50 Mon Sep 17 00:00:00 2001 From: filipriec Date: Tue, 27 May 2025 22:21:40 +0200 Subject: [PATCH] generated tables are now in gen schema, breaking change, needs crucial fixes NOW --- .../handlers/post_table_definition.rs | 90 +++++++++---------- .../handlers/table_structure.rs | 2 +- 2 files changed, 43 insertions(+), 49 deletions(-) diff --git a/server/src/table_definition/handlers/post_table_definition.rs b/server/src/table_definition/handlers/post_table_definition.rs index 4c7c532..75387a4 100644 --- a/server/src/table_definition/handlers/post_table_definition.rs +++ b/server/src/table_definition/handlers/post_table_definition.rs @@ -1,10 +1,11 @@ -// src/table_definition/handlers/post_table_definition.rs use tonic::Status; use sqlx::{PgPool, Transaction, Postgres}; use serde_json::json; use time::OffsetDateTime; use common::proto::multieko2::table_definition::{PostTableDefinitionRequest, TableDefinitionResponse}; +const GENERATED_SCHEMA_NAME: &str = "gen"; + const PREDEFINED_FIELD_TYPES: &[(&str, &str)] = &[ ("text", "TEXT"), ("psc", "TEXT"), @@ -27,7 +28,6 @@ fn sanitize_table_name(s: &str) -> String { let cleaned = s.replace(|c: char| !c.is_ascii_alphanumeric() && c != '_', "") .trim() .to_lowercase(); - format!("{}_{}", year, cleaned) } @@ -47,31 +47,30 @@ fn map_field_type(field_type: &str) -> Result<&str, Status> { pub async fn post_table_definition( db_pool: &PgPool, - request: PostTableDefinitionRequest, // Removed `mut` since it's not needed here + request: PostTableDefinitionRequest, ) -> Result<TableDefinitionResponse, Status> { - // Validate and sanitize table name - let table_name = sanitize_table_name(&request.table_name); - if !is_valid_identifier(&request.table_name) { + let base_name = sanitize_table_name(&request.table_name); + let user_part_cleaned = request.table_name + .replace(|c: char| !c.is_ascii_alphanumeric() && c != '_', "") + .trim_matches('_') + .to_lowercase(); + + if !user_part_cleaned.is_empty() && 
!is_valid_identifier(&user_part_cleaned) { return Err(Status::invalid_argument("Invalid table name")); + } else if user_part_cleaned.is_empty() { + return Err(Status::invalid_argument("Table name cannot be empty")); } - // Start a transaction to ensure atomicity let mut tx = db_pool.begin().await .map_err(|e| Status::internal(format!("Failed to start transaction: {}", e)))?; - // Execute all database operations within the transaction - let result = execute_table_definition(&mut tx, request, table_name).await; - - // Commit or rollback based on the result - match result { + match execute_table_definition(&mut tx, request, base_name).await { Ok(response) => { - // Commit the transaction tx.commit().await .map_err(|e| Status::internal(format!("Failed to commit transaction: {}", e)))?; Ok(response) }, Err(e) => { - // Explicitly roll back the transaction (optional but good for clarity) let _ = tx.rollback().await; Err(e) } @@ -83,7 +82,6 @@ async fn execute_table_definition( mut request: PostTableDefinitionRequest, table_name: String, ) -> Result<TableDefinitionResponse, Status> { - // Lookup or create profile let profile = sqlx::query!( "INSERT INTO profiles (name) VALUES ($1) ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name @@ -94,7 +92,6 @@ async fn execute_table_definition( .await .map_err(|e| Status::internal(format!("Profile error: {}", e)))?; - // Process table links let mut links = Vec::new(); for link in request.links.drain(..) { let linked_table = sqlx::query!( @@ -114,7 +111,6 @@ async fn execute_table_definition( links.push((linked_id, link.required)); } - // Process columns let mut columns = Vec::new(); for col_def in request.columns.drain(..) { let col_name = sanitize_identifier(&col_def.name); @@ -125,20 +121,20 @@ async fn execute_table_definition( columns.push(format!("\"{}\" {}", col_name, sql_type)); } - // Process indexes let mut indexes = Vec::new(); for idx in request.indexes.drain(..) 
{ let idx_name = sanitize_identifier(&idx); if !is_valid_identifier(&idx) { return Err(Status::invalid_argument(format!("Invalid index name: {}", idx))); } + if !columns.iter().any(|c| c.starts_with(&format!("\"{}\"", idx_name))) { + return Err(Status::invalid_argument(format!("Index column {} not found", idx_name))); + } indexes.push(idx_name); } - // Generate SQL with multiple links let (create_sql, index_sql) = generate_table_sql(tx, &table_name, &columns, &indexes, &links).await?; - // Store main table definition let table_def = sqlx::query!( r#"INSERT INTO table_definitions (profile_id, table_name, columns, indexes) @@ -146,8 +142,8 @@ async fn execute_table_definition( RETURNING id"#, profile.id, &table_name, - json!(columns), - json!(indexes) + json!(request.columns.iter().map(|c| c.name.clone()).collect::<Vec<String>>()), + json!(request.indexes.iter().map(|i| i.clone()).collect::<Vec<String>>()) ) .fetch_one(&mut **tx) .await @@ -160,7 +156,6 @@ async fn execute_table_definition( Status::internal(format!("Database error: {}", e)) })?; - // Store relationships for (linked_id, is_required) in links { sqlx::query!( "INSERT INTO table_definition_links @@ -175,7 +170,6 @@ async fn execute_table_definition( .map_err(|e| Status::internal(format!("Failed to save link: {}", e)))?; } - // Execute generated SQL within the transaction sqlx::query(&create_sql) .execute(&mut **tx) .await @@ -201,60 +195,60 @@ async fn generate_table_sql( indexes: &[String], links: &[(i64, bool)], ) -> Result<(String, Vec<String>), Status> { + let qualified_table = format!("{}.\"{}\"", GENERATED_SCHEMA_NAME, table_name); + let mut system_columns = vec![ "id BIGSERIAL PRIMARY KEY".to_string(), "deleted BOOLEAN NOT NULL DEFAULT FALSE".to_string(), ]; - // Add foreign key columns - let mut link_info = Vec::new(); for (linked_id, required) in links { let linked_table = get_table_name_by_id(tx, *linked_id).await?; - - // Extract base name after year prefix + let qualified_linked_table = format!("{}.\"{}\"", 
GENERATED_SCHEMA_NAME, linked_table); let base_name = linked_table.split_once('_') .map(|(_, rest)| rest) .unwrap_or(&linked_table) .to_string(); let null_clause = if *required { "NOT NULL" } else { "" }; + system_columns.push( - format!("\"{0}_id\" BIGINT {1} REFERENCES \"{2}\"(id)", - base_name, null_clause, linked_table + format!("\"{0}_id\" BIGINT {1} REFERENCES {2}(id)", + base_name, null_clause, qualified_linked_table ) ); - link_info.push((base_name, linked_table)); } - // Combine all columns let all_columns = system_columns .iter() .chain(columns.iter()) .cloned() .collect::<Vec<_>>(); - // Build CREATE TABLE statement let create_sql = format!( - "CREATE TABLE \"{}\" (\n {},\n created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP\n)", - table_name, + "CREATE TABLE {} (\n {},\n created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP\n)", + qualified_table, all_columns.join(",\n ") ); - // Generate indexes - let mut system_indexes = Vec::new(); - for (base_name, _) in &link_info { - system_indexes.push(format!( - "CREATE INDEX idx_{}_{}_fk ON \"{}\" (\"{}_id\")", - table_name, base_name, table_name, base_name + let mut all_indexes = Vec::new(); + for (linked_id, _) in links { + let linked_table = get_table_name_by_id(tx, *linked_id).await?; + let base_name = linked_table.split_once('_') + .map(|(_, rest)| rest) + .unwrap_or(&linked_table) + .to_string(); + all_indexes.push(format!( + "CREATE INDEX \"idx_{}_{}_fk\" ON {} (\"{}_id\")", + table_name, base_name, qualified_table, base_name )); } - let all_indexes = system_indexes - .into_iter() - .chain(indexes.iter().map(|idx| { - format!("CREATE INDEX idx_{}_{} ON \"{}\" (\"{}\")", - table_name, idx, table_name, idx) - })) - .collect(); + for idx in indexes { + all_indexes.push(format!( + "CREATE INDEX \"idx_{}_{}\" ON {} (\"{}\")", + table_name, idx, qualified_table, idx + )); + } Ok((create_sql, all_indexes)) } diff --git a/server/src/table_structure/handlers/table_structure.rs 
b/server/src/table_structure/handlers/table_structure.rs index afe22f3..d74d851 100644 --- a/server/src/table_structure/handlers/table_structure.rs +++ b/server/src/table_structure/handlers/table_structure.rs @@ -2,7 +2,7 @@ use common::proto::multieko2::table_structure::{ GetTableStructureRequest, TableColumn, TableStructureResponse, }; -use sqlx::{PgPool, Row}; +use sqlx::PgPool; use tonic::Status; // Helper struct to map query results