Generated tables are now created in the `gen` schema — this is a breaking change and requires immediate follow-up fixes.

This commit is contained in:
filipriec
2025-05-27 22:21:40 +02:00
parent 7a8f18b116
commit 462b1f14e2
2 changed files with 43 additions and 49 deletions

View File

@@ -1,10 +1,11 @@
// src/table_definition/handlers/post_table_definition.rs
use tonic::Status; use tonic::Status;
use sqlx::{PgPool, Transaction, Postgres}; use sqlx::{PgPool, Transaction, Postgres};
use serde_json::json; use serde_json::json;
use time::OffsetDateTime; use time::OffsetDateTime;
use common::proto::multieko2::table_definition::{PostTableDefinitionRequest, TableDefinitionResponse}; use common::proto::multieko2::table_definition::{PostTableDefinitionRequest, TableDefinitionResponse};
const GENERATED_SCHEMA_NAME: &str = "gen";
const PREDEFINED_FIELD_TYPES: &[(&str, &str)] = &[ const PREDEFINED_FIELD_TYPES: &[(&str, &str)] = &[
("text", "TEXT"), ("text", "TEXT"),
("psc", "TEXT"), ("psc", "TEXT"),
@@ -27,7 +28,6 @@ fn sanitize_table_name(s: &str) -> String {
let cleaned = s.replace(|c: char| !c.is_ascii_alphanumeric() && c != '_', "") let cleaned = s.replace(|c: char| !c.is_ascii_alphanumeric() && c != '_', "")
.trim() .trim()
.to_lowercase(); .to_lowercase();
format!("{}_{}", year, cleaned) format!("{}_{}", year, cleaned)
} }
@@ -47,31 +47,30 @@ fn map_field_type(field_type: &str) -> Result<&str, Status> {
pub async fn post_table_definition( pub async fn post_table_definition(
db_pool: &PgPool, db_pool: &PgPool,
request: PostTableDefinitionRequest, // Removed `mut` since it's not needed here request: PostTableDefinitionRequest,
) -> Result<TableDefinitionResponse, Status> { ) -> Result<TableDefinitionResponse, Status> {
// Validate and sanitize table name let base_name = sanitize_table_name(&request.table_name);
let table_name = sanitize_table_name(&request.table_name); let user_part_cleaned = request.table_name
if !is_valid_identifier(&request.table_name) { .replace(|c: char| !c.is_ascii_alphanumeric() && c != '_', "")
.trim_matches('_')
.to_lowercase();
if !user_part_cleaned.is_empty() && !is_valid_identifier(&user_part_cleaned) {
return Err(Status::invalid_argument("Invalid table name")); return Err(Status::invalid_argument("Invalid table name"));
} else if user_part_cleaned.is_empty() {
return Err(Status::invalid_argument("Table name cannot be empty"));
} }
// Start a transaction to ensure atomicity
let mut tx = db_pool.begin().await let mut tx = db_pool.begin().await
.map_err(|e| Status::internal(format!("Failed to start transaction: {}", e)))?; .map_err(|e| Status::internal(format!("Failed to start transaction: {}", e)))?;
// Execute all database operations within the transaction match execute_table_definition(&mut tx, request, base_name).await {
let result = execute_table_definition(&mut tx, request, table_name).await;
// Commit or rollback based on the result
match result {
Ok(response) => { Ok(response) => {
// Commit the transaction
tx.commit().await tx.commit().await
.map_err(|e| Status::internal(format!("Failed to commit transaction: {}", e)))?; .map_err(|e| Status::internal(format!("Failed to commit transaction: {}", e)))?;
Ok(response) Ok(response)
}, },
Err(e) => { Err(e) => {
// Explicitly roll back the transaction (optional but good for clarity)
let _ = tx.rollback().await; let _ = tx.rollback().await;
Err(e) Err(e)
} }
@@ -83,7 +82,6 @@ async fn execute_table_definition(
mut request: PostTableDefinitionRequest, mut request: PostTableDefinitionRequest,
table_name: String, table_name: String,
) -> Result<TableDefinitionResponse, Status> { ) -> Result<TableDefinitionResponse, Status> {
// Lookup or create profile
let profile = sqlx::query!( let profile = sqlx::query!(
"INSERT INTO profiles (name) VALUES ($1) "INSERT INTO profiles (name) VALUES ($1)
ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name
@@ -94,7 +92,6 @@ async fn execute_table_definition(
.await .await
.map_err(|e| Status::internal(format!("Profile error: {}", e)))?; .map_err(|e| Status::internal(format!("Profile error: {}", e)))?;
// Process table links
let mut links = Vec::new(); let mut links = Vec::new();
for link in request.links.drain(..) { for link in request.links.drain(..) {
let linked_table = sqlx::query!( let linked_table = sqlx::query!(
@@ -114,7 +111,6 @@ async fn execute_table_definition(
links.push((linked_id, link.required)); links.push((linked_id, link.required));
} }
// Process columns
let mut columns = Vec::new(); let mut columns = Vec::new();
for col_def in request.columns.drain(..) { for col_def in request.columns.drain(..) {
let col_name = sanitize_identifier(&col_def.name); let col_name = sanitize_identifier(&col_def.name);
@@ -125,20 +121,20 @@ async fn execute_table_definition(
columns.push(format!("\"{}\" {}", col_name, sql_type)); columns.push(format!("\"{}\" {}", col_name, sql_type));
} }
// Process indexes
let mut indexes = Vec::new(); let mut indexes = Vec::new();
for idx in request.indexes.drain(..) { for idx in request.indexes.drain(..) {
let idx_name = sanitize_identifier(&idx); let idx_name = sanitize_identifier(&idx);
if !is_valid_identifier(&idx) { if !is_valid_identifier(&idx) {
return Err(Status::invalid_argument(format!("Invalid index name: {}", idx))); return Err(Status::invalid_argument(format!("Invalid index name: {}", idx)));
} }
if !columns.iter().any(|c| c.starts_with(&format!("\"{}\"", idx_name))) {
return Err(Status::invalid_argument(format!("Index column {} not found", idx_name)));
}
indexes.push(idx_name); indexes.push(idx_name);
} }
// Generate SQL with multiple links
let (create_sql, index_sql) = generate_table_sql(tx, &table_name, &columns, &indexes, &links).await?; let (create_sql, index_sql) = generate_table_sql(tx, &table_name, &columns, &indexes, &links).await?;
// Store main table definition
let table_def = sqlx::query!( let table_def = sqlx::query!(
r#"INSERT INTO table_definitions r#"INSERT INTO table_definitions
(profile_id, table_name, columns, indexes) (profile_id, table_name, columns, indexes)
@@ -146,8 +142,8 @@ async fn execute_table_definition(
RETURNING id"#, RETURNING id"#,
profile.id, profile.id,
&table_name, &table_name,
json!(columns), json!(request.columns.iter().map(|c| c.name.clone()).collect::<Vec<_>>()),
json!(indexes) json!(request.indexes.iter().map(|i| i.clone()).collect::<Vec<_>>())
) )
.fetch_one(&mut **tx) .fetch_one(&mut **tx)
.await .await
@@ -160,7 +156,6 @@ async fn execute_table_definition(
Status::internal(format!("Database error: {}", e)) Status::internal(format!("Database error: {}", e))
})?; })?;
// Store relationships
for (linked_id, is_required) in links { for (linked_id, is_required) in links {
sqlx::query!( sqlx::query!(
"INSERT INTO table_definition_links "INSERT INTO table_definition_links
@@ -175,7 +170,6 @@ async fn execute_table_definition(
.map_err(|e| Status::internal(format!("Failed to save link: {}", e)))?; .map_err(|e| Status::internal(format!("Failed to save link: {}", e)))?;
} }
// Execute generated SQL within the transaction
sqlx::query(&create_sql) sqlx::query(&create_sql)
.execute(&mut **tx) .execute(&mut **tx)
.await .await
@@ -201,60 +195,60 @@ async fn generate_table_sql(
indexes: &[String], indexes: &[String],
links: &[(i64, bool)], links: &[(i64, bool)],
) -> Result<(String, Vec<String>), Status> { ) -> Result<(String, Vec<String>), Status> {
let qualified_table = format!("{}.\"{}\"", GENERATED_SCHEMA_NAME, table_name);
let mut system_columns = vec![ let mut system_columns = vec![
"id BIGSERIAL PRIMARY KEY".to_string(), "id BIGSERIAL PRIMARY KEY".to_string(),
"deleted BOOLEAN NOT NULL DEFAULT FALSE".to_string(), "deleted BOOLEAN NOT NULL DEFAULT FALSE".to_string(),
]; ];
// Add foreign key columns
let mut link_info = Vec::new();
for (linked_id, required) in links { for (linked_id, required) in links {
let linked_table = get_table_name_by_id(tx, *linked_id).await?; let linked_table = get_table_name_by_id(tx, *linked_id).await?;
let qualified_linked_table = format!("{}.\"{}\"", GENERATED_SCHEMA_NAME, linked_table);
// Extract base name after year prefix
let base_name = linked_table.split_once('_') let base_name = linked_table.split_once('_')
.map(|(_, rest)| rest) .map(|(_, rest)| rest)
.unwrap_or(&linked_table) .unwrap_or(&linked_table)
.to_string(); .to_string();
let null_clause = if *required { "NOT NULL" } else { "" }; let null_clause = if *required { "NOT NULL" } else { "" };
system_columns.push( system_columns.push(
format!("\"{0}_id\" BIGINT {1} REFERENCES \"{2}\"(id)", format!("\"{0}_id\" BIGINT {1} REFERENCES {2}(id)",
base_name, null_clause, linked_table base_name, null_clause, qualified_linked_table
) )
); );
link_info.push((base_name, linked_table));
} }
// Combine all columns
let all_columns = system_columns let all_columns = system_columns
.iter() .iter()
.chain(columns.iter()) .chain(columns.iter())
.cloned() .cloned()
.collect::<Vec<_>>(); .collect::<Vec<_>>();
// Build CREATE TABLE statement
let create_sql = format!( let create_sql = format!(
"CREATE TABLE \"{}\" (\n {},\n created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP\n)", "CREATE TABLE {} (\n {},\n created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP\n)",
table_name, qualified_table,
all_columns.join(",\n ") all_columns.join(",\n ")
); );
// Generate indexes let mut all_indexes = Vec::new();
let mut system_indexes = Vec::new(); for (linked_id, _) in links {
for (base_name, _) in &link_info { let linked_table = get_table_name_by_id(tx, *linked_id).await?;
system_indexes.push(format!( let base_name = linked_table.split_once('_')
"CREATE INDEX idx_{}_{}_fk ON \"{}\" (\"{}_id\")", .map(|(_, rest)| rest)
table_name, base_name, table_name, base_name .unwrap_or(&linked_table)
.to_string();
all_indexes.push(format!(
"CREATE INDEX \"idx_{}_{}_fk\" ON {} (\"{}_id\")",
table_name, base_name, qualified_table, base_name
)); ));
} }
let all_indexes = system_indexes for idx in indexes {
.into_iter() all_indexes.push(format!(
.chain(indexes.iter().map(|idx| { "CREATE INDEX \"idx_{}_{}\" ON {} (\"{}\")",
format!("CREATE INDEX idx_{}_{} ON \"{}\" (\"{}\")", table_name, idx, qualified_table, idx
table_name, idx, table_name, idx) ));
})) }
.collect();
Ok((create_sql, all_indexes)) Ok((create_sql, all_indexes))
} }

View File

@@ -2,7 +2,7 @@
use common::proto::multieko2::table_structure::{ use common::proto::multieko2::table_structure::{
GetTableStructureRequest, TableColumn, TableStructureResponse, GetTableStructureRequest, TableColumn, TableStructureResponse,
}; };
use sqlx::{PgPool, Row}; use sqlx::PgPool;
use tonic::Status; use tonic::Status;
// Helper struct to map query results // Helper struct to map query results