// src/table_definition/handlers/post_table_definition.rs

use tonic::Status;
use sqlx::{PgPool, Transaction, Postgres};
use serde_json::json;
use common::proto::komp_ac::table_definition::{PostTableDefinitionRequest, TableDefinitionResponse};

// TODO CRITICAL: add decimal with optional precision
const PREDEFINED_FIELD_TYPES: &[(&str, &str)] = &[
    ("text", "TEXT"),
    ("string", "TEXT"),
    ("boolean", "BOOLEAN"),
    ("timestamp", "TIMESTAMPTZ"),
    ("timestamptz", "TIMESTAMPTZ"),
    ("time", "TIMESTAMPTZ"),
    ("money", "NUMERIC(14, 4)"),
    ("integer", "INTEGER"),
    ("int", "INTEGER"),
    ("biginteger", "BIGINT"),
    ("bigint", "BIGINT"),
    ("date", "DATE"),
];

// Helper that produces detailed error messages for identifier validation.
fn validate_identifier_format(s: &str, identifier_type: &str) -> Result<(), Status> {
    if s.is_empty() {
        return Err(Status::invalid_argument(format!("{} cannot be empty", identifier_type)));
    }
    if s.starts_with('_') {
        return Err(Status::invalid_argument(format!("{} cannot start with underscore", identifier_type)));
    }
    if s.chars().next().unwrap().is_ascii_digit() {
        return Err(Status::invalid_argument(format!("{} cannot start with a number", identifier_type)));
    }

    // Check for uppercase letters first so they get a specific, helpful message
    // (the generic invalid-character check below would otherwise catch them).
    if s.chars().any(|c| c.is_ascii_uppercase()) {
        return Err(Status::invalid_argument(format!(
            "{} contains uppercase letters. Only lowercase letters are allowed",
            identifier_type
        )));
    }

    // Check for any remaining invalid characters
    let invalid_chars: Vec<char> = s.chars()
        .filter(|c| !c.is_ascii_lowercase() && !c.is_ascii_digit() && *c != '_')
        .collect();
    if !invalid_chars.is_empty() {
        return Err(Status::invalid_argument(format!(
            "{} contains invalid characters: {:?}. Only lowercase letters, numbers, and underscores are allowed",
            identifier_type, invalid_chars
        )));
    }

    Ok(())
}

fn validate_decimal_number_format(num_str: &str, param_name: &str) -> Result<(), Status> {
    if num_str.is_empty() {
        return Err(Status::invalid_argument(format!(
            "{} cannot be empty",
            param_name
        )));
    }

    // Check for explicit signs
    if num_str.starts_with('+') || num_str.starts_with('-') {
        return Err(Status::invalid_argument(format!(
            "{} cannot have explicit positive or negative signs",
            param_name
        )));
    }

    // Check for decimal points
    if num_str.contains('.') {
        return Err(Status::invalid_argument(format!(
            "{} must be a whole number (no decimal points)",
            param_name
        )));
    }

    // Check for leading zeros (but allow "0" itself)
    if num_str.len() > 1 && num_str.starts_with('0') {
        let trimmed = num_str.trim_start_matches('0');
        let suggestion = if trimmed.is_empty() { "0" } else { trimmed };
        return Err(Status::invalid_argument(format!(
            "{} cannot have leading zeros (use '{}' instead of '{}')",
            param_name, suggestion, num_str
        )));
    }

    // Check that all characters are digits
    if !num_str.chars().all(|c| c.is_ascii_digit()) {
        return Err(Status::invalid_argument(format!(
            "{} contains invalid characters. Only digits 0-9 are allowed",
            param_name
        )));
    }

    Ok(())
}
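/// Maps a user-facing field type to a PostgreSQL column type, e.g. "money"
/// becomes NUMERIC(14, 4) and "decimal(10, 2)" becomes NUMERIC(10, 2).
/// Anything that is neither in `PREDEFINED_FIELD_TYPES` nor a well-formed
/// `decimal(precision, scale)` is rejected with `invalid_argument`.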
fn map_field_type(field_type: &str) -> Result<String, Status> {
    let lower_field_type = field_type.to_lowercase();

    // Special handling for "decimal(precision, scale)"
    if lower_field_type.starts_with("decimal(") && lower_field_type.ends_with(')') {
        // Extract the part inside the parentheses, e.g., "10, 2"
        let args = lower_field_type
            .strip_prefix("decimal(")
            .and_then(|s| s.strip_suffix(')'))
            .unwrap_or(""); // Should always succeed due to the checks above

        // Split into precision and scale parts
        if let Some((p_str, s_str)) = args.split_once(',') {
            let precision_str = p_str.trim();
            let scale_str = s_str.trim();

            // Validate format before parsing
            validate_decimal_number_format(precision_str, "precision")?;
            validate_decimal_number_format(scale_str, "scale")?;

            // Parse precision, returning an error if it's not a valid number
            let precision = precision_str.parse::<u32>().map_err(|_| {
                Status::invalid_argument("Invalid precision in decimal type")
            })?;

            // Parse scale, returning an error if it's not a valid number
            let scale = scale_str.parse::<u32>().map_err(|_| {
                Status::invalid_argument("Invalid scale in decimal type")
            })?;

            // Validation based on PostgreSQL rules
            if precision < 1 {
                return Err(Status::invalid_argument("Precision must be at least 1"));
            }
            if scale > precision {
                return Err(Status::invalid_argument(
                    "Scale cannot be greater than precision",
                ));
            }

            // If everything is valid, build and return the NUMERIC type string
            return Ok(format!("NUMERIC({}, {})", precision, scale));
        } else {
            // The format was wrong, e.g., "decimal(10)" or "decimal()"
            return Err(Status::invalid_argument(
                "Invalid decimal format. Expected: decimal(precision, scale)",
            ));
        }
    }

    // If not a decimal, fall back to the predefined list
    PREDEFINED_FIELD_TYPES
        .iter()
        .find(|(key, _)| *key == lower_field_type.as_str())
        .map(|(_, sql_type)| sql_type.to_string()) // Convert to an owned String
        .ok_or_else(|| {
            Status::invalid_argument(format!(
                "Invalid field type: {}",
                field_type
            ))
        })
}

fn is_invalid_table_name(table_name: &str) -> bool {
    table_name.ends_with("_id")
        || table_name == "id"
        || table_name == "deleted"
        || table_name == "created_at"
}

fn is_reserved_schema(schema_name: &str) -> bool {
    let lower = schema_name.to_lowercase();
    lower == "public" || lower == "information_schema" || lower.starts_with("pg_")
}
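/// Validates the request, then creates the table definition and the physical
/// table inside a single transaction that is rolled back on any error.
///
/// Illustrative sketch only (the names below are made up): a request with
/// `profile_name = "acme"`, `table_name = "invoices"`, one column
/// `{ name: "amount", field_type: "money" }` and an index on `"amount"`
/// would produce DDL roughly like:
///
/// ```sql
/// CREATE TABLE "acme"."invoices" (
///  id BIGSERIAL PRIMARY KEY,
///  deleted BOOLEAN NOT NULL DEFAULT FALSE,
///  "amount" NUMERIC(14, 4),
///  created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
/// );
/// CREATE INDEX "idx_invoices_amount" ON "acme"."invoices" ("amount");
/// ```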
pub async fn post_table_definition(
    db_pool: &PgPool,
    request: PostTableDefinitionRequest,
) -> Result<TableDefinitionResponse, Status> {
    // Create owned copies of the strings after validation
    let profile_name = {
        let trimmed = request.profile_name.trim();
        validate_identifier_format(trimmed, "Profile name")?;
        trimmed.to_string()
    };

    // Reject reserved schema names
    if is_reserved_schema(&profile_name) {
        return Err(Status::invalid_argument("Profile name is reserved and cannot be used"));
    }

    const MAX_IDENTIFIER_LENGTH: usize = 63;
    if profile_name.len() > MAX_IDENTIFIER_LENGTH {
        return Err(Status::invalid_argument(format!(
            "Profile name '{}' exceeds the {} character limit.",
            profile_name, MAX_IDENTIFIER_LENGTH
        )));
    }

    let table_name = {
        let trimmed = request.table_name.trim();
        validate_identifier_format(trimmed, "Table name")?;
        if trimmed.len() > MAX_IDENTIFIER_LENGTH {
            return Err(Status::invalid_argument(format!(
                "Table name '{}' exceeds the {} character limit.",
                trimmed, MAX_IDENTIFIER_LENGTH
            )));
        }
        // Check invalid table names on the original input
        if is_invalid_table_name(trimmed) {
            return Err(Status::invalid_argument(
                "Table name cannot be 'id', 'deleted', 'created_at' or end with '_id'"
            ));
        }
        trimmed.to_string()
    };

    let mut tx = db_pool.begin().await
        .map_err(|e| Status::internal(format!("Failed to start transaction: {}", e)))?;

    match execute_table_definition(&mut tx, request, table_name, profile_name).await {
        Ok(response) => {
            tx.commit().await
                .map_err(|e| Status::internal(format!("Failed to commit transaction: {}", e)))?;
            Ok(response)
        },
        Err(e) => {
            let _ = tx.rollback().await;
            Err(e)
        }
    }
}
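/// Performs the actual work inside the already-open transaction. Roughly:
/// upsert the profile row in `schemas` and create the PostgreSQL schema if it
/// is missing, resolve every requested link to an existing table definition,
/// validate and type-map the user columns and indexes, store the definition
/// (plus default validation rules and link rows) in the metadata tables, and
/// finally execute the generated CREATE TABLE / CREATE INDEX statements.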
async fn execute_table_definition(
    tx: &mut Transaction<'_, Postgres>,
    mut request: PostTableDefinitionRequest,
    table_name: String,
    profile_name: String,
) -> Result<TableDefinitionResponse, Status> {
    // Use the validated profile_name for schema insertion
    let schema = sqlx::query!(
        "INSERT INTO schemas (name) VALUES ($1)
         ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name
         RETURNING id",
        profile_name
    )
    .fetch_one(&mut **tx)
    .await
    .map_err(|e| Status::internal(format!("Schema error: {}", e)))?;

    // Create the PostgreSQL schema if it doesn't exist
    let create_schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", profile_name);
    sqlx::query(&create_schema_sql)
        .execute(&mut **tx)
        .await
        .map_err(|e| Status::internal(format!("Schema creation failed: {}", e)))?;

    let mut links = Vec::new();
    let mut seen_tables = std::collections::HashSet::new();
    for link in request.links.drain(..) {
        // Reject duplicate links to the same table
        if !seen_tables.insert(link.linked_table_name.clone()) {
            return Err(Status::invalid_argument(format!(
                "Duplicate link to table '{}'",
                link.linked_table_name
            )));
        }

        let linked_table = sqlx::query!(
            "SELECT id FROM table_definitions WHERE schema_id = $1 AND table_name = $2",
            schema.id,
            link.linked_table_name
        )
        .fetch_optional(&mut **tx)
        .await
        .map_err(|e| Status::internal(format!("Linked table lookup failed: {}", e)))?;

        let linked_id = linked_table
            .ok_or_else(|| Status::not_found(format!("Linked table {} not found", link.linked_table_name)))?
            .id;

        links.push((linked_id, link.required));
    }

    let mut columns = Vec::new();
    for col_def in request.columns.drain(..) {
        let col_name = col_def.name.trim().to_string();
        validate_identifier_format(&col_name, "Column name")?;
        if col_name.ends_with("_id") || col_name == "id" || col_name == "deleted" || col_name == "created_at" {
            return Err(Status::invalid_argument(format!(
                "Column name '{}' cannot be 'id', 'deleted', 'created_at' or end with '_id'",
                col_name
            )));
        }
        let sql_type = map_field_type(&col_def.field_type)?;
        columns.push(format!("\"{}\" {}", col_name, sql_type));
    }

    let mut indexes = Vec::new();
    for idx in request.indexes.drain(..) {
        let idx_name = idx.trim().to_string();
        validate_identifier_format(&idx_name, "Index name")?;
        // Each index must refer to one of the user-defined columns
        if !columns.iter().any(|c| c.starts_with(&format!("\"{}\"", idx_name))) {
            return Err(Status::invalid_argument(format!("Index column '{}' not found", idx_name)));
        }
        indexes.push(idx_name);
    }

    let (create_sql, index_sql) =
        generate_table_sql(tx, &profile_name, &table_name, &columns, &indexes, &links).await?;

    // The definition row is keyed by schema_id
    let table_def = sqlx::query!(
        r#"INSERT INTO table_definitions (schema_id, table_name, columns, indexes)
           VALUES ($1, $2, $3, $4)
           RETURNING id"#,
        schema.id,
        &table_name,
        json!(columns),
        json!(indexes)
    )
    .fetch_one(&mut **tx)
    .await
    .map_err(|e| {
        if let Some(db_err) = e.as_database_error() {
            // A violation of the unique (schema_id, table_name) index means the
            // table already exists in this profile
            if db_err.constraint() == Some("idx_table_definitions_schema_table") {
                return Status::already_exists("Table already exists in this profile");
            }
        }
        Status::internal(format!("Database error: {}", e))
    })?;

    for col_def in &columns {
        // Each column string looks like "\"name\" TYPE"; split out the identifier
        let col_name = col_def.split_whitespace().next().unwrap_or("");
        let clean_col = col_name.trim_matches('"');

        // Default empty config: only a character_limits block for now, with nothing set
        let default_cfg = serde_json::json!({
            "character_limits": {
                "min": 0,
                "max": 0,
                "warn_at": null,
                "count_mode": "CHARS"
            }
        });

        sqlx::query!(
            r#"INSERT INTO table_validation_rules (table_def_id, data_key, config)
               VALUES ($1, $2, $3)
               ON CONFLICT (table_def_id, data_key) DO NOTHING"#,
            table_def.id,
            clean_col,
            default_cfg
        )
        .execute(&mut **tx)
        .await
        .map_err(|e| Status::internal(format!(
            "Failed to insert default validation rule for column {}: {}",
            clean_col, e
        )))?;
    }

    for (linked_id, is_required) in links {
        sqlx::query!(
            "INSERT INTO table_definition_links (source_table_id, linked_table_id, is_required) VALUES ($1, $2, $3)",
            table_def.id,
            linked_id,
            is_required
        )
        .execute(&mut **tx)
        .await
        .map_err(|e| Status::internal(format!("Failed to save link: {}", e)))?;
    }

    sqlx::query(&create_sql)
        .execute(&mut **tx)
        .await
        .map_err(|e| Status::internal(format!("Table creation failed: {}", e)))?;

    for sql in &index_sql {
        sqlx::query(sql)
            .execute(&mut **tx)
            .await
            .map_err(|e| Status::internal(format!("Index creation failed: {}", e)))?;
    }

    Ok(TableDefinitionResponse {
        success: true,
        sql: format!("{}\n{}", create_sql, index_sql.join("\n")),
    })
}
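/// Builds the DDL for the new table: the system columns (`id`, `deleted`,
/// `created_at`), one `<linked_table>_id BIGINT REFERENCES ...` column per
/// link (NOT NULL when the link is required), the user-defined columns, and
/// CREATE INDEX statements for both the foreign-key columns and the requested
/// indexes. Nothing is executed here; the caller runs the returned SQL.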
async fn generate_table_sql(
    tx: &mut Transaction<'_, Postgres>,
    profile_name: &str,
    table_name: &str,
    columns: &[String],
    indexes: &[String],
    links: &[(i64, bool)],
) -> Result<(String, Vec<String>), Status> {
    let qualified_table = format!("\"{}\".\"{}\"", profile_name, table_name);

    let mut system_columns = vec![
        "id BIGSERIAL PRIMARY KEY".to_string(),
        "deleted BOOLEAN NOT NULL DEFAULT FALSE".to_string(),
    ];

    for (linked_id, required) in links {
        let linked_table = get_table_name_by_id(tx, *linked_id).await?;
        let qualified_linked_table = format!("\"{}\".\"{}\"", profile_name, linked_table);

        // Use the full table name for the FK column; no truncation
        let null_clause = if *required { "NOT NULL" } else { "" };
        system_columns.push(format!(
            "\"{}_id\" BIGINT {} REFERENCES {}(id)",
            linked_table, null_clause, qualified_linked_table
        ));
    }

    let all_columns = system_columns
        .iter()
        .chain(columns.iter())
        .cloned()
        .collect::<Vec<String>>();

    let create_sql = format!(
        "CREATE TABLE {} (\n {},\n created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP\n)",
        qualified_table,
        all_columns.join(",\n ")
    );

    let mut all_indexes = Vec::new();
    for (linked_id, _) in links {
        let linked_table = get_table_name_by_id(tx, *linked_id).await?;
        all_indexes.push(format!(
            "CREATE INDEX \"idx_{}_{}_fk\" ON {} (\"{}_id\")",
            table_name, linked_table, qualified_table, linked_table
        ));
    }
    for idx in indexes {
        all_indexes.push(format!(
            "CREATE INDEX \"idx_{}_{}\" ON {} (\"{}\")",
            table_name, idx, qualified_table, idx
        ));
    }

    Ok((create_sql, all_indexes))
}

async fn get_table_name_by_id(
    tx: &mut Transaction<'_, Postgres>,
    table_id: i64,
) -> Result<String, Status> {
    let record = sqlx::query!(
        "SELECT table_name FROM table_definitions WHERE id = $1",
        table_id
    )
    .fetch_one(&mut **tx)
    .await
    .map_err(|e| Status::internal(format!("Table lookup failed: {}", e)))?;

    Ok(record.table_name)
}
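
// Illustrative sketch only: a few unit tests spelling out the behaviour of the
// pure helpers above. Expected values are read directly from the mapping table
// and validation rules in this file; adjust them if those rules change.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn maps_predefined_and_decimal_types() {
        assert_eq!(map_field_type("money").unwrap(), "NUMERIC(14, 4)");
        assert_eq!(map_field_type("decimal(10, 2)").unwrap(), "NUMERIC(10, 2)");
        // Scale larger than precision is rejected by the PostgreSQL-style check
        assert!(map_field_type("decimal(2, 5)").is_err());
        // Unknown types fall through to an invalid_argument error
        assert!(map_field_type("uuid").is_err());
    }

    #[test]
    fn rejects_malformed_identifiers_and_numbers() {
        assert!(validate_identifier_format("customer_orders", "Table name").is_ok());
        assert!(validate_identifier_format("CustomerOrders", "Table name").is_err());
        assert!(validate_identifier_format("_hidden", "Table name").is_err());
        assert!(validate_decimal_number_format("007", "precision").is_err());
        assert!(validate_decimal_number_format("+10", "precision").is_err());
    }

    #[test]
    fn reserved_and_system_names_are_flagged() {
        assert!(is_reserved_schema("pg_catalog"));
        assert!(is_invalid_table_name("customer_id"));
        assert!(!is_invalid_table_name("customers"));
    }
}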