very general setup with custom profiles

filipriec
2025-03-01 16:28:33 +01:00
parent b31242e63b
commit 1fa69d8d88
2 changed files with 56 additions and 39 deletions

src/table_definition/handlers/post_table_definition.rs

@@ -1,6 +1,6 @@
 // src/table_definition/handlers/post_table_definition.rs
 use tonic::Status;
-use sqlx::PgPool;
+use sqlx::{PgPool, Row};
 use serde_json::json;
 use common::proto::multieko2::table_definition::{PostTableDefinitionRequest, TableDefinitionResponse};
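The handler below assumes a profiles table with a unique name plus profile and link columns on table_definitions. A minimal migration sketch, not part of this commit; the names are inferred from the queries in the diff (profiles.name must be unique for ON CONFLICT (name), and the handler matches the constraint name idx_table_definitions_profile_table on insert failures):

// Hypothetical migration helper (assumption, not shown in this commit): creates
// the schema the updated handler relies on.
async fn apply_profile_schema(pool: &sqlx::PgPool) -> Result<(), sqlx::Error> {
    // Profiles must have a unique name so the upsert's ON CONFLICT (name) works.
    sqlx::query(
        "CREATE TABLE IF NOT EXISTS profiles (
            id BIGSERIAL PRIMARY KEY,
            name TEXT NOT NULL UNIQUE
        )",
    )
    .execute(pool)
    .await?;
    // Columns the new INSERT INTO table_definitions expects.
    sqlx::query(
        "ALTER TABLE table_definitions
            ADD COLUMN IF NOT EXISTS profile_id BIGINT REFERENCES profiles(id),
            ADD COLUMN IF NOT EXISTS linked_table_id BIGINT REFERENCES table_definitions(id)",
    )
    .execute(pool)
    .await?;
    // Uniqueness per profile; the handler maps violations of this constraint
    // name to Status::already_exists.
    sqlx::query(
        "CREATE UNIQUE INDEX IF NOT EXISTS idx_table_definitions_profile_table
            ON table_definitions (profile_id, table_name)",
    )
    .execute(pool)
    .await?;
    Ok(())
}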
@@ -23,63 +23,92 @@ pub async fn post_table_definition(
     db_pool: &PgPool,
     mut request: PostTableDefinitionRequest,
 ) -> Result<TableDefinitionResponse, Status> {
-    // Validate and sanitize table name
+    // Validate and sanitize inputs
     let table_name = sanitize_identifier(&request.table_name);
     if !is_valid_identifier(&table_name) {
         return Err(Status::invalid_argument("Invalid table name"));
     }
-    // Validate and sanitize columns
-    let mut columns = Vec::with_capacity(request.columns.len());
-    for col in request.columns.drain(..) {
-        let clean_col = sanitize_identifier(&col);
-        if !is_valid_identifier(&clean_col) {
-            return Err(Status::invalid_argument(format!("Invalid column name: {}", col)));
+    // Lookup or create profile
+    let profile = sqlx::query!(
+        "INSERT INTO profiles (name) VALUES ($1)
+         ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name
+         RETURNING id",
+        request.profile_name
+    )
+    .fetch_one(db_pool)
+    .await
+    .map_err(|e| Status::internal(format!("Profile error: {}", e)))?;
+    // Validate linked table if provided
+    let linked_table_id = if let Some(linked_table) = &request.linked_table_name {
+        let lt = sqlx::query!(
+            "SELECT id FROM table_definitions
+             WHERE profile_id = $1 AND table_name = $2",
+            profile.id,
+            sanitize_identifier(linked_table)
+        )
+        .fetch_optional(db_pool)
+        .await
+        .map_err(|e| Status::internal(format!("Linked table lookup failed: {}", e)))?;
+        lt.map(|r| r.id)
+            .ok_or_else(|| Status::not_found("Linked table not found in profile"))?
+    } else {
+        None
+    };
+    // Validate columns and indexes (add data type support)
+    let mut columns = Vec::new();
+    for col_def in request.columns.drain(..) {
+        let col_name = sanitize_identifier(&col_def.name);
+        if !is_valid_identifier(&col_name) {
+            return Err(Status::invalid_argument(format!("Invalid column name: {}", col_def.name)));
         }
-        columns.push(clean_col);
+        // Add data type validation here
+        columns.push(format!("\"{}\" {}", col_name, col_def.data_type));
     }
     // Validate and sanitize indexes
-    let mut indexes = Vec::with_capacity(request.indexes.len());
+    let mut indexes = Vec::new();
     for idx in request.indexes.drain(..) {
-        let clean_idx = sanitize_identifier(&idx);
-        if !is_valid_identifier(&clean_idx) {
+        let idx_name = sanitize_identifier(&idx);
+        if !is_valid_identifier(&idx_name) {
            return Err(Status::invalid_argument(format!("Invalid index name: {}", idx)));
        }
-        indexes.push(clean_idx);
+        indexes.push(idx_name);
     }
-    // Generate SQL with proper quoting
+    // Generate SQL
     let (create_sql, index_sql) = generate_table_sql(&table_name, &columns, &indexes);
-    // Store definition in table_definitions
+    // Store definition
     sqlx::query!(
         r#"INSERT INTO table_definitions
-        (firma, table_name, columns, indexes)
-        VALUES ($1, $2, $3, $4)"#,
-        "system", // Or get from auth context
+        (profile_id, table_name, columns, indexes, linked_table_id)
+        VALUES ($1, $2, $3, $4, $5)"#,
+        profile.id,
         &table_name,
-        json!(columns), // Use serde_json::json! to convert Vec<String> to JsonValue
-        json!(indexes) // Use serde_json::json! to convert Vec<String> to JsonValue
+        json!(columns),
+        json!(indexes),
+        linked_table_id
     )
     .execute(db_pool)
     .await
     .map_err(|e| {
         if let Some(db_err) = e.as_database_error() {
-            if db_err.constraint() == Some("table_definitions_table_name_key") {
-                return Status::already_exists("Table already exists");
+            if db_err.constraint() == Some("idx_table_definitions_profile_table") {
+                return Status::already_exists("Table already exists in this profile");
             }
         }
         Status::internal(format!("Database error: {}", e))
     })?;
-    // Execute the generated SQL
+    // Execute generated SQL
     sqlx::query(&create_sql)
         .execute(db_pool)
         .await
         .map_err(|e| Status::internal(format!("Table creation failed: {}", e)))?;
     // Iterate over a reference to index_sql to avoid moving it
     for sql in &index_sql {
         sqlx::query(&sql)
             .execute(db_pool)
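For context, a rough sketch of how a caller might populate the reworked request. The proto message and field names for the column entries (a column message with name and data_type) are assumptions inferred from how the handler above reads the request; the proto changes themselves are not part of this diff.

// Illustration only: request shape inferred from the handler above. The
// ColumnDefinition type name and exact fields are assumptions.
use common::proto::multieko2::table_definition::{ColumnDefinition, PostTableDefinitionRequest};

fn example_request() -> PostTableDefinitionRequest {
    PostTableDefinitionRequest {
        profile_name: "invoicing".into(),             // upserted into profiles
        table_name: "invoices_2025".into(),           // sanitized and validated
        linked_table_name: Some("customers".into()),  // must already exist in the same profile
        columns: vec![ColumnDefinition {
            name: "amount".into(),
            data_type: "NUMERIC".into(),              // rendered into the column DDL as-is
        }],
        indexes: vec!["amount".into()],
        ..Default::default()                          // covers any fields not visible in this diff
    }
}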
@@ -94,29 +123,17 @@ pub async fn post_table_definition(
 }
 fn generate_table_sql(table_name: &str, columns: &[String], indexes: &[String]) -> (String, Vec<String>) {
-    // Generate quoted column definitions
-    let columns_sql = columns.iter()
-        .map(|c| format!("\"{}\" TEXT", c))
-        .collect::<Vec<_>>()
-        .join(",\n ");
+    let columns_sql = columns.join(",\n ");
     // Create table with proper quoting
     let create_sql = format!(
         "CREATE TABLE \"{}\" (
            id BIGSERIAL PRIMARY KEY,
            deleted BOOLEAN NOT NULL DEFAULT FALSE,
-           firma TEXT NOT NULL,
            {},
            created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
        )", table_name, columns_sql);
-    // Always include firma index
-    let mut all_indexes = vec!["firma".to_string()];
-    all_indexes.extend(indexes.iter().cloned());
-    all_indexes.dedup();
     // Generate safe index SQL
-    let index_sql = all_indexes.iter()
+    let index_sql = indexes.iter()
        .map(|i| format!(
            "CREATE INDEX idx_{}_{} ON \"{}\" (\"{}\")",
            table_name, i, table_name, i
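As a quick check of the reworked SQL generation, a rough test sketch, not part of the commit, of the output one would expect from the format strings visible above; the table and column names here are made up for illustration.

// Illustration only: generate_table_sql now receives fully rendered column
// definitions ("\"name\" TYPE") instead of bare column names, and no longer
// injects a firma column or index.
#[cfg(test)]
mod generate_table_sql_sketch {
    use super::generate_table_sql;

    #[test]
    fn builds_profile_scoped_ddl() {
        // Hypothetical inputs; the handler renders these from the request.
        let columns = vec![
            "\"customer\" TEXT".to_string(),
            "\"amount\" NUMERIC".to_string(),
        ];
        let indexes = vec!["customer".to_string()];

        let (create_sql, index_sql) = generate_table_sql("invoices", &columns, &indexes);

        assert!(create_sql.starts_with("CREATE TABLE \"invoices\""));
        assert!(create_sql.contains("\"amount\" NUMERIC"));
        // One CREATE INDEX statement per requested index; no implicit firma index anymore.
        assert_eq!(index_sql.len(), 1);
        assert!(index_sql[0].contains("idx_invoices_customer"));
    }
}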