// src/tables_data/handlers/post_table_data.rs

use tonic::Status;
use sqlx::{PgPool, Arguments};
use sqlx::postgres::PgArguments;
use chrono::{DateTime, Utc};
use common::proto::multieko2::tables_data::{PostTableDataRequest, PostTableDataResponse};
use std::collections::HashMap;
use std::sync::Arc;
use prost_types::value::Kind;

use crate::steel::server::execution::{self, Value};
use crate::steel::server::functions::SteelContext;

use crate::indexer::{IndexCommand, IndexCommandData};
use tokio::sync::mpsc;
use tracing::error;

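/// Inserts one row of dynamic table data for the `PostTableData` RPC.
///
/// The handler resolves the profile (schema) and table definition, checks that
/// every submitted column is either a user-defined column or a known system
/// column, runs any Steel validation scripts against the submitted values,
/// builds a parameterised INSERT, and finally queues the inserted row for
/// search indexing.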
pub async fn post_table_data(
    db_pool: &PgPool,
    request: PostTableDataRequest,
    indexer_tx: &mpsc::Sender<IndexCommand>,
) -> Result<PostTableDataResponse, Status> {
    let profile_name = request.profile_name;
    let table_name = request.table_name;

    // Lookup profile
    let schema = sqlx::query!(
        "SELECT id FROM schemas WHERE name = $1",
        profile_name
    )
    .fetch_optional(db_pool)
    .await
    .map_err(|e| Status::internal(format!("Profile lookup error: {}", e)))?;

    let schema_id = schema.ok_or_else(|| Status::not_found("Profile not found"))?.id;

    // Lookup table_definition
    let table_def = sqlx::query!(
        r#"SELECT id, columns FROM table_definitions
           WHERE schema_id = $1 AND table_name = $2"#,
        schema_id,
        table_name
    )
    .fetch_optional(db_pool)
    .await
    .map_err(|e| Status::internal(format!("Table lookup error: {}", e)))?;

    let table_def = table_def.ok_or_else(|| Status::not_found("Table not found"))?;

    // Parse columns from JSON
    let columns_json: Vec<String> = serde_json::from_value(table_def.columns.clone())
        .map_err(|e| Status::internal(format!("Column parsing error: {}", e)))?;

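    // Each stored definition is split on its first space into a quoted column
    // name and the remainder, which is taken as the SQL type (so an entry is
    // assumed to look roughly like `"note" TEXT`; the exact stored shape is
    // inferred from the parsing below).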
    let mut columns = Vec::new();
    for col_def in columns_json {
        let parts: Vec<&str> = col_def.splitn(2, ' ').collect();
        if parts.len() != 2 {
            return Err(Status::internal("Invalid column format"));
        }
        let name = parts[0].trim_matches('"').to_string();
        let sql_type = parts[1].to_string();
        columns.push((name, sql_type));
    }

    // Get all foreign key columns for this table
    let fk_columns = sqlx::query!(
        r#"SELECT ltd.table_name
           FROM table_definition_links tdl
           JOIN table_definitions ltd ON tdl.linked_table_id = ltd.id
           WHERE tdl.source_table_id = $1"#,
        table_def.id
    )
    .fetch_all(db_pool)
    .await
    .map_err(|e| Status::internal(format!("Foreign key lookup error: {}", e)))?;

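    // System columns are `deleted` plus one `<base>_id` column per linked
    // table; `base` is the linked table name with everything up to and
    // including the first underscore stripped (the full name is used when it
    // contains no underscore).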
    // Build system columns with foreign keys
    let mut system_columns = vec!["deleted".to_string()];
    for fk in fk_columns {
        let base_name = fk.table_name.split_once('_').map_or(fk.table_name.as_str(), |(_, rest)| rest);
        system_columns.push(format!("{}_id", base_name));
    }

    // Convert to HashSet for faster lookups
    let system_columns_set: std::collections::HashSet<_> = system_columns.iter().map(|s| s.as_str()).collect();

    // Validate all data columns
    let user_columns: Vec<&String> = columns.iter().map(|(name, _)| name).collect();
    for key in request.data.keys() {
        if !system_columns_set.contains(key.as_str())
            && !user_columns.iter().any(|name| name.as_str() == key.as_str())
        {
            return Err(Status::invalid_argument(format!("Invalid column: {}", key)));
        }
    }

    // ========================================================================
    // FIX #1: SCRIPT VALIDATION LOOP
    // This loop now correctly handles JSON `null` (which becomes `None`).
    // ========================================================================
    let mut string_data_for_scripts = HashMap::new();
    for (key, proto_value) in &request.data {
        let str_val = match &proto_value.kind {
            Some(Kind::StringValue(s)) => s.clone(),
            Some(Kind::NumberValue(n)) => n.to_string(),
            Some(Kind::BoolValue(b)) => b.to_string(),
            // This now correctly skips both protobuf `NULL` and JSON `null`.
            Some(Kind::NullValue(_)) | None => continue,
            Some(Kind::StructValue(_)) | Some(Kind::ListValue(_)) => {
                return Err(Status::invalid_argument(format!("Unsupported type for script validation in column '{}'", key)));
            }
        };
        string_data_for_scripts.insert(key.clone(), str_val);
    }

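    // Each script is tied to a target column: the script is executed in
    // "STRINGS" mode with the submitted row data as context, and its last
    // returned string must match the value the client supplied for that
    // column, otherwise the request is rejected.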
    // Validate Steel scripts
    let scripts = sqlx::query!(
        "SELECT target_column, script FROM table_scripts WHERE table_definitions_id = $1",
        table_def.id
    )
    .fetch_all(db_pool)
    .await
    .map_err(|e| Status::internal(format!("Failed to fetch scripts: {}", e)))?;

    for script_record in scripts {
        let target_column = script_record.target_column;

        let user_value = string_data_for_scripts.get(&target_column)
            .ok_or_else(|| Status::invalid_argument(
                format!("Script target column '{}' is required", target_column)
            ))?;

        let context = SteelContext {
            current_table: table_name.clone(),
            schema_id,
            schema_name: profile_name.clone(),
            row_data: string_data_for_scripts.clone(),
            db_pool: Arc::new(db_pool.clone()),
        };

        let script_result = execution::execute_script(
            script_record.script,
            "STRINGS",
            Arc::new(db_pool.clone()),
            context,
        )
        .map_err(|e| Status::invalid_argument(
            format!("Script execution failed for '{}': {}", target_column, e)
        ))?;

        let Value::Strings(mut script_output) = script_result else {
            return Err(Status::internal("Script must return string values"));
        };

        let expected_value = script_output.pop()
            .ok_or_else(|| Status::internal("Script returned no values"))?;

        if user_value != &expected_value {
            return Err(Status::invalid_argument(format!(
                "Validation failed for column '{}': Expected '{}', Got '{}'",
                target_column, expected_value, user_value
            )));
        }
    }

    // Prepare SQL parameters
    let mut params = PgArguments::default();
    let mut columns_list = Vec::new();
    let mut placeholders = Vec::new();
    let mut param_idx = 1;

    // ========================================================================
    // FIX #2: DATABASE INSERTION LOOP
    // This loop now correctly handles JSON `null` (which becomes `None`)
    // without crashing and correctly inserts a SQL NULL.
    // ========================================================================
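    // For every submitted column the loop resolves its SQL type, binds the
    // value (or a typed NULL) to `params`, and records the quoted column name
    // together with a `$n` placeholder; `param_idx` keeps the placeholder
    // numbers in sync with the order in which values are added to `params`.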
    for (col, proto_value) in request.data {
        let sql_type = if system_columns_set.contains(col.as_str()) {
            match col.as_str() {
                "deleted" => "BOOLEAN",
                _ if col.ends_with("_id") => "BIGINT",
                _ => return Err(Status::invalid_argument("Invalid system column")),
            }
        } else {
            columns.iter()
                .find(|(name, _)| name == &col)
                .map(|(_, sql_type)| sql_type.as_str())
                .ok_or_else(|| Status::invalid_argument(format!("Column not found: {}", col)))?
        };

        // Check for `None` (from JSON null) or `Some(NullValue)` first.
        let kind = match &proto_value.kind {
            None | Some(Kind::NullValue(_)) => {
                // It's a null value. Add the correct SQL NULL type and continue.
                match sql_type {
                    "BOOLEAN" => params.add(None::<bool>),
                    "TEXT" | "VARCHAR(15)" | "VARCHAR(255)" => params.add(None::<String>),
                    "TIMESTAMPTZ" => params.add(None::<DateTime<Utc>>),
                    "BIGINT" => params.add(None::<i64>),
                    _ => return Err(Status::invalid_argument(format!("Unsupported type for null value: {}", sql_type))),
                }.map_err(|e| Status::internal(format!("Failed to add null parameter for {}: {}", col, e)))?;

                columns_list.push(format!("\"{}\"", col));
                placeholders.push(format!("${}", param_idx));
                param_idx += 1;
                continue; // Skip to the next column in the loop
            }
            // If it's not null, just pass the inner `Kind` through.
            Some(k) => k,
        };

        // From here, we know `kind` is not a null type.
        match sql_type {
            "TEXT" | "VARCHAR(15)" | "VARCHAR(255)" => {
                if let Kind::StringValue(value) = kind {
                    if let Some(max_len) = sql_type.strip_prefix("VARCHAR(").and_then(|s| s.strip_suffix(')')).and_then(|s| s.parse::<usize>().ok()) {
                        if value.len() > max_len {
                            return Err(Status::invalid_argument(format!("Value too long for {}", col)));
                        }
                    }
                    params.add(value).map_err(|e| Status::invalid_argument(format!("Failed to add text parameter for {}: {}", col, e)))?;
                } else {
                    return Err(Status::invalid_argument(format!("Expected string for column '{}'", col)));
                }
            },
"BOOLEAN" => {
|
|
if let Kind::BoolValue(val) = kind {
|
|
params.add(val).map_err(|e| Status::invalid_argument(format!("Failed to add boolean parameter for {}: {}", col, e)))?;
|
|
} else {
|
|
return Err(Status::invalid_argument(format!("Expected boolean for column '{}'", col)));
|
|
}
|
|
},
|
|
"TIMESTAMPTZ" => {
|
|
if let Kind::StringValue(value) = kind {
|
|
let dt = DateTime::parse_from_rfc3339(value).map_err(|_| Status::invalid_argument(format!("Invalid timestamp for {}", col)))?;
|
|
params.add(dt.with_timezone(&Utc)).map_err(|e| Status::invalid_argument(format!("Failed to add timestamp parameter for {}: {}", col, e)))?;
|
|
} else {
|
|
return Err(Status::invalid_argument(format!("Expected ISO 8601 string for column '{}'", col)));
|
|
}
|
|
},
|
|
"BIGINT" => {
|
|
if let Kind::NumberValue(val) = kind {
|
|
if val.fract() != 0.0 {
|
|
return Err(Status::invalid_argument(format!("Expected integer for column '{}', but got a float", col)));
|
|
}
|
|
params.add(*val as i64).map_err(|e| Status::invalid_argument(format!("Failed to add integer parameter for {}: {}", col, e)))?;
|
|
} else {
|
|
return Err(Status::invalid_argument(format!("Expected number for column '{}'", col)));
|
|
}
|
|
},
|
|
_ => return Err(Status::invalid_argument(format!("Unsupported type {}", sql_type))),
|
|
}
|
|
|
|
        columns_list.push(format!("\"{}\"", col));
        placeholders.push(format!("${}", param_idx));
        param_idx += 1;
    }

    if columns_list.is_empty() {
        return Err(Status::invalid_argument("No valid columns to insert"));
    }

    // Qualify table name with schema
    let qualified_table = crate::shared::schema_qualifier::qualify_table_name_for_data(
        db_pool,
        &profile_name,
        &table_name,
    )
    .await?;

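    // Only identifiers (the schema-qualified table name and the quoted column
    // names) are interpolated into the SQL string; every value travels through
    // the bound `params`, so the statement itself stays parameterised.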
    let sql = format!(
        "INSERT INTO {} ({}) VALUES ({}) RETURNING id",
        qualified_table,
        columns_list.join(", "),
        placeholders.join(", ")
    );

    let result = sqlx::query_scalar_with::<_, i64, _>(&sql, params)
        .fetch_one(db_pool)
        .await;

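    // SQLSTATE 42P01 ("undefined_table") means the table is present in
    // `table_definitions` but was never physically created, so it is surfaced
    // with a more specific message than a generic insert failure.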
    let inserted_id = match result {
        Ok(id) => id,
        Err(e) => {
            if let Some(db_err) = e.as_database_error() {
                if db_err.code().as_deref() == Some("42P01") {
                    return Err(Status::internal(format!(
                        "Table '{}' is defined but does not physically exist in the database as {}",
                        table_name, qualified_table
                    )));
                }
            }
            return Err(Status::internal(format!("Insert failed: {}", e)));
        }
    };

    let command = IndexCommand::AddOrUpdate(IndexCommandData {
        table_name: table_name.clone(),
        row_id: inserted_id,
    });

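    // Indexing is best-effort: the row is already committed, so a failure to
    // enqueue the index command is logged as a critical inconsistency but does
    // not fail the request.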
    if let Err(e) = indexer_tx.send(command).await {
        error!(
            "CRITICAL: DB insert for table '{}' (id: {}) succeeded but failed to queue for indexing: {}. Search index is now inconsistent.",
            table_name, inserted_id, e
        );
    }

    Ok(PostTableDataResponse {
        success: true,
        message: "Data inserted successfully".into(),
        inserted_id,
    })
}