delete tests: robustness improvements, not yet fully working

This commit is contained in:
filipriec
2025-06-25 08:44:36 +02:00
parent b297c2b311
commit 560d8b7234
5 changed files with 1063 additions and 102 deletions

View File

@@ -1,19 +1,52 @@
// tests/tables_data/handlers/delete_table_data_test.rs
use rstest::{fixture, rstest};
use sqlx::{PgPool, Row};
use tonic;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;
use chrono::Utc;
use tokio::sync::{mpsc, Mutex};
use serde_json::json;
use chrono::Utc;
use futures::future::join_all;
use prost_types::{value::Kind, Value};
use rand::Rng;
use rand::distr::Alphanumeric; // Corrected import
// Common imports from other modules
use common::proto::multieko2::table_definition::{
PostTableDefinitionRequest, ColumnDefinition as TableColumnDefinition, TableLink,
};
use common::proto::multieko2::tables_data::{
DeleteTableDataRequest, DeleteTableDataResponse, PostTableDataRequest, PutTableDataRequest,
};
use server::indexer::IndexCommand;
use server::table_definition::handlers::post_table_definition;
use server::tables_data::handlers::{delete_table_data, post_table_data, put_table_data};
use crate::common::setup_test_db;
// ========= Test Helpers =========
fn generate_unique_id() -> String {
rand::rng() // Corrected function call
.sample_iter(&Alphanumeric)
.take(8)
.map(char::from)
.collect::<String>()
.to_lowercase()
}
fn string_to_proto_value(s: &str) -> Value {
Value {
kind: Some(Kind::StringValue(s.to_string())),
}
}
// ========= Fixtures =========
#[fixture]
async fn pool() -> PgPool {
setup_test_db().await
@@ -30,8 +63,10 @@ async fn closed_pool(#[future] pool: PgPool) -> PgPool {
async fn existing_profile(#[future] pool: PgPool) -> (PgPool, String, i64) {
let pool = pool.await;
let profile_name = format!("TestProfile_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
// FIX: The table is `schemas`, not `profiles`.
let profile = sqlx::query!(
"INSERT INTO profiles (name) VALUES ($1) RETURNING id",
"INSERT INTO schemas (name) VALUES ($1) RETURNING id",
profile_name
)
.fetch_one(&pool)
@@ -44,29 +79,19 @@ async fn existing_profile(#[future] pool: PgPool) -> (PgPool, String, i64) {
async fn existing_table(
#[future] existing_profile: (PgPool, String, i64),
) -> (PgPool, String, i64, String) {
let (pool, profile_name, schema_id) = existing_profile.await; // Renamed for clarity
let table_name = format!("test_table_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
// Define columns for the table
let columns = json!([
{ "name": "id", "type": "BIGSERIAL", "primary_key": true },
{ "name": "deleted", "type": "BOOLEAN", "default": false }
]);
let indexes = json!([]);
// FIX: The column is `schema_id`, not `profile_id`.
sqlx::query!(
"INSERT INTO table_definitions (profile_id, table_name, columns, indexes) VALUES ($1, $2, $3, $4)",
profile_id,
"INSERT INTO table_definitions (schema_id, table_name, columns, indexes) VALUES ($1, $2, $3, $4)",
schema_id,
table_name,
columns,
indexes
@@ -75,39 +100,28 @@ async fn existing_table(
.await
.unwrap();
// Create the physical schema and table
sqlx::query(&format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", profile_name))
.execute(&pool).await.unwrap();
let create_table = format!(
r#"
CREATE TABLE "{}" (
id BIGSERIAL PRIMARY KEY,
deleted BOOLEAN NOT NULL DEFAULT false
)
"#,
table_name
r#"CREATE TABLE "{}"."{}" (id BIGSERIAL PRIMARY KEY, deleted BOOLEAN NOT NULL DEFAULT false)"#,
profile_name, table_name
);
sqlx::query(&create_table)
.execute(&pool)
.await
.unwrap();
(pool, profile_name, schema_id, table_name)
}
#[fixture]
async fn existing_record(
#[future] existing_table: (PgPool, String, i64, String),
) -> (PgPool, String, String, i64) {
let (pool, profile_name, _schema_id, table_name) = existing_table.await;
let query = format!(
"INSERT INTO \"{}\" (deleted) VALUES (false) RETURNING id",
table_name
"INSERT INTO \"{}\".\"{}\" (deleted) VALUES (false) RETURNING id",
profile_name, table_name
);
let row = sqlx::query(&query).fetch_one(&pool).await.unwrap();
let id: i64 = row.get("id");
(pool, profile_name, table_name, id)
}
@@ -116,45 +130,67 @@ async fn existing_record(
async fn existing_deleted_record(
#[future] existing_table: (PgPool, String, i64, String),
) -> (PgPool, String, String, i64) {
let (pool, profile_name, _schema_id, table_name) = existing_table.await;
let query = format!(
"INSERT INTO \"{}\" (deleted) VALUES (true) RETURNING id",
table_name
"INSERT INTO \"{}\".\"{}\" (deleted) VALUES (true) RETURNING id",
profile_name, table_name
);
let row = sqlx::query(&query).fetch_one(&pool).await.unwrap();
let id: i64 = row.get("id");
(pool, profile_name, table_name, id)
}
// New fixture for advanced tests
#[derive(Clone)]
struct AdvancedDeleteContext {
pool: PgPool,
profile_name: String,
category_table: String,
product_table: String,
indexer_tx: mpsc::Sender<IndexCommand>,
indexer_rx: Arc<tokio::sync::Mutex<mpsc::Receiver<IndexCommand>>>,
}
#[fixture]
async fn advanced_delete_context() -> AdvancedDeleteContext {
let pool = setup_test_db().await;
let unique_id = generate_unique_id();
let profile_name = format!("adv_del_profile_{}", unique_id);
let category_table = format!("categories_adv_del_{}", unique_id);
let product_table = format!("products_adv_del_{}", unique_id);
let category_def = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: category_table.clone(),
columns: vec![TableColumnDefinition { name: "name".into(), field_type: "text".into() }],
links: vec![], indexes: vec![],
};
post_table_definition(&pool, category_def).await.unwrap();
let product_def = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: product_table.clone(),
columns: vec![TableColumnDefinition { name: "name".into(), field_type: "text".into() }],
links: vec![TableLink { linked_table_name: category_table.clone(), required: true }],
indexes: vec![],
};
post_table_definition(&pool, product_def).await.unwrap();
let (tx, rx) = mpsc::channel(100);
AdvancedDeleteContext {
pool, profile_name, category_table, product_table,
indexer_tx: tx,
indexer_rx: Arc::new(tokio::sync::Mutex::new(rx)),
}
}
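// Note: this fixture derives unique schema and table names via
// generate_unique_id(), so each test run is isolated without a shared mutex
// or explicit cleanup between tests.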
// ========= Basic Tests =========
#[rstest]
#[tokio::test]
async fn test_delete_table_data_success(
#[future] existing_record: (PgPool, String, String, i64),
) {
let (pool, profile_name, table_name, record_id) = existing_record.await;
let request = DeleteTableDataRequest {
profile_name: profile_name.clone(),
@@ -164,27 +200,14 @@ async fn test_delete_table_data_success(
let response = delete_table_data(&pool, request).await.unwrap();
assert!(response.success);
let query = format!("SELECT deleted FROM \"{}\".\"{}\" WHERE id = $1", profile_name, table_name);
let row = sqlx::query(&query).bind(record_id).fetch_one(&pool).await.unwrap();
assert!(row.get::<bool, _>("deleted"));
}
#[rstest]
#[tokio::test]
async fn test_delete_table_data_profile_not_found(#[future] pool: PgPool) {
let pool = pool.await;
let request = DeleteTableDataRequest {
profile_name: "NonExistentProfile".to_string(),
@@ -201,7 +224,6 @@ async fn test_delete_table_data_profile_not_found(
async fn test_delete_table_data_table_not_found(
#[future] existing_profile: (PgPool, String, i64),
) {
let (pool, profile_name, _) = existing_profile.await;
let request = DeleteTableDataRequest {
profile_name,
@@ -218,17 +240,14 @@ async fn test_delete_table_data_table_not_found(
async fn test_delete_table_data_record_not_found(
#[future] existing_table: (PgPool, String, i64, String),
) {
let (pool, profile_name, _, table_name) = existing_table.await;
let request = DeleteTableDataRequest {
profile_name,
table_name: table_name.clone(),
record_id: 9999,
};
let response = delete_table_data(&pool, request).await.unwrap();
assert!(!response.success);
}
#[rstest]
@@ -236,24 +255,19 @@ async fn test_delete_table_data_record_not_found(
async fn test_delete_table_data_already_deleted(
#[future] existing_deleted_record: (PgPool, String, String, i64),
) {
let (pool, profile_name, table_name, record_id) = existing_deleted_record.await;
let request = DeleteTableDataRequest {
profile_name,
table_name: table_name.clone(),
record_id,
};
let response = delete_table_data(&pool, request).await.unwrap();
assert!(!response.success);
}
#[rstest]
#[tokio::test]
async fn test_delete_table_data_database_error(#[future] closed_pool: PgPool) {
let closed_pool = closed_pool.await;
let request = DeleteTableDataRequest {
profile_name: "test".to_string(),
@@ -264,3 +278,7 @@ async fn test_delete_table_data_database_error(
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}
// Include the new, more advanced tests
include!("delete_table_data_test2.rs");
include!("delete_table_data_test3.rs");

View File

@@ -0,0 +1,238 @@
// tests/tables_data/handlers/delete_table_data_test2.rs
// ========================================================================
// Foreign Key Integrity Tests
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_soft_delete_does_not_break_foreign_key_references(
#[future] advanced_delete_context: AdvancedDeleteContext,
) {
// Arrange: Create a category and a product that links to it.
let context = advanced_delete_context.await;
let mut category_data = HashMap::new();
category_data.insert("name".to_string(), string_to_proto_value("Electronics"));
let category_req = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
data: category_data,
};
let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
let category_id = category_res.inserted_id;
let mut product_data = HashMap::new();
product_data.insert("name".to_string(), string_to_proto_value("Laptop"));
product_data.insert(
format!("{}_id", context.category_table),
Value { kind: Some(Kind::NumberValue(category_id as f64)) },
);
let product_req = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.product_table.clone(),
data: product_data,
};
let product_res = post_table_data(&context.pool, product_req, &context.indexer_tx).await.unwrap();
let product_id = product_res.inserted_id;
// Act: Soft-delete the category record.
let delete_req = DeleteTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
record_id: category_id,
};
let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
assert!(delete_res.success);
// Assert: The product record still exists and its foreign key still points to the (now soft-deleted) category ID.
let query = format!(
r#"SELECT "{}_id" FROM "{}"."{}" WHERE id = $1"#,
context.category_table, context.profile_name, context.product_table
);
let fk_id: i64 = sqlx::query_scalar(&query)
.bind(product_id)
.fetch_one(&context.pool)
.await
.unwrap();
assert_eq!(fk_id, category_id, "Foreign key reference should remain intact after soft delete.");
}
// ========================================================================
// Indexer Integration Tests
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_delete_does_not_send_indexer_command(
#[future] advanced_delete_context: AdvancedDeleteContext,
) {
// Arrange
let context = advanced_delete_context.await;
let mut category_data = HashMap::new();
category_data.insert("name".to_string(), string_to_proto_value("Test Category"));
let category_req = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
data: category_data,
};
let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
let category_id = category_res.inserted_id;
// Drain the create command from the channel
let _ = context.indexer_rx.lock().await.recv().await;
// Act
let delete_req = DeleteTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
record_id: category_id,
};
let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
assert!(delete_res.success);
// Assert: Check that NO command was sent. This verifies current behavior.
let recv_result = tokio::time::timeout(
std::time::Duration::from_millis(50),
context.indexer_rx.lock().await.recv()
).await;
assert!(recv_result.is_err(), "Expected no indexer command to be sent on delete, but one was received.");
}
// ========================================================================
// Concurrency and State Mismatch Tests
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_concurrent_deletes_on_same_record(
#[future] advanced_delete_context: AdvancedDeleteContext,
) {
// Arrange
let context = advanced_delete_context.await;
let mut category_data = HashMap::new();
category_data.insert("name".to_string(), string_to_proto_value("Concurrent Delete Test"));
let category_req = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
data: category_data,
};
let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
let category_id = category_res.inserted_id;
// Act: Spawn multiple tasks to delete the same record.
let mut tasks = vec![];
for _ in 0..5 {
let pool = context.pool.clone();
let req = DeleteTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
record_id: category_id,
};
tasks.push(tokio::spawn(async move {
delete_table_data(&pool, req).await
}));
}
let results = join_all(tasks).await;
// Assert: Exactly one delete should succeed, the rest should fail (softly).
let success_count = results.iter().filter(|res|
res.is_ok() && res.as_ref().unwrap().is_ok() && res.as_ref().unwrap().as_ref().unwrap().success
).count();
assert_eq!(success_count, 1, "Exactly one concurrent delete operation should succeed.");
}
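// The "exactly one winner" assertion above relies on the soft delete being a
// single atomic statement. A minimal sketch of what such a statement could
// look like (hypothetical; the actual SQL inside `delete_table_data` may
// differ):
//
//     UPDATE "schema"."table"
//     SET deleted = true
//     WHERE id = $1 AND deleted = false
//     RETURNING id;
//
// With the `AND deleted = false` predicate, concurrent callers race on the
// row lock: the first writer flips the flag and gets a row back, while the
// rest match zero rows and can report `success = false` without erroring.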
#[rstest]
#[tokio::test]
async fn test_delete_fails_if_physical_table_is_missing(
#[future] advanced_delete_context: AdvancedDeleteContext,
) {
// Arrange: Create definitions, then manually drop the physical table to create a state mismatch.
let context = advanced_delete_context.await;
let qualified_table = format!("\"{}\".\"{}\"", context.profile_name, context.category_table);
sqlx::query(&format!("DROP TABLE {}", qualified_table)).execute(&context.pool).await.unwrap();
// Act: Attempt to delete a record from the logically-defined but physically-absent table.
let delete_req = DeleteTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
record_id: 1, // ID doesn't matter
};
let result = delete_table_data(&context.pool, delete_req).await;
// Assert: The operation should fail with the specific internal error for a missing relation.
assert!(result.is_err());
let err = result.unwrap_err();
assert_eq!(err.code(), tonic::Code::Internal);
assert!(
err.message().contains("is defined but does not physically exist"),
"Error message should indicate a state mismatch."
);
}
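// A minimal sketch of an existence probe that would yield the error asserted
// above (hypothetical; the handler's actual check may differ). Postgres's
// `to_regclass` returns NULL for a missing relation instead of raising an
// error, which makes it convenient for detecting definition/physical
// mismatches:
//
//     let exists: Option<String> = sqlx::query_scalar("SELECT to_regclass($1)::text")
//         .bind(format!(r#""{}"."{}""#, profile_name, table_name))
//         .fetch_one(pool)
//         .await?;
//     if exists.is_none() {
//         return Err(tonic::Status::internal(format!(
//             "Table '{}' is defined but does not physically exist",
//             table_name,
//         )));
//     }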
// ========================================================================
// Interaction with Other Endpoints
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_update_succeeds_on_soft_deleted_record(
#[future] advanced_delete_context: AdvancedDeleteContext,
) {
// Arrange: Create and then soft-delete a record.
let context = advanced_delete_context.await;
let mut category_data = HashMap::new();
category_data.insert("name".to_string(), string_to_proto_value("Original Name"));
let category_req = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
data: category_data,
};
let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
let category_id = category_res.inserted_id;
let delete_req = DeleteTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
record_id: category_id,
};
delete_table_data(&context.pool, delete_req).await.unwrap();
// Act: Attempt to update the soft-deleted record using the PUT handler.
let mut update_data = HashMap::new();
update_data.insert("name".to_string(), string_to_proto_value("Updated After Delete"));
let put_req = PutTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
id: category_id,
data: update_data,
};
let put_result = put_table_data(&context.pool, put_req, &context.indexer_tx).await;
// Assert: This test documents the current behavior relevant to the requirement
// to "freeze operations" on deleted records. The PUT handler does NOT check the
// `deleted` flag, so the update succeeds. To reject such updates instead, add a
// check in `put_table_data` for records that are already soft-deleted (see the
// sketch after this test).
assert!(put_result.is_ok(), "PUT should succeed on a soft-deleted record (current behavior).");
let put_res = put_result.unwrap();
assert!(put_res.success);
// Verify the name was updated, but the record remains marked as deleted.
let row = sqlx::query(&format!(
r#"SELECT name, deleted FROM "{}"."{}" WHERE id = $1"#,
context.profile_name, context.category_table
))
.bind(category_id)
.fetch_one(&context.pool)
.await
.unwrap();
let name: String = row.get("name");
let deleted: bool = row.get("deleted");
assert_eq!(name, "Updated After Delete");
assert!(deleted, "Record should remain soft-deleted after an update.");
}
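// A minimal sketch of the guard described above (hypothetical; the column name
// and chosen status code are assumptions, not the current `put_table_data`
// implementation):
//
//     let is_deleted: bool = sqlx::query_scalar(&format!(
//         r#"SELECT deleted FROM "{}"."{}" WHERE id = $1"#,
//         profile_name, table_name
//     ))
//     .bind(record_id)
//     .fetch_one(pool)
//     .await
//     .map_err(|_| tonic::Status::not_found("Record not found"))?;
//     if is_deleted {
//         return Err(tonic::Status::failed_precondition(
//             "Record is soft-deleted; updates are frozen",
//         ));
//     }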

View File

@@ -0,0 +1,567 @@
// tests/tables_data/handlers/delete_table_data_test3.rs
// ========================================================================
// Input Validation and Edge Cases
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_delete_with_negative_record_id(
#[future] existing_table: (PgPool, String, i64, String),
) {
let (pool, profile_name, _, table_name) = existing_table.await;
let request = DeleteTableDataRequest {
profile_name,
table_name,
record_id: -1,
};
let response = delete_table_data(&pool, request).await.unwrap();
assert!(!response.success, "Delete with negative ID should fail gracefully");
}
#[rstest]
#[tokio::test]
async fn test_delete_with_zero_record_id(
#[future] existing_table: (PgPool, String, i64, String),
) {
let (pool, profile_name, _, table_name) = existing_table.await;
let request = DeleteTableDataRequest {
profile_name,
table_name,
record_id: 0,
};
let response = delete_table_data(&pool, request).await.unwrap();
assert!(!response.success, "Delete with zero ID should fail gracefully");
}
#[rstest]
#[tokio::test]
async fn test_delete_with_max_int64_record_id(
#[future] existing_table: (PgPool, String, i64, String),
) {
let (pool, profile_name, _, table_name) = existing_table.await;
let request = DeleteTableDataRequest {
profile_name,
table_name,
record_id: i64::MAX,
};
let response = delete_table_data(&pool, request).await.unwrap();
assert!(!response.success, "Delete with max int64 ID should fail gracefully");
}
// ========================================================================
// Malformed Input Handling
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_delete_with_empty_profile_name(#[future] pool: PgPool) {
let pool = pool.await;
let request = DeleteTableDataRequest {
profile_name: "".to_string(),
table_name: "test_table".to_string(),
record_id: 1,
};
let result = delete_table_data(&pool, request).await;
assert!(result.is_err(), "Empty profile name should be rejected");
assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}
#[rstest]
#[tokio::test]
async fn test_delete_with_whitespace_only_profile_name(#[future] pool: PgPool) {
let pool = pool.await;
let request = DeleteTableDataRequest {
profile_name: " ".to_string(),
table_name: "test_table".to_string(),
record_id: 1,
};
let result = delete_table_data(&pool, request).await;
assert!(result.is_err(), "Whitespace-only profile name should be rejected");
assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}
#[rstest]
#[tokio::test]
async fn test_delete_with_empty_table_name(
#[future] existing_profile: (PgPool, String, i64),
) {
let (pool, profile_name, _) = existing_profile.await;
let request = DeleteTableDataRequest {
profile_name,
table_name: "".to_string(),
record_id: 1,
};
let result = delete_table_data(&pool, request).await;
assert!(result.is_err(), "Empty table name should be rejected");
assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}
#[rstest]
#[tokio::test]
async fn test_delete_with_sql_injection_attempt(
#[future] existing_profile: (PgPool, String, i64),
) {
let (pool, profile_name, _) = existing_profile.await;
let request = DeleteTableDataRequest {
profile_name,
table_name: "test'; DROP TABLE users; --".to_string(),
record_id: 1,
};
let result = delete_table_data(&pool, request).await;
assert!(result.is_err(), "SQL injection attempt should be rejected");
assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}
// ========================================================================
// Data Integrity Verification Tests
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_delete_only_affects_target_record(
#[future] existing_table: (PgPool, String, i64, String),
) {
// Arrange: Create multiple records
let (pool, profile_name, _, table_name) = existing_table.await;
let mut record_ids = Vec::new();
for _ in 0..5 {
let query = format!(
"INSERT INTO \"{}\".\"{}\" (deleted) VALUES (false) RETURNING id",
profile_name, table_name
);
let row = sqlx::query(&query).fetch_one(&pool).await.unwrap();
let id: i64 = row.get("id");
record_ids.push(id);
}
let target_id = record_ids[2]; // Delete the middle record
// Act: Delete one specific record
let request = DeleteTableDataRequest {
profile_name: profile_name.clone(),
table_name: table_name.clone(),
record_id: target_id,
};
let response = delete_table_data(&pool, request).await.unwrap();
assert!(response.success);
// Assert: Verify only the target record is deleted
for &id in &record_ids {
let query = format!(
"SELECT deleted FROM \"{}\".\"{}\" WHERE id = $1",
profile_name, table_name
);
let row = sqlx::query(&query).bind(id).fetch_one(&pool).await.unwrap();
let is_deleted: bool = row.get("deleted");
if id == target_id {
assert!(is_deleted, "Target record should be marked as deleted");
} else {
assert!(!is_deleted, "Non-target records should remain undeleted");
}
}
}
#[rstest]
#[tokio::test]
async fn test_delete_preserves_all_other_fields(
#[future] advanced_delete_context: AdvancedDeleteContext,
) {
// Arrange: Create a record with rich data
let context = advanced_delete_context.await;
let mut category_data = HashMap::new();
category_data.insert("name".to_string(), string_to_proto_value("Preserve Test Category"));
let category_req = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
data: category_data,
};
let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
let category_id = category_res.inserted_id;
// Capture state before deletion
let before_query = format!(
"SELECT id, name, deleted, created_at FROM \"{}\".\"{}\" WHERE id = $1",
context.profile_name, context.category_table
);
let before_row = sqlx::query(&before_query)
.bind(category_id)
.fetch_one(&context.pool)
.await
.unwrap();
let before_id: i64 = before_row.get("id");
let before_name: String = before_row.get("name");
let before_deleted: bool = before_row.get("deleted");
let before_created_at: chrono::DateTime<chrono::Utc> = before_row.get("created_at");
// Act: Delete the record
let delete_req = DeleteTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
record_id: category_id,
};
let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
assert!(delete_res.success);
// Assert: Verify only 'deleted' field changed
let after_query = format!(
"SELECT id, name, deleted, created_at FROM \"{}\".\"{}\" WHERE id = $1",
context.profile_name, context.category_table
);
let after_row = sqlx::query(&after_query)
.bind(category_id)
.fetch_one(&context.pool)
.await
.unwrap();
let after_id: i64 = after_row.get("id");
let after_name: String = after_row.get("name");
let after_deleted: bool = after_row.get("deleted");
let after_created_at: chrono::DateTime<chrono::Utc> = after_row.get("created_at");
assert_eq!(before_id, after_id, "ID should not change");
assert_eq!(before_name, after_name, "Name should not change");
assert_eq!(before_created_at, after_created_at, "Created timestamp should not change");
assert_eq!(before_deleted, false, "Record should initially be not deleted");
assert_eq!(after_deleted, true, "Record should be marked as deleted after operation");
}
#[rstest]
#[tokio::test]
async fn test_delete_count_verification(
#[future] existing_table: (PgPool, String, i64, String),
) {
// Arrange: Create records and count them
let (pool, profile_name, _, table_name) = existing_table.await;
// Create 3 records
let mut record_ids = Vec::new();
for _ in 0..3 {
let query = format!(
"INSERT INTO \"{}\".\"{}\" (deleted) VALUES (false) RETURNING id",
profile_name, table_name
);
let row = sqlx::query(&query).fetch_one(&pool).await.unwrap();
let id: i64 = row.get("id");
record_ids.push(id);
}
// Verify initial count
let count_query = format!(
"SELECT COUNT(*) as total, COUNT(*) FILTER (WHERE deleted = false) as active FROM \"{}\".\"{}\"",
profile_name, table_name
);
let count_row = sqlx::query(&count_query).fetch_one(&pool).await.unwrap();
let initial_total: i64 = count_row.get("total");
let initial_active: i64 = count_row.get("active");
// Act: Delete one record
let request = DeleteTableDataRequest {
profile_name: profile_name.clone(),
table_name: table_name.clone(),
record_id: record_ids[0],
};
let response = delete_table_data(&pool, request).await.unwrap();
assert!(response.success);
// Assert: Verify counts after deletion
let final_count_row = sqlx::query(&count_query).fetch_one(&pool).await.unwrap();
let final_total: i64 = final_count_row.get("total");
let final_active: i64 = final_count_row.get("active");
assert_eq!(initial_total, final_total, "Total record count should not change (soft delete)");
assert_eq!(initial_active - 1, final_active, "Active record count should decrease by 1");
}
// ========================================================================
// Multiple Operations Sequence Testing
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_delete_then_post_same_data(
#[future] advanced_delete_context: AdvancedDeleteContext,
) {
// Arrange: Create and delete a record
let context = advanced_delete_context.await;
let mut category_data = HashMap::new();
category_data.insert("name".to_string(), string_to_proto_value("Reusable Name"));
let category_req = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
data: category_data.clone(),
};
let first_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
let first_id = first_res.inserted_id;
let delete_req = DeleteTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
record_id: first_id,
};
delete_table_data(&context.pool, delete_req).await.unwrap();
// Act: Try to POST the same data again
let second_req = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
data: category_data,
};
let second_res = post_table_data(&context.pool, second_req, &context.indexer_tx).await.unwrap();
// Assert: Should succeed with a new ID
assert!(second_res.success);
assert_ne!(first_id, second_res.inserted_id, "New record should have different ID");
// Verify both records exist in database
let count_query = format!(
"SELECT COUNT(*) as total FROM \"{}\".\"{}\" WHERE name = 'Reusable Name'",
context.profile_name, context.category_table
);
let count: i64 = sqlx::query_scalar(&count_query).fetch_one(&context.pool).await.unwrap();
assert_eq!(count, 2, "Should have 2 records with same name (one deleted, one active)");
}
#[rstest]
#[tokio::test]
async fn test_multiple_deletes_then_recreate_pattern(
#[future] advanced_delete_context: AdvancedDeleteContext,
) {
// Test a realistic pattern: create, delete, recreate multiple times
let context = advanced_delete_context.await;
let mut all_ids = Vec::new();
for i in 0..3 {
// Create
let mut category_data = HashMap::new();
category_data.insert("name".to_string(), string_to_proto_value(&format!("Cycle Name {}", i)));
let create_req = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
data: category_data,
};
let create_res = post_table_data(&context.pool, create_req, &context.indexer_tx).await.unwrap();
all_ids.push(create_res.inserted_id);
// Delete immediately
let delete_req = DeleteTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
record_id: create_res.inserted_id,
};
let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
assert!(delete_res.success);
}
// Verify all records are marked as deleted
for &id in &all_ids {
let query = format!(
"SELECT deleted FROM \"{}\".\"{}\" WHERE id = $1",
context.profile_name, context.category_table
);
let is_deleted: bool = sqlx::query_scalar(&query)
.bind(id)
.fetch_one(&context.pool)
.await
.unwrap();
assert!(is_deleted, "Record {} should be deleted", id);
}
}
// ========================================================================
// Performance and Stress Tests
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_delete_performance_with_many_records(
#[future] advanced_delete_context: AdvancedDeleteContext,
) {
// Arrange: Create many records
let context = advanced_delete_context.await;
let record_count = 100; // Adjust based on test environment
let mut record_ids = Vec::new();
for i in 0..record_count {
let mut category_data = HashMap::new();
category_data.insert("name".to_string(), string_to_proto_value(&format!("Perf Test {}", i)));
let create_req = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
data: category_data,
};
let create_res = post_table_data(&context.pool, create_req, &context.indexer_tx).await.unwrap();
record_ids.push(create_res.inserted_id);
}
// Act: Delete a record from the middle of the id range.
let target_id = record_ids[record_count / 2];
let start_time = std::time::Instant::now();
let delete_req = DeleteTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
record_id: target_id,
};
let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
let elapsed = start_time.elapsed();
// Assert: Operation should succeed and be reasonably fast
assert!(delete_res.success);
assert!(elapsed.as_millis() < 1000, "Delete should complete within 1 second even with {} records", record_count);
}
#[rstest]
#[tokio::test]
async fn test_rapid_sequential_deletes(
#[future] advanced_delete_context: AdvancedDeleteContext,
) {
// Arrange: Create multiple records
let context = advanced_delete_context.await;
let mut record_ids = Vec::new();
for i in 0..10 {
let mut category_data = HashMap::new();
category_data.insert("name".to_string(), string_to_proto_value(&format!("Rapid Delete {}", i)));
let create_req = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
data: category_data,
};
let create_res = post_table_data(&context.pool, create_req, &context.indexer_tx).await.unwrap();
record_ids.push(create_res.inserted_id);
}
// Act: Delete all records rapidly in sequence
let start_time = std::time::Instant::now();
for &record_id in &record_ids {
let delete_req = DeleteTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
record_id,
};
let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
assert!(delete_res.success, "Delete of record {} should succeed", record_id);
}
let elapsed = start_time.elapsed();
// Assert: All deletes should complete in reasonable time
assert!(elapsed.as_millis() < 5000, "10 sequential deletes should complete within 5 seconds");
// Verify all records are deleted
let count_query = format!(
"SELECT COUNT(*) FILTER (WHERE deleted = true) as deleted_count FROM \"{}\".\"{}\"",
context.profile_name, context.category_table
);
let deleted_count: i64 = sqlx::query_scalar(&count_query).fetch_one(&context.pool).await.unwrap();
assert_eq!(deleted_count as usize, record_ids.len(), "All records should be marked as deleted");
}
// ========================================================================
// Error Message Quality and Handling Tests
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_error_messages_are_descriptive(#[future] pool: PgPool) {
let pool = pool.await;
// Test profile not found error
let request = DeleteTableDataRequest {
profile_name: "NonExistentProfile123".to_string(),
table_name: "test_table".to_string(),
record_id: 1,
};
let result = delete_table_data(&pool, request).await;
assert!(result.is_err());
let error = result.unwrap_err();
assert_eq!(error.code(), tonic::Code::NotFound);
assert_eq!(error.message(), "Profile not found");
}
#[rstest]
#[tokio::test]
async fn test_table_not_found_error_message(
#[future] existing_profile: (PgPool, String, i64),
) {
let (pool, profile_name, _) = existing_profile.await;
let request = DeleteTableDataRequest {
profile_name,
table_name: "definitely_does_not_exist_12345".to_string(),
record_id: 1,
};
let result = delete_table_data(&pool, request).await;
assert!(result.is_err());
let error = result.unwrap_err();
assert_eq!(error.code(), tonic::Code::NotFound);
assert_eq!(error.message(), "Table not found in profile");
}
// ========================================================================
// Database State Consistency Tests
// ========================================================================
#[rstest]
#[tokio::test]
async fn test_delete_maintains_foreign_key_constraints(
#[future] advanced_delete_context: AdvancedDeleteContext,
) {
// This test ensures that soft deletes don't interfere with FK constraint validation
let context = advanced_delete_context.await;
// Create category
let mut category_data = HashMap::new();
category_data.insert("name".to_string(), string_to_proto_value("FK Test Category"));
let category_req = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
data: category_data,
};
let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
let category_id = category_res.inserted_id;
// Create product referencing the category
let mut product_data = HashMap::new();
product_data.insert("name".to_string(), string_to_proto_value("FK Test Product"));
product_data.insert(
format!("{}_id", context.category_table),
Value { kind: Some(Kind::NumberValue(category_id as f64)) },
);
let product_req = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.product_table.clone(),
data: product_data,
};
let product_res = post_table_data(&context.pool, product_req, &context.indexer_tx).await.unwrap();
// Soft delete the category
let delete_req = DeleteTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.category_table.clone(),
record_id: category_id,
};
let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
assert!(delete_res.success);
// The product should still exist and reference the soft-deleted category
let fk_query = format!(
"SELECT \"{}_id\" FROM \"{}\".\"{}\" WHERE id = $1",
context.category_table, context.profile_name, context.product_table
);
let fk_value: i64 = sqlx::query_scalar(&fk_query)
.bind(product_res.inserted_id)
.fetch_one(&context.pool)
.await
.unwrap();
assert_eq!(fk_value, category_id, "Foreign key should still point to soft-deleted category");
}

View File

@@ -1,7 +1,7 @@
// tests/tables_data/mod.rs
// pub mod post_table_data_test;
// pub mod put_table_data_test;
pub mod delete_table_data_test;
// pub mod get_table_data_test;
// pub mod get_table_data_count_test;
// pub mod get_table_data_by_position_test;

View File

@@ -119,3 +119,141 @@ async fn test_update_required_foreign_key_to_null_fails(
assert_eq!(err.code(), tonic::Code::Internal);
assert!(err.message().contains("Update failed"));
}
// tests/tables_data/handlers/put_table_data_test6.rs
// ========================================================================
// MISSING DATA TYPE VALIDATION TESTS FOR PUT HANDLER
// ========================================================================
// Note: These tests are replicated from post_table_data_test3.rs to ensure
// the PUT handler has the same level of type validation coverage as the POST handler.
#[rstest]
#[tokio::test]
async fn test_update_type_mismatch_string_for_integer(
#[future] data_type_test_context: DataTypeTestContext,
) {
// Arrange
let context = data_type_test_context.await;
let record_id = create_initial_data_type_record(&context).await;
// Act: Attempt to update an integer column with a string value.
let mut update_data = HashMap::new();
update_data.insert(
"my_bigint".to_string(),
create_string_value("not-an-integer"),
);
let request = PutTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.table_name.clone(),
id: record_id,
data: update_data,
};
let result =
put_table_data(&context.pool, request, &context.indexer_tx).await;
// Assert
assert!(result.is_err());
if let Err(err) = result {
assert_eq!(err.code(), tonic::Code::InvalidArgument);
assert!(err
.message()
.contains("Expected number for column 'my_bigint'"));
}
}
#[rstest]
#[tokio::test]
async fn test_update_type_mismatch_number_for_boolean(
#[future] data_type_test_context: DataTypeTestContext,
) {
// Arrange
let context = data_type_test_context.await;
let record_id = create_initial_data_type_record(&context).await;
// Act: Attempt to update a boolean column with a number value.
let mut update_data = HashMap::new();
update_data.insert("my_bool".to_string(), create_number_value(1.0));
let request = PutTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.table_name.clone(),
id: record_id,
data: update_data,
};
let result =
put_table_data(&context.pool, request, &context.indexer_tx).await;
// Assert
assert!(result.is_err());
if let Err(err) = result {
assert_eq!(err.code(), tonic::Code::InvalidArgument);
assert!(err
.message()
.contains("Expected boolean for column 'my_bool'"));
}
}
#[rstest]
#[tokio::test]
async fn test_update_with_various_valid_timestamp_formats(
#[future] data_type_test_context: DataTypeTestContext,
) {
// Arrange
let context = data_type_test_context.await;
let record_id = create_initial_data_type_record(&context).await;
let valid_timestamps = vec![
"2025-06-24T18:30:00Z",
"2023-01-01T00:00:00+00:00",
"2024-02-29T12:00:00.123456Z",
"1999-12-31T23:59:59.999Z",
];
for timestamp_str in valid_timestamps {
// Act
let mut update_data = HashMap::new();
update_data.insert(
"my_timestamp".to_string(),
create_string_value(timestamp_str),
);
let request = PutTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.table_name.clone(),
id: record_id,
data: update_data,
};
let result =
put_table_data(&context.pool, request, &context.indexer_tx).await;
// Assert
assert!(
result.is_ok(),
"Update should succeed for valid timestamp format: {}",
timestamp_str
);
// Verify the value was stored correctly
let stored_timestamp: chrono::DateTime<chrono::Utc> =
sqlx::query_scalar(&format!(
r#"SELECT my_timestamp FROM "{}"."{}" WHERE id = $1"#,
context.profile_name, context.table_name
))
.bind(record_id)
.fetch_one(&context.pool)
.await
.unwrap();
let expected_timestamp =
chrono::DateTime::parse_from_rfc3339(timestamp_str)
.unwrap()
.with_timezone(&chrono::Utc);
assert_eq!(stored_timestamp, expected_timestamp);
}
}