From 38c82389f74a0817a416f2fb9cdaa0d6e2e430d7 Mon Sep 17 00:00:00 2001
From: filipriec
Date: Wed, 25 Jun 2025 12:37:37 +0200
Subject: [PATCH] count gets a full pass in tests

---
 .../handlers/get_table_data_count_test.rs  |  10 +
 .../handlers/get_table_data_count_test3.rs | 715 ++++++++++++++++++
 2 files changed, 725 insertions(+)

diff --git a/server/tests/tables_data/handlers/get_table_data_count_test.rs b/server/tests/tables_data/handlers/get_table_data_count_test.rs
index 8cb82e8..4c45614 100644
--- a/server/tests/tables_data/handlers/get_table_data_count_test.rs
+++ b/server/tests/tables_data/handlers/get_table_data_count_test.rs
@@ -4,6 +4,7 @@ use tonic;
 use sqlx::PgPool;
 use common::proto::multieko2::tables_data::GetTableDataCountRequest;
 use common::proto::multieko2::table_definition::{PostTableDefinitionRequest, ColumnDefinition};
+use common::proto::multieko2::table_definition::TableLink;
 use server::tables_data::handlers::get_table_data_count;
 use server::table_definition::handlers::post_table_definition;
 use crate::common::setup_test_db;
@@ -63,6 +64,15 @@ async fn cleanup_test_environment(pool: &PgPool, schema_id: i64, profile_name: &
         .await
         .unwrap();
 
+    // Delete foreign key link rows first, so the table_definitions delete
+    // below does not trip over rows that still reference those definitions
+    sqlx::query!(
+        "DELETE FROM table_definition_links WHERE source_table_id IN (SELECT id FROM table_definitions WHERE schema_id = $1) OR linked_table_id IN (SELECT id FROM table_definitions WHERE schema_id = $1)",
+        schema_id
+    )
+    .execute(&mut *tx)
+    .await
+    .unwrap();
+
     sqlx::query!("DELETE FROM table_definitions WHERE schema_id = $1", schema_id)
         .execute(&mut *tx)
         .await
diff --git a/server/tests/tables_data/handlers/get_table_data_count_test3.rs b/server/tests/tables_data/handlers/get_table_data_count_test3.rs
index e69de29..d50aba9 100644
--- a/server/tests/tables_data/handlers/get_table_data_count_test3.rs
+++ b/server/tests/tables_data/handlers/get_table_data_count_test3.rs
@@ -0,0 +1,715 @@
+// tests/tables_data/handlers/get_table_data_count_test3.rs
+
+// NOTE: imports mirror get_table_data_count_test.rs; the `pool` fixture and
+// the shared `cleanup_test_environment` helper are assumed to be reachable
+// from this module the same way they are in the sibling count tests.
+use rstest::rstest;
+use sqlx::PgPool;
+use chrono::Utc;
+use common::proto::multieko2::tables_data::GetTableDataCountRequest;
+use common::proto::multieko2::table_definition::{
+    ColumnDefinition, PostTableDefinitionRequest, TableLink,
+};
+use server::tables_data::handlers::get_table_data_count;
+use server::table_definition::handlers::post_table_definition;
+use crate::common::pool;
+use crate::tables_data::handlers::get_table_data_count_test::cleanup_test_environment;
+
+#[rstest]
+#[tokio::test]
+async fn test_table_with_foreign_keys(#[future] pool: PgPool) {
+    let pool = pool.await;
+    let profile_name = format!("fk_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
+    let parent_table = "parent_table";
+    let child_table = "child_table";
+
+    // Create parent table first
+    let parent_request = PostTableDefinitionRequest {
+        profile_name: profile_name.clone(),
+        table_name: parent_table.to_string(),
+        columns: vec![
+            ColumnDefinition {
+                name: "name".to_string(),
+                field_type: "text".to_string(),
+            }
+        ],
+        indexes: vec![],
+        links: vec![],
+    };
+
+    post_table_definition(&pool, parent_request).await.unwrap();
+
+    // Create child table with an optional link to the parent
+    let child_request = PostTableDefinitionRequest {
+        profile_name: profile_name.clone(),
+        table_name: child_table.to_string(),
+        columns: vec![
+            ColumnDefinition {
+                name: "description".to_string(),
+                field_type: "text".to_string(),
+            }
+        ],
+        indexes: vec![],
+        links: vec![
+            TableLink {
+                linked_table_name: parent_table.to_string(),
+                required: false,
+            }
+        ],
+    };
+
+    post_table_definition(&pool, child_request).await.unwrap();
+
+    // Insert test data
+    let mut tx = pool.begin().await.unwrap();
+    let parent_id: i64 = sqlx::query_scalar(&format!(
+        r#"INSERT INTO "{}"."{}" (name) VALUES ('Parent 1') RETURNING id"#,
+        profile_name, parent_table
+    ))
+    .fetch_one(&mut *tx)
+    .await
+    .unwrap();
+
+    sqlx::query(&format!(
+        r#"INSERT INTO "{}"."{}" (parent_table_id, description) VALUES ({}, 'Child 1')"#,
+        profile_name, child_table,
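+        // parent_id captured above fills the child's generated FK column;
+        // the "{linked_table}_id" column name follows from the link definition.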
parent_id + )) + .execute(&mut *tx) + .await + .unwrap(); + + sqlx::query(&format!( + r#"INSERT INTO "{}"."{}" (parent_table_id, description) VALUES ({}, 'Child 2')"#, + profile_name, child_table, parent_id + )) + .execute(&mut *tx) + .await + .unwrap(); + tx.commit().await.unwrap(); + + // Test parent table count + let parent_request = GetTableDataCountRequest { + profile_name: profile_name.clone(), + table_name: parent_table.to_string(), + }; + let parent_response = get_table_data_count(&pool, parent_request).await.unwrap(); + assert_eq!(parent_response.count, 1); + + // Test child table count + let child_request = GetTableDataCountRequest { + profile_name: profile_name.clone(), + table_name: child_table.to_string(), + }; + let child_response = get_table_data_count(&pool, child_request).await.unwrap(); + assert_eq!(child_response.count, 2); + + // Cleanup + let schema_id = sqlx::query_scalar!( + "SELECT id FROM schemas WHERE name = $1", + profile_name + ) + .fetch_one(&pool) + .await + .unwrap(); + + cleanup_test_environment(&pool, schema_id, &profile_name).await; +} + +#[rstest] +#[tokio::test] +async fn test_multiple_foreign_keys(#[future] pool: PgPool) { + let pool = pool.await; + let profile_name = format!("multi_fk_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0)); + + // Create three parent tables + for table_name in ["users", "categories", "tags"] { + let request = PostTableDefinitionRequest { + profile_name: profile_name.clone(), + table_name: table_name.to_string(), + columns: vec![ + ColumnDefinition { + name: "name".to_string(), + field_type: "text".to_string(), + } + ], + indexes: vec![], + links: vec![], + }; + post_table_definition(&pool, request).await.unwrap(); + } + + // Create child table with links to all three parents + let child_request = PostTableDefinitionRequest { + profile_name: profile_name.clone(), + table_name: "posts".to_string(), + columns: vec![ + ColumnDefinition { + name: "title".to_string(), + field_type: "text".to_string(), + } + ], + indexes: vec![], + links: vec![ + TableLink { + linked_table_name: "users".to_string(), + required: true, + }, + TableLink { + linked_table_name: "categories".to_string(), + required: true, + }, + TableLink { + linked_table_name: "tags".to_string(), + required: false, + } + ], + }; + + post_table_definition(&pool, child_request).await.unwrap(); + + // Insert test data + let mut tx = pool.begin().await.unwrap(); + + let user_id: i64 = sqlx::query_scalar(&format!( + r#"INSERT INTO "{}"."{}" (name) VALUES ('User1') RETURNING id"#, + profile_name, "users" + )).fetch_one(&mut *tx).await.unwrap(); + + let category_id: i64 = sqlx::query_scalar(&format!( + r#"INSERT INTO "{}"."{}" (name) VALUES ('Tech') RETURNING id"#, + profile_name, "categories" + )).fetch_one(&mut *tx).await.unwrap(); + + let tag_id: i64 = sqlx::query_scalar(&format!( + r#"INSERT INTO "{}"."{}" (name) VALUES ('Important') RETURNING id"#, + profile_name, "tags" + )).fetch_one(&mut *tx).await.unwrap(); + + // Insert posts with foreign keys + sqlx::query(&format!( + r#"INSERT INTO "{}"."{}" (title, users_id, categories_id, tags_id) VALUES ('Post 1', {}, {}, {})"#, + profile_name, "posts", user_id, category_id, tag_id + )).execute(&mut *tx).await.unwrap(); + + sqlx::query(&format!( + r#"INSERT INTO "{}"."{}" (title, users_id, categories_id) VALUES ('Post 2', {}, {})"#, + profile_name, "posts", user_id, category_id + )).execute(&mut *tx).await.unwrap(); + + tx.commit().await.unwrap(); + + // Test counts + let posts_count = get_table_data_count(&pool, 
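+        // Two posts were inserted above; the count should simply be the
+        // number of rows, independent of how many FK columns each row carries.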
GetTableDataCountRequest {
+        profile_name: profile_name.clone(),
+        table_name: "posts".to_string(),
+    }).await.unwrap();
+    assert_eq!(posts_count.count, 2);
+
+    // Cleanup
+    let schema_id = sqlx::query_scalar!(
+        "SELECT id FROM schemas WHERE name = $1",
+        profile_name
+    ).fetch_one(&pool).await.unwrap();
+
+    cleanup_test_environment(&pool, schema_id, &profile_name).await;
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_required_vs_optional_foreign_keys(#[future] pool: PgPool) {
+    let pool = pool.await;
+    let profile_name = format!("req_opt_fk_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
+
+    // Create parent table
+    let parent_request = PostTableDefinitionRequest {
+        profile_name: profile_name.clone(),
+        table_name: "companies".to_string(),
+        columns: vec![
+            ColumnDefinition {
+                name: "name".to_string(),
+                field_type: "text".to_string(),
+            }
+        ],
+        indexes: vec![],
+        links: vec![],
+    };
+    post_table_definition(&pool, parent_request).await.unwrap();
+
+    // Create child table with a required link; the optional case is already
+    // exercised by test_table_with_foreign_keys above
+    let child_request = PostTableDefinitionRequest {
+        profile_name: profile_name.clone(),
+        table_name: "employees".to_string(),
+        columns: vec![
+            ColumnDefinition {
+                name: "name".to_string(),
+                field_type: "text".to_string(),
+            }
+        ],
+        indexes: vec![],
+        links: vec![
+            TableLink {
+                linked_table_name: "companies".to_string(),
+                required: true, // Required foreign key
+            }
+        ],
+    };
+    post_table_definition(&pool, child_request).await.unwrap();
+
+    // Insert test data
+    let mut tx = pool.begin().await.unwrap();
+
+    let company_id: i64 = sqlx::query_scalar(&format!(
+        r#"INSERT INTO "{}"."{}" (name) VALUES ('TechCorp') RETURNING id"#,
+        profile_name, "companies"
+    )).fetch_one(&mut *tx).await.unwrap();
+
+    // Insert employees with required foreign key
+    for i in 1..=5 {
+        sqlx::query(&format!(
+            r#"INSERT INTO "{}"."{}" (name, companies_id) VALUES ('Employee {}', {})"#,
+            profile_name, "employees", i, company_id
+        )).execute(&mut *tx).await.unwrap();
+    }
+
+    tx.commit().await.unwrap();
+
+    // Test counts
+    let companies_count = get_table_data_count(&pool, GetTableDataCountRequest {
+        profile_name: profile_name.clone(),
+        table_name: "companies".to_string(),
+    }).await.unwrap();
+    assert_eq!(companies_count.count, 1);
+
+    let employees_count = get_table_data_count(&pool, GetTableDataCountRequest {
+        profile_name: profile_name.clone(),
+        table_name: "employees".to_string(),
+    }).await.unwrap();
+    assert_eq!(employees_count.count, 5);
+
+    // Cleanup
+    let schema_id = sqlx::query_scalar!(
+        "SELECT id FROM schemas WHERE name = $1",
+        profile_name
+    ).fetch_one(&pool).await.unwrap();
+
+    cleanup_test_environment(&pool, schema_id, &profile_name).await;
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_performance_stress_large_dataset(#[future] pool: PgPool) {
+    let pool = pool.await;
+    let profile_name = format!("stress_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
+    let table_name = "stress_table";
+
+    // Create table
+    let request = PostTableDefinitionRequest {
+        profile_name: profile_name.clone(),
+        table_name: table_name.to_string(),
+        columns: vec![
+            ColumnDefinition {
+                name: "data".to_string(),
+                field_type: "text".to_string(),
+            },
+            ColumnDefinition {
+                name: "number".to_string(),
+                field_type: "integer".to_string(),
+            }
+        ],
+        indexes: vec!["number".to_string()], // Add index for better performance
+        links: vec![],
+    };
+    post_table_definition(&pool, request).await.unwrap();
+
+    // Insert 10,000 records in batches
+    let mut tx = pool.begin().await.unwrap();
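+    // Batch sketch: 100 batches x 100 rows = 10,000 rows via multi-row
+    // VALUES lists, keeping each statement a manageable size. Rows numbered
+    // 1..=1000 are soft-deleted below, so the expected live count is 9,000,
+    // assuming the count handler filters on the `deleted` flag.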
for batch in 0..100 { + let mut values = Vec::new(); + for i in 1..=100 { + let record_num = batch * 100 + i; + values.push(format!("('Data {}', {})", record_num, record_num)); + } + + let sql = format!( + r#"INSERT INTO "{}"."{}" (data, number) VALUES {}"#, + profile_name, table_name, values.join(", ") + ); + sqlx::query(&sql).execute(&mut *tx).await.unwrap(); + } + + // Mark some as deleted + sqlx::query(&format!( + r#"UPDATE "{}"."{}" SET deleted = true WHERE number <= 1000"#, + profile_name, table_name + )).execute(&mut *tx).await.unwrap(); + + tx.commit().await.unwrap(); + + // Test count performance + let start = std::time::Instant::now(); + let response = get_table_data_count(&pool, GetTableDataCountRequest { + profile_name: profile_name.clone(), + table_name: table_name.to_string(), + }).await.unwrap(); + let duration = start.elapsed(); + + assert_eq!(response.count, 9000); // 10000 - 1000 deleted + + // Performance assertion - should complete within reasonable time + assert!(duration.as_secs() < 5, "Count operation took too long: {:?}", duration); + + // Cleanup + let schema_id = sqlx::query_scalar!( + "SELECT id FROM schemas WHERE name = $1", + profile_name + ).fetch_one(&pool).await.unwrap(); + + cleanup_test_environment(&pool, schema_id, &profile_name).await; +} + +#[rstest] +#[tokio::test] +async fn test_maximum_identifier_lengths(#[future] pool: PgPool) { + let pool = pool.await; + + // Test with maximum length names (63 characters - PostgreSQL limit) + let max_profile_name = "a".repeat(63); + let max_table_name = "b".repeat(63); + + let request = PostTableDefinitionRequest { + profile_name: max_profile_name.clone(), + table_name: max_table_name.clone(), + columns: vec![ + ColumnDefinition { + name: "c".repeat(63), // Max column name + field_type: "text".to_string(), + } + ], + indexes: vec![], + links: vec![], + }; + + post_table_definition(&pool, request).await.unwrap(); + + // Insert test data + let mut tx = pool.begin().await.unwrap(); + sqlx::query(&format!( + r#"INSERT INTO "{}"."{}" ("{}") VALUES ('test')"#, + max_profile_name, max_table_name, "c".repeat(63) + )).execute(&mut *tx).await.unwrap(); + tx.commit().await.unwrap(); + + // Test count + let response = get_table_data_count(&pool, GetTableDataCountRequest { + profile_name: max_profile_name.clone(), + table_name: max_table_name.clone(), + }).await.unwrap(); + assert_eq!(response.count, 1); + + // Cleanup + let schema_id = sqlx::query_scalar!( + "SELECT id FROM schemas WHERE name = $1", + max_profile_name + ).fetch_one(&pool).await.unwrap(); + + cleanup_test_environment(&pool, schema_id, &max_profile_name).await; +} + +#[rstest] +#[tokio::test] +async fn test_complex_schema_hierarchy(#[future] pool: PgPool) { + let pool = pool.await; + let profile_name = format!("hierarchy_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0)); + + // Create A -> B -> C -> D hierarchy + let tables = ["table_a", "table_b", "table_c", "table_d"]; + + // Create first table (no dependencies) + let request_a = PostTableDefinitionRequest { + profile_name: profile_name.clone(), + table_name: tables[0].to_string(), + columns: vec![ + ColumnDefinition { + name: "name".to_string(), + field_type: "text".to_string(), + } + ], + indexes: vec![], + links: vec![], + }; + post_table_definition(&pool, request_a).await.unwrap(); + + // Create subsequent tables with dependencies + for i in 1..tables.len() { + let request = PostTableDefinitionRequest { + profile_name: profile_name.clone(), + table_name: tables[i].to_string(), + columns: vec![ + 
ColumnDefinition {
+                    name: "data".to_string(),
+                    field_type: "text".to_string(),
+                }
+            ],
+            indexes: vec![],
+            links: vec![
+                TableLink {
+                    linked_table_name: tables[i-1].to_string(),
+                    required: true,
+                }
+            ],
+        };
+        post_table_definition(&pool, request).await.unwrap();
+    }
+
+    // Insert hierarchical data
+    let mut tx = pool.begin().await.unwrap();
+
+    let a_id: i64 = sqlx::query_scalar(&format!(
+        r#"INSERT INTO "{}"."{}" (name) VALUES ('Root') RETURNING id"#,
+        profile_name, tables[0]
+    )).fetch_one(&mut *tx).await.unwrap();
+
+    let b_id: i64 = sqlx::query_scalar(&format!(
+        r#"INSERT INTO "{}"."{}" (data, table_a_id) VALUES ('Level B', {}) RETURNING id"#,
+        profile_name, tables[1], a_id
+    )).fetch_one(&mut *tx).await.unwrap();
+
+    let c_id: i64 = sqlx::query_scalar(&format!(
+        r#"INSERT INTO "{}"."{}" (data, table_b_id) VALUES ('Level C', {}) RETURNING id"#,
+        profile_name, tables[2], b_id
+    )).fetch_one(&mut *tx).await.unwrap();
+
+    sqlx::query(&format!(
+        r#"INSERT INTO "{}"."{}" (data, table_c_id) VALUES ('Level D', {})"#,
+        profile_name, tables[3], c_id
+    )).execute(&mut *tx).await.unwrap();
+
+    tx.commit().await.unwrap();
+
+    // Test counts for each level
+    for table_name in &tables {
+        let response = get_table_data_count(&pool, GetTableDataCountRequest {
+            profile_name: profile_name.clone(),
+            table_name: table_name.to_string(),
+        }).await.unwrap();
+        assert_eq!(response.count, 1, "Table {} should have count 1", table_name);
+    }
+
+    // Cleanup
+    let schema_id = sqlx::query_scalar!(
+        "SELECT id FROM schemas WHERE name = $1",
+        profile_name
+    ).fetch_one(&pool).await.unwrap();
+
+    cleanup_test_environment(&pool, schema_id, &profile_name).await;
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_concurrent_insert_and_count(#[future] pool: PgPool) {
+    let pool = pool.await;
+    let profile_name = format!("concurrent_insert_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
+    let table_name = "concurrent_ops_table";
+
+    // Create table
+    let request = PostTableDefinitionRequest {
+        profile_name: profile_name.clone(),
+        table_name: table_name.to_string(),
+        columns: vec![
+            ColumnDefinition {
+                name: "value".to_string(),
+                field_type: "integer".to_string(),
+            }
+        ],
+        indexes: vec![],
+        links: vec![],
+    };
+    post_table_definition(&pool, request).await.unwrap();
+
+    // Insert initial data
+    let mut tx = pool.begin().await.unwrap();
+    for i in 1..=100 {
+        sqlx::query(&format!(
+            r#"INSERT INTO "{}"."{}" (value) VALUES ({})"#,
+            profile_name, table_name, i
+        )).execute(&mut *tx).await.unwrap();
+    }
+    tx.commit().await.unwrap();
+
+    // Run concurrent operations
+    let mut count_handles = vec![];
+    let mut insert_handles = vec![];
+
+    // Spawn count operations
+    for _ in 0..5 {
+        let pool_clone = pool.clone();
+        let profile_name_clone = profile_name.clone();
+        let table_name_clone = table_name.to_string();
+
+        let handle = tokio::spawn(async move {
+            let mut counts = Vec::new();
+            for _ in 0..10 {
+                let response = get_table_data_count(&pool_clone, GetTableDataCountRequest {
+                    profile_name: profile_name_clone.clone(),
+                    table_name: table_name_clone.clone(),
+                }).await.unwrap();
+                counts.push(response.count);
+                tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
+            }
+            counts
+        });
+        count_handles.push(handle);
+    }
+
+    // Spawn insert operations
+    for i in 0..3 {
+        let pool_clone = pool.clone();
+        let profile_name_clone = profile_name.clone();
+        let table_name_clone = table_name.to_string();
+
+        let handle = tokio::spawn(async move {
+            for j in 1..=20 {
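+                // Disjoint per-task ranges: task i writes 1000 + i*100 + (1..=20),
+                // i.e. 1001-1020, 1101-1120 and 1201-1220, none of which collide
+                // with the 1..=100 rows seeded above.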
let value = (i * 100) + j + 1000; // Ensure unique values + sqlx::query(&format!( + r#"INSERT INTO "{}"."{}" (value) VALUES ({})"#, + profile_name_clone, table_name_clone, value + )).execute(&pool_clone).await.unwrap(); + tokio::time::sleep(tokio::time::Duration::from_millis(5)).await; + } + }); + insert_handles.push(handle); + } + + // Wait for all operations to complete + for handle in count_handles { + handle.await.unwrap(); + } + for handle in insert_handles { + handle.await.unwrap(); + } + + // Final count should be 100 + (3 * 20) = 160 + let final_response = get_table_data_count(&pool, GetTableDataCountRequest { + profile_name: profile_name.clone(), + table_name: table_name.to_string(), + }).await.unwrap(); + assert_eq!(final_response.count, 160); + + // Cleanup + let schema_id = sqlx::query_scalar!( + "SELECT id FROM schemas WHERE name = $1", + profile_name + ).fetch_one(&pool).await.unwrap(); + + cleanup_test_environment(&pool, schema_id, &profile_name).await; +} + +#[rstest] +#[tokio::test] +async fn test_edge_case_all_records_deleted(#[future] pool: PgPool) { + let pool = pool.await; + let profile_name = format!("all_deleted_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0)); + let table_name = "all_deleted_table"; + + // Create table + let request = PostTableDefinitionRequest { + profile_name: profile_name.clone(), + table_name: table_name.to_string(), + columns: vec![ + ColumnDefinition { + name: "data".to_string(), + field_type: "text".to_string(), + } + ], + indexes: vec![], + links: vec![], + }; + post_table_definition(&pool, request).await.unwrap(); + + // Insert and then delete all records + let mut tx = pool.begin().await.unwrap(); + for i in 1..=50 { + sqlx::query(&format!( + r#"INSERT INTO "{}"."{}" (data) VALUES ('Record {}')"#, + profile_name, table_name, i + )).execute(&mut *tx).await.unwrap(); + } + + // Mark all as deleted + sqlx::query(&format!( + r#"UPDATE "{}"."{}" SET deleted = true"#, + profile_name, table_name + )).execute(&mut *tx).await.unwrap(); + + tx.commit().await.unwrap(); + + // Count should be 0 + let response = get_table_data_count(&pool, GetTableDataCountRequest { + profile_name: profile_name.clone(), + table_name: table_name.to_string(), + }).await.unwrap(); + assert_eq!(response.count, 0); + + // Cleanup + let schema_id = sqlx::query_scalar!( + "SELECT id FROM schemas WHERE name = $1", + profile_name + ).fetch_one(&pool).await.unwrap(); + + cleanup_test_environment(&pool, schema_id, &profile_name).await; +} + +#[rstest] +#[tokio::test] +async fn test_cross_schema_isolation(#[future] pool: PgPool) { + let pool = pool.await; + let profile1 = format!("schema1_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0)); + let profile2 = format!("schema2_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0)); + let table_name = "isolation_test_table"; + + // Create identical tables in two different schemas + for profile_name in [&profile1, &profile2] { + let request = PostTableDefinitionRequest { + profile_name: profile_name.clone(), + table_name: table_name.to_string(), + columns: vec![ + ColumnDefinition { + name: "data".to_string(), + field_type: "text".to_string(), + } + ], + indexes: vec![], + links: vec![], + }; + post_table_definition(&pool, request).await.unwrap(); + } + + // Insert different amounts of data in each schema + let mut tx = pool.begin().await.unwrap(); + + // Schema 1: 10 records + for i in 1..=10 { + sqlx::query(&format!( + r#"INSERT INTO "{}"."{}" (data) VALUES ('Schema1 Record {}')"#, + profile1, table_name, i + )).execute(&mut 
*tx).await.unwrap(); + } + + // Schema 2: 25 records + for i in 1..=25 { + sqlx::query(&format!( + r#"INSERT INTO "{}"."{}" (data) VALUES ('Schema2 Record {}')"#, + profile2, table_name, i + )).execute(&mut *tx).await.unwrap(); + } + + tx.commit().await.unwrap(); + + // Test counts are isolated + let count1 = get_table_data_count(&pool, GetTableDataCountRequest { + profile_name: profile1.clone(), + table_name: table_name.to_string(), + }).await.unwrap(); + assert_eq!(count1.count, 10); + + let count2 = get_table_data_count(&pool, GetTableDataCountRequest { + profile_name: profile2.clone(), + table_name: table_name.to_string(), + }).await.unwrap(); + assert_eq!(count2.count, 25); + + // Cleanup both schemas + for profile_name in [&profile1, &profile2] { + let schema_id = sqlx::query_scalar!( + "SELECT id FROM schemas WHERE name = $1", + profile_name + ).fetch_one(&pool).await.unwrap(); + + cleanup_test_environment(&pool, schema_id, profile_name).await; + } +}