From 5c23f61a10271f9f1502fd3b1bcd2a886ac192a1 Mon Sep 17 00:00:00 2001
From: filipriec
Date: Wed, 25 Jun 2025 09:44:38 +0200
Subject: [PATCH] get method passing without any problem

---
 .../tables_data/handlers/get_table_data.rs   |    6 +-
 .../handlers/get_table_data_test.rs          |   19 +-
 .../handlers/get_table_data_test2.rs         | 1112 +++++++++++++++++
 3 files changed, 1133 insertions(+), 4 deletions(-)
 create mode 100644 server/tests/tables_data/handlers/get_table_data_test2.rs

diff --git a/server/src/tables_data/handlers/get_table_data.rs b/server/src/tables_data/handlers/get_table_data.rs
index 6eb23dc..fe58e5e 100644
--- a/server/src/tables_data/handlers/get_table_data.rs
+++ b/server/src/tables_data/handlers/get_table_data.rs
@@ -66,11 +66,11 @@ pub async fn get_table_data(
         .await
         .map_err(|e| Status::internal(format!("Foreign key lookup error: {}", e)))?;
 
-    // 2. Build the list of foreign key column names
+    // 2. Build the list of foreign key column names using full table names
     let mut foreign_key_columns = Vec::new();
     for fk in fk_columns_query {
-        let base_name = fk.table_name.split_once('_').map_or(fk.table_name.as_str(), |(_, rest)| rest);
-        foreign_key_columns.push(format!("{}_id", base_name));
+        // Use the full table name, not a stripped version
+        foreign_key_columns.push(format!("{}_id", fk.table_name));
     }
 
     // 3. Prepare a complete list of all columns to select
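The rename is easiest to see with a concrete value. A minimal sketch of the
old vs. new naming (the table name here is hypothetical; the `split_once`
call mirrors the removed line):

    let table_name = "fk_category_x7";
    // Old: strip everything up to the first '_' before appending "_id".
    // This yielded "category_x7_id", a name that does not match the FK
    // column actually created for the linked table.
    let old = format!(
        "{}_id",
        table_name.split_once('_').map_or(table_name, |(_, rest)| rest)
    );
    assert_eq!(old, "category_x7_id");
    // New: keep the full linked-table name, matching the real column.
    assert_eq!(format!("{}_id", table_name), "fk_category_x7_id");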
diff --git a/server/tests/tables_data/handlers/get_table_data_test.rs b/server/tests/tables_data/handlers/get_table_data_test.rs
index 6048746..98052cf 100644
--- a/server/tests/tables_data/handlers/get_table_data_test.rs
+++ b/server/tests/tables_data/handlers/get_table_data_test.rs
@@ -5,9 +5,24 @@ use common::proto::multieko2::tables_data::GetTableDataRequest;
 use crate::common::setup_test_db;
 use sqlx::{PgPool, Row};
 use tonic;
-use chrono::Utc;
+use chrono::{DateTime, Utc};
 use serde_json::json;
 use std::collections::HashMap;
+use futures::future::join_all;
+use rand::distr::Alphanumeric;
+use rand::Rng;
+use rust_decimal::Decimal;
+use rust_decimal_macros::dec;
+use server::table_definition::handlers::post_table_definition;
+use server::tables_data::handlers::post_table_data;
+use common::proto::multieko2::table_definition::{
+    PostTableDefinitionRequest, ColumnDefinition as TableColumnDefinition, TableLink
+};
+use common::proto::multieko2::tables_data::PostTableDataRequest;
+use prost_types::Value;
+use prost_types::value::Kind;
+use tokio::sync::mpsc;
+use server::indexer::IndexCommand;
 
 #[fixture]
 async fn pool() -> PgPool {
@@ -466,3 +481,5 @@ async fn test_get_invalid_column(
 
     cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
+
+include!("get_table_data_test2.rs");
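Note on the include! line above: it splices get_table_data_test2.rs into this
file at compile time, so the included tests compile as part of the same module
and reuse the imports and fixtures already in scope here. That is why the new
`use` lines (rand, futures, prost_types, the post_* handlers, the indexer
channel types) are added to this file rather than to the included one, which
deliberately declares no imports of its own.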
diff --git a/server/tests/tables_data/handlers/get_table_data_test2.rs b/server/tests/tables_data/handlers/get_table_data_test2.rs
new file mode 100644
index 0000000..5dfe35e
--- /dev/null
+++ b/server/tests/tables_data/handlers/get_table_data_test2.rs
@@ -0,0 +1,1112 @@
+// tests/tables_data/handlers/get_table_data_test2.rs
+
+// ========================================================================
+// ADDITIONAL HELPER FUNCTIONS FOR COMPREHENSIVE GET TESTS
+// ========================================================================
+
+// Helper function to generate unique identifiers for test isolation
+fn generate_test_id() -> String {
+    rand::rng()
+        .sample_iter(&Alphanumeric)
+        .take(12)
+        .map(char::from)
+        .collect::<String>()
+        .to_lowercase()
+}
+
+// Helpers to create protobuf values
+fn proto_string(s: &str) -> Value {
+    Value { kind: Some(Kind::StringValue(s.to_string())) }
+}
+
+fn proto_number(n: f64) -> Value {
+    Value { kind: Some(Kind::NumberValue(n)) }
+}
+
+fn proto_bool(b: bool) -> Value {
+    Value { kind: Some(Kind::BoolValue(b)) }
+}
+
+fn proto_null() -> Value {
+    Value { kind: Some(Kind::NullValue(0)) }
+}
+
+// ========================================================================
+// SPECIALIZED TEST CONTEXTS FOR DIFFERENT SCENARIOS
+// ========================================================================
+
+#[derive(Clone)]
+struct DataTypeRetrievalContext {
+    pool: PgPool,
+    schema_name: String,
+    table_name: String,
+    record_ids: Vec<i64>, // Multiple records for testing
+}
+
+#[derive(Clone)]
+struct ForeignKeyRetrievalContext {
+    pool: PgPool,
+    schema_name: String,
+    category_table: String,
+    product_table: String,
+    order_table: String,
+    category_id: i64,
+    product_id: i64,
+    order_id: i64,
+}
+
+#[derive(Clone)]
+struct LargeDataContext {
+    pool: PgPool,
+    schema_name: String,
+    table_name: String,
+    small_record_id: i64,
+    large_record_id: i64,
+    unicode_record_id: i64,
+}
+
+// ========================================================================
+// CONTEXT CREATION FUNCTIONS
+// ========================================================================
+
+// Create a table with comprehensive data types for retrieval testing
+async fn create_comprehensive_data_table(pool: &PgPool, table_name: &str, schema_name: &str) -> Result<i64, tonic::Status> {
+    let table_def_request = PostTableDefinitionRequest {
+        profile_name: schema_name.into(),
+        table_name: table_name.into(),
+        columns: vec![
+            TableColumnDefinition { name: "text_field".into(), field_type: "text".into() },
+            TableColumnDefinition { name: "bool_field".into(), field_type: "boolean".into() },
+            TableColumnDefinition { name: "int_field".into(), field_type: "integer".into() },
+            TableColumnDefinition { name: "bigint_field".into(), field_type: "biginteger".into() },
+            TableColumnDefinition { name: "decimal_field".into(), field_type: "decimal(10,2)".into() },
+            TableColumnDefinition { name: "money_field".into(), field_type: "decimal(19,4)".into() },
+            TableColumnDefinition { name: "timestamp_field".into(), field_type: "timestamptz".into() },
+            TableColumnDefinition { name: "date_field".into(), field_type: "date".into() },
+        ],
+        indexes: vec!["text_field".into()],
+        links: vec![],
+    };
+
+    let response = post_table_definition(pool, table_def_request).await?;
+
+    // Get the table definition ID for later use
+    let table_def = sqlx::query!(
+        "SELECT id FROM table_definitions WHERE table_name = $1",
+        table_name
+    )
+    .fetch_one(pool)
+    .await
+    .map_err(|e| tonic::Status::internal(format!("Failed to get table def id: {}", e)))?;
+
+    Ok(table_def.id)
+}
+
+async fn insert_test_records_for_retrieval(context: &DataTypeRetrievalContext) -> Result<Vec<i64>, Box<dyn std::error::Error>> {
+    let (indexer_tx, _rx) = mpsc::channel(100);
+    let mut record_ids = Vec::new();
+
+    // Record 1: Complete data with all fields populated
+    let mut complete_data = HashMap::new();
+    complete_data.insert("text_field".into(), proto_string("Complete Record"));
+    complete_data.insert("bool_field".into(), proto_bool(true));
+    complete_data.insert("int_field".into(), proto_number(42.0));
+    complete_data.insert("bigint_field".into(), proto_number(1234567890123.0));
+    complete_data.insert("decimal_field".into(), proto_string("123.45"));
+    complete_data.insert("money_field".into(), proto_string("9999.9999"));
+    complete_data.insert("timestamp_field".into(), proto_string("2024-01-15T10:30:00Z"));
+
+    let complete_request = PostTableDataRequest {
+        profile_name: context.schema_name.clone(),
+        table_name: context.table_name.clone(),
+        data: complete_data,
+    };
+
+    let response = post_table_data(&context.pool, complete_request, &indexer_tx).await?;
+    record_ids.push(response.inserted_id);
+
+    // Record 2: Sparse data with many NULL fields
+    let mut sparse_data = HashMap::new();
+    sparse_data.insert("text_field".into(), proto_string("Sparse Record"));
+    sparse_data.insert("bool_field".into(), proto_null());
+    sparse_data.insert("int_field".into(), proto_null());
+    sparse_data.insert("decimal_field".into(), proto_string("")); // Empty string becomes NULL
+
+    let sparse_request = PostTableDataRequest {
+        profile_name: context.schema_name.clone(),
+        table_name: context.table_name.clone(),
+        data: sparse_data,
+    };
+
+    let response = post_table_data(&context.pool, sparse_request, &indexer_tx).await?;
+    record_ids.push(response.inserted_id);
+
+    // Record 3: Edge case values
+    let mut edge_data = HashMap::new();
+    edge_data.insert("text_field".into(), proto_string("Edge Cases 🚀"));
+    edge_data.insert("bool_field".into(), proto_bool(false));
+    edge_data.insert("int_field".into(), proto_number(2147483647.0)); // i32::MAX
+    edge_data.insert("bigint_field".into(), proto_number(-9223372036854774784.0)); // Near i64::MIN
+    edge_data.insert("decimal_field".into(), proto_string("-999.99"));
+    edge_data.insert("money_field".into(), proto_string("0.0001"));
+
+    let edge_request = PostTableDataRequest {
+        profile_name: context.schema_name.clone(),
+        table_name: context.table_name.clone(),
+        data: edge_data,
+    };
+
+    let response = post_table_data(&context.pool, edge_request, &indexer_tx).await?;
+    record_ids.push(response.inserted_id);
+
+    Ok(record_ids)
+}
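+
+// Why the bigint edge value is only "near" i64::MIN: proto NumberValue
+// carries an f64, whose 53-bit mantissa cannot represent every i64 exactly.
+// -9_223_372_036_854_774_784 is -(2^63) + 1024, which IS exactly
+// representable as an f64 and so survives the float round trip intact,
+// whereas nearby values such as i64::MIN + 1 are not.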
+
+// Create foreign key test setup with actual data
+async fn create_foreign_key_retrieval_setup(pool: &PgPool, schema_name: &str) -> Result<ForeignKeyRetrievalContext, Box<dyn std::error::Error>> {
+    let unique_id = generate_test_id();
+    let category_table = format!("fk_category_{}", unique_id);
+    let product_table = format!("fk_product_{}", unique_id);
+    let order_table = format!("fk_order_{}", unique_id);
+
+    // Create category table
+    let category_def = PostTableDefinitionRequest {
+        profile_name: schema_name.into(),
+        table_name: category_table.clone(),
+        columns: vec![
+            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
+            TableColumnDefinition { name: "description".into(), field_type: "text".into() },
+        ],
+        indexes: vec![],
+        links: vec![],
+    };
+    post_table_definition(pool, category_def).await?;
+
+    // Create product table with FK to category
+    let product_def = PostTableDefinitionRequest {
+        profile_name: schema_name.into(),
+        table_name: product_table.clone(),
+        columns: vec![
+            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
+            TableColumnDefinition { name: "price".into(), field_type: "decimal(10,2)".into() },
+        ],
+        indexes: vec![],
+        links: vec![
+            TableLink { linked_table_name: category_table.clone(), required: true },
+        ],
+    };
+    post_table_definition(pool, product_def).await?;
+
+    // Create order table with FKs to both category and product
+    let order_def = PostTableDefinitionRequest {
+        profile_name: schema_name.into(),
+        table_name: order_table.clone(),
+        columns: vec![
+            TableColumnDefinition { name: "quantity".into(), field_type: "integer".into() },
+            TableColumnDefinition { name: "notes".into(), field_type: "text".into() },
+        ],
+        indexes: vec![],
+        links: vec![
+            TableLink { linked_table_name: product_table.clone(), required: true },
+            TableLink { linked_table_name: category_table.clone(), required: false },
+        ],
+    };
+    post_table_definition(pool, order_def).await?;
+
+    // Insert test data
+    let (indexer_tx, _rx) = mpsc::channel(100);
+
+    // Insert category
+    let mut category_data = HashMap::new();
+    category_data.insert("name".into(), proto_string("Electronics"));
+    category_data.insert("description".into(), proto_string("Electronic devices and accessories"));
+
+    let category_request = PostTableDataRequest {
+        profile_name: schema_name.into(),
+        table_name: category_table.clone(),
+        data: category_data,
+    };
+
+    let category_response = post_table_data(pool, category_request, &indexer_tx).await?;
+
+    // Insert product
+    let mut product_data = HashMap::new();
+    product_data.insert("name".into(), proto_string("Gaming Laptop"));
+    product_data.insert("price".into(), proto_string("1299.99"));
+    product_data.insert(format!("{}_id", category_table), proto_number(category_response.inserted_id as f64));
+
+    let product_request = PostTableDataRequest {
+        profile_name: schema_name.into(),
+        table_name: product_table.clone(),
+        data: product_data,
+    };
+
+    let product_response = post_table_data(pool, product_request, &indexer_tx).await?;
+
+    // Insert order
+    let mut order_data = HashMap::new();
+    order_data.insert("quantity".into(), proto_number(2.0));
+    order_data.insert("notes".into(), proto_string("Express shipping requested"));
+    order_data.insert(format!("{}_id", product_table), proto_number(product_response.inserted_id as f64));
+    order_data.insert(format!("{}_id", category_table), proto_number(category_response.inserted_id as f64));
+
+    let order_request = PostTableDataRequest {
+        profile_name: schema_name.into(),
+        table_name: order_table.clone(),
+        data: order_data,
+    };
+
+    let order_response = post_table_data(pool, order_request, &indexer_tx).await?;
+
+    Ok(ForeignKeyRetrievalContext {
+        pool: pool.clone(),
+        schema_name: schema_name.to_string(),
+        category_table,
+        product_table,
+        order_table,
+        category_id: category_response.inserted_id,
+        product_id: product_response.inserted_id,
+        order_id: order_response.inserted_id,
+    })
+}
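+
+// Note that the FK values above are keyed as format!("{}_id", <full table
+// name>), e.g. "fk_product_<unique_id>_id". This is the same full-name
+// convention the get_table_data change at the top of this patch now uses
+// when it builds its foreign key column list.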
+
+// Create large data context for performance testing
+async fn create_large_data_context(pool: &PgPool, schema_name: &str) -> Result<LargeDataContext, Box<dyn std::error::Error>> {
+    let unique_id = generate_test_id();
+    let table_name = format!("large_data_{}", unique_id);
+
+    // Create table with text fields
+    let table_def = PostTableDefinitionRequest {
+        profile_name: schema_name.into(),
+        table_name: table_name.clone(),
+        columns: vec![
+            TableColumnDefinition { name: "small_text".into(), field_type: "text".into() },
+            TableColumnDefinition { name: "large_text".into(), field_type: "text".into() },
+            TableColumnDefinition { name: "unicode_text".into(), field_type: "text".into() },
+        ],
+        indexes: vec![],
+        links: vec![],
+    };
+    post_table_definition(pool, table_def).await?;
+
+    let (indexer_tx, _rx) = mpsc::channel(100);
+
+    // Insert small record
+    let mut small_data = HashMap::new();
+    small_data.insert("small_text".into(), proto_string("Small"));
+    small_data.insert("large_text".into(), proto_string("Not large"));
+
+    let small_request = PostTableDataRequest {
+        profile_name: schema_name.into(),
+        table_name: table_name.clone(),
+        data: small_data,
+    };
+
+    let small_response = post_table_data(pool, small_request, &indexer_tx).await?;
+
+    // Insert large record
+    let large_content = "A".repeat(50000); // 50KB of text
+    let mut large_data = HashMap::new();
+    large_data.insert("small_text".into(), proto_string("Large record"));
+    large_data.insert("large_text".into(), proto_string(&large_content));
+
+    let large_request = PostTableDataRequest {
+        profile_name: schema_name.into(),
+        table_name: table_name.clone(),
+        data: large_data,
+    };
+
+    let large_response = post_table_data(pool, large_request, &indexer_tx).await?;
+
+    // Insert unicode record
+    let unicode_content = "🌟 Unicode Test: Москва, 北京, José María González, Náměstí 28. října 123/456 🚀";
+    let mut unicode_data = HashMap::new();
+    unicode_data.insert("small_text".into(), proto_string("Unicode"));
+    unicode_data.insert("unicode_text".into(), proto_string(unicode_content));
+
+    let unicode_request = PostTableDataRequest {
+        profile_name: schema_name.into(),
+        table_name: table_name.clone(),
+        data: unicode_data,
+    };
+
+    let unicode_response = post_table_data(pool, unicode_request, &indexer_tx).await?;
+
+    Ok(LargeDataContext {
+        pool: pool.clone(),
+        schema_name: schema_name.to_string(),
+        table_name,
+        small_record_id: small_response.inserted_id,
+        large_record_id: large_response.inserted_id,
+        unicode_record_id: unicode_response.inserted_id,
+    })
+}
+
+// ========================================================================
+// COMPREHENSIVE DATA TYPE RETRIEVAL TESTS
+// ========================================================================
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieve_complete_record_all_types() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("data_get_schema_{}", unique_id);
+    let table_name = format!("data_get_table_{}", unique_id);
+
+    create_comprehensive_data_table(&pool, &table_name, &schema_name).await
+        .expect("Failed to create comprehensive data table");
+
+    let context = DataTypeRetrievalContext {
+        pool: pool.clone(),
+        schema_name,
+        table_name,
+        record_ids: Vec::new(),
+    };
+
+    let record_ids = insert_test_records_for_retrieval(&context).await
+        .expect("Failed to insert test records");
+
+    let complete_record_id = record_ids[0]; // First record has all fields
+
+    let request = GetTableDataRequest {
+        profile_name: context.schema_name.clone(),
+        table_name: context.table_name.clone(),
+        id: complete_record_id,
+    };
+
+    let response = get_table_data(&pool, request).await.unwrap();
+
+    // Verify all expected columns are present
+    let expected_columns = vec![
+        "id", "deleted", "text_field", "bool_field", "int_field",
+        "bigint_field", "decimal_field", "money_field", "timestamp_field", "date_field"
+    ];
+
+    for column in expected_columns {
+        assert!(response.data.contains_key(column), "Missing column: {}", column);
+    }
+
+    // Verify specific values
+    assert_eq!(response.data["id"], complete_record_id.to_string());
+    assert_eq!(response.data["deleted"], "false");
+    assert_eq!(response.data["text_field"], "Complete Record");
+    assert_eq!(response.data["bool_field"], "true");
+    assert_eq!(response.data["int_field"], "42");
+    assert_eq!(response.data["bigint_field"], "1234567890123");
+    assert_eq!(response.data["decimal_field"], "123.45");
+    assert_eq!(response.data["money_field"], "9999.9999");
+
+    // Verify timestamp is properly formatted
+    assert!(response.data["timestamp_field"].contains("2024-01-15"));
+
+    cleanup_test_data(&pool, &context.schema_name, &context.table_name).await;
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieve_sparse_record_null_handling() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("data_get_schema_{}", unique_id);
+    let table_name = format!("data_get_table_{}", unique_id);
+
+    create_comprehensive_data_table(&pool, &table_name, &schema_name).await
+        .expect("Failed to create comprehensive data table");
+
+    let context = DataTypeRetrievalContext {
+        pool: pool.clone(),
+        schema_name,
+        table_name,
+        record_ids: Vec::new(),
+    };
+
+    let record_ids = insert_test_records_for_retrieval(&context).await
+        .expect("Failed to insert test records");
+
+    let sparse_record_id = record_ids[1]; // Second record has many NULLs
+
+    let request = GetTableDataRequest {
+        profile_name: context.schema_name.clone(),
+        table_name: context.table_name.clone(),
+        id: sparse_record_id,
+    };
+
+    let response = get_table_data(&pool, request).await.unwrap();
+
+    // Verify NULL fields are returned as empty strings
+    assert_eq!(response.data["text_field"], "Sparse Record");
+    assert_eq!(response.data["bool_field"], ""); // NULL boolean becomes empty string
+    assert_eq!(response.data["int_field"], ""); // NULL integer becomes empty string
+    assert_eq!(response.data["bigint_field"], ""); // NULL bigint becomes empty string
+    assert_eq!(response.data["decimal_field"], ""); // NULL decimal becomes empty string
+    assert_eq!(response.data["money_field"], ""); // NULL money becomes empty string
+    assert_eq!(response.data["timestamp_field"], ""); // NULL timestamp becomes empty string
+
+    cleanup_test_data(&pool, &context.schema_name, &context.table_name).await;
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieve_edge_case_values() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("data_get_schema_{}", unique_id);
+    let table_name = format!("data_get_table_{}", unique_id);
+
+    create_comprehensive_data_table(&pool, &table_name, &schema_name).await
+        .expect("Failed to create comprehensive data table");
+
+    let context = DataTypeRetrievalContext {
+        pool: pool.clone(),
+        schema_name,
+        table_name,
+        record_ids: Vec::new(),
+    };
+
+    let record_ids = insert_test_records_for_retrieval(&context).await
+        .expect("Failed to insert test records");
+
+    let edge_record_id = record_ids[2]; // Third record has edge case values
+
+    let request = GetTableDataRequest {
+        profile_name: context.schema_name.clone(),
+        table_name: context.table_name.clone(),
+        id: edge_record_id,
+    };
+
+    let response = get_table_data(&pool, request).await.unwrap();
+
+    // Verify edge case values
+    assert_eq!(response.data["text_field"], "Edge Cases 🚀");
+    assert_eq!(response.data["bool_field"], "false");
+    assert_eq!(response.data["int_field"], "2147483647"); // i32::MAX
+    assert_eq!(response.data["bigint_field"], "-9223372036854774784"); // Large negative
+    assert_eq!(response.data["decimal_field"], "-999.99");
+    assert_eq!(response.data["money_field"], "0.0001");
+
+    cleanup_test_data(&pool, &context.schema_name, &context.table_name).await;
+}
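+
+// The empty-string assertions above pin down a protocol constraint: the GET
+// response is a string-to-string map, so SQL NULL has no native
+// representation and can only surface as "". Callers cannot distinguish a
+// NULL text column from an empty one through this API.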
+
+// ========================================================================
+// FOREIGN KEY COLUMN RETRIEVAL TESTS
+// ========================================================================
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieve_record_with_foreign_keys() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("fk_get_schema_{}", unique_id);
+
+    let context = create_foreign_key_retrieval_setup(&pool, &schema_name).await
+        .expect("Failed to create foreign key retrieval setup");
+
+    let request = GetTableDataRequest {
+        profile_name: context.schema_name.clone(),
+        table_name: context.order_table.clone(),
+        id: context.order_id,
+    };
+
+    let response = get_table_data(&pool, request).await.unwrap();
+
+    // Verify order data
+    assert_eq!(response.data["id"], context.order_id.to_string());
+    assert_eq!(response.data["quantity"], "2");
+    assert_eq!(response.data["notes"], "Express shipping requested");
+
+    // Verify foreign key columns are included
+    let product_fk_column = format!("{}_id", context.product_table);
+    let category_fk_column = format!("{}_id", context.category_table);
+
+    assert!(response.data.contains_key(&product_fk_column), "Missing required FK column: {}", product_fk_column);
+    assert!(response.data.contains_key(&category_fk_column), "Missing optional FK column: {}", category_fk_column);
+
+    // Verify foreign key values
+    assert_eq!(response.data[&product_fk_column], context.product_id.to_string());
+    assert_eq!(response.data[&category_fk_column], context.category_id.to_string());
+
+    cleanup_test_data(&pool, &context.schema_name, &context.category_table).await;
+    cleanup_test_data(&pool, &context.schema_name, &context.product_table).await;
+    cleanup_test_data(&pool, &context.schema_name, &context.order_table).await;
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieve_product_with_single_foreign_key() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("fk_get_schema_{}", unique_id);
+
+    let context = create_foreign_key_retrieval_setup(&pool, &schema_name).await
+        .expect("Failed to create foreign key retrieval setup");
+
+    let request = GetTableDataRequest {
+        profile_name: context.schema_name.clone(),
+        table_name: context.product_table.clone(),
+        id: context.product_id,
+    };
+
+    let response = get_table_data(&pool, request).await.unwrap();
+
+    // Verify product data
+    assert_eq!(response.data["name"], "Gaming Laptop");
+    assert_eq!(response.data["price"], "1299.99");
+
+    // Verify foreign key column is included
+    let category_fk_column = format!("{}_id", context.category_table);
+    assert!(response.data.contains_key(&category_fk_column));
+    assert_eq!(response.data[&category_fk_column], context.category_id.to_string());
+
+    cleanup_test_data(&pool, &context.schema_name, &context.category_table).await;
+    cleanup_test_data(&pool, &context.schema_name, &context.product_table).await;
+    cleanup_test_data(&pool, &context.schema_name, &context.order_table).await;
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieve_category_no_foreign_keys() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("fk_get_schema_{}", unique_id);
+
+    let context = create_foreign_key_retrieval_setup(&pool, &schema_name).await
+        .expect("Failed to create foreign key retrieval setup");
+
+    let request = GetTableDataRequest {
+        profile_name: context.schema_name.clone(),
+        table_name: context.category_table.clone(),
+        id: context.category_id,
+    };
+
+    let response = get_table_data(&pool, request).await.unwrap();
+
+    // Verify category data
+    assert_eq!(response.data["name"], "Electronics");
+    assert_eq!(response.data["description"], "Electronic devices and accessories");
+
+    // Verify no foreign key columns (category is the root table)
+    let expected_columns = vec!["id", "deleted", "name", "description"];
+    assert_eq!(response.data.len(), expected_columns.len());
+
+    for column in expected_columns {
+        assert!(response.data.contains_key(column), "Missing expected column: {}", column);
+    }
+
+    cleanup_test_data(&pool, &context.schema_name, &context.category_table).await;
+    cleanup_test_data(&pool, &context.schema_name, &context.product_table).await;
+    cleanup_test_data(&pool, &context.schema_name, &context.order_table).await;
+}
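+
+// Taken together, the three tests above pin down the GET column set: the
+// system columns (id, deleted), every user-defined column, and one
+// "<linked_table>_id" column per TableLink. The handler presumably assembles
+// something like:
+//     SELECT id, deleted, <user columns>, <fk columns>
+//     FROM "<schema>"."<table>" WHERE id = $1 AND deleted = false
+// (a sketch only; the real query construction lives in get_table_data.rs).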
+
+// ========================================================================
+// LARGE DATA AND PERFORMANCE TESTS
+// ========================================================================
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieve_large_text_fields() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("large_get_schema_{}", unique_id);
+
+    let context = create_large_data_context(&pool, &schema_name).await
+        .expect("Failed to create large data context");
+
+    let request = GetTableDataRequest {
+        profile_name: context.schema_name.clone(),
+        table_name: context.table_name.clone(),
+        id: context.large_record_id,
+    };
+
+    let start_time = std::time::Instant::now();
+    let response = get_table_data(&pool, request).await.unwrap();
+    let duration = start_time.elapsed();
+
+    // Verify large data retrieval
+    assert_eq!(response.data["small_text"], "Large record");
+    assert_eq!(response.data["large_text"].len(), 50000);
+    assert!(response.data["large_text"].starts_with("AAAA"));
+    assert!(response.data["large_text"].ends_with("AAAA"));
+
+    // Performance check - should complete reasonably quickly
+    assert!(duration.as_millis() < 1000, "Large data retrieval took too long: {:?}", duration);
+
+    println!("Large text retrieval (50KB) took: {:?}", duration);
+
+    cleanup_test_data(&pool, &context.schema_name, &context.table_name).await;
+}
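+
+// Storage note: at 50KB the large_text value is far past Postgres's ~2KB
+// TOAST threshold, so it is stored compressed (and possibly out of line);
+// the timing above therefore also covers detoasting, not just the SELECT
+// round trip.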
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieve_unicode_content() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("large_get_schema_{}", unique_id);
+
+    let context = create_large_data_context(&pool, &schema_name).await
+        .expect("Failed to create large data context");
+
+    let request = GetTableDataRequest {
+        profile_name: context.schema_name.clone(),
+        table_name: context.table_name.clone(),
+        id: context.unicode_record_id,
+    };
+
+    let response = get_table_data(&pool, request).await.unwrap();
+
+    // Verify Unicode handling
+    assert_eq!(response.data["small_text"], "Unicode");
+    let unicode_content = &response.data["unicode_text"];
+
+    assert!(unicode_content.contains("🌟"));
+    assert!(unicode_content.contains("🚀"));
+    assert!(unicode_content.contains("Москва"));
+    assert!(unicode_content.contains("北京"));
+    assert!(unicode_content.contains("José María González"));
+    assert!(unicode_content.contains("Náměstí 28. října 123/456"));
+
+    cleanup_test_data(&pool, &context.schema_name, &context.table_name).await;
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_concurrent_retrieval_operations() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("data_get_schema_{}", unique_id);
+    let table_name = format!("data_get_table_{}", unique_id);
+
+    create_comprehensive_data_table(&pool, &table_name, &schema_name).await
+        .expect("Failed to create comprehensive data table");
+
+    let context = DataTypeRetrievalContext {
+        pool: pool.clone(),
+        schema_name,
+        table_name,
+        record_ids: Vec::new(),
+    };
+
+    let record_ids = insert_test_records_for_retrieval(&context).await
+        .expect("Failed to insert test records");
+
+    // Create concurrent retrieval tasks
+    let tasks: Vec<_> = record_ids.iter().cycle().take(20).enumerate().map(|(i, &record_id)| {
+        let context = context.clone();
+        tokio::spawn(async move {
+            let request = GetTableDataRequest {
+                profile_name: context.schema_name.clone(),
+                table_name: context.table_name.clone(),
+                id: record_id,
+            };
+
+            let result = get_table_data(&context.pool, request).await;
+            (i, result)
+        })
+    }).collect();
+
+    let start_time = std::time::Instant::now();
+    let results = join_all(tasks).await;
+    let duration = start_time.elapsed();
+
+    // All concurrent operations should succeed
+    for (_i, task_result) in results.into_iter().enumerate() {
+        let (task_id, get_result) = task_result.expect("Task should not panic");
+        assert!(get_result.is_ok(), "Concurrent retrieval {} should succeed", task_id);
+
+        let response = get_result.unwrap();
+        assert!(response.data.contains_key("id"));
+        assert!(response.data.contains_key("text_field"));
+    }
+
+    println!("20 concurrent retrievals took: {:?}", duration);
+    assert!(duration.as_millis() < 2000, "Concurrent retrievals took too long: {:?}", duration);
+
+    cleanup_test_data(&pool, &context.schema_name, &context.table_name).await;
+}
+
+// ========================================================================
+// ERROR SCENARIO TESTS
+// ========================================================================
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieve_nonexistent_record() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("data_get_schema_{}", unique_id);
+    let table_name = format!("data_get_table_{}", unique_id);
+
+    create_comprehensive_data_table(&pool, &table_name, &schema_name).await
+        .expect("Failed to create comprehensive data table");
+
+    let request = GetTableDataRequest {
+        profile_name: schema_name.clone(),
+        table_name: table_name.clone(),
+        id: 999999, // Non-existent ID
+    };
+
+    let result = get_table_data(&pool, request).await;
+    assert!(result.is_err());
+    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieve_from_nonexistent_table() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("data_get_schema_{}", unique_id);
+    let table_name = format!("data_get_table_{}", unique_id);
+
+    create_comprehensive_data_table(&pool, &table_name, &schema_name).await
+        .expect("Failed to create comprehensive data table");
+
+    let request = GetTableDataRequest {
+        profile_name: schema_name.clone(),
+        table_name: "nonexistent_table".into(),
+        id: 1,
+    };
+
+    let result = get_table_data(&pool, request).await;
+    assert!(result.is_err());
+    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieve_from_nonexistent_schema() {
+    let pool = setup_test_db().await;
+
+    let request = GetTableDataRequest {
+        profile_name: "nonexistent_schema".into(),
+        table_name: "test_table".into(),
+        id: 1,
+    };
+
+    let result = get_table_data(&pool, request).await;
+    assert!(result.is_err());
+    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieve_with_database_connection_error() {
+    let closed_pool = setup_test_db().await;
+    closed_pool.close().await;
+
+    let request = GetTableDataRequest {
+        profile_name: "test_schema".into(),
+        table_name: "test_table".into(),
+        id: 1,
+    };
+
+    let result = get_table_data(&closed_pool, request).await;
+    assert!(result.is_err());
+    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
+}
+
+// ========================================================================
+// BOUNDARY AND EDGE CASE TESTS
+// ========================================================================
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieve_record_at_boundaries() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("data_get_schema_{}", unique_id);
+    let table_name = format!("data_get_table_{}", unique_id);
+
+    create_comprehensive_data_table(&pool, &table_name, &schema_name).await
+        .expect("Failed to create comprehensive data table");
+
+    // Test with ID 1 (boundary)
+    let request = GetTableDataRequest {
+        profile_name: schema_name.clone(),
+        table_name: table_name.clone(),
+        id: 1,
+    };
+
+    // This might not exist, but should be handled gracefully
+    let result = get_table_data(&pool, request).await;
+    if result.is_err() {
+        assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
+    }
+
+    // Test with large ID
+    let request = GetTableDataRequest {
+        profile_name: schema_name.clone(),
+        table_name: table_name.clone(),
+        id: i64::MAX,
+    };
+
+    let result = get_table_data(&pool, request).await;
+    assert!(result.is_err());
+    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieve_deleted_record() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("deleted_test_{}", unique_id);
+    let table_name = format!("test_table_{}", unique_id);
+
+    // Create a simple table
+    let table_def = PostTableDefinitionRequest {
+        profile_name: schema_name.clone(),
+        table_name: table_name.clone(),
+        columns: vec![
+            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
+        ],
+        indexes: vec![],
+        links: vec![],
+    };
+    post_table_definition(&pool, table_def).await.unwrap();
+
+    // Insert a record
+    let (indexer_tx, _rx) = mpsc::channel(100);
+    let mut data = HashMap::new();
+    data.insert("name".into(), proto_string("Test Record"));
+
+    let insert_request = PostTableDataRequest {
+        profile_name: schema_name.clone(),
+        table_name: table_name.clone(),
+        data,
+    };
+
+    let insert_response = post_table_data(&pool, insert_request, &indexer_tx).await.unwrap();
+
+    // Manually mark the record as deleted
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
+    sqlx::query(&format!("UPDATE {} SET deleted = true WHERE id = $1", qualified_table))
+        .bind(insert_response.inserted_id)
+        .execute(&pool)
+        .await
+        .unwrap();
+
+    // Try to retrieve the deleted record
+    let get_request = GetTableDataRequest {
+        profile_name: schema_name.clone(),
+        table_name: table_name.clone(),
+        id: insert_response.inserted_id,
+    };
+
+    let result = get_table_data(&pool, get_request).await;
+    assert!(result.is_err());
+    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
+}
+
+// ========================================================================
+// STRESS AND PERFORMANCE TESTS
+// ========================================================================
+
+#[rstest]
+#[tokio::test]
+async fn test_rapid_sequential_retrievals() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("data_get_schema_{}", unique_id);
+    let table_name = format!("data_get_table_{}", unique_id);
+
+    create_comprehensive_data_table(&pool, &table_name, &schema_name).await
+        .expect("Failed to create comprehensive data table");
+
+    let context = DataTypeRetrievalContext {
+        pool: pool.clone(),
+        schema_name,
+        table_name,
+        record_ids: Vec::new(),
+    };
+
+    let record_ids = insert_test_records_for_retrieval(&context).await
+        .expect("Failed to insert test records");
+
+    let record_id = record_ids[0];
+
+    let start_time = std::time::Instant::now();
+
+    // Perform rapid sequential retrievals
+    for i in 0..100 {
+        let request = GetTableDataRequest {
+            profile_name: context.schema_name.clone(),
+            table_name: context.table_name.clone(),
+            id: record_id,
+        };
+
+        let result = get_table_data(&pool, request).await;
+        assert!(result.is_ok(), "Rapid retrieval {} should succeed", i);
+    }
+
+    let duration = start_time.elapsed();
+    println!("100 rapid sequential retrievals took: {:?}", duration);
+    assert!(duration.as_millis() < 5000, "Rapid retrievals took too long: {:?}", duration);
+
+    cleanup_test_data(&pool, &context.schema_name, &context.table_name).await;
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_retrieval_with_high_concurrency() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("data_get_schema_{}", unique_id);
+    let table_name = format!("data_get_table_{}", unique_id);
+
+    create_comprehensive_data_table(&pool, &table_name, &schema_name).await
+        .expect("Failed to create comprehensive data table");
+
+    let context = DataTypeRetrievalContext {
+        pool: pool.clone(),
+        schema_name,
+        table_name,
+        record_ids: Vec::new(),
+    };
+
+    let record_ids = insert_test_records_for_retrieval(&context).await
+        .expect("Failed to insert test records");
+
+    let record_id = record_ids[0];
+
+    // Create many concurrent retrieval tasks
+    let tasks: Vec<_> = (0..50).map(|i| {
+        let context = context.clone();
+        tokio::spawn(async move {
+            let request = GetTableDataRequest {
+                profile_name: context.schema_name.clone(),
+                table_name: context.table_name.clone(),
+                id: record_id,
+            };
+
+            let result = get_table_data(&context.pool, request).await;
+            (i, result)
+        })
+    }).collect();
+
+    let start_time = std::time::Instant::now();
+    let results = join_all(tasks).await;
+    let duration = start_time.elapsed();
+
+    // All concurrent operations should succeed
+    for (task_id, task_result) in results.into_iter().enumerate() {
+        let (_i, get_result) = task_result.expect("Task should not panic");
+        assert!(get_result.is_ok(), "High concurrency retrieval {} should succeed", task_id);
+    }
+
+    println!("50 high concurrency retrievals took: {:?}", duration);
+    assert!(duration.as_millis() < 3000, "High concurrency retrievals took too long: {:?}", duration);
+
+    cleanup_test_data(&pool, &context.schema_name, &context.table_name).await;
+}
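+
+// A caveat on the two stress tests above: PgPool holds a bounded number of
+// connections (whatever setup_test_db configures), so the 50 spawned tasks
+// do not hit Postgres simultaneously — excess tasks wait for a free
+// connection. The generous 3s/5s budgets absorb that queuing, which keeps
+// the thresholds meaningful on slower CI machines.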
+
+// ========================================================================
+// COLUMN COMPLETENESS AND CONSISTENCY TESTS
+// ========================================================================
+
+#[rstest]
+#[tokio::test]
+async fn test_all_expected_columns_present() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("data_get_schema_{}", unique_id);
+    let table_name = format!("data_get_table_{}", unique_id);
+
+    create_comprehensive_data_table(&pool, &table_name, &schema_name).await
+        .expect("Failed to create comprehensive data table");
+
+    let context = DataTypeRetrievalContext {
+        pool: pool.clone(),
+        schema_name,
+        table_name,
+        record_ids: Vec::new(),
+    };
+
+    let record_ids = insert_test_records_for_retrieval(&context).await
+        .expect("Failed to insert test records");
+
+    for &record_id in &record_ids {
+        let request = GetTableDataRequest {
+            profile_name: context.schema_name.clone(),
+            table_name: context.table_name.clone(),
+            id: record_id,
+        };
+
+        let response = get_table_data(&pool, request).await.unwrap();
+
+        // Verify system columns are always present
+        let system_columns = vec!["id", "deleted"];
+        for column in system_columns {
+            assert!(response.data.contains_key(column), "Missing system column '{}' for record {}", column, record_id);
+            assert!(!response.data[column].is_empty() || column == "deleted", "System column '{}' should not be empty for record {}", column, record_id);
+        }
+
+        // Verify user-defined columns are present (may be empty for NULL values)
+        let user_columns = vec![
+            "text_field", "bool_field", "int_field", "bigint_field",
+            "decimal_field", "money_field", "timestamp_field", "date_field"
+        ];
+        for column in user_columns {
+            assert!(response.data.contains_key(column), "Missing user column '{}' for record {}", column, record_id);
+        }
+    }
+
+    cleanup_test_data(&pool, &context.schema_name, &context.table_name).await;
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_data_type_consistency_across_retrievals() {
+    let pool = setup_test_db().await;
+    let unique_id = generate_test_id();
+    let schema_name = format!("data_get_schema_{}", unique_id);
+    let table_name = format!("data_get_table_{}", unique_id);
+
+    create_comprehensive_data_table(&pool, &table_name, &schema_name).await
+        .expect("Failed to create comprehensive data table");
+
+    let context = DataTypeRetrievalContext {
+        pool: pool.clone(),
+        schema_name,
+        table_name,
+        record_ids: Vec::new(),
+    };
+
+    let record_ids = insert_test_records_for_retrieval(&context).await
+        .expect("Failed to insert test records");
+
+    let record_id = record_ids[0]; // Use complete record
+
+    // Retrieve the same record multiple times
+    let mut responses = Vec::new();
+    for _ in 0..5 {
+        let request = GetTableDataRequest {
+            profile_name: context.schema_name.clone(),
+            table_name: context.table_name.clone(),
+            id: record_id,
+        };
+
+        let response = get_table_data(&pool, request).await.unwrap();
+        responses.push(response);
+    }
+
+    // Verify all responses are identical
+    let first_response = &responses[0];
+    for (i, response) in responses.iter().enumerate().skip(1) {
+        assert_eq!(response.data.len(), first_response.data.len(), "Response {} has different number of columns", i);
+
+        for (key, value) in &first_response.data {
+            assert_eq!(response.data.get(key), Some(value), "Response {} has different value for column '{}'", i, key);
+        }
+    }
+
+    cleanup_test_data(&pool, &context.schema_name, &context.table_name).await;
+}