count is now fixed and working properly

filipriec
2025-06-25 12:40:27 +02:00
parent 38c82389f7
commit b7a3f0f8d9
5 changed files with 2 additions and 2 deletions

@@ -0,0 +1,252 @@
// tests/tables_data/handlers/get_table_data_count_test.rs
use rstest::{fixture, rstest};
use tonic;
use sqlx::PgPool;
use common::proto::multieko2::tables_data::GetTableDataCountRequest;
use common::proto::multieko2::table_definition::{PostTableDefinitionRequest, ColumnDefinition};
use common::proto::multieko2::table_definition::TableLink;
use server::tables_data::handlers::get_table_data_count;
use server::table_definition::handlers::post_table_definition;
use crate::common::setup_test_db;
use chrono::Utc;
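// These tests exercise the count handler end-to-end against a real Postgres
// instance: each test provisions its own schema through the table-definition
// handler, seeds rows, asserts the count, and tears the schema down again.
//
// `setup_test_db` lives in the shared tests/common module. A minimal sketch of
// what it is assumed to do (connect to the database named by DATABASE_URL):
//
//     pub async fn setup_test_db() -> PgPool {
//         let url = std::env::var("DATABASE_URL")
//             .expect("DATABASE_URL must point at a disposable test database");
//         PgPool::connect(&url).await.expect("failed to connect to test db")
//     }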
#[fixture]
async fn pool() -> PgPool {
setup_test_db().await
}
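// A pool that has already been closed; handed to tests that need to force a
// database error and assert the tonic::Code::Internal path.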
#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
let pool = pool.await;
pool.close().await;
pool
}
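// Judging by the assertions below, the handler under test is expected to run
// a query of roughly this shape (a sketch, not the actual implementation):
//
//     SELECT COUNT(*) FROM "<profile_name>"."<table_name>" WHERE deleted = false
//
// which is why rows inserted with deleted = true must not show up in the count.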
async fn setup_test_environment(pool: &PgPool) -> (String, String, i64) {
// Create unique profile and table names
let profile_name = format!("test_profile_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
let table_name = format!("test_table_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
// Use the table definition handler to create the table properly
let request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: table_name.clone(),
columns: vec![
ColumnDefinition {
name: "firma".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(pool, request).await.unwrap();
// Get the schema_id for cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_one(pool)
.await
.unwrap();
(profile_name, table_name, schema_id)
}
async fn cleanup_test_environment(pool: &PgPool, schema_id: i64, profile_name: &str) {
let mut tx = pool.begin().await.unwrap();
// Cleanup order matters: drop the physical schema first, then delete the
// metadata rows (links before definitions before the schema row) so that
// foreign keys are satisfied.
sqlx::query(&format!(r#"DROP SCHEMA IF EXISTS "{}" CASCADE"#, profile_name))
.execute(&mut *tx)
.await
.unwrap();
// Delete foreign key relationships first
sqlx::query!(
"DELETE FROM table_definition_links WHERE source_table_id IN (SELECT id FROM table_definitions WHERE schema_id = $1) OR linked_table_id IN (SELECT id FROM table_definitions WHERE schema_id = $1)",
schema_id
)
.execute(&mut *tx)
.await
.unwrap();
sqlx::query!("DELETE FROM table_definitions WHERE schema_id = $1", schema_id)
.execute(&mut *tx)
.await
.unwrap();
sqlx::query!("DELETE FROM schemas WHERE id = $1", schema_id)
.execute(&mut *tx)
.await
.unwrap();
tx.commit().await.unwrap();
}
#[rstest]
#[tokio::test]
async fn test_returns_correct_count(#[future] pool: PgPool) {
let pool = pool.await;
let (profile_name, table_name, schema_id) = setup_test_environment(&pool).await;
// Insert test data
let mut tx = pool.begin().await.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (firma) VALUES ('Test 1')"#,
profile_name, table_name
))
.execute(&mut *tx)
.await
.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (firma) VALUES ('Test 2')"#,
profile_name, table_name
))
.execute(&mut *tx)
.await
.unwrap();
tx.commit().await.unwrap();
// Test
let request = GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: table_name.clone(),
};
let response = get_table_data_count(&pool, request).await.unwrap();
assert_eq!(response.count, 2);
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_excludes_deleted_records(#[future] pool: PgPool) {
let pool = pool.await;
let (profile_name, table_name, schema_id) = setup_test_environment(&pool).await;
// Insert test data
let mut tx = pool.begin().await.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (firma, deleted) VALUES ('Active', false)"#,
profile_name, table_name
))
.execute(&mut *tx)
.await
.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (firma, deleted) VALUES ('Deleted', true)"#,
profile_name, table_name
))
.execute(&mut *tx)
.await
.unwrap();
tx.commit().await.unwrap();
// Test
let request = GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: table_name.clone(),
};
let response = get_table_data_count(&pool, request).await.unwrap();
assert_eq!(response.count, 1);
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_table_not_in_profile(#[future] pool: PgPool) {
let pool = pool.await;
let (profile_name, _, schema_id) = setup_test_environment(&pool).await;
// Test with non-existent table
let request = GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: "non_existent_table".to_string(),
};
let result = get_table_data_count(&pool, request).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_profile_not_found(#[future] pool: PgPool) {
let pool = pool.await;
let request = GetTableDataCountRequest {
profile_name: "nonexistent_profile".to_string(),
table_name: "adresar".to_string(),
};
let result = get_table_data_count(&pool, request).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}
#[rstest]
#[tokio::test]
async fn test_database_error(#[future] closed_pool: PgPool) {
let closed_pool = closed_pool.await;
let request = GetTableDataCountRequest {
profile_name: "test".to_string(),
table_name: "test".to_string(),
};
let result = get_table_data_count(&closed_pool, request).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}
#[rstest]
#[tokio::test]
async fn test_empty_table_count(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = format!("empty_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
// Use table definition handler to create the table
let request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: "adresar".to_string(),
columns: vec![
ColumnDefinition {
name: "name".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
let request = GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: "adresar".to_string(),
};
let response = get_table_data_count(&pool, request).await.unwrap();
// A freshly created table has no rows yet.
assert_eq!(response.count, 0);
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_one(&pool)
.await
.unwrap();
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
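// `include!` splices the companion files below into this module verbatim, so
// the tests they contain share the `use` declarations and the setup/cleanup
// helpers defined above.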
include!("get_table_data_count_test2.rs");
include!("get_table_data_count_test3.rs");

@@ -0,0 +1,520 @@
// tests/tables_data/handlers/get_table_data_count_test2.rs
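// Pulled in via include! from get_table_data_count_test.rs; imports and the
// setup/cleanup helpers come from that file.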
#[rstest]
#[tokio::test]
async fn test_schema_with_special_characters(#[future] pool: PgPool) {
let pool = pool.await;
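// Despite the test name, underscores are the only special characters
// exercised here; they are legal in unquoted Postgres identifiers.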
let profile_name = "test_underscore_profile";
let table_name = "test_underscore_table";
// Use table definition handler to create the table
let request = PostTableDefinitionRequest {
profile_name: profile_name.to_string(),
table_name: table_name.to_string(),
columns: vec![
ColumnDefinition {
name: "name".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
// Insert test data
let mut tx = pool.begin().await.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (name) VALUES ('Test Data')"#,
profile_name, table_name
))
.execute(&mut *tx)
.await
.unwrap();
tx.commit().await.unwrap();
let request = GetTableDataCountRequest {
profile_name: profile_name.to_string(),
table_name: table_name.to_string(),
};
let response = get_table_data_count(&pool, request).await.unwrap();
assert_eq!(response.count, 1);
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_one(&pool)
.await
.unwrap();
cleanup_test_environment(&pool, schema_id, profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_large_dataset_count(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = format!("large_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
let table_name = "large_dataset_table";
// Use table definition handler to create the table
let request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
columns: vec![
ColumnDefinition {
name: "value".to_string(),
field_type: "integer".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
// Insert 1000 records
let mut tx = pool.begin().await.unwrap();
for i in 1..=1000 {
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (value) VALUES ({})"#,
profile_name, table_name, i
))
.execute(&mut *tx)
.await
.unwrap();
}
// Mark some as deleted
sqlx::query(&format!(
r#"UPDATE "{}"."{}" SET deleted = true WHERE value <= 100"#,
profile_name, table_name
))
.execute(&mut *tx)
.await
.unwrap();
tx.commit().await.unwrap();
let request = GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
};
let response = get_table_data_count(&pool, request).await.unwrap();
assert_eq!(response.count, 900); // 1000 - 100 deleted
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_one(&pool)
.await
.unwrap();
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_mixed_deleted_states(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = format!("mixed_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
let table_name = "mixed_states_table";
// Use table definition handler to create the table
let request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
columns: vec![
ColumnDefinition {
name: "status".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
// Insert various combinations
let mut tx = pool.begin().await.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (status, deleted) VALUES ('active', false)"#,
profile_name, table_name
))
.execute(&mut *tx)
.await
.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (status, deleted) VALUES ('active', true)"#,
profile_name, table_name
))
.execute(&mut *tx)
.await
.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (status, deleted) VALUES ('inactive', false)"#,
profile_name, table_name
))
.execute(&mut *tx)
.await
.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (status, deleted) VALUES ('inactive', true)"#,
profile_name, table_name
))
.execute(&mut *tx)
.await
.unwrap();
tx.commit().await.unwrap();
let request = GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
};
let response = get_table_data_count(&pool, request).await.unwrap();
assert_eq!(response.count, 2); // Only non-deleted records
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_one(&pool)
.await
.unwrap();
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_case_sensitivity_in_names(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = "case_test_schema";
let table_name = "case_test_table";
// Use table definition handler to create the table
let request = PostTableDefinitionRequest {
profile_name: profile_name.to_string(),
table_name: table_name.to_string(),
columns: vec![
ColumnDefinition {
name: "data".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
let mut tx = pool.begin().await.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (data) VALUES ('test data')"#,
profile_name, table_name
))
.execute(&mut *tx)
.await
.unwrap();
tx.commit().await.unwrap();
// Test exact case
let request = GetTableDataCountRequest {
profile_name: profile_name.to_string(),
table_name: table_name.to_string(),
};
let response = get_table_data_count(&pool, request).await.unwrap();
assert_eq!(response.count, 1);
// Querying with the wrong case should fail
let wrong_case_request = GetTableDataCountRequest {
profile_name: "CASE_TEST_SCHEMA".to_string(),
table_name: table_name.to_string(),
};
let wrong_case_result = get_table_data_count(&pool, wrong_case_request).await;
assert!(wrong_case_result.is_err());
assert_eq!(wrong_case_result.unwrap_err().code(), tonic::Code::NotFound);
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_one(&pool)
.await
.unwrap();
cleanup_test_environment(&pool, schema_id, profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_concurrent_count_requests(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = format!("concurrent_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
let table_name = "concurrent_table";
// Use table definition handler to create the table
let request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
columns: vec![
ColumnDefinition {
name: "counter".to_string(),
field_type: "integer".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
// Insert initial data
let mut tx = pool.begin().await.unwrap();
for i in 1..=50 {
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (counter) VALUES ({})"#,
profile_name, table_name, i
))
.execute(&mut *tx)
.await
.unwrap();
}
tx.commit().await.unwrap();
// Run multiple concurrent count requests
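// PgPool is a cheap, reference-counted handle, so each spawned task can own
// a clone that shares the same underlying connection pool.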
let mut handles = vec![];
for _ in 0..10 {
let pool_clone = pool.clone();
let profile_name_clone = profile_name.clone();
let table_name_clone = table_name.to_string();
let handle = tokio::spawn(async move {
let request = GetTableDataCountRequest {
profile_name: profile_name_clone,
table_name: table_name_clone,
};
get_table_data_count(&pool_clone, request).await
});
handles.push(handle);
}
// Wait for all requests to complete
for handle in handles {
let response = handle.await.unwrap().unwrap();
assert_eq!(response.count, 50);
}
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_one(&pool)
.await
.unwrap();
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_table_without_physical_existence(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = format!("missing_table_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
let table_name = "missing_physical_table";
// Create table definition but then manually drop the physical table
let request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
columns: vec![
ColumnDefinition {
name: "data".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
// Manually drop the physical table while keeping the definition
let mut tx = pool.begin().await.unwrap();
sqlx::query(&format!(
r#"DROP TABLE "{}"."{}" CASCADE"#,
profile_name, table_name
))
.execute(&mut *tx)
.await
.unwrap();
tx.commit().await.unwrap();
let request = GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
};
let result = get_table_data_count(&pool, request).await;
assert!(result.is_err());
assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_one(&pool)
.await
.unwrap();
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_numeric_column_types_count(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = format!("numeric_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
let table_name = "numeric_types_table";
// Use table definition handler to create the table with various numeric types
let request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
columns: vec![
ColumnDefinition {
name: "big_number".to_string(),
field_type: "bigint".to_string(),
},
ColumnDefinition {
name: "small_number".to_string(),
field_type: "integer".to_string(),
},
ColumnDefinition {
name: "decimal_number".to_string(),
field_type: "decimal(10,2)".to_string(),
},
ColumnDefinition {
name: "timestamp_col".to_string(),
field_type: "timestamptz".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
let mut tx = pool.begin().await.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (big_number, small_number, decimal_number, timestamp_col)
VALUES (9223372036854775807, 2147483647, 99999999.99, NOW())"#,
profile_name, table_name
))
.execute(&mut *tx)
.await
.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (big_number, small_number, decimal_number, timestamp_col, deleted)
VALUES (1, 1, 1.00, NOW(), true)"#,
profile_name, table_name
))
.execute(&mut *tx)
.await
.unwrap();
tx.commit().await.unwrap();
let request = GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
};
let response = get_table_data_count(&pool, request).await.unwrap();
assert_eq!(response.count, 1); // Only the non-deleted record
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_one(&pool)
.await
.unwrap();
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_indexed_columns_count(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = format!("index_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
let table_name = "indexed_table";
// Use table definition handler to create the table with indexes
let request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
columns: vec![
ColumnDefinition {
name: "searchable_field".to_string(),
field_type: "text".to_string(),
},
ColumnDefinition {
name: "category".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec!["searchable_field".to_string(), "category".to_string()],
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
let mut tx = pool.begin().await.unwrap();
for i in 1..=20 {
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (searchable_field, category) VALUES ('data_{}', 'cat_{}')"#,
profile_name, table_name, i, i % 3
))
.execute(&mut *tx)
.await
.unwrap();
}
tx.commit().await.unwrap();
let request = GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
};
let response = get_table_data_count(&pool, request).await.unwrap();
assert_eq!(response.count, 20);
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_one(&pool)
.await
.unwrap();
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}

@@ -0,0 +1,715 @@
// tests/tables_data/handlers/get_table_data_count_test3.rs
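// Also pulled in via include!; see get_table_data_count_test.rs for the
// shared imports and helpers.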
#[rstest]
#[tokio::test]
async fn test_table_with_foreign_keys(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = format!("fk_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
let parent_table = "parent_table";
let child_table = "child_table";
// Create parent table first
let parent_request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: parent_table.to_string(),
columns: vec![
ColumnDefinition {
name: "name".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, parent_request).await.unwrap();
// Create child table with link to parent
let child_request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: child_table.to_string(),
columns: vec![
ColumnDefinition {
name: "description".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![
TableLink {
linked_table_name: parent_table.to_string(),
required: false,
}
],
};
post_table_definition(&pool, child_request).await.unwrap();
// Insert test data
let mut tx = pool.begin().await.unwrap();
let parent_id: i64 = sqlx::query_scalar(&format!(
r#"INSERT INTO "{}"."{}" (name) VALUES ('Parent 1') RETURNING id"#,
profile_name, parent_table
))
.fetch_one(&mut *tx)
.await
.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (parent_table_id, description) VALUES ({}, 'Child 1')"#,
profile_name, child_table, parent_id
))
.execute(&mut *tx)
.await
.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (parent_table_id, description) VALUES ({}, 'Child 2')"#,
profile_name, child_table, parent_id
))
.execute(&mut *tx)
.await
.unwrap();
tx.commit().await.unwrap();
// Test parent table count
let parent_request = GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: parent_table.to_string(),
};
let parent_response = get_table_data_count(&pool, parent_request).await.unwrap();
assert_eq!(parent_response.count, 1);
// Test child table count
let child_request = GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: child_table.to_string(),
};
let child_response = get_table_data_count(&pool, child_request).await.unwrap();
assert_eq!(child_response.count, 2);
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
)
.fetch_one(&pool)
.await
.unwrap();
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_multiple_foreign_keys(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = format!("multi_fk_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
// Create three parent tables
for table_name in ["users", "categories", "tags"] {
let request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
columns: vec![
ColumnDefinition {
name: "name".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
}
// Create child table with links to all three parents
let child_request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: "posts".to_string(),
columns: vec![
ColumnDefinition {
name: "title".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![
TableLink {
linked_table_name: "users".to_string(),
required: true,
},
TableLink {
linked_table_name: "categories".to_string(),
required: true,
},
TableLink {
linked_table_name: "tags".to_string(),
required: false,
}
],
};
post_table_definition(&pool, child_request).await.unwrap();
// Insert test data
let mut tx = pool.begin().await.unwrap();
let user_id: i64 = sqlx::query_scalar(&format!(
r#"INSERT INTO "{}"."{}" (name) VALUES ('User1') RETURNING id"#,
profile_name, "users"
)).fetch_one(&mut *tx).await.unwrap();
let category_id: i64 = sqlx::query_scalar(&format!(
r#"INSERT INTO "{}"."{}" (name) VALUES ('Tech') RETURNING id"#,
profile_name, "categories"
)).fetch_one(&mut *tx).await.unwrap();
let tag_id: i64 = sqlx::query_scalar(&format!(
r#"INSERT INTO "{}"."{}" (name) VALUES ('Important') RETURNING id"#,
profile_name, "tags"
)).fetch_one(&mut *tx).await.unwrap();
// Insert posts with foreign keys
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (title, users_id, categories_id, tags_id) VALUES ('Post 1', {}, {}, {})"#,
profile_name, "posts", user_id, category_id, tag_id
)).execute(&mut *tx).await.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (title, users_id, categories_id) VALUES ('Post 2', {}, {})"#,
profile_name, "posts", user_id, category_id
)).execute(&mut *tx).await.unwrap();
tx.commit().await.unwrap();
// Test counts
let posts_count = get_table_data_count(&pool, GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: "posts".to_string(),
}).await.unwrap();
assert_eq!(posts_count.count, 2);
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
).fetch_one(&pool).await.unwrap();
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_required_vs_optional_foreign_keys(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = format!("req_opt_fk_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
// Create parent table
let parent_request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: "companies".to_string(),
columns: vec![
ColumnDefinition {
name: "name".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, parent_request).await.unwrap();
// Create child table with a required link (the optional case, required: false,
// is exercised in test_table_with_foreign_keys above)
let child_request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: "employees".to_string(),
columns: vec![
ColumnDefinition {
name: "name".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![
TableLink {
linked_table_name: "companies".to_string(),
required: true, // Required foreign key
}
],
};
post_table_definition(&pool, child_request).await.unwrap();
// Insert test data
let mut tx = pool.begin().await.unwrap();
let company_id: i64 = sqlx::query_scalar(&format!(
r#"INSERT INTO "{}"."{}" (name) VALUES ('TechCorp') RETURNING id"#,
profile_name, "companies"
)).fetch_one(&mut *tx).await.unwrap();
// Insert employees with required foreign key
for i in 1..=5 {
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (name, companies_id) VALUES ('Employee {}', {})"#,
profile_name, "employees", i, company_id
)).execute(&mut *tx).await.unwrap();
}
tx.commit().await.unwrap();
// Test counts
let companies_count = get_table_data_count(&pool, GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: "companies".to_string(),
}).await.unwrap();
assert_eq!(companies_count.count, 1);
let employees_count = get_table_data_count(&pool, GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: "employees".to_string(),
}).await.unwrap();
assert_eq!(employees_count.count, 5);
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
).fetch_one(&pool).await.unwrap();
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_performance_stress_large_dataset(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = format!("stress_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
let table_name = "stress_table";
// Create table
let request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
columns: vec![
ColumnDefinition {
name: "data".to_string(),
field_type: "text".to_string(),
},
ColumnDefinition {
name: "number".to_string(),
field_type: "integer".to_string(),
}
],
indexes: vec!["number".to_string()], // Add index for better performance
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
// Insert 10,000 records in 100 batches of 100 rows each
let mut tx = pool.begin().await.unwrap();
for batch in 0..100 {
let mut values = Vec::new();
for i in 1..=100 {
let record_num = batch * 100 + i;
values.push(format!("('Data {}', {})", record_num, record_num));
}
let sql = format!(
r#"INSERT INTO "{}"."{}" (data, number) VALUES {}"#,
profile_name, table_name, values.join(", ")
);
sqlx::query(&sql).execute(&mut *tx).await.unwrap();
}
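// Each iteration above sends a single multi-row statement, roughly:
//     INSERT INTO "<schema>"."stress_table" (data, number)
//     VALUES ('Data 1', 1), ('Data 2', 2), /* ... */ ('Data 100', 100)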
// Mark some as deleted
sqlx::query(&format!(
r#"UPDATE "{}"."{}" SET deleted = true WHERE number <= 1000"#,
profile_name, table_name
)).execute(&mut *tx).await.unwrap();
tx.commit().await.unwrap();
// Test count performance
let start = std::time::Instant::now();
let response = get_table_data_count(&pool, GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
}).await.unwrap();
let duration = start.elapsed();
assert_eq!(response.count, 9000); // 10000 - 1000 deleted
// Performance assertion - should complete within reasonable time
assert!(duration.as_secs() < 5, "Count operation took too long: {:?}", duration);
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
).fetch_one(&pool).await.unwrap();
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_maximum_identifier_lengths(#[future] pool: PgPool) {
let pool = pool.await;
// Test with maximum-length names (63 bytes, PostgreSQL's identifier limit)
let max_profile_name = "a".repeat(63);
let max_table_name = "b".repeat(63);
let request = PostTableDefinitionRequest {
profile_name: max_profile_name.clone(),
table_name: max_table_name.clone(),
columns: vec![
ColumnDefinition {
name: "c".repeat(63), // Max column name
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
// Insert test data
let mut tx = pool.begin().await.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" ("{}") VALUES ('test')"#,
max_profile_name, max_table_name, "c".repeat(63)
)).execute(&mut *tx).await.unwrap();
tx.commit().await.unwrap();
// Test count
let response = get_table_data_count(&pool, GetTableDataCountRequest {
profile_name: max_profile_name.clone(),
table_name: max_table_name.clone(),
}).await.unwrap();
assert_eq!(response.count, 1);
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
max_profile_name
).fetch_one(&pool).await.unwrap();
cleanup_test_environment(&pool, schema_id, &max_profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_complex_schema_hierarchy(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = format!("hierarchy_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
// Create A -> B -> C -> D hierarchy
let tables = ["table_a", "table_b", "table_c", "table_d"];
// Create first table (no dependencies)
let request_a = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: tables[0].to_string(),
columns: vec![
ColumnDefinition {
name: "name".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, request_a).await.unwrap();
// Create subsequent tables with dependencies
for i in 1..tables.len() {
let request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: tables[i].to_string(),
columns: vec![
ColumnDefinition {
name: "data".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![
TableLink {
linked_table_name: tables[i-1].to_string(),
required: true,
}
],
};
post_table_definition(&pool, request).await.unwrap();
}
// Insert hierarchical data
let mut tx = pool.begin().await.unwrap();
let a_id: i64 = sqlx::query_scalar(&format!(
r#"INSERT INTO "{}"."{}" (name) VALUES ('Root') RETURNING id"#,
profile_name, tables[0]
)).fetch_one(&mut *tx).await.unwrap();
let b_id: i64 = sqlx::query_scalar(&format!(
r#"INSERT INTO "{}"."{}" (data, table_a_id) VALUES ('Level B', {}) RETURNING id"#,
profile_name, tables[1], a_id
)).fetch_one(&mut *tx).await.unwrap();
let c_id: i64 = sqlx::query_scalar(&format!(
r#"INSERT INTO "{}"."{}" (data, table_b_id) VALUES ('Level C', {}) RETURNING id"#,
profile_name, tables[2], b_id
)).fetch_one(&mut *tx).await.unwrap();
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (data, table_c_id) VALUES ('Level D', {})"#,
profile_name, tables[3], c_id
)).execute(&mut *tx).await.unwrap();
tx.commit().await.unwrap();
// Test counts for each level
for table_name in &tables {
let response = get_table_data_count(&pool, GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
}).await.unwrap();
assert_eq!(response.count, 1, "Table {} should have count 1", table_name);
}
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
).fetch_one(&pool).await.unwrap();
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_concurrent_insert_and_count(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = format!("concurrent_insert_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
let table_name = "concurrent_ops_table";
// Create table
let request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
columns: vec![
ColumnDefinition {
name: "value".to_string(),
field_type: "integer".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
// Insert initial data
let mut tx = pool.begin().await.unwrap();
for i in 1..=100 {
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (value) VALUES ({})"#,
profile_name, table_name, i
)).execute(&mut *tx).await.unwrap();
}
tx.commit().await.unwrap();
// Run concurrent operations
let mut count_handles = vec![];
let mut insert_handles = vec![];
// Spawn count operations
for _ in 0..5 {
let pool_clone = pool.clone();
let profile_name_clone = profile_name.clone();
let table_name_clone = table_name.to_string();
let handle = tokio::spawn(async move {
let mut counts = Vec::new();
for _ in 0..10 {
let response = get_table_data_count(&pool_clone, GetTableDataCountRequest {
profile_name: profile_name_clone.clone(),
table_name: table_name_clone.clone(),
}).await.unwrap();
counts.push(response.count);
tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
}
counts
});
count_handles.push(handle);
}
// Spawn insert operations
for i in 0..3 {
let pool_clone = pool.clone();
let profile_name_clone = profile_name.clone();
let table_name_clone = table_name.to_string();
let handle = tokio::spawn(async move {
for j in 1..=20 {
let value = (i * 100) + j + 1000; // Ensure unique values
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (value) VALUES ({})"#,
profile_name_clone, table_name_clone, value
)).execute(&pool_clone).await.unwrap();
tokio::time::sleep(tokio::time::Duration::from_millis(5)).await;
}
});
insert_handles.push(handle);
}
// Wait for all operations to complete
for handle in count_handles {
handle.await.unwrap();
}
for handle in insert_handles {
handle.await.unwrap();
}
// Final count should be 100 + (3 * 20) = 160
let final_response = get_table_data_count(&pool, GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
}).await.unwrap();
assert_eq!(final_response.count, 160);
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
).fetch_one(&pool).await.unwrap();
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_edge_case_all_records_deleted(#[future] pool: PgPool) {
let pool = pool.await;
let profile_name = format!("all_deleted_test_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
let table_name = "all_deleted_table";
// Create table
let request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
columns: vec![
ColumnDefinition {
name: "data".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
// Insert and then delete all records
let mut tx = pool.begin().await.unwrap();
for i in 1..=50 {
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (data) VALUES ('Record {}')"#,
profile_name, table_name, i
)).execute(&mut *tx).await.unwrap();
}
// Mark all as deleted
sqlx::query(&format!(
r#"UPDATE "{}"."{}" SET deleted = true"#,
profile_name, table_name
)).execute(&mut *tx).await.unwrap();
tx.commit().await.unwrap();
// Count should be 0
let response = get_table_data_count(&pool, GetTableDataCountRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
}).await.unwrap();
assert_eq!(response.count, 0);
// Cleanup
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
).fetch_one(&pool).await.unwrap();
cleanup_test_environment(&pool, schema_id, &profile_name).await;
}
#[rstest]
#[tokio::test]
async fn test_cross_schema_isolation(#[future] pool: PgPool) {
let pool = pool.await;
let profile1 = format!("schema1_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
let profile2 = format!("schema2_{}", Utc::now().timestamp_nanos_opt().unwrap_or(0));
let table_name = "isolation_test_table";
// Create identical tables in two different schemas
for profile_name in [&profile1, &profile2] {
let request = PostTableDefinitionRequest {
profile_name: profile_name.clone(),
table_name: table_name.to_string(),
columns: vec![
ColumnDefinition {
name: "data".to_string(),
field_type: "text".to_string(),
}
],
indexes: vec![],
links: vec![],
};
post_table_definition(&pool, request).await.unwrap();
}
// Insert different amounts of data in each schema
let mut tx = pool.begin().await.unwrap();
// Schema 1: 10 records
for i in 1..=10 {
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (data) VALUES ('Schema1 Record {}')"#,
profile1, table_name, i
)).execute(&mut *tx).await.unwrap();
}
// Schema 2: 25 records
for i in 1..=25 {
sqlx::query(&format!(
r#"INSERT INTO "{}"."{}" (data) VALUES ('Schema2 Record {}')"#,
profile2, table_name, i
)).execute(&mut *tx).await.unwrap();
}
tx.commit().await.unwrap();
// Test counts are isolated
let count1 = get_table_data_count(&pool, GetTableDataCountRequest {
profile_name: profile1.clone(),
table_name: table_name.to_string(),
}).await.unwrap();
assert_eq!(count1.count, 10);
let count2 = get_table_data_count(&pool, GetTableDataCountRequest {
profile_name: profile2.clone(),
table_name: table_name.to_string(),
}).await.unwrap();
assert_eq!(count2.count, 25);
// Cleanup both schemas
for profile_name in [&profile1, &profile2] {
let schema_id = sqlx::query_scalar!(
"SELECT id FROM schemas WHERE name = $1",
profile_name
).fetch_one(&pool).await.unwrap();
cleanup_test_environment(&pool, schema_id, profile_name).await;
}
}

@@ -1,3 +1,4 @@
// tests/tables_data/get/mod.rs
pub mod get_table_data_count_test;
pub mod get_table_data_test;