From f87e3c03cb6b9dc11a214279fdc94bd98aa85ce5 Mon Sep 17 00:00:00 2001
From: filipriec
Date: Wed, 25 Jun 2025 09:16:32 +0200
Subject: [PATCH] get test updated, working now

---
 .../handlers/get_table_data_test.rs      | 262 ++++++++++--------
 server/tests/tables_data/handlers/mod.rs |   4 +-
 2 files changed, 142 insertions(+), 124 deletions(-)

diff --git a/server/tests/tables_data/handlers/get_table_data_test.rs b/server/tests/tables_data/handlers/get_table_data_test.rs
index ddf9a05..6048746 100644
--- a/server/tests/tables_data/handlers/get_table_data_test.rs
+++ b/server/tests/tables_data/handlers/get_table_data_test.rs
@@ -1,7 +1,7 @@
 // tests/tables_data/handlers/get_table_data_test.rs
 use rstest::{fixture, rstest};
 use server::tables_data::handlers::get_table_data;
-use common::proto::multieko2::tables_data::{GetTableDataRequest, GetTableDataResponse};
+use common::proto::multieko2::tables_data::GetTableDataRequest;
 use crate::common::setup_test_db;
 use sqlx::{PgPool, Row};
 use tonic;
@@ -22,60 +22,70 @@ async fn closed_pool(#[future] pool: PgPool) -> PgPool {
 }
 
 #[fixture]
-async fn profile(#[future] pool: PgPool) -> (PgPool, String, i64) {
+async fn schema(#[future] pool: PgPool) -> (PgPool, String, i64) {
     let pool = pool.await;
-    let profile_name = format!("testprofile_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
-
-    let profile = sqlx::query!(
-        "INSERT INTO profiles (name) VALUES ($1) RETURNING id",
-        profile_name
+    let schema_name = format!("testschema_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
+
+    // Insert into schemas table instead of profiles
+    let schema = sqlx::query!(
+        "INSERT INTO schemas (name) VALUES ($1) RETURNING id",
+        schema_name
     )
     .fetch_one(&pool)
     .await
    .unwrap();
-
-    (pool, profile_name, profile.id)
+
+    // Create the actual PostgreSQL schema
+    let create_schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", schema_name);
+    sqlx::query(&create_schema_sql)
+        .execute(&pool)
+        .await
+        .unwrap();
+
+    (pool, schema_name, schema.id)
 }
 
 #[fixture]
-async fn table_definition(#[future] profile: (PgPool, String, i64)) -> (PgPool, String, String, i64) {
-    let (pool, profile_name, profile_id) = profile.await;
+async fn table_definition(#[future] schema: (PgPool, String, i64)) -> (PgPool, String, String, i64) {
+    let (pool, schema_name, schema_id) = schema.await;
     let table_name = format!("test_table_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
 
     // Define columns and indexes for the table
     let columns = json!([
-        "\"name\" VARCHAR(255)",
+        "\"name\" TEXT",
         "\"age\" INTEGER",
-        "\"email\" VARCHAR(100)",
+        "\"email\" TEXT",
         "\"is_active\" BOOLEAN"
     ]);
-    let indexes = json!([]); // Add empty indexes array
+    let indexes = json!([]);
 
+    // Use schema_id instead of profile_id
     let table_def = sqlx::query!(
-        "INSERT INTO table_definitions (profile_id, table_name, columns, indexes) VALUES ($1, $2, $3, $4) RETURNING id",
-        profile_id,
+        "INSERT INTO table_definitions (schema_id, table_name, columns, indexes) VALUES ($1, $2, $3, $4) RETURNING id",
+        schema_id,
         table_name,
         columns,
-        indexes // Add indexes to the insert
+        indexes
     )
     .fetch_one(&pool)
     .await
     .unwrap();
 
-    // Create actual table
+    // Create actual table in the schema
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let create_table = format!(
         r#"
-        CREATE TABLE "{}" (
+        CREATE TABLE {} (
            id BIGSERIAL PRIMARY KEY,
-            deleted BOOLEAN NOT NULL DEFAULT false,
-            firma TEXT NOT NULL,
-            name VARCHAR(255),
+            deleted BOOLEAN NOT NULL DEFAULT FALSE,
+            name TEXT,
             age INTEGER,
-            email VARCHAR(100),
-            is_active BOOLEAN
+            email TEXT,
+            is_active BOOLEAN,
+            created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
         )
         "#,
-        table_name
+        qualified_table
     );
 
     sqlx::query(&create_table)
@@ -83,23 +93,23 @@ async fn table_definition(#[future] profile: (PgPool, String, i64)) -> (PgPool,
         .execute(&pool)
         .await
         .unwrap();
 
-    (pool, profile_name, table_name, table_def.id)
+    (pool, schema_name, table_name, table_def.id)
 }
 
 #[fixture]
 async fn regular_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;
 
     // Insert a record with all fields
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma, name, age, email, is_active)
-        VALUES ($1, $2, $3, $4, $5)
+        r#"INSERT INTO {} (name, age, email, is_active)
+        VALUES ($1, $2, $3, $4)
         RETURNING id"#,
-        table_name
+        qualified_table
     );
 
     let record = sqlx::query(&query)
-        .bind("Test Company")
         .bind("John Doe")
         .bind(30)
         .bind("john@example.com")
@@ -109,59 +119,58 @@ async fn regular_record(#[future] table_definition: (PgPool, String, String, i64
         .unwrap();
 
     let id: i64 = record.get("id");
-    (pool, profile_name, table_name, id)
+    (pool, schema_name, table_name, id)
 }
 
 #[fixture]
 async fn null_fields_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;
 
-    // Insert a record with only required fields
+    // Insert a record with only basic fields (all others will be NULL)
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma)
-        VALUES ($1)
+        r#"INSERT INTO {} DEFAULT VALUES
         RETURNING id"#,
-        table_name
+        qualified_table
    );
 
     let record = sqlx::query(&query)
-        .bind("Null Fields Company")
         .fetch_one(&pool)
         .await
         .unwrap();
 
     let id: i64 = record.get("id");
-    (pool, profile_name, table_name, id)
+    (pool, schema_name, table_name, id)
 }
 
 #[fixture]
 async fn deleted_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;
 
     // Insert a deleted record
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma, deleted)
-        VALUES ($1, true)
+        r#"INSERT INTO {} (deleted)
+        VALUES ($1)
         RETURNING id"#,
-        table_name
+        qualified_table
     );
 
     let record = sqlx::query(&query)
-        .bind("Deleted Company")
+        .bind(true)
         .fetch_one(&pool)
         .await
         .unwrap();
 
     let id: i64 = record.get("id");
-    (pool, profile_name, table_name, id)
+    (pool, schema_name, table_name, id)
 }
 
-async fn assert_response_matches(pool: &PgPool, table_name: &str, id: i64, response: &HashMap<String, String>) {
-    let columns = format!(
-        "id, deleted, firma, name, age, email, is_active"
-    );
-    let query = format!(r#"SELECT {} FROM "{}" WHERE id = $1"#, columns, table_name);
-
+async fn assert_response_matches(pool: &PgPool, schema_name: &str, table_name: &str, id: i64, response: &HashMap<String, String>) {
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
+    let columns = "id, deleted, name, age, email, is_active";
+    let query = format!(r#"SELECT {} FROM {} WHERE id = $1"#, columns, qualified_table);
+
     let row = sqlx::query(&query)
         .bind(id)
         .fetch_one(pool)
         .await
@@ -170,30 +179,38 @@ async fn assert_response_matches(pool: &PgPool, table_name: &str, id: i64, respo
     assert_eq!(row.get::<i64, _>("id").to_string(), response["id"]);
     assert_eq!(row.get::<bool, _>("deleted").to_string(), response["deleted"]);
-    assert_eq!(row.get::<String, _>("firma"), response["firma"]);
-
+    // Check optional fields
     let name: Option<String> = row.try_get("name").unwrap_or(None);
     assert_eq!(name.unwrap_or_default(), response["name"]);
-
+
     let age: Option<i32> = row.try_get("age").unwrap_or(None);
     assert_eq!(age.map(|v| v.to_string()).unwrap_or_default(), response["age"]);
-
+
     let email: Option<String> = row.try_get("email").unwrap_or(None);
     assert_eq!(email.unwrap_or_default(), response["email"]);
-
+
     let is_active: Option<bool> = row.try_get("is_active").unwrap_or(None);
     assert_eq!(is_active.map(|v| v.to_string()).unwrap_or_default(), response["is_active"]);
 }
 
-async fn cleanup_test_data(pool: &PgPool, table_name: &str) {
-    let _ = sqlx::query(&format!(r#"DROP TABLE IF EXISTS "{}" CASCADE"#, table_name))
+async fn cleanup_test_data(pool: &PgPool, schema_name: &str, table_name: &str) {
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
+    let _ = sqlx::query(&format!(r#"DROP TABLE IF EXISTS {} CASCADE"#, qualified_table))
         .execute(pool)
         .await;
-
+
     let _ = sqlx::query!("DELETE FROM table_definitions WHERE table_name = $1", table_name)
         .execute(pool)
         .await;
+
+    let _ = sqlx::query(&format!(r#"DROP SCHEMA IF EXISTS "{}" CASCADE"#, schema_name))
+        .execute(pool)
+        .await;
+
+    let _ = sqlx::query!("DELETE FROM schemas WHERE name = $1", schema_name)
+        .execute(pool)
+        .await;
 }
 
 #[rstest]
@@ -201,27 +218,26 @@ async fn cleanup_test_data(pool: &PgPool, table_name: &str) {
 async fn test_get_table_data_success(
     #[future] regular_record: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, id) = regular_record.await;
-
+    let (pool, schema_name, table_name, id) = regular_record.await;
+
     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
-
+
     let response = get_table_data(&pool, request).await.unwrap();
 
     assert_eq!(response.data["id"], id.to_string());
-    assert_eq!(response.data["firma"], "Test Company");
     assert_eq!(response.data["name"], "John Doe");
     assert_eq!(response.data["age"], "30");
     assert_eq!(response.data["email"], "john@example.com");
     assert_eq!(response.data["is_active"], "true");
     assert_eq!(response.data["deleted"], "false");
-
-    assert_response_matches(&pool, &table_name, id, &response.data).await;
-
-    cleanup_test_data(&pool, &table_name).await;
+
+    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
 
 #[rstest]
@@ -229,10 +245,10 @@ async fn test_get_table_data_success(
 async fn test_get_optional_fields_null(
     #[future] null_fields_record: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, id) = null_fields_record.await;
-
+    let (pool, schema_name, table_name, id) = null_fields_record.await;
+
     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
@@ -244,10 +260,10 @@ async fn test_get_optional_fields_null(
 
     let response = get_table_data(&pool, request).await.unwrap();
 
     assert_eq!(response.data["id"], id.to_string());
     assert_eq!(response.data["name"], "");
     assert_eq!(response.data["age"], "");
     assert_eq!(response.data["email"], "");
     assert_eq!(response.data["is_active"], "");
     assert_eq!(response.data["deleted"], "false");
-
-    assert_response_matches(&pool, &table_name, id, &response.data).await;
-
-    cleanup_test_data(&pool, &table_name).await;
+
+    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
 
 #[rstest]
@@ -255,20 +271,20 @@ async fn test_get_optional_fields_null(
 async fn test_get_nonexistent_id(
     #[future] table_definition: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
-
+    let (pool, schema_name, table_name, _) = table_definition.await;
+
     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id: 9999,
     };
 
     let result = get_table_data(&pool, request).await;
-
+
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
-
-    cleanup_test_data(&pool, &table_name).await;
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
 
 #[rstest]
@@ -276,20 +292,20 @@ async fn test_get_nonexistent_id(
 async fn test_get_deleted_record(
     #[future] deleted_record: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, id) = deleted_record.await;
-
+    let (pool, schema_name, table_name, id) = deleted_record.await;
+
     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
 
     let result = get_table_data(&pool, request).await;
-
+
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
-
-    cleanup_test_data(&pool, &table_name).await;
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
 
 #[rstest]
@@ -298,7 +314,7 @@ async fn test_get_database_error(
     #[future] closed_pool: PgPool,
 ) {
     let closed_pool = closed_pool.await;
-
+
     let request = GetTableDataRequest {
         profile_name: "test".into(),
         table_name: "test".into(),
@@ -306,7 +322,7 @@ async fn test_get_database_error(
     };
 
     let result = get_table_data(&closed_pool, request).await;
-
+
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
 }
 
 #[rstest]
@@ -316,17 +332,17 @@ async fn test_get_database_error(
 async fn test_get_special_characters(
     #[future] table_definition: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;
 
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma, name, email)
-        VALUES ($1, $2, $3)
+        r#"INSERT INTO {} (name, email)
+        VALUES ($1, $2)
         RETURNING id"#,
-        table_name
+        qualified_table
     );
 
     let record = sqlx::query(&query)
-        .bind("Test Company")
         .bind("Náměstí ČR")
         .bind("čšěř@example.com")
         .fetch_one(&pool)
@@ -336,7 +352,7 @@ async fn test_get_special_characters(
     let id: i64 = record.get("id");
 
     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
@@ -345,10 +361,10 @@ async fn test_get_special_characters(
 
     assert_eq!(response.data["name"], "Náměstí ČR");
     assert_eq!(response.data["email"], "čšěř@example.com");
-
-    assert_response_matches(&pool, &table_name, id, &response.data).await;
-
-    cleanup_test_data(&pool, &table_name).await;
+
+    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
 
 #[rstest]
@@ -356,18 +372,18 @@
 async fn test_get_max_length_fields(
     #[future] table_definition: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;
 
     let long_name = "a".repeat(255);
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma, name)
-        VALUES ($1, $2)
+        r#"INSERT INTO {} (name)
+        VALUES ($1)
         RETURNING id"#,
-        table_name
+        qualified_table
     );
 
     let record = sqlx::query(&query)
-        .bind("Test Company")
         .bind(&long_name)
         .fetch_one(&pool)
         .await
@@ -376,7 +392,7 @@ async fn test_get_max_length_fields(
     let id: i64 = record.get("id");
 
     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
@@ -385,10 +401,10 @@ async fn test_get_max_length_fields(
 
     assert_eq!(response.data["name"], long_name);
     assert_eq!(response.data["name"].len(), 255);
-
-    assert_response_matches(&pool, &table_name, id, &response.data).await;
-
-    cleanup_test_data(&pool, &table_name).await;
+
+    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
 
 #[rstest]
@@ -397,7 +413,7 @@ async fn test_get_invalid_profile(
     #[future] pool: PgPool,
 ) {
     let pool = pool.await;
-
+
     let request = GetTableDataRequest {
         profile_name: "non_existent_profile".into(),
         table_name: "test_table".into(),
@@ -405,7 +421,7 @@ async fn test_get_invalid_profile(
     };
 
     let result = get_table_data(&pool, request).await;
-
+
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
 }
 
@@ -413,20 +429,22 @@
 #[rstest]
 #[tokio::test]
 async fn test_get_invalid_table(
-    #[future] profile: (PgPool, String, i64),
+    #[future] schema: (PgPool, String, i64),
 ) {
-    let (pool, profile_name, _) = profile.await;
-
+    let (pool, schema_name, _) = schema.await;
+
     let request = GetTableDataRequest {
-        profile_name,
+        profile_name: schema_name.clone(),
         table_name: "non_existent_table".into(),
         id: 1,
     };
 
     let result = get_table_data(&pool, request).await;
-
+
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
+
+    cleanup_test_data(&pool, &schema_name, "non_existent_table").await;
 }
 
 #[rstest]
@@ -434,17 +452,17 @@
 async fn test_get_invalid_column(
     #[future] regular_record: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, id) = regular_record.await;
-
+    let (pool, schema_name, table_name, id) = regular_record.await;
+
     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
 
     let result = get_table_data(&pool, request).await;
-
+
     assert!(result.is_ok()); // Should still succeed as we're not filtering columns
-
-    cleanup_test_data(&pool, &table_name).await;
+
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
diff --git a/server/tests/tables_data/handlers/mod.rs b/server/tests/tables_data/handlers/mod.rs
index 0e498a6..861dbea 100644
--- a/server/tests/tables_data/handlers/mod.rs
+++ b/server/tests/tables_data/handlers/mod.rs
@@ -1,7 +1,7 @@
 // tests/tables_data/mod.rs
 // pub mod post_table_data_test;
 // pub mod put_table_data_test;
-pub mod delete_table_data_test;
-// pub mod get_table_data_test;
+// pub mod delete_table_data_test;
+pub mod get_table_data_test;
 // pub mod get_table_data_count_test;
 // pub mod get_table_data_by_position_test;