Ordering of the tests for tables data

This commit is contained in:
filipriec
2025-06-25 10:34:58 +02:00
parent 5c23f61a10
commit dc99131794
21 changed files with 18 additions and 4 deletions

View File

@@ -0,0 +1,485 @@
// tests/tables_data/handlers/get_table_data_test.rs
use rstest::{fixture, rstest};
use server::tables_data::handlers::get_table_data;
use common::proto::multieko2::tables_data::GetTableDataRequest;
use crate::common::setup_test_db;
use sqlx::{PgPool, Row};
use tonic;
use chrono::{DateTime, Utc};
use serde_json::json;
use std::collections::HashMap;
use futures::future::join_all;
use rand::distr::Alphanumeric;
use rand::Rng;
use rust_decimal::Decimal;
use rust_decimal_macros::dec;
use server::table_definition::handlers::post_table_definition;
use server::tables_data::handlers::post_table_data;
use common::proto::multieko2::table_definition::{
PostTableDefinitionRequest, ColumnDefinition as TableColumnDefinition, TableLink
};
use common::proto::multieko2::tables_data::PostTableDataRequest;
use prost_types::Value;
use prost_types::value::Kind;
use tokio::sync::mpsc;
use server::indexer::IndexCommand;
#[fixture]
async fn pool() -> PgPool {
setup_test_db().await
}
/// Fixture: yields a pool whose connections have already been shut down.
/// Used by tests that exercise the database-error path of the handler.
#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
    let ready = pool.await;
    // Close the pool up front so every subsequent query fails.
    ready.close().await;
    ready
}
/// Fixture: registers a uniquely named schema in the `schemas` table and
/// creates the matching physical PostgreSQL schema.
/// Returns `(pool, schema_name, schema_row_id)`.
#[fixture]
async fn schema(#[future] pool: PgPool) -> (PgPool, String, i64) {
    let db = pool.await;
    // Nanosecond timestamp keeps concurrently running tests from colliding.
    let nanos = Utc::now().timestamp_nanos_opt().unwrap_or_default();
    let schema_name = format!("testschema_{}", nanos);

    // Bookkeeping row first (schemas table, not profiles).
    let row = sqlx::query!(
        "INSERT INTO schemas (name) VALUES ($1) RETURNING id",
        schema_name
    )
    .fetch_one(&db)
    .await
    .unwrap();

    // Then materialize the actual PostgreSQL schema.
    let ddl = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", schema_name);
    sqlx::query(&ddl)
        .execute(&db)
        .await
        .unwrap();

    (db, schema_name, row.id)
}
/// Fixture: stores a table definition row (columns + indexes metadata) and
/// creates the corresponding physical table inside the test schema.
/// Returns `(pool, schema_name, table_name, table_definition_id)`.
#[fixture]
async fn table_definition(#[future] schema: (PgPool, String, i64)) -> (PgPool, String, String, i64) {
    let (db, schema_name, schema_id) = schema.await;
    let table_name = format!("test_table_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());

    // Column/index metadata kept alongside the definition row.
    let columns = json!([
        "\"name\" TEXT",
        "\"age\" INTEGER",
        "\"email\" TEXT",
        "\"is_active\" BOOLEAN"
    ]);
    let indexes = json!([]);

    // Keyed by schema_id (not profile_id).
    let definition = sqlx::query!(
        "INSERT INTO table_definitions (schema_id, table_name, columns, indexes) VALUES ($1, $2, $3, $4) RETURNING id",
        schema_id,
        table_name,
        columns,
        indexes
    )
    .fetch_one(&db)
    .await
    .unwrap();

    // Create the physical table the handler will read from.
    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
    let ddl = format!(
        r#"
CREATE TABLE {} (
id BIGSERIAL PRIMARY KEY,
deleted BOOLEAN NOT NULL DEFAULT FALSE,
name TEXT,
age INTEGER,
email TEXT,
is_active BOOLEAN,
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
)
"#,
        qualified_table
    );
    sqlx::query(&ddl)
        .execute(&db)
        .await
        .unwrap();

    (db, schema_name, table_name, definition.id)
}
/// Fixture: inserts one fully populated row into the dynamic table and
/// returns `(pool, schema_name, table_name, inserted_id)`.
#[fixture]
async fn regular_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
    let (db, schema_name, table_name, _) = table_definition.await;
    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
    let insert = format!(
        r#"INSERT INTO {} (name, age, email, is_active)
VALUES ($1, $2, $3, $4)
RETURNING id"#,
        qualified_table
    );
    let row = sqlx::query(&insert)
        .bind("John Doe")
        .bind(30)
        .bind("john@example.com")
        .bind(true)
        .fetch_one(&db)
        .await
        .unwrap();
    let inserted_id: i64 = row.get("id");
    (db, schema_name, table_name, inserted_id)
}
/// Fixture: inserts a row using only column defaults, so every user-defined
/// column is NULL. Returns `(pool, schema_name, table_name, inserted_id)`.
#[fixture]
async fn null_fields_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
    let (db, schema_name, table_name, _) = table_definition.await;
    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
    let insert = format!(
        r#"INSERT INTO {} DEFAULT VALUES
RETURNING id"#,
        qualified_table
    );
    let row = sqlx::query(&insert)
        .fetch_one(&db)
        .await
        .unwrap();
    let inserted_id: i64 = row.get("id");
    (db, schema_name, table_name, inserted_id)
}
/// Fixture: inserts a soft-deleted row (`deleted = true`) and returns
/// `(pool, schema_name, table_name, inserted_id)`.
///
/// Fix: the original chained `.bind(true)` onto a statement that contains
/// no placeholders (`VALUES (true)`). Postgres rejects that at execute time
/// ("bind message supplies 1 parameters, but prepared statement requires 0"),
/// so the fixture could never insert anything. The literal in the SQL already
/// sets the flag; the stray bind is removed.
#[fixture]
async fn deleted_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
    let (pool, schema_name, table_name, _) = table_definition.await;
    // Insert a deleted record
    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
    let query = format!(
        r#"INSERT INTO {} (deleted)
VALUES (true)
RETURNING id"#,
        qualified_table
    );
    let record = sqlx::query(&query)
        .fetch_one(&pool)
        .await
        .unwrap();
    let id: i64 = record.get("id");
    (pool, schema_name, table_name, id)
}
/// Cross-checks a handler response map against the row actually stored in the
/// database. Non-nullable columns must match exactly; nullable columns that
/// are NULL in the database must appear as empty strings in the response.
async fn assert_response_matches(pool: &PgPool, schema_name: &str, table_name: &str, id: i64, response: &HashMap<String, String>) {
    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
    let columns = "id, deleted, name, age, email, is_active";
    let select = format!(r#"SELECT {} FROM {} WHERE id = $1"#, columns, qualified_table);
    let db_row = sqlx::query(&select)
        .bind(id)
        .fetch_one(pool)
        .await
        .unwrap();

    // Columns that can never be NULL.
    assert_eq!(db_row.get::<i64, _>("id").to_string(), response["id"]);
    assert_eq!(db_row.get::<bool, _>("deleted").to_string(), response["deleted"]);

    // Nullable columns: NULL in the DB corresponds to "" in the response map.
    let name: Option<String> = db_row.try_get("name").unwrap_or(None);
    assert_eq!(name.unwrap_or_default(), response["name"]);

    let age: Option<i32> = db_row.try_get("age").unwrap_or(None);
    assert_eq!(age.map(|v| v.to_string()).unwrap_or_default(), response["age"]);

    let email: Option<String> = db_row.try_get("email").unwrap_or(None);
    assert_eq!(email.unwrap_or_default(), response["email"]);

    let is_active: Option<bool> = db_row.try_get("is_active").unwrap_or(None);
    assert_eq!(is_active.map(|v| v.to_string()).unwrap_or_default(), response["is_active"]);
}
/// Best-effort teardown: drops the physical table and schema, then removes
/// the bookkeeping rows. Every result is deliberately ignored (`let _`) so
/// cleanup can never fail a test that already passed.
async fn cleanup_test_data(pool: &PgPool, schema_name: &str, table_name: &str) {
    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);

    let drop_table = format!(r#"DROP TABLE IF EXISTS {} CASCADE"#, qualified_table);
    let _ = sqlx::query(&drop_table).execute(pool).await;

    let _ = sqlx::query!("DELETE FROM table_definitions WHERE table_name = $1", table_name)
        .execute(pool)
        .await;

    let drop_schema = format!(r#"DROP SCHEMA IF EXISTS "{}" CASCADE"#, schema_name);
    let _ = sqlx::query(&drop_schema).execute(pool).await;

    let _ = sqlx::query!("DELETE FROM schemas WHERE name = $1", schema_name)
        .execute(pool)
        .await;
}
/// Happy path: a fully populated row comes back with every column stringified.
#[rstest]
#[tokio::test]
async fn test_get_table_data_success(
    #[future] regular_record: (PgPool, String, String, i64),
) {
    let (pool, schema_name, table_name, id) = regular_record.await;
    let req = GetTableDataRequest {
        profile_name: schema_name.clone(),
        table_name: table_name.clone(),
        id,
    };
    let resp = get_table_data(&pool, req).await.unwrap();

    assert_eq!(resp.data["id"], id.to_string());
    assert_eq!(resp.data["deleted"], "false");
    assert_eq!(resp.data["name"], "John Doe");
    assert_eq!(resp.data["age"], "30");
    assert_eq!(resp.data["email"], "john@example.com");
    assert_eq!(resp.data["is_active"], "true");

    assert_response_matches(&pool, &schema_name, &table_name, id, &resp.data).await;
    cleanup_test_data(&pool, &schema_name, &table_name).await;
}
/// NULL columns in the database must surface as empty strings in the response.
#[rstest]
#[tokio::test]
async fn test_get_optional_fields_null(
    #[future] null_fields_record: (PgPool, String, String, i64),
) {
    let (pool, schema_name, table_name, id) = null_fields_record.await;
    let req = GetTableDataRequest {
        profile_name: schema_name.clone(),
        table_name: table_name.clone(),
        id,
    };
    let resp = get_table_data(&pool, req).await.unwrap();

    // All user-defined columns were left NULL by the fixture.
    for field in ["name", "age", "email", "is_active"] {
        assert_eq!(resp.data[field], "");
    }
    assert_eq!(resp.data["deleted"], "false");

    assert_response_matches(&pool, &schema_name, &table_name, id, &resp.data).await;
    cleanup_test_data(&pool, &schema_name, &table_name).await;
}
/// Requesting an id that was never inserted must yield `NotFound`.
#[rstest]
#[tokio::test]
async fn test_get_nonexistent_id(
    #[future] table_definition: (PgPool, String, String, i64),
) {
    let (pool, schema_name, table_name, _) = table_definition.await;
    let req = GetTableDataRequest {
        profile_name: schema_name.clone(),
        table_name: table_name.clone(),
        id: 9999,
    };
    let err = get_table_data(&pool, req).await.unwrap_err();
    assert_eq!(err.code(), tonic::Code::NotFound);
    cleanup_test_data(&pool, &schema_name, &table_name).await;
}
/// A soft-deleted row is invisible to the handler and reports `NotFound`.
#[rstest]
#[tokio::test]
async fn test_get_deleted_record(
    #[future] deleted_record: (PgPool, String, String, i64),
) {
    let (pool, schema_name, table_name, id) = deleted_record.await;
    let req = GetTableDataRequest {
        profile_name: schema_name.clone(),
        table_name: table_name.clone(),
        id,
    };
    let err = get_table_data(&pool, req).await.unwrap_err();
    assert_eq!(err.code(), tonic::Code::NotFound);
    cleanup_test_data(&pool, &schema_name, &table_name).await;
}
/// Any query against an already-closed pool must surface as `Internal`.
#[rstest]
#[tokio::test]
async fn test_get_database_error(
    #[future] closed_pool: PgPool,
) {
    let dead_pool = closed_pool.await;
    let req = GetTableDataRequest {
        profile_name: "test".into(),
        table_name: "test".into(),
        id: 1,
    };
    let err = get_table_data(&dead_pool, req).await.unwrap_err();
    assert_eq!(err.code(), tonic::Code::Internal);
}
/// Non-ASCII (Czech) text must round-trip through insert and fetch unchanged.
#[rstest]
#[tokio::test]
async fn test_get_special_characters(
    #[future] table_definition: (PgPool, String, String, i64),
) {
    let (pool, schema_name, table_name, _) = table_definition.await;
    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
    let insert = format!(
        r#"INSERT INTO {} (name, email)
VALUES ($1, $2)
RETURNING id"#,
        qualified_table
    );
    let row = sqlx::query(&insert)
        .bind("Náměstí ČR")
        .bind("čšěř@example.com")
        .fetch_one(&pool)
        .await
        .unwrap();
    let id: i64 = row.get("id");

    let req = GetTableDataRequest {
        profile_name: schema_name.clone(),
        table_name: table_name.clone(),
        id,
    };
    let resp = get_table_data(&pool, req).await.unwrap();

    assert_eq!(resp.data["name"], "Náměstí ČR");
    assert_eq!(resp.data["email"], "čšěř@example.com");
    assert_response_matches(&pool, &schema_name, &table_name, id, &resp.data).await;
    cleanup_test_data(&pool, &schema_name, &table_name).await;
}
/// A 255-character value round-trips without truncation.
#[rstest]
#[tokio::test]
async fn test_get_max_length_fields(
    #[future] table_definition: (PgPool, String, String, i64),
) {
    let (pool, schema_name, table_name, _) = table_definition.await;
    let long_name = "a".repeat(255);
    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
    let insert = format!(
        r#"INSERT INTO {} (name)
VALUES ($1)
RETURNING id"#,
        qualified_table
    );
    let row = sqlx::query(&insert)
        .bind(&long_name)
        .fetch_one(&pool)
        .await
        .unwrap();
    let id: i64 = row.get("id");

    let req = GetTableDataRequest {
        profile_name: schema_name.clone(),
        table_name: table_name.clone(),
        id,
    };
    let resp = get_table_data(&pool, req).await.unwrap();

    assert_eq!(resp.data["name"].len(), 255);
    assert_eq!(resp.data["name"], long_name);
    assert_response_matches(&pool, &schema_name, &table_name, id, &resp.data).await;
    cleanup_test_data(&pool, &schema_name, &table_name).await;
}
/// An unknown profile (schema) name must yield `NotFound`.
#[rstest]
#[tokio::test]
async fn test_get_invalid_profile(
    #[future] pool: PgPool,
) {
    let db = pool.await;
    let req = GetTableDataRequest {
        profile_name: "non_existent_profile".into(),
        table_name: "test_table".into(),
        id: 1,
    };
    let err = get_table_data(&db, req).await.unwrap_err();
    assert_eq!(err.code(), tonic::Code::NotFound);
}
/// A valid schema but unknown table name must yield `NotFound`.
#[rstest]
#[tokio::test]
async fn test_get_invalid_table(
    #[future] schema: (PgPool, String, i64),
) {
    let (pool, schema_name, _) = schema.await;
    let req = GetTableDataRequest {
        profile_name: schema_name.clone(),
        table_name: "non_existent_table".into(),
        id: 1,
    };
    let err = get_table_data(&pool, req).await.unwrap_err();
    assert_eq!(err.code(), tonic::Code::NotFound);
    cleanup_test_data(&pool, &schema_name, "non_existent_table").await;
}
/// NOTE(review): despite its name, this test sends no column information —
/// `GetTableDataRequest` has no column field here — so it only verifies that
/// a plain fetch of an existing record succeeds. Consider renaming or
/// extending it if column filtering is ever added.
#[rstest]
#[tokio::test]
async fn test_get_invalid_column(
    #[future] regular_record: (PgPool, String, String, i64),
) {
    let (pool, schema_name, table_name, id) = regular_record.await;
    let req = GetTableDataRequest {
        profile_name: schema_name.clone(),
        table_name: table_name.clone(),
        id,
    };
    let outcome = get_table_data(&pool, req).await;
    assert!(outcome.is_ok()); // Should still succeed as we're not filtering columns
    cleanup_test_data(&pool, &schema_name, &table_name).await;
}
include!("get_table_data_test2.rs");

File diff suppressed because it is too large. [Load Diff]

View File

@@ -0,0 +1,3 @@
// tests/tables_data/get/mod.rs
pub mod get_table_data_test;