ordering of the tests for tables data
@@ -1,296 +0,0 @@
// tests/tables_data/handlers/delete_table_data_test.rs

use rstest::{fixture, rstest};
use sqlx::{PgPool, Row};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::{mpsc, Mutex};
use serde_json::json;
use chrono::Utc;
use futures::future::join_all;
use prost_types::{value::Kind, Value};
use rand::Rng;
use rand::distr::Alphanumeric; // Corrected import

// Common imports from other modules
use common::proto::multieko2::table_definition::{
    PostTableDefinitionRequest, ColumnDefinition as TableColumnDefinition, TableLink,
};
use common::proto::multieko2::tables_data::{
    DeleteTableDataRequest, DeleteTableDataResponse, PostTableDataRequest, PutTableDataRequest,
};
use server::indexer::IndexCommand;
use server::table_definition::handlers::post_table_definition;
use server::tables_data::handlers::{delete_table_data, post_table_data, put_table_data};
use crate::common::setup_test_db;

lazy_static::lazy_static! {
    static ref TEST_MUTEX: Arc<Mutex<()>> = Arc::new(Mutex::new(()));
}
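
// Note: `TEST_MUTEX` is available for tests that must not run in parallel
// (lock it with `TEST_MUTEX.lock().await`); the fixtures below otherwise keep
// tests independent by generating unique schema and table names.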

// ========= Test Helpers =========

fn generate_unique_id() -> String {
    rand::rng() // Corrected function call
        .sample_iter(&Alphanumeric)
        .take(8)
        .map(char::from)
        .collect::<String>()
        .to_lowercase()
}

fn string_to_proto_value(s: &str) -> Value {
    Value {
        kind: Some(Kind::StringValue(s.to_string())),
    }
}
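
// The tests below build numeric proto values for foreign keys inline as
// `Value { kind: Some(Kind::NumberValue(id as f64)) }`. A hypothetical helper
// (not in the original file) that would factor this out could look like:
//
//     fn i64_to_proto_value(n: i64) -> Value {
//         Value { kind: Some(Kind::NumberValue(n as f64)) }
//     }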

// ========= Fixtures =========

#[fixture]
async fn pool() -> PgPool {
    setup_test_db().await
}

#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
    let pool = pool.await;
    pool.close().await;
    pool
}

#[fixture]
async fn existing_profile(#[future] pool: PgPool) -> (PgPool, String, i64) {
    let pool = pool.await;
    let profile_name = format!("testprofile_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());

    // FIX: The table is `schemas`, not `profiles`.
    let profile = sqlx::query!(
        "INSERT INTO schemas (name) VALUES ($1) RETURNING id",
        profile_name
    )
    .fetch_one(&pool)
    .await
    .unwrap();
    (pool, profile_name, profile.id)
}

#[fixture]
async fn existing_table(
    #[future] existing_profile: (PgPool, String, i64),
) -> (PgPool, String, i64, String) {
    let (pool, profile_name, schema_id) = existing_profile.await;
    let table_name = format!("test_table_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());

    // Use post_table_definition instead of manual table creation
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        columns: vec![
            TableColumnDefinition {
                name: "test_data".into(),
                field_type: "text".into(),
            }
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(&pool, table_def_request).await.unwrap();
    (pool, profile_name, schema_id, table_name)
}

#[fixture]
async fn existing_record(
    #[future] existing_table: (PgPool, String, i64, String),
) -> (PgPool, String, String, i64) {
    let (pool, profile_name, _schema_id, table_name) = existing_table.await;

    let mut data = HashMap::new();
    data.insert("test_data".to_string(), string_to_proto_value("Test Record"));

    let post_req = PostTableDataRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        data,
    };

    let (indexer_tx, _indexer_rx) = mpsc::channel(1);
    let response = post_table_data(&pool, post_req, &indexer_tx).await.unwrap();

    (pool, profile_name, table_name, response.inserted_id)
}

#[fixture]
async fn existing_deleted_record(
    #[future] existing_table: (PgPool, String, i64, String),
) -> (PgPool, String, String, i64) {
    let (pool, profile_name, _schema_id, table_name) = existing_table.await;

    // First create a record
    let mut data = HashMap::new();
    data.insert("test_data".to_string(), string_to_proto_value("Test Deleted Record"));

    let post_req = PostTableDataRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        data,
    };

    let (indexer_tx, _indexer_rx) = mpsc::channel(1);
    let response = post_table_data(&pool, post_req, &indexer_tx).await.unwrap();
    let record_id = response.inserted_id;

    // Then delete it
    let delete_req = DeleteTableDataRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        record_id,
    };
    delete_table_data(&pool, delete_req).await.unwrap();

    (pool, profile_name, table_name, record_id)
}

// New fixture for advanced tests
#[derive(Clone)]
struct AdvancedDeleteContext {
    pool: PgPool,
    profile_name: String,
    category_table: String,
    product_table: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
    indexer_rx: Arc<tokio::sync::Mutex<mpsc::Receiver<IndexCommand>>>,
}

#[fixture]
async fn advanced_delete_context() -> AdvancedDeleteContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("adv_del_profile_{}", unique_id);
    let category_table = format!("categories_adv_del_{}", unique_id);
    let product_table = format!("products_adv_del_{}", unique_id);

    let category_def = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: category_table.clone(),
        columns: vec![TableColumnDefinition { name: "name".into(), field_type: "text".into() }],
        links: vec![], indexes: vec![],
    };
    post_table_definition(&pool, category_def).await.unwrap();

    let product_def = PostTableDefinitionRequest {
        profile_name: profile_name.clone(),
        table_name: product_table.clone(),
        columns: vec![TableColumnDefinition { name: "name".into(), field_type: "text".into() }],
        links: vec![TableLink { linked_table_name: category_table.clone(), required: true }],
        indexes: vec![],
    };
    post_table_definition(&pool, product_def).await.unwrap();

    let (tx, rx) = mpsc::channel(100);
    AdvancedDeleteContext {
        pool, profile_name, category_table, product_table,
        indexer_tx: tx,
        indexer_rx: Arc::new(tokio::sync::Mutex::new(rx)),
    }
}

// ========= Basic Tests (from the original file) =========

#[rstest]
#[tokio::test]
async fn test_delete_table_data_success(
    #[future] existing_record: (PgPool, String, String, i64),
) {
    let (pool, profile_name, table_name, record_id) = existing_record.await;
    let request = DeleteTableDataRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        record_id,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(response.success);

    let query = format!("SELECT deleted FROM \"{}\".\"{}\" WHERE id = $1", profile_name, table_name);
    let row = sqlx::query(&query).bind(record_id).fetch_one(&pool).await.unwrap();
    assert!(row.get::<bool, _>("deleted"));
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_profile_not_found(#[future] pool: PgPool) {
    let pool = pool.await;
    let request = DeleteTableDataRequest {
        profile_name: "NonExistentProfile".to_string(),
        table_name: "test_table".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_table_not_found(
    #[future] existing_profile: (PgPool, String, i64),
) {
    let (pool, profile_name, _) = existing_profile.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: "non_existent_table".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_record_not_found(
    #[future] existing_table: (PgPool, String, i64, String),
) {
    let (pool, profile_name, _, table_name) = existing_table.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: table_name.clone(),
        record_id: 9999,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(!response.success);
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_already_deleted(
    #[future] existing_deleted_record: (PgPool, String, String, i64),
) {
    let (pool, profile_name, table_name, record_id) = existing_deleted_record.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: table_name.clone(),
        record_id,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(!response.success);
}

#[rstest]
#[tokio::test]
async fn test_delete_table_data_database_error(#[future] closed_pool: PgPool) {
    let closed_pool = closed_pool.await;
    let request = DeleteTableDataRequest {
        profile_name: "test".to_string(),
        table_name: "test".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&closed_pool, request).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

// Include the new, more advanced tests
include!("delete_table_data_test2.rs");
include!("delete_table_data_test3.rs");
@@ -1,241 +0,0 @@
// tests/tables_data/handlers/delete_table_data_test2.rs

// ========================================================================
// Foreign Key Integrity Tests
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_soft_delete_does_not_break_foreign_key_references(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange: Create a category and a product that links to it.
    let context = advanced_delete_context.await;
    let mut category_data = HashMap::new();
    category_data.insert("name".to_string(), string_to_proto_value("Electronics"));
    let category_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
    let category_id = category_res.inserted_id;

    let mut product_data = HashMap::new();
    product_data.insert("name".to_string(), string_to_proto_value("Laptop"));
    product_data.insert(
        format!("{}_id", context.category_table),
        Value { kind: Some(Kind::NumberValue(category_id as f64)) },
    );
    let product_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };
    let product_res = post_table_data(&context.pool, product_req, &context.indexer_tx).await.unwrap();
    let product_id = product_res.inserted_id;

    // Act: Soft-delete the category record.
    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: category_id,
    };
    let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
    assert!(delete_res.success);

    // Assert: The product record still exists and its foreign key still points to the (now soft-deleted) category ID.
    let query = format!(
        r#"SELECT "{}_id" FROM "{}"."{}" WHERE id = $1"#,
        context.category_table, context.profile_name, context.product_table
    );
    let fk_id: i64 = sqlx::query_scalar(&query)
        .bind(product_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    assert_eq!(fk_id, category_id, "Foreign key reference should remain intact after soft delete.");
}

// ========================================================================
// Indexer Integration Tests
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_delete_does_not_send_indexer_command(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange
    let context = advanced_delete_context.await;
    let mut category_data = HashMap::new();
    category_data.insert("name".to_string(), string_to_proto_value("Test Category"));
    let category_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
    let category_id = category_res.inserted_id;

    // Drain the create command from the channel
    let _ = context.indexer_rx.lock().await.recv().await;

    // Act
    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: category_id,
    };

    let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
    assert!(delete_res.success);

    // Assert: Check that NO command was sent. This verifies current behavior.
    let recv_result = tokio::time::timeout(
        std::time::Duration::from_millis(50),
        context.indexer_rx.lock().await.recv()
    ).await;

    assert!(recv_result.is_err(), "Expected no indexer command to be sent on delete, but one was received.");
}

// ========================================================================
// Concurrency and State Mismatch Tests
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_concurrent_deletes_on_same_record(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange
    let context = advanced_delete_context.await;
    let mut category_data = HashMap::new();
    category_data.insert("name".to_string(), string_to_proto_value("Concurrent Delete Test"));
    let category_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
    let category_id = category_res.inserted_id;

    // Act: Spawn multiple tasks to delete the same record.
    let mut tasks = vec![];
    for _ in 0..5 {
        let pool = context.pool.clone();
        let req = DeleteTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.category_table.clone(),
            record_id: category_id,
        };
        tasks.push(tokio::spawn(async move {
            delete_table_data(&pool, req).await
        }));
    }
    let results = join_all(tasks).await;

    // Assert: Exactly one delete should succeed, the rest should fail (softly).
    let success_count = results
        .iter()
        .filter(|res| matches!(res, Ok(Ok(r)) if r.success))
        .count();

    assert_eq!(success_count, 1, "Exactly one concurrent delete operation should succeed.");
}
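
// For the exactly-one assertion above to hold, the handler's soft delete must
// be atomic on the `deleted` flag. A guarded UPDATE of this shape would give
// that guarantee (an assumption about `delete_table_data`'s internals, not
// code confirmed by this repo):
//
//     UPDATE "profile"."table" SET deleted = TRUE
//     WHERE id = $1 AND deleted = FALSE;
//     -- rows_affected() == 1 for exactly one of the concurrent callers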

#[rstest]
#[tokio::test]
async fn test_delete_fails_if_physical_table_is_missing(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange: Create definitions, then manually drop the physical table to create a state mismatch.
    let context = advanced_delete_context.await;
    let qualified_table = format!("\"{}\".\"{}\"", context.profile_name, context.category_table);
    sqlx::query(&format!("DROP TABLE {} CASCADE", qualified_table))
        .execute(&context.pool)
        .await
        .unwrap();

    // Act: Attempt to delete a record from the logically-defined but physically-absent table.
    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: 1, // ID doesn't matter
    };
    let result = delete_table_data(&context.pool, delete_req).await;

    // Assert: The operation should fail with the specific internal error for a missing relation.
    assert!(result.is_err());
    let err = result.unwrap_err();
    assert_eq!(err.code(), tonic::Code::Internal);
    assert!(
        err.message().contains("is defined but does not physically exist"),
        "Error message should indicate a state mismatch."
    );
}

// ========================================================================
// Interaction with Other Endpoints
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_succeeds_on_soft_deleted_record(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange: Create and then soft-delete a record.
    let context = advanced_delete_context.await;
    let mut category_data = HashMap::new();
    category_data.insert("name".to_string(), string_to_proto_value("Original Name"));
    let category_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
    let category_id = category_res.inserted_id;

    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: category_id,
    };
    delete_table_data(&context.pool, delete_req).await.unwrap();

    // Act: Attempt to update the soft-deleted record using the PUT handler.
    let mut update_data = HashMap::new();
    update_data.insert("name".to_string(), string_to_proto_value("Updated After Delete"));
    let put_req = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        id: category_id,
        data: update_data,
    };
    let put_result = put_table_data(&context.pool, put_req, &context.indexer_tx).await;

    // Assert: This test is crucial because it verifies the requirement to "freeze operations".
    // Currently, the PUT handler does NOT check the deleted flag, so it will succeed.
    // This test documents that behavior. To make it fail, you would need to add a check
    // in `put_table_data` to see if the record is already deleted.
    assert!(put_result.is_ok(), "PUT should succeed on a soft-deleted record (current behavior).");
    let put_res = put_result.unwrap();
    assert!(put_res.success);

    // Verify the name was updated, but the record remains marked as deleted.
    let row = sqlx::query(&format!(
        r#"SELECT name, deleted FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.category_table
    ))
    .bind(category_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();

    let name: String = row.get("name");
    let deleted: bool = row.get("deleted");

    assert_eq!(name, "Updated After Delete");
    assert!(deleted, "Record should remain soft-deleted after an update.");
}
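
// A minimal sketch of the guard described in the comment above, assuming a
// `schema`/`table` pair resolved the same way the other handlers do
// (hypothetical code, not the actual `put_table_data` implementation):
//
//     let already_deleted: bool = sqlx::query_scalar(
//         &format!(r#"SELECT deleted FROM "{}"."{}" WHERE id = $1"#, schema, table)
//     )
//     .bind(request.id)
//     .fetch_one(pool)
//     .await
//     .map_err(|e| tonic::Status::internal(e.to_string()))?;
//     if already_deleted {
//         return Err(tonic::Status::failed_precondition("Record is soft-deleted"));
//     }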
@@ -1,567 +0,0 @@
// tests/tables_data/handlers/delete_table_data_test3.rs

// ========================================================================
// Input Validation and Edge Cases
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_delete_with_negative_record_id(
    #[future] existing_table: (PgPool, String, i64, String),
) {
    let (pool, profile_name, _, table_name) = existing_table.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name,
        record_id: -1,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(!response.success, "Delete with negative ID should fail gracefully");
}

#[rstest]
#[tokio::test]
async fn test_delete_with_zero_record_id(
    #[future] existing_table: (PgPool, String, i64, String),
) {
    let (pool, profile_name, _, table_name) = existing_table.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name,
        record_id: 0,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(!response.success, "Delete with zero ID should fail gracefully");
}

#[rstest]
#[tokio::test]
async fn test_delete_with_max_int64_record_id(
    #[future] existing_table: (PgPool, String, i64, String),
) {
    let (pool, profile_name, _, table_name) = existing_table.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name,
        record_id: i64::MAX,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(!response.success, "Delete with max int64 ID should fail gracefully");
}

// ========================================================================
// Malformed Input Handling
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_delete_with_empty_profile_name(#[future] pool: PgPool) {
    let pool = pool.await;
    let request = DeleteTableDataRequest {
        profile_name: "".to_string(),
        table_name: "test_table".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err(), "Empty profile name should be rejected");
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_delete_with_whitespace_only_profile_name(#[future] pool: PgPool) {
    let pool = pool.await;
    let request = DeleteTableDataRequest {
        profile_name: " ".to_string(),
        table_name: "test_table".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err(), "Whitespace-only profile name should be rejected");
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_delete_with_empty_table_name(
    #[future] existing_profile: (PgPool, String, i64),
) {
    let (pool, profile_name, _) = existing_profile.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: "".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err(), "Empty table name should be rejected");
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

#[rstest]
#[tokio::test]
async fn test_delete_with_sql_injection_attempt(
    #[future] existing_profile: (PgPool, String, i64),
) {
    let (pool, profile_name, _) = existing_profile.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: "test'; DROP TABLE users; --".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err(), "SQL injection attempt should be rejected");
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
}

// ========================================================================
// Data Integrity Verification Tests
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_delete_only_affects_target_record(
    #[future] existing_table: (PgPool, String, i64, String),
) {
    // Arrange: Create multiple records
    let (pool, profile_name, _, table_name) = existing_table.await;

    let mut record_ids = Vec::new();
    for _ in 0..5 {
        let query = format!(
            "INSERT INTO \"{}\".\"{}\" (deleted) VALUES (false) RETURNING id",
            profile_name, table_name
        );
        let row = sqlx::query(&query).fetch_one(&pool).await.unwrap();
        let id: i64 = row.get("id");
        record_ids.push(id);
    }

    let target_id = record_ids[2]; // Delete the middle record

    // Act: Delete one specific record
    let request = DeleteTableDataRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        record_id: target_id,
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(response.success);

    // Assert: Verify only the target record is deleted
    for &id in &record_ids {
        let query = format!(
            "SELECT deleted FROM \"{}\".\"{}\" WHERE id = $1",
            profile_name, table_name
        );
        let row = sqlx::query(&query).bind(id).fetch_one(&pool).await.unwrap();
        let is_deleted: bool = row.get("deleted");

        if id == target_id {
            assert!(is_deleted, "Target record should be marked as deleted");
        } else {
            assert!(!is_deleted, "Non-target records should remain undeleted");
        }
    }
}

#[rstest]
#[tokio::test]
async fn test_delete_preserves_all_other_fields(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange: Create a record with rich data
    let context = advanced_delete_context.await;
    let mut category_data = HashMap::new();
    category_data.insert("name".to_string(), string_to_proto_value("Preserve Test Category"));

    let category_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
    let category_id = category_res.inserted_id;

    // Capture state before deletion
    let before_query = format!(
        "SELECT id, name, deleted, created_at FROM \"{}\".\"{}\" WHERE id = $1",
        context.profile_name, context.category_table
    );
    let before_row = sqlx::query(&before_query)
        .bind(category_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let before_id: i64 = before_row.get("id");
    let before_name: String = before_row.get("name");
    let before_deleted: bool = before_row.get("deleted");
    let before_created_at: chrono::DateTime<chrono::Utc> = before_row.get("created_at");

    // Act: Delete the record
    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: category_id,
    };
    let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
    assert!(delete_res.success);

    // Assert: Verify only the 'deleted' field changed
    let after_query = format!(
        "SELECT id, name, deleted, created_at FROM \"{}\".\"{}\" WHERE id = $1",
        context.profile_name, context.category_table
    );
    let after_row = sqlx::query(&after_query)
        .bind(category_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let after_id: i64 = after_row.get("id");
    let after_name: String = after_row.get("name");
    let after_deleted: bool = after_row.get("deleted");
    let after_created_at: chrono::DateTime<chrono::Utc> = after_row.get("created_at");

    assert_eq!(before_id, after_id, "ID should not change");
    assert_eq!(before_name, after_name, "Name should not change");
    assert_eq!(before_created_at, after_created_at, "Created timestamp should not change");
    assert!(!before_deleted, "Record should initially be not deleted");
    assert!(after_deleted, "Record should be marked as deleted after operation");
}

#[rstest]
#[tokio::test]
async fn test_delete_count_verification(
    #[future] existing_table: (PgPool, String, i64, String),
) {
    // Arrange: Create records and count them
    let (pool, profile_name, _, table_name) = existing_table.await;

    // Create 3 records
    let mut record_ids = Vec::new();
    for _ in 0..3 {
        let query = format!(
            "INSERT INTO \"{}\".\"{}\" (deleted) VALUES (false) RETURNING id",
            profile_name, table_name
        );
        let row = sqlx::query(&query).fetch_one(&pool).await.unwrap();
        let id: i64 = row.get("id");
        record_ids.push(id);
    }

    // Verify initial count
    let count_query = format!(
        "SELECT COUNT(*) as total, COUNT(*) FILTER (WHERE deleted = false) as active FROM \"{}\".\"{}\"",
        profile_name, table_name
    );
    let count_row = sqlx::query(&count_query).fetch_one(&pool).await.unwrap();
    let initial_total: i64 = count_row.get("total");
    let initial_active: i64 = count_row.get("active");

    // Act: Delete one record
    let request = DeleteTableDataRequest {
        profile_name: profile_name.clone(),
        table_name: table_name.clone(),
        record_id: record_ids[0],
    };
    let response = delete_table_data(&pool, request).await.unwrap();
    assert!(response.success);

    // Assert: Verify counts after deletion
    let final_count_row = sqlx::query(&count_query).fetch_one(&pool).await.unwrap();
    let final_total: i64 = final_count_row.get("total");
    let final_active: i64 = final_count_row.get("active");

    assert_eq!(initial_total, final_total, "Total record count should not change (soft delete)");
    assert_eq!(initial_active - 1, final_active, "Active record count should decrease by 1");
}

// ========================================================================
// Multiple Operations Sequence Testing
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_delete_then_post_same_data(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange: Create and delete a record
    let context = advanced_delete_context.await;
    let mut category_data = HashMap::new();
    category_data.insert("name".to_string(), string_to_proto_value("Reusable Name"));

    let category_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data.clone(),
    };
    let first_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
    let first_id = first_res.inserted_id;

    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: first_id,
    };
    delete_table_data(&context.pool, delete_req).await.unwrap();

    // Act: Try to POST the same data again
    let second_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let second_res = post_table_data(&context.pool, second_req, &context.indexer_tx).await.unwrap();

    // Assert: Should succeed with a new ID
    assert!(second_res.success);
    assert_ne!(first_id, second_res.inserted_id, "New record should have different ID");

    // Verify both records exist in the database
    let count_query = format!(
        "SELECT COUNT(*) as total FROM \"{}\".\"{}\" WHERE name = 'Reusable Name'",
        context.profile_name, context.category_table
    );
    let count: i64 = sqlx::query_scalar(&count_query).fetch_one(&context.pool).await.unwrap();
    assert_eq!(count, 2, "Should have 2 records with same name (one deleted, one active)");
}

#[rstest]
#[tokio::test]
async fn test_multiple_deletes_then_recreate_pattern(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Test a realistic pattern: create, delete, recreate multiple times
    let context = advanced_delete_context.await;
    let mut all_ids = Vec::new();

    for i in 0..3 {
        // Create
        let mut category_data = HashMap::new();
        category_data.insert("name".to_string(), string_to_proto_value(&format!("Cycle Name {}", i)));

        let create_req = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.category_table.clone(),
            data: category_data,
        };
        let create_res = post_table_data(&context.pool, create_req, &context.indexer_tx).await.unwrap();
        all_ids.push(create_res.inserted_id);

        // Delete immediately
        let delete_req = DeleteTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.category_table.clone(),
            record_id: create_res.inserted_id,
        };
        let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
        assert!(delete_res.success);
    }

    // Verify all records are marked as deleted
    for &id in &all_ids {
        let query = format!(
            "SELECT deleted FROM \"{}\".\"{}\" WHERE id = $1",
            context.profile_name, context.category_table
        );
        let is_deleted: bool = sqlx::query_scalar(&query)
            .bind(id)
            .fetch_one(&context.pool)
            .await
            .unwrap();
        assert!(is_deleted, "Record {} should be deleted", id);
    }
}

// ========================================================================
// Performance and Stress Tests
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_delete_performance_with_many_records(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange: Create many records
    let context = advanced_delete_context.await;
    let record_count = 100; // Adjust based on test environment
    let mut record_ids = Vec::new();

    for i in 0..record_count {
        let mut category_data = HashMap::new();
        category_data.insert("name".to_string(), string_to_proto_value(&format!("Perf Test {}", i)));

        let create_req = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.category_table.clone(),
            data: category_data,
        };
        let create_res = post_table_data(&context.pool, create_req, &context.indexer_tx).await.unwrap();
        record_ids.push(create_res.inserted_id);
    }

    // Act: Delete a record from the middle of the inserted range and time it
    let target_id = record_ids[record_count / 2];
    let start_time = std::time::Instant::now();

    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: target_id,
    };
    let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();

    let elapsed = start_time.elapsed();

    // Assert: Operation should succeed and be reasonably fast
    assert!(delete_res.success);
    assert!(elapsed.as_millis() < 1000, "Delete should complete within 1 second even with {} records", record_count);
}

#[rstest]
#[tokio::test]
async fn test_rapid_sequential_deletes(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // Arrange: Create multiple records
    let context = advanced_delete_context.await;
    let mut record_ids = Vec::new();

    for i in 0..10 {
        let mut category_data = HashMap::new();
        category_data.insert("name".to_string(), string_to_proto_value(&format!("Rapid Delete {}", i)));

        let create_req = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.category_table.clone(),
            data: category_data,
        };
        let create_res = post_table_data(&context.pool, create_req, &context.indexer_tx).await.unwrap();
        record_ids.push(create_res.inserted_id);
    }

    // Act: Delete all records rapidly in sequence
    let start_time = std::time::Instant::now();
    for &record_id in &record_ids {
        let delete_req = DeleteTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.category_table.clone(),
            record_id,
        };
        let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
        assert!(delete_res.success, "Delete of record {} should succeed", record_id);
    }
    let elapsed = start_time.elapsed();

    // Assert: All deletes should complete in reasonable time
    assert!(elapsed.as_millis() < 5000, "10 sequential deletes should complete within 5 seconds");

    // Verify all records are deleted
    let count_query = format!(
        "SELECT COUNT(*) FILTER (WHERE deleted = true) as deleted_count FROM \"{}\".\"{}\"",
        context.profile_name, context.category_table
    );
    let deleted_count: i64 = sqlx::query_scalar(&count_query).fetch_one(&context.pool).await.unwrap();
    assert_eq!(deleted_count as usize, record_ids.len(), "All records should be marked as deleted");
}

// ========================================================================
// Error Message Quality and Handling Tests
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_error_messages_are_descriptive(#[future] pool: PgPool) {
    let pool = pool.await;

    // Test profile not found error
    let request = DeleteTableDataRequest {
        profile_name: "NonExistentProfile123".to_string(),
        table_name: "test_table".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err());
    let error = result.unwrap_err();
    assert_eq!(error.code(), tonic::Code::NotFound);
    assert_eq!(error.message(), "Profile not found");
}

#[rstest]
#[tokio::test]
async fn test_table_not_found_error_message(
    #[future] existing_profile: (PgPool, String, i64),
) {
    let (pool, profile_name, _) = existing_profile.await;
    let request = DeleteTableDataRequest {
        profile_name,
        table_name: "definitely_does_not_exist_12345".to_string(),
        record_id: 1,
    };
    let result = delete_table_data(&pool, request).await;
    assert!(result.is_err());
    let error = result.unwrap_err();
    assert_eq!(error.code(), tonic::Code::NotFound);
    assert_eq!(error.message(), "Table not found in profile");
}

// ========================================================================
// Database State Consistency Tests
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_delete_maintains_foreign_key_constraints(
    #[future] advanced_delete_context: AdvancedDeleteContext,
) {
    // This test ensures that soft deletes don't interfere with FK constraint validation
    let context = advanced_delete_context.await;

    // Create category
    let mut category_data = HashMap::new();
    category_data.insert("name".to_string(), string_to_proto_value("FK Test Category"));
    let category_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_res = post_table_data(&context.pool, category_req, &context.indexer_tx).await.unwrap();
    let category_id = category_res.inserted_id;

    // Create product referencing the category
    let mut product_data = HashMap::new();
    product_data.insert("name".to_string(), string_to_proto_value("FK Test Product"));
    product_data.insert(
        format!("{}_id", context.category_table),
        Value { kind: Some(Kind::NumberValue(category_id as f64)) },
    );
    let product_req = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };
    let product_res = post_table_data(&context.pool, product_req, &context.indexer_tx).await.unwrap();

    // Soft delete the category
    let delete_req = DeleteTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        record_id: category_id,
    };
    let delete_res = delete_table_data(&context.pool, delete_req).await.unwrap();
    assert!(delete_res.success);

    // The product should still exist and reference the soft-deleted category
    let fk_query = format!(
        "SELECT \"{}_id\" FROM \"{}\".\"{}\" WHERE id = $1",
        context.category_table, context.profile_name, context.product_table
    );
    let fk_value: i64 = sqlx::query_scalar(&fk_query)
        .bind(product_res.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    assert_eq!(fk_value, category_id, "Foreign key should still point to soft-deleted category");
}
@@ -1,485 +0,0 @@
// tests/tables_data/handlers/get_table_data_test.rs
use rstest::{fixture, rstest};
use server::tables_data::handlers::get_table_data;
use common::proto::multieko2::tables_data::GetTableDataRequest;
use crate::common::setup_test_db;
use sqlx::{PgPool, Row};
use tonic;
use chrono::{DateTime, Utc};
use serde_json::json;
use std::collections::HashMap;
use futures::future::join_all;
use rand::distr::Alphanumeric;
use rand::Rng;
use rust_decimal::Decimal;
use rust_decimal_macros::dec;
use server::table_definition::handlers::post_table_definition;
use server::tables_data::handlers::post_table_data;
use common::proto::multieko2::table_definition::{
    PostTableDefinitionRequest, ColumnDefinition as TableColumnDefinition, TableLink
};
use common::proto::multieko2::tables_data::PostTableDataRequest;
use prost_types::Value;
use prost_types::value::Kind;
use tokio::sync::mpsc;
use server::indexer::IndexCommand;

#[fixture]
async fn pool() -> PgPool {
    setup_test_db().await
}

#[fixture]
async fn closed_pool(#[future] pool: PgPool) -> PgPool {
    let pool = pool.await;
    pool.close().await;
    pool
}

#[fixture]
async fn schema(#[future] pool: PgPool) -> (PgPool, String, i64) {
    let pool = pool.await;
    let schema_name = format!("testschema_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());

    // Insert into schemas table instead of profiles
    let schema = sqlx::query!(
        "INSERT INTO schemas (name) VALUES ($1) RETURNING id",
        schema_name
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    // Create the actual PostgreSQL schema
    let create_schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", schema_name);
    sqlx::query(&create_schema_sql)
        .execute(&pool)
        .await
        .unwrap();

    (pool, schema_name, schema.id)
}

#[fixture]
async fn table_definition(#[future] schema: (PgPool, String, i64)) -> (PgPool, String, String, i64) {
    let (pool, schema_name, schema_id) = schema.await;
    let table_name = format!("test_table_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());

    // Define columns and indexes for the table
    let columns = json!([
        "\"name\" TEXT",
        "\"age\" INTEGER",
        "\"email\" TEXT",
        "\"is_active\" BOOLEAN"
    ]);
    let indexes = json!([]);

    // Use schema_id instead of profile_id
    let table_def = sqlx::query!(
        "INSERT INTO table_definitions (schema_id, table_name, columns, indexes) VALUES ($1, $2, $3, $4) RETURNING id",
        schema_id,
        table_name,
        columns,
        indexes
    )
    .fetch_one(&pool)
    .await
    .unwrap();

    // Create the actual table in the schema
    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
    let create_table = format!(
        r#"
        CREATE TABLE {} (
            id BIGSERIAL PRIMARY KEY,
            deleted BOOLEAN NOT NULL DEFAULT FALSE,
            name TEXT,
            age INTEGER,
            email TEXT,
            is_active BOOLEAN,
            created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
        )
        "#,
        qualified_table
    );

    sqlx::query(&create_table)
        .execute(&pool)
        .await
        .unwrap();

    (pool, schema_name, table_name, table_def.id)
}

#[fixture]
async fn regular_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
    let (pool, schema_name, table_name, _) = table_definition.await;

    // Insert a record with all fields
    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
    let query = format!(
        r#"INSERT INTO {} (name, age, email, is_active)
           VALUES ($1, $2, $3, $4)
           RETURNING id"#,
        qualified_table
    );

    let record = sqlx::query(&query)
        .bind("John Doe")
        .bind(30)
        .bind("john@example.com")
        .bind(true)
        .fetch_one(&pool)
        .await
        .unwrap();

    let id: i64 = record.get("id");
    (pool, schema_name, table_name, id)
}

#[fixture]
async fn null_fields_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
    let (pool, schema_name, table_name, _) = table_definition.await;

    // Insert a record with only basic fields (all others will be NULL)
    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
    let query = format!(
        r#"INSERT INTO {} DEFAULT VALUES
           RETURNING id"#,
        qualified_table
    );

    let record = sqlx::query(&query)
        .fetch_one(&pool)
        .await
        .unwrap();

    let id: i64 = record.get("id");
    (pool, schema_name, table_name, id)
}

#[fixture]
async fn deleted_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
    let (pool, schema_name, table_name, _) = table_definition.await;

    // Insert a deleted record. The original used a literal `VALUES (true)` while
    // still calling `.bind(true)`, leaving an unused parameter; use a placeholder
    // so the bind actually applies.
    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
    let query = format!(
        r#"INSERT INTO {} (deleted)
           VALUES ($1)
           RETURNING id"#,
        qualified_table
    );

    let record = sqlx::query(&query)
        .bind(true)
        .fetch_one(&pool)
        .await
        .unwrap();

    let id: i64 = record.get("id");
    (pool, schema_name, table_name, id)
}

async fn assert_response_matches(pool: &PgPool, schema_name: &str, table_name: &str, id: i64, response: &HashMap<String, String>) {
    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
    let columns = "id, deleted, name, age, email, is_active";
    let query = format!(r#"SELECT {} FROM {} WHERE id = $1"#, columns, qualified_table);

    let row = sqlx::query(&query)
        .bind(id)
        .fetch_one(pool)
        .await
        .unwrap();

    assert_eq!(row.get::<i64, _>("id").to_string(), response["id"]);
    assert_eq!(row.get::<bool, _>("deleted").to_string(), response["deleted"]);

    // Check optional fields
    let name: Option<String> = row.try_get("name").unwrap_or(None);
    assert_eq!(name.unwrap_or_default(), response["name"]);

    let age: Option<i32> = row.try_get("age").unwrap_or(None);
    assert_eq!(age.map(|v| v.to_string()).unwrap_or_default(), response["age"]);

    let email: Option<String> = row.try_get("email").unwrap_or(None);
    assert_eq!(email.unwrap_or_default(), response["email"]);

    let is_active: Option<bool> = row.try_get("is_active").unwrap_or(None);
    assert_eq!(is_active.map(|v| v.to_string()).unwrap_or_default(), response["is_active"]);
}

async fn cleanup_test_data(pool: &PgPool, schema_name: &str, table_name: &str) {
    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
    let _ = sqlx::query(&format!(r#"DROP TABLE IF EXISTS {} CASCADE"#, qualified_table))
        .execute(pool)
        .await;

    let _ = sqlx::query!("DELETE FROM table_definitions WHERE table_name = $1", table_name)
        .execute(pool)
        .await;

    let _ = sqlx::query(&format!(r#"DROP SCHEMA IF EXISTS "{}" CASCADE"#, schema_name))
        .execute(pool)
        .await;

    let _ = sqlx::query!("DELETE FROM schemas WHERE name = $1", schema_name)
        .execute(pool)
        .await;
}

#[rstest]
#[tokio::test]
async fn test_get_table_data_success(
    #[future] regular_record: (PgPool, String, String, i64),
) {
    let (pool, schema_name, table_name, id) = regular_record.await;

    let request = GetTableDataRequest {
        profile_name: schema_name.clone(),
        table_name: table_name.clone(),
        id,
    };

    let response = get_table_data(&pool, request).await.unwrap();

    assert_eq!(response.data["id"], id.to_string());
    assert_eq!(response.data["name"], "John Doe");
    assert_eq!(response.data["age"], "30");
    assert_eq!(response.data["email"], "john@example.com");
    assert_eq!(response.data["is_active"], "true");
    assert_eq!(response.data["deleted"], "false");

    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;

    cleanup_test_data(&pool, &schema_name, &table_name).await;
}

#[rstest]
#[tokio::test]
async fn test_get_optional_fields_null(
    #[future] null_fields_record: (PgPool, String, String, i64),
) {
    let (pool, schema_name, table_name, id) = null_fields_record.await;

    let request = GetTableDataRequest {
        profile_name: schema_name.clone(),
        table_name: table_name.clone(),
        id,
    };

    let response = get_table_data(&pool, request).await.unwrap();

    assert_eq!(response.data["name"], "");
    assert_eq!(response.data["age"], "");
    assert_eq!(response.data["email"], "");
    assert_eq!(response.data["is_active"], "");
    assert_eq!(response.data["deleted"], "false");

    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;

    cleanup_test_data(&pool, &schema_name, &table_name).await;
}

#[rstest]
#[tokio::test]
async fn test_get_nonexistent_id(
    #[future] table_definition: (PgPool, String, String, i64),
) {
    let (pool, schema_name, table_name, _) = table_definition.await;

    let request = GetTableDataRequest {
        profile_name: schema_name.clone(),
        table_name: table_name.clone(),
        id: 9999,
    };

    let result = get_table_data(&pool, request).await;

    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);

    cleanup_test_data(&pool, &schema_name, &table_name).await;
}

#[rstest]
#[tokio::test]
async fn test_get_deleted_record(
    #[future] deleted_record: (PgPool, String, String, i64),
) {
    let (pool, schema_name, table_name, id) = deleted_record.await;

    let request = GetTableDataRequest {
        profile_name: schema_name.clone(),
        table_name: table_name.clone(),
        id,
    };

    let result = get_table_data(&pool, request).await;

    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);

    cleanup_test_data(&pool, &schema_name, &table_name).await;
}

#[rstest]
#[tokio::test]
async fn test_get_database_error(
    #[future] closed_pool: PgPool,
) {
    let closed_pool = closed_pool.await;

    let request = GetTableDataRequest {
        profile_name: "test".into(),
        table_name: "test".into(),
        id: 1,
    };

    let result = get_table_data(&closed_pool, request).await;

    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

#[rstest]
#[tokio::test]
async fn test_get_special_characters(
    #[future] table_definition: (PgPool, String, String, i64),
) {
    let (pool, schema_name, table_name, _) = table_definition.await;

    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
    let query = format!(
        r#"INSERT INTO {} (name, email)
           VALUES ($1, $2)
           RETURNING id"#,
        qualified_table
    );

    let record = sqlx::query(&query)
        .bind("Náměstí ČR")
        .bind("čšěř@example.com")
        .fetch_one(&pool)
        .await
        .unwrap();

    let id: i64 = record.get("id");

    let request = GetTableDataRequest {
        profile_name: schema_name.clone(),
        table_name: table_name.clone(),
        id,
    };

    let response = get_table_data(&pool, request).await.unwrap();

    assert_eq!(response.data["name"], "Náměstí ČR");
    assert_eq!(response.data["email"], "čšěř@example.com");

    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;

    cleanup_test_data(&pool, &schema_name, &table_name).await;
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_get_max_length_fields(
|
||||
#[future] table_definition: (PgPool, String, String, i64),
|
||||
) {
|
||||
let (pool, schema_name, table_name, _) = table_definition.await;
|
||||
|
||||
let long_name = "a".repeat(255);
|
||||
let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
|
||||
let query = format!(
|
||||
r#"INSERT INTO {} (name)
|
||||
VALUES ($1)
|
||||
RETURNING id"#,
|
||||
qualified_table
|
||||
);
|
||||
|
||||
let record = sqlx::query(&query)
|
||||
.bind(&long_name)
|
||||
.fetch_one(&pool)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let id: i64 = record.get("id");
|
||||
|
||||
let request = GetTableDataRequest {
|
||||
profile_name: schema_name.clone(),
|
||||
table_name: table_name.clone(),
|
||||
id,
|
||||
};
|
||||
|
||||
let response = get_table_data(&pool, request).await.unwrap();
|
||||
|
||||
assert_eq!(response.data["name"], long_name);
|
||||
assert_eq!(response.data["name"].len(), 255);
|
||||
|
||||
assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;
|
||||
|
||||
cleanup_test_data(&pool, &schema_name, &table_name).await;
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_get_invalid_profile(
|
||||
#[future] pool: PgPool,
|
||||
) {
|
||||
let pool = pool.await;
|
||||
|
||||
let request = GetTableDataRequest {
|
||||
profile_name: "non_existent_profile".into(),
|
||||
table_name: "test_table".into(),
|
||||
id: 1,
|
||||
};
|
||||
|
||||
let result = get_table_data(&pool, request).await;
|
||||
|
||||
assert!(result.is_err());
|
||||
assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_get_invalid_table(
|
||||
#[future] schema: (PgPool, String, i64),
|
||||
) {
|
||||
let (pool, schema_name, _) = schema.await;
|
||||
|
||||
let request = GetTableDataRequest {
|
||||
profile_name: schema_name.clone(),
|
||||
table_name: "non_existent_table".into(),
|
||||
id: 1,
|
||||
};
|
||||
|
||||
let result = get_table_data(&pool, request).await;
|
||||
|
||||
assert!(result.is_err());
|
||||
assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
|
||||
|
||||
cleanup_test_data(&pool, &schema_name, "non_existent_table").await;
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_get_invalid_column(
|
||||
#[future] regular_record: (PgPool, String, String, i64),
|
||||
) {
|
||||
let (pool, schema_name, table_name, id) = regular_record.await;
|
||||
|
||||
let request = GetTableDataRequest {
|
||||
profile_name: schema_name.clone(),
|
||||
table_name: table_name.clone(),
|
||||
id,
|
||||
};
|
||||
|
||||
let result = get_table_data(&pool, request).await;
|
||||
|
||||
assert!(result.is_ok()); // Should still succeed as we're not filtering columns
|
||||
|
||||
cleanup_test_data(&pool, &schema_name, &table_name).await;
|
||||
}
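
// Note: `include!` splices the named file's tokens into this module at compile
// time, so the fixtures and helpers defined above remain in scope for the
// tests that continue in get_table_data_test2.rs.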

include!("get_table_data_test2.rs");

File diff suppressed because it is too large

@@ -1,7 +1,4 @@
// tests/tables_data/mod.rs
// pub mod post_table_data_test;
// pub mod put_table_data_test;
// pub mod delete_table_data_test;
pub mod get_table_data_test;

// pub mod get_table_data_count_test;
// pub mod get_table_data_by_position_test;

@@ -1,484 +0,0 @@
// tests/tables_data/handlers/post_table_data_test.rs

use rstest::{fixture, rstest};
use sqlx::PgPool;
use std::collections::HashMap;
use prost_types::Value;
use prost_types::value::Kind;
use common::proto::multieko2::tables_data::{PostTableDataRequest, PostTableDataResponse};
use common::proto::multieko2::table_definition::{
    ColumnDefinition as TableColumnDefinition, PostTableDefinitionRequest, TableLink,
};
use server::tables_data::handlers::post_table_data;
use server::table_definition::handlers::post_table_definition;
use crate::common::setup_test_db;
use tonic;
use chrono::Utc;
use sqlx::types::chrono::DateTime;
use tokio::sync::mpsc;
use server::indexer::IndexCommand;
use sqlx::Row;
use rand::distr::Alphanumeric;
use rand::Rng;
use rust_decimal::prelude::FromPrimitive;

// Helper function to generate unique identifiers for test isolation
fn generate_unique_id() -> String {
    rand::rng()
        .sample_iter(&Alphanumeric)
        .take(8)
        .map(char::from)
        .collect::<String>()
        .to_lowercase()
}

// Helper function to convert a string to a protobuf Value
fn string_to_proto_value(s: String) -> Value {
    Value {
        kind: Some(Kind::StringValue(s)),
    }
}

// Helper function to convert HashMap<String, String> to HashMap<String, Value>
fn convert_to_proto_values(data: HashMap<String, String>) -> HashMap<String, Value> {
    data.into_iter()
        .map(|(k, v)| (k, string_to_proto_value(v)))
        .collect()
}
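
// For example (illustrative values, not from any fixture):
//     let m = HashMap::from([("firma".to_string(), "ACME".to_string())]);
//     let proto = convert_to_proto_values(m);
//     // proto["firma"] == Value { kind: Some(Kind::StringValue("ACME".into())) }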

// Create the table definition for the adresar test with a unique name
async fn create_adresar_table(pool: &PgPool, table_name: &str, profile_name: &str) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition { name: "firma".into(), field_type: "text".into() },
            TableColumnDefinition { name: "kz".into(), field_type: "text".into() },
            TableColumnDefinition { name: "drc".into(), field_type: "text".into() },
            TableColumnDefinition { name: "ulica".into(), field_type: "text".into() },
            TableColumnDefinition { name: "psc".into(), field_type: "text".into() },
            TableColumnDefinition { name: "mesto".into(), field_type: "text".into() },
            TableColumnDefinition { name: "stat".into(), field_type: "text".into() },
            TableColumnDefinition { name: "banka".into(), field_type: "text".into() },
            TableColumnDefinition { name: "ucet".into(), field_type: "text".into() },
            TableColumnDefinition { name: "skladm".into(), field_type: "text".into() },
            TableColumnDefinition { name: "ico".into(), field_type: "text".into() },
            TableColumnDefinition { name: "kontakt".into(), field_type: "text".into() },
            TableColumnDefinition { name: "telefon".into(), field_type: "text".into() },
            TableColumnDefinition { name: "skladu".into(), field_type: "text".into() },
            TableColumnDefinition { name: "fax".into(), field_type: "text".into() },
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

// Test context structure to hold unique identifiers
#[derive(Clone)]
struct TestContext {
    pool: PgPool,
    profile_name: String,
    table_name: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
}

// Fixtures
#[fixture]
async fn test_context() -> TestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("test_profile_{}", unique_id);
    let table_name = format!("adresar_test_{}", unique_id);

    // Create the table for this specific test
    create_adresar_table(&pool, &table_name, &profile_name).await
        .expect("Failed to create test table");

    let (tx, _rx) = mpsc::channel(100);

    TestContext {
        pool,
        profile_name,
        table_name,
        indexer_tx: tx,
    }
}

#[fixture]
async fn closed_test_context() -> TestContext {
    let context = test_context().await;
    context.pool.close().await;
    context
}

#[fixture]
fn valid_request() -> HashMap<String, String> {
    let mut map = HashMap::new();
    map.insert("firma".into(), "Test Company".into());
    map.insert("kz".into(), "KZ123".into());
    map.insert("drc".into(), "DRC456".into());
    map.insert("ulica".into(), "Test Street".into());
    map.insert("psc".into(), "12345".into());
    map.insert("mesto".into(), "Test City".into());
    map.insert("stat".into(), "Test Country".into());
    map.insert("banka".into(), "Test Bank".into());
    map.insert("ucet".into(), "123456789".into());
    map.insert("skladm".into(), "Warehouse M".into());
    map.insert("ico".into(), "12345678".into());
    map.insert("kontakt".into(), "John Doe".into());
    map.insert("telefon".into(), "+421123456789".into());
    map.insert("skladu".into(), "Warehouse U".into());
    map.insert("fax".into(), "+421123456700".into());
    map
}

#[fixture]
fn minimal_request() -> HashMap<String, String> {
    let mut map = HashMap::new();
    map.insert("firma".into(), "Required Only".into());
    map
}

fn create_table_request(context: &TestContext, data: HashMap<String, String>) -> PostTableDataRequest {
    PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data: convert_to_proto_values(data),
    }
}

async fn assert_table_response(context: &TestContext, response: &PostTableDataResponse, expected: &HashMap<String, String>) {
    // Use a dynamic query since the table is created at runtime with a unique name
    let query = format!(
        r#"SELECT * FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    // Get values from the row dynamically
    let firma: String = row.get("firma");
    let deleted: bool = row.get("deleted");

    assert_eq!(firma, expected["firma"]);
    assert!(!deleted);

    // Check optional fields
    let check_field = |field: &str, expected_value: &str| {
        let db_value: Option<String> = row.get(field);
        assert_eq!(db_value.as_deref().unwrap_or(""), expected_value);
    };

    check_field("kz", expected.get("kz").unwrap_or(&String::new()));
    check_field("drc", expected.get("drc").unwrap_or(&String::new()));
    check_field("ulica", expected.get("ulica").unwrap_or(&String::new()));
    check_field("psc", expected.get("psc").unwrap_or(&String::new()));
    check_field("mesto", expected.get("mesto").unwrap_or(&String::new()));
    check_field("stat", expected.get("stat").unwrap_or(&String::new()));
    check_field("banka", expected.get("banka").unwrap_or(&String::new()));
    check_field("ucet", expected.get("ucet").unwrap_or(&String::new()));
    check_field("skladm", expected.get("skladm").unwrap_or(&String::new()));
    check_field("ico", expected.get("ico").unwrap_or(&String::new()));
    check_field("kontakt", expected.get("kontakt").unwrap_or(&String::new()));
    check_field("telefon", expected.get("telefon").unwrap_or(&String::new()));
    check_field("skladu", expected.get("skladu").unwrap_or(&String::new()));
    check_field("fax", expected.get("fax").unwrap_or(&String::new()));

    // Handle timestamp conversion
    let created_at: Option<chrono::DateTime<Utc>> = row.get("created_at");
    assert!(created_at.unwrap() <= Utc::now());
}
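
// `check_field` closes over `row`, so every optional column is verified against
// the single fetched record without re-querying; a NULL column decodes to None
// and, via unwrap_or(""), compares equal to the empty-string default.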

#[rstest]
#[tokio::test]
async fn test_create_table_data_success(
    #[future] test_context: TestContext,
    valid_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let request = create_table_request(&context, valid_request.clone());
    let response = post_table_data(&context.pool, request, &context.indexer_tx).await.unwrap();

    assert!(response.inserted_id > 0);
    assert!(response.success);
    assert_eq!(response.message, "Data inserted successfully");
    assert_table_response(&context, &response, &valid_request).await;
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_whitespace_trimming(
    #[future] test_context: TestContext,
    valid_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let mut request = valid_request;
    request.insert("firma".into(), " Test Company ".into());
    request.insert("telefon".into(), " +421123456789 ".into());

    let response = post_table_data(&context.pool, create_table_request(&context, request), &context.indexer_tx).await.unwrap();

    let query = format!(
        r#"SELECT firma, telefon FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let firma: String = row.get("firma");
    let telefon: Option<String> = row.get("telefon");

    assert_eq!(firma, "Test Company");
    assert_eq!(telefon.unwrap(), "+421123456789");
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_empty_optional_fields(
    #[future] test_context: TestContext,
    valid_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let mut request = valid_request;
    request.insert("telefon".into(), " ".into());

    let response = post_table_data(&context.pool, create_table_request(&context, request), &context.indexer_tx).await.unwrap();

    let query = format!(
        r#"SELECT telefon FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let telefon: Option<String> = sqlx::query_scalar(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    assert!(telefon.is_none());
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_minimal_request(
    #[future] test_context: TestContext,
    minimal_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let response = post_table_data(&context.pool, create_table_request(&context, minimal_request.clone()), &context.indexer_tx).await.unwrap();
    assert!(response.inserted_id > 0);
    assert_table_response(&context, &response, &minimal_request).await;
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_telefon_length_limit(
    #[future] test_context: TestContext,
    valid_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let mut request = valid_request;
    request.insert("telefon".into(), "1".repeat(16));

    let result = post_table_data(&context.pool, create_table_request(&context, request), &context.indexer_tx).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_special_characters(
    #[future] test_context: TestContext,
    valid_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let mut request = valid_request;
    request.insert("ulica".into(), "Náměstí 28. října 123/456".into());

    let response = post_table_data(&context.pool, create_table_request(&context, request), &context.indexer_tx).await.unwrap();

    let query = format!(
        r#"SELECT ulica FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let ulica: Option<String> = row.get("ulica");
    assert_eq!(ulica.unwrap(), "Náměstí 28. října 123/456");
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_database_error(
    #[future] closed_test_context: TestContext,
    minimal_request: HashMap<String, String>,
) {
    let context = closed_test_context.await;
    let result = post_table_data(&context.pool, create_table_request(&context, minimal_request), &context.indexer_tx).await;
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_optional_fields_null_vs_empty(
    #[future] test_context: TestContext,
    valid_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let mut request = valid_request;
    request.insert("telefon".into(), "".into());

    let response = post_table_data(&context.pool, create_table_request(&context, request), &context.indexer_tx).await.unwrap();

    let query = format!(
        r#"SELECT telefon FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let telefon: Option<String> = sqlx::query_scalar(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    assert!(telefon.is_none());
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_field_length_limits(
    #[future] test_context: TestContext,
    valid_request: HashMap<String, String>,
) {
    let context = test_context.await;
    let mut request = valid_request;
    request.insert("firma".into(), "a".repeat(255));
    request.insert("telefon".into(), "1".repeat(15)); // Within limits

    let response = post_table_data(&context.pool, create_table_request(&context, request), &context.indexer_tx).await.unwrap();

    let query = format!(
        r#"SELECT firma, telefon FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let firma: String = row.get("firma");
    let telefon: Option<String> = row.get("telefon");

    assert_eq!(firma.len(), 255);
    assert_eq!(telefon.unwrap().len(), 15);
}

#[rstest]
#[tokio::test]
async fn test_create_table_data_with_null_values(
    #[future] test_context: TestContext,
) {
    let context = test_context.await;

    // Create a request with some null values
    let mut data = HashMap::new();
    data.insert("firma".into(), string_to_proto_value("Test Company".into()));
    data.insert("telefon".into(), Value { kind: Some(Kind::NullValue(0)) }); // Explicit null
    data.insert("ulica".into(), Value { kind: None }); // Another way to represent null

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let response = post_table_data(&context.pool, request, &context.indexer_tx).await.unwrap();

    let query = format!(
        r#"SELECT firma, telefon, ulica FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let firma: String = row.get("firma");
    let telefon: Option<String> = row.get("telefon");
    let ulica: Option<String> = row.get("ulica");

    assert_eq!(firma, "Test Company");
    assert!(telefon.is_none());
    assert!(ulica.is_none());
}

include!("post_table_data_test2.rs");
include!("post_table_data_test3.rs");
include!("post_table_data_test4.rs");
include!("post_table_data_test5.rs");

@@ -1,484 +0,0 @@
// tests/tables_data/handlers/post_table_data_test2.rs

// ========= Additional helper functions for test2 =========

async fn create_test_indexer_channel() -> mpsc::Sender<IndexCommand> {
    let (tx, mut rx) = mpsc::channel(100);

    // Spawn a task to consume indexer messages to prevent blocking
    tokio::spawn(async move {
        while rx.recv().await.is_some() {
            // Just consume the messages
        }
    });

    tx
}
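
// Without the draining task, the bounded channel (capacity 100) would fill up
// once enough IndexCommand messages had been sent, after which an awaited
// `send` would stall and a `try_send` would start returning errors.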

// ========= Extended Data Type Validation Tests =========

#[rstest]
#[tokio::test]
async fn test_boolean_system_column_validation(#[future] test_context: TestContext) {
    let context = test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Test setting the deleted flag with a string (should fail)
    let mut data = HashMap::new();
    data.insert("firma".into(), "System Test Company".to_string());
    data.insert("deleted".into(), "true".to_string()); // String instead of boolean

    let result = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Expected boolean for column 'deleted'"));
    }
}

// ========= String Processing and Edge Cases =========

#[rstest]
#[tokio::test]
async fn test_unicode_special_characters_comprehensive(#[future] test_context: TestContext) {
    let context = test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let special_strings = vec![
        "José María González", // Accented characters
        "Москва", // Cyrillic
        "北京市", // Chinese
        "🚀 Tech Company 🌟", // Emoji
        "Line\nBreak\tTab", // Control characters
        "Quote\"Test'Apostrophe", // Quotes
        "SQL'; DROP TABLE test; --", // SQL injection attempt
        "Price: $1,000.50 (50% off!)", // Special symbols
    ];

    for (i, test_string) in special_strings.into_iter().enumerate() {
        let mut data = HashMap::new();
        data.insert("firma".into(), test_string.to_string());
        data.insert("kz".into(), format!("TEST{}", i));

        let response = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await.unwrap();
        assert!(response.success, "Failed for string: '{}'", test_string);

        // Verify the data was stored correctly
        let query = format!(
            r#"SELECT firma FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.table_name
        );

        let stored_firma: Option<String> = sqlx::query_scalar::<_, Option<String>>(&query)
            .bind(response.inserted_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        assert_eq!(stored_firma.unwrap(), test_string.trim());
    }
}

#[rstest]
#[tokio::test]
async fn test_field_length_boundaries(#[future] test_context: TestContext) {
    let context = test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Test telefon field length validation (should reject >15 chars)
    let length_test_cases = vec![
        ("1234567890123456", true), // 16 chars - should fail
        ("123456789012345", false), // 15 chars - should pass
        ("", false), // Empty - should pass (becomes NULL)
        ("1", false), // Single char - should pass
    ];

    for (test_string, should_fail) in length_test_cases {
        let mut data = HashMap::new();
        data.insert("firma".into(), "Length Test Company".to_string());
        data.insert("telefon".into(), test_string.to_string());

        let result = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await;

        if should_fail {
            assert!(result.is_err(), "Should fail for telefon length: {}", test_string.len());
            if let Err(err) = result {
                assert_eq!(err.code(), tonic::Code::Internal);
                assert!(err.message().contains("Value too long for telefon"));
            }
        } else {
            assert!(result.is_ok(), "Should succeed for telefon length: {}", test_string.len());
        }
    }
}

// ========= NULL vs Empty String Handling =========

#[rstest]
#[tokio::test]
async fn test_empty_strings_become_null(#[future] test_context: TestContext) {
    let context = test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let test_cases = vec![
        ("", "empty_string"),
        (" ", "whitespace_only"),
        ("\t\n", "tabs_newlines"),
        (" Normal Value ", "padded_value"),
    ];

    for (input, test_name) in test_cases {
        let mut data = HashMap::new();
        data.insert("firma".into(), format!("Test {}", test_name));
        data.insert("ulica".into(), input.to_string());

        let response = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await.unwrap();
        assert!(response.success, "Failed for test case: {}", test_name);

        // Check what was actually stored
        let query = format!(
            r#"SELECT ulica FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.table_name
        );

        let stored_ulica: Option<String> = sqlx::query_scalar::<_, Option<String>>(&query)
            .bind(response.inserted_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        let trimmed = input.trim();
        if trimmed.is_empty() {
            assert!(stored_ulica.is_none(), "Empty/whitespace string should be NULL for: {}", test_name);
        } else {
            assert_eq!(stored_ulica.unwrap(), trimmed, "String should be trimmed for: {}", test_name);
        }
    }
}

// ========= Concurrent Operations Testing =========

#[rstest]
#[tokio::test]
async fn test_concurrent_inserts_same_table(#[future] test_context: TestContext) {
    let context = test_context.await;

    use futures::future::join_all;

    // Create multiple concurrent insert operations
    let futures = (0..10).map(|i| {
        let context = context.clone();
        async move {
            let indexer_tx = create_test_indexer_channel().await;
            let mut data = HashMap::new();
            data.insert("firma".into(), format!("Concurrent Company {}", i));
            data.insert("kz".into(), format!("CONC{}", i));
            data.insert("mesto".into(), format!("City {}", i));

            post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await
        }
    });

    let results = join_all(futures).await;

    // All inserts should succeed
    for (i, result) in results.into_iter().enumerate() {
        assert!(result.is_ok(), "Concurrent insert {} should succeed", i);
    }

    // Verify all records were inserted
    let query = format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE firma LIKE 'Concurrent Company%'"#,
        context.profile_name, context.table_name
    );

    let count: i64 = sqlx::query_scalar::<_, i64>(&query)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert_eq!(count, 10);
}
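
// PgPool is internally reference-counted, so the `context.clone()` captured by
// each future shares a single connection pool; the ten inserts contend for
// pooled connections instead of opening ten separate ones.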

// ========= Error Scenarios =========

#[rstest]
#[tokio::test]
async fn test_invalid_column_names(#[future] test_context: TestContext) {
    let context = test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("firma".into(), "Valid Company".to_string());
    data.insert("nonexistent_column".into(), "Invalid".to_string());

    let result = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Invalid column: nonexistent_column"));
    }
}

#[rstest]
#[tokio::test]
async fn test_empty_data_request(#[future] test_context: TestContext) {
    let context = test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Try to insert completely empty data
    let data = HashMap::new();

    let result = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("No valid columns to insert"));
    }
}

// ========= Performance and Stress Testing =========

#[rstest]
#[tokio::test]
async fn test_rapid_sequential_inserts(#[future] test_context: TestContext) {
    let context = test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let start_time = std::time::Instant::now();

    // Perform rapid sequential inserts
    for i in 0..50 {
        let mut data = HashMap::new();
        data.insert("firma".into(), format!("Rapid Company {}", i));
        data.insert("kz".into(), format!("RAP{}", i));
        data.insert("telefon".into(), format!("+421{:09}", i));

        let response = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await.unwrap();
        assert!(response.success, "Rapid insert {} should succeed", i);
    }

    let duration = start_time.elapsed();
    println!("50 rapid inserts took: {:?}", duration);

    // Verify all records were inserted
    let query = format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE firma LIKE 'Rapid Company%'"#,
        context.profile_name, context.table_name
    );

    let count: i64 = sqlx::query_scalar::<_, i64>(&query)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert_eq!(count, 50);
}

// ========= SQL Injection Protection =========

#[rstest]
#[tokio::test]
async fn test_sql_injection_protection(#[future] test_context: TestContext) {
    let context = test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let injection_attempts = vec![
        "'; DROP TABLE users; --",
        "1; DELETE FROM adresar; --",
        "admin'; UPDATE adresar SET firma='hacked' WHERE '1'='1",
        "' OR '1'='1",
        "'; INSERT INTO adresar (firma) VALUES ('injected'); --",
        "Robert'); DROP TABLE students; --", // Classic Bobby Tables
    ];

    let injection_count = injection_attempts.len();

    for (i, injection) in injection_attempts.into_iter().enumerate() {
        let mut data = HashMap::new();
        data.insert("firma".into(), injection.to_string());
        data.insert("kz".into(), format!("INJ{}", i));

        // These should all succeed because values are properly parameterized
        let response = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await.unwrap();
        assert!(response.success, "SQL injection attempt should be safely handled: {}", injection);

        // Verify the injection attempt was stored as literal text
        let query = format!(
            r#"SELECT firma FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.table_name
        );

        let stored_firma: String = sqlx::query_scalar::<_, String>(&query)
            .bind(response.inserted_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        assert_eq!(stored_firma, injection);
    }

    // Verify the table still exists and has the expected number of records
    let query = format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE kz LIKE 'INJ%'"#,
        context.profile_name, context.table_name
    );

    let count: i64 = sqlx::query_scalar::<_, i64>(&query)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert_eq!(count, injection_count as i64);
}

// ========= Large Data Testing =========

#[rstest]
#[tokio::test]
async fn test_large_text_fields(#[future] test_context: TestContext) {
    let context = test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Test various large text sizes (except telefon, which has length limits)
    let sizes = vec![1000, 5000, 10000];

    for size in sizes {
        let large_text = "A".repeat(size);

        let mut data = HashMap::new();
        data.insert("firma".into(), large_text.clone());
        data.insert("ulica".into(), format!("Street with {} chars", size));

        let response = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await.unwrap();
        assert!(response.success, "Failed for size: {}", size);

        // Verify the large text was stored correctly
        let query = format!(
            r#"SELECT firma FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.table_name
        );

        let stored_firma: String = sqlx::query_scalar::<_, String>(&query)
            .bind(response.inserted_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        assert_eq!(stored_firma.len(), size);
        assert_eq!(stored_firma, large_text);
    }
}
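
// PostgreSQL TEXT columns accept values up to the 1 GB field limit, so a
// 10,000-character payload is nowhere near a storage boundary; this case
// mainly exercises parameter binding and round-tripping of large strings.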

// ========= Indexer Integration Testing =========

#[rstest]
#[tokio::test]
async fn test_indexer_command_generation(#[future] test_context: TestContext) {
    let context = test_context.await;
    let (indexer_tx, mut indexer_rx) = mpsc::channel(100);

    let mut data = HashMap::new();
    data.insert("firma".into(), "Indexer Test Company".to_string());
    data.insert("kz".into(), "IDX123".to_string());

    let response = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await.unwrap();
    assert!(response.success);

    // Check that the indexer command was sent
    let indexer_command = tokio::time::timeout(
        tokio::time::Duration::from_millis(100),
        indexer_rx.recv()
    ).await;

    assert!(indexer_command.is_ok());
    let command = indexer_command.unwrap().unwrap();

    match command {
        IndexCommand::AddOrUpdate(data) => {
            assert_eq!(data.table_name, context.table_name);
            assert_eq!(data.row_id, response.inserted_id);
        },
        IndexCommand::Delete(_) => panic!("Expected AddOrUpdate command, got Delete"),
    }
}

#[rstest]
#[tokio::test]
async fn test_indexer_failure_resilience(#[future] test_context: TestContext) {
    let context = test_context.await;

    // Create a closed channel to simulate indexer failure
    let (indexer_tx, indexer_rx) = mpsc::channel(1);
    drop(indexer_rx); // Close the receiver to simulate failure

    let mut data = HashMap::new();
    data.insert("firma".into(), "Resilience Test Company".to_string());
    data.insert("kz".into(), "RES123".to_string());

    // Insert should still succeed even if the indexer fails
    let response = post_table_data(&context.pool, create_table_request(&context, data), &indexer_tx).await.unwrap();
    assert!(response.success);

    // Verify data was inserted despite the indexer failure
    let query = format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE kz = 'RES123'"#,
        context.profile_name, context.table_name
    );

    let count: i64 = sqlx::query_scalar::<_, i64>(&query)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert_eq!(count, 1);
}

// ========= Profile and Table Validation =========

#[rstest]
#[tokio::test]
async fn test_nonexistent_profile_error(#[future] test_context: TestContext) {
    let context = test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("firma".into(), "Test Company".to_string());

    let invalid_request = PostTableDataRequest {
        profile_name: "nonexistent_profile".into(),
        table_name: context.table_name.clone(),
        data: convert_to_proto_values(data),
    };

    let result = post_table_data(&context.pool, invalid_request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::NotFound);
        assert!(err.message().contains("Profile not found"));
    }
}

#[rstest]
#[tokio::test]
async fn test_nonexistent_table_error(#[future] test_context: TestContext) {
    let context = test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("firma".into(), "Test Company".to_string());

    let invalid_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: "nonexistent_table".into(),
        data: convert_to_proto_values(data),
    };

    let result = post_table_data(&context.pool, invalid_request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::NotFound);
        assert!(err.message().contains("Table not found"));
    }
}

@@ -1,847 +0,0 @@
// tests/tables_data/handlers/post_table_data_test3.rs

// ========================================================================
// ADDITIONAL HELPER FUNCTIONS FOR TEST3
// ========================================================================

// Helpers to create the different Value types
fn create_string_value(s: &str) -> Value {
    Value { kind: Some(Kind::StringValue(s.to_string())) }
}

fn create_number_value(n: f64) -> Value {
    Value { kind: Some(Kind::NumberValue(n)) }
}

fn create_bool_value(b: bool) -> Value {
    Value { kind: Some(Kind::BoolValue(b)) }
}

fn create_null_value() -> Value {
    Value { kind: Some(Kind::NullValue(0)) }
}
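
// prost's NullValue is a single-variant protobuf enum whose value is 0, hence
// Kind::NullValue(0) as the explicit-null spelling; the null-values test in
// post_table_data_test.rs shows Value { kind: None } is treated the same way.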

// ========================================================================
// FIXTURES AND CONTEXT SETUP FOR ADVANCED TESTS
// ========================================================================

#[derive(Clone)]
struct DataTypeTestContext {
    pool: PgPool,
    profile_name: String,
    table_name: String,
}

#[derive(Clone)]
struct ForeignKeyTestContext {
    pool: PgPool,
    profile_name: String,
    category_table: String,
    product_table: String,
    order_table: String,
}

// Create a table with various data types for comprehensive testing
async fn create_data_type_test_table(pool: &PgPool, table_name: &str, profile_name: &str) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition { name: "my_text".into(), field_type: "text".into() },
            TableColumnDefinition { name: "my_bool".into(), field_type: "boolean".into() },
            TableColumnDefinition { name: "my_timestamp".into(), field_type: "timestamp".into() },
            TableColumnDefinition { name: "my_bigint".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "my_money".into(), field_type: "decimal(19,4)".into() },
            TableColumnDefinition { name: "my_date".into(), field_type: "date".into() },
            TableColumnDefinition { name: "my_decimal".into(), field_type: "decimal(10,2)".into() },
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

// Create foreign key test tables (category -> product -> order)
async fn create_foreign_key_test_tables(pool: &PgPool, profile_name: &str, category_table: &str, product_table: &str, order_table: &str) -> Result<(), tonic::Status> {
    // Create the category table first (no dependencies)
    let category_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: category_table.into(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "description".into(), field_type: "text".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, category_def).await?;

    // Create the product table with a required link to category
    let product_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: product_table.into(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "price".into(), field_type: "decimal(10,2)".into() },
        ],
        indexes: vec![],
        links: vec![
            TableLink { linked_table_name: category_table.into(), required: true },
        ],
    };
    post_table_definition(pool, product_def).await?;

    // Create the order table with a required link to product and an optional link to category
    let order_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: order_table.into(),
        columns: vec![
            TableColumnDefinition { name: "quantity".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "notes".into(), field_type: "text".into() },
        ],
        indexes: vec![],
        links: vec![
            TableLink { linked_table_name: product_table.into(), required: true },
            TableLink { linked_table_name: category_table.into(), required: false }, // Optional link
        ],
    };
    post_table_definition(pool, order_def).await?;

    Ok(())
}
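
// The link definitions above evidently generate foreign-key columns named
// "<linked_table>_id" on the referencing tables: the tests below address them
// with format!("{}_id", context.category_table).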

#[fixture]
async fn data_type_test_context() -> DataTypeTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("dtype_profile_{}", unique_id);
    let table_name = format!("dtype_table_{}", unique_id);

    create_data_type_test_table(&pool, &table_name, &profile_name).await
        .expect("Failed to create data type test table");

    DataTypeTestContext { pool, profile_name, table_name }
}

#[fixture]
async fn foreign_key_test_context() -> ForeignKeyTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("fk_profile_{}", unique_id);
    let category_table = format!("category_{}", unique_id);
    let product_table = format!("product_{}", unique_id);
    let order_table = format!("order_{}", unique_id);

    create_foreign_key_test_tables(&pool, &profile_name, &category_table, &product_table, &order_table).await
        .expect("Failed to create foreign key test tables");

    ForeignKeyTestContext { pool, profile_name, category_table, product_table, order_table }
}

// ========================================================================
// DATA TYPE VALIDATION TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_correct_data_types_success(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;
    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Test String"));
    data.insert("my_bool".into(), create_bool_value(true));
    data.insert("my_timestamp".into(), create_string_value("2024-01-15T10:30:00Z"));
    data.insert("my_bigint".into(), create_number_value(42.0));
    data.insert("my_money".into(), create_string_value("123.45")); // Use a string for decimal
    data.insert("my_decimal".into(), create_string_value("999.99")); // Use a string for decimal
    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };
    let response = post_table_data(&context.pool, request, &indexer_tx).await.unwrap();
    assert!(response.success);
    assert!(response.inserted_id > 0);

    // Verify the data was stored correctly
    let query = format!(
        r#"SELECT my_text, my_bool, my_timestamp, my_bigint FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );
    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let stored_text: String = row.get("my_text");
    let stored_bool: bool = row.get("my_bool");
    // Change this based on your actual column type in the schema:
    // If my_bigint is defined as "integer" in the table definition, use i32:
    let stored_bigint: i32 = row.get("my_bigint");
    // If my_bigint is defined as "biginteger" or "bigint" in the table definition, use i64:
    // let stored_bigint: i64 = row.get("my_bigint");

    assert_eq!(stored_text, "Test String");
    assert!(stored_bool);
    assert_eq!(stored_bigint, 42);
}

#[rstest]
#[tokio::test]
async fn test_type_mismatch_string_for_boolean(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Required field"));
    data.insert("my_bool".into(), create_string_value("true")); // String instead of boolean

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Expected boolean for column 'my_bool'"));
    }
}

#[rstest]
#[tokio::test]
async fn test_type_mismatch_string_for_integer(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Required field"));
    data.insert("my_bigint".into(), create_string_value("42")); // String instead of number

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Expected number for column 'my_bigint'"));
    }
}

#[rstest]
#[tokio::test]
async fn test_type_mismatch_number_for_boolean(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Required field"));
    data.insert("my_bool".into(), create_number_value(1.0)); // Number instead of boolean

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Expected boolean for column 'my_bool'"));
    }
}

#[rstest]
#[tokio::test]
async fn test_decimal_requires_string_not_number(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Required field"));
    data.insert("my_money".into(), create_number_value(123.45)); // Number instead of string for decimal

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Expected a string representation for decimal column 'my_money'"));
    }
}

#[rstest]
#[tokio::test]
async fn test_invalid_timestamp_format(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Required field"));
    data.insert("my_timestamp".into(), create_string_value("not-a-date"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Invalid timestamp for my_timestamp"));
    }
}

#[rstest]
#[tokio::test]
async fn test_float_for_integer_field(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Required field"));
    data.insert("my_bigint".into(), create_number_value(123.45)); // Float for an integer field

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_err());

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Expected integer for column 'my_bigint', but got a float"));
    }
}

#[rstest]
#[tokio::test]
async fn test_valid_timestamp_formats(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let valid_timestamps = vec![
        "2024-01-15T10:30:00Z",
        "2024-01-15T10:30:00+00:00",
        "2024-01-15T10:30:00.123Z",
        "2024-12-31T23:59:59Z",
        "1970-01-01T00:00:00Z", // Unix epoch
    ];

    for (i, timestamp) in valid_timestamps.into_iter().enumerate() {
        let mut data = HashMap::new();
        data.insert("my_text".into(), create_string_value(&format!("Test {}", i)));
        data.insert("my_timestamp".into(), create_string_value(timestamp));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Failed for timestamp: {}", timestamp);
    }
}
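
// Every accepted input above is an RFC 3339 timestamp, and the rejected
// "not-a-date" case in test_invalid_timestamp_format suggests the handler
// parses this column with an RFC 3339-style parser such as chrono's
// DateTime::parse_from_rfc3339.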

#[rstest]
#[tokio::test]
async fn test_boundary_integer_values(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Use safer boundary values that don't have f64 precision issues
    let boundary_values = vec![
        0.0,
        1.0,
        -1.0,
        2147483647.0, // i32::MAX (for INTEGER columns)
        -2147483648.0, // i32::MIN (for INTEGER columns)
    ];

    for (i, value) in boundary_values.into_iter().enumerate() {
        let mut data = HashMap::new();
        data.insert("my_text".into(), create_string_value(&format!("Boundary test {}", i)));
        data.insert("my_bigint".into(), create_number_value(value));
        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            data,
        };
        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Failed for boundary value: {}", value);
    }
}
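
// An f64 represents every integer of magnitude up to 2^53 exactly, so the
// i32::MAX / i32::MIN cases above survive the NumberValue round-trip without
// precision loss; boundaries beyond 2^53 could not be tested reliably this way.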

#[rstest]
#[tokio::test]
async fn test_null_values_for_all_types(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Required for test"));
    data.insert("my_bool".into(), create_null_value());
    data.insert("my_timestamp".into(), create_null_value());
    data.insert("my_bigint".into(), create_null_value());
    data.insert("my_money".into(), create_null_value());

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let response = post_table_data(&context.pool, request, &indexer_tx).await.unwrap();
    assert!(response.success);

    // Verify nulls were stored correctly
    let query = format!(
        r#"SELECT my_bool, my_timestamp, my_bigint FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );

    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let stored_bool: Option<bool> = row.get("my_bool");
    let stored_timestamp: Option<chrono::DateTime<Utc>> = row.get("my_timestamp");
    let stored_bigint: Option<i64> = row.get("my_bigint");

    assert!(stored_bool.is_none());
    assert!(stored_timestamp.is_none());
    assert!(stored_bigint.is_none());
}

// ========================================================================
// FOREIGN KEY CONSTRAINT TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_insert_with_valid_foreign_key(#[future] foreign_key_test_context: ForeignKeyTestContext) {
    let context = foreign_key_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // First, insert a category
    let mut category_data = HashMap::new();
    category_data.insert("name".into(), create_string_value("Electronics"));
    category_data.insert("description".into(), create_string_value("Electronic devices"));

    let category_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };

    let category_response = post_table_data(&context.pool, category_request, &indexer_tx).await.unwrap();
    let category_id = category_response.inserted_id;

    // Now insert a product with the valid category_id
    let mut product_data = HashMap::new();
    product_data.insert("name".into(), create_string_value("Laptop"));
    product_data.insert("price".into(), create_string_value("999.99")); // Use a string for decimal columns
    product_data.insert(format!("{}_id", context.category_table), create_number_value(category_id as f64));

    let product_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };

    let result = post_table_data(&context.pool, product_request, &indexer_tx).await;
    assert!(result.is_ok(), "Insert with valid foreign key should succeed");
}
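
// NOTE: the FK tests above and below all rely on the convention that a link
// to table `t` is stored in a column named `t_id`, as produced by the
// `format!("{}_id", ...)` calls at the insert sites. A tiny sketch of that
// naming rule (the table name is hypothetical, for illustration only):
#[test]
fn fk_column_naming_convention() {
    let linked_table = "categories_ab12cd34"; // hypothetical generated name
    assert_eq!(format!("{}_id", linked_table), "categories_ab12cd34_id");
}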

#[rstest]
#[tokio::test]
async fn test_insert_with_nonexistent_foreign_key(#[future] foreign_key_test_context: ForeignKeyTestContext) {
    let context = foreign_key_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Try to insert a product with a non-existent category_id
    let mut product_data = HashMap::new();
    product_data.insert("name".into(), create_string_value("Laptop"));
    product_data.insert("price".into(), create_string_value("999.99"));
    product_data.insert(format!("{}_id", context.category_table), create_number_value(99999.0)); // Non-existent ID

    let product_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };

    let result = post_table_data(&context.pool, product_request, &indexer_tx).await;
    assert!(result.is_err(), "Insert with non-existent foreign key should fail");

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::Internal);
        assert!(err.message().contains("Insert failed"));
    }
}

#[rstest]
#[tokio::test]
async fn test_insert_with_null_required_foreign_key(#[future] foreign_key_test_context: ForeignKeyTestContext) {
    let context = foreign_key_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Try to insert a product without category_id (a required foreign key)
    let mut product_data = HashMap::new();
    product_data.insert("name".into(), create_string_value("Laptop"));
    product_data.insert("price".into(), create_string_value("999.99"));
    // Intentionally omit category_id

    let product_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };

    let result = post_table_data(&context.pool, product_request, &indexer_tx).await;
    assert!(result.is_err(), "Insert without required foreign key should fail");

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::Internal);
        assert!(err.message().contains("Insert failed"));
    }
}

#[rstest]
#[tokio::test]
async fn test_insert_with_null_optional_foreign_key(#[future] foreign_key_test_context: ForeignKeyTestContext) {
    let context = foreign_key_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // First create a category and a product for the required FK
    let mut category_data = HashMap::new();
    category_data.insert("name".into(), create_string_value("Electronics"));

    let category_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };

    let category_response = post_table_data(&context.pool, category_request, &indexer_tx).await.unwrap();

    let mut product_data = HashMap::new();
    product_data.insert("name".into(), create_string_value("Laptop"));
    product_data.insert("price".into(), create_string_value("999.99"));
    product_data.insert(format!("{}_id", context.category_table), create_number_value(category_response.inserted_id as f64));

    let product_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };

    let product_response = post_table_data(&context.pool, product_request, &indexer_tx).await.unwrap();

    // Now insert an order with the required product_id but without the optional category_id
    let mut order_data = HashMap::new();
    order_data.insert("quantity".into(), create_number_value(2.0));
    order_data.insert("notes".into(), create_string_value("Test order"));
    order_data.insert(format!("{}_id", context.product_table), create_number_value(product_response.inserted_id as f64));
    // Intentionally omit the optional category_id

    let order_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.order_table.clone(),
        data: order_data,
    };

    let result = post_table_data(&context.pool, order_request, &indexer_tx).await;
    assert!(result.is_ok(), "Insert with NULL optional foreign key should succeed");
}

#[rstest]
#[tokio::test]
async fn test_multiple_foreign_keys_scenario(#[future] foreign_key_test_context: ForeignKeyTestContext) {
    let context = foreign_key_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Create category
    let mut category_data = HashMap::new();
    category_data.insert("name".into(), create_string_value("Books"));

    let category_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };

    let category_response = post_table_data(&context.pool, category_request, &indexer_tx).await.unwrap();

    // Create product
    let mut product_data = HashMap::new();
    product_data.insert("name".into(), create_string_value("Programming Book"));
    product_data.insert("price".into(), create_string_value("49.99"));
    product_data.insert(format!("{}_id", context.category_table), create_number_value(category_response.inserted_id as f64));

    let product_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };

    let product_response = post_table_data(&context.pool, product_request, &indexer_tx).await.unwrap();

    // Create order with both foreign keys
    let mut order_data = HashMap::new();
    order_data.insert("quantity".into(), create_number_value(3.0));
    order_data.insert("notes".into(), create_string_value("Bulk order"));
    order_data.insert(format!("{}_id", context.product_table), create_number_value(product_response.inserted_id as f64));
    order_data.insert(format!("{}_id", context.category_table), create_number_value(category_response.inserted_id as f64));

    let order_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.order_table.clone(),
        data: order_data,
    };

    let result = post_table_data(&context.pool, order_request, &indexer_tx).await;
    assert!(result.is_ok(), "Insert with multiple valid foreign keys should succeed");

    // Verify the data was inserted correctly
    let product_id_col = format!("{}_id", context.product_table);
    let category_id_col = format!("{}_id", context.category_table);

    let query = format!(
        r#"SELECT quantity, "{}", "{}" FROM "{}"."{}" WHERE id = $1"#,
        product_id_col, category_id_col, context.profile_name, context.order_table
    );

    let row = sqlx::query(&query)
        .bind(result.unwrap().inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    // quantity is defined as "integer" in the foreign key test context, so read it as i32
    let quantity: i32 = row.get("quantity");
    let stored_product_id: i64 = row.get(product_id_col.as_str());
    let stored_category_id: Option<i64> = row.get(category_id_col.as_str());

    assert_eq!(quantity, 3);
    assert_eq!(stored_product_id, product_response.inserted_id);
    assert_eq!(stored_category_id.unwrap(), category_response.inserted_id);
}

// ========================================================================
// ADDITIONAL EDGE CASE TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_extremely_large_decimal_numbers(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let large_decimals = vec![
        "1000000000.0000",
        "999999999999.99",
        "-999999999999.99",
        "0.0001",
    ];

    for (i, decimal_str) in large_decimals.into_iter().enumerate() {
        let mut data = HashMap::new();
        data.insert("my_text".into(), create_string_value(&format!("Large decimal test {}", i)));
        data.insert("my_money".into(), create_string_value(decimal_str));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Failed for large decimal: {}", decimal_str);
    }
}

#[rstest]
#[tokio::test]
async fn test_boolean_edge_cases(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let boolean_values = vec![true, false];

    for (i, bool_val) in boolean_values.into_iter().enumerate() {
        let mut data = HashMap::new();
        data.insert("my_text".into(), create_string_value(&format!("Boolean test {}", i)));
        data.insert("my_bool".into(), create_bool_value(bool_val));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            data,
        };

        let response = post_table_data(&context.pool, request, &indexer_tx).await.unwrap();

        // Verify the boolean was stored correctly
        let query = format!(
            r#"SELECT my_bool FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.table_name
        );

        let stored_bool: bool = sqlx::query_scalar::<_, bool>(&query)
            .bind(response.inserted_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        assert_eq!(stored_bool, bool_val);
    }
}

#[rstest]
#[tokio::test]
async fn test_decimal_precision_handling(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let decimal_values = vec![
        "0.01",
        "99.99",
        "123.45",
        "999.99",
        "-123.45",
    ];

    for (i, decimal_val) in decimal_values.into_iter().enumerate() {
        let mut data = HashMap::new();
        data.insert("my_text".into(), create_string_value(&format!("Decimal test {}", i)));
        data.insert("my_decimal".into(), create_string_value(decimal_val));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Failed for decimal value: {}", decimal_val);
    }
}

#[rstest]
#[tokio::test]
async fn test_invalid_decimal_string_formats(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    let invalid_decimals = vec![
        "not-a-number",
        "123.45.67",
        "abc123",
        "",
        " ",
    ];

    for (i, invalid_decimal) in invalid_decimals.into_iter().enumerate() {
        let mut data = HashMap::new();
        data.insert("my_text".into(), create_string_value(&format!("Invalid decimal test {}", i)));
        data.insert("my_decimal".into(), create_string_value(invalid_decimal));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;

        if invalid_decimal.trim().is_empty() {
            // Empty or whitespace-only strings should be treated as NULL and succeed
            assert!(result.is_ok(), "Empty string should be treated as NULL for: {:?}", invalid_decimal);
        } else {
            // Invalid decimal strings should fail
            assert!(result.is_err(), "Should fail for invalid decimal: {}", invalid_decimal);
            if let Err(err) = result {
                assert_eq!(err.code(), tonic::Code::InvalidArgument);
                assert!(err.message().contains("Invalid decimal string format"));
            }
        }
    }
}
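
// NOTE: a minimal sketch of the validation rule the branches above assume:
// whitespace-only input becomes NULL, anything unparseable is an
// InvalidArgument error. Illustrative only, not the actual handler code.
use std::str::FromStr;

fn parse_decimal_field(raw: &str) -> Result<Option<rust_decimal::Decimal>, String> {
    let trimmed = raw.trim();
    if trimmed.is_empty() {
        return Ok(None); // empty / whitespace-only => stored as NULL
    }
    rust_decimal::Decimal::from_str(trimmed)
        .map(Some)
        .map_err(|e| format!("Invalid decimal string format: {}", e))
}

#[test]
fn decimal_field_parsing_assumption() {
    assert_eq!(parse_decimal_field("  "), Ok(None));
    assert!(parse_decimal_field("123.45").unwrap().is_some());
    assert!(parse_decimal_field("123.45.67").is_err());
}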

#[rstest]
#[tokio::test]
async fn test_mixed_null_and_valid_data(#[future] data_type_test_context: DataTypeTestContext) {
    let context = data_type_test_context.await;
    let indexer_tx = create_test_indexer_channel().await;
    let mut data = HashMap::new();
    data.insert("my_text".into(), create_string_value("Mixed data test"));
    data.insert("my_bool".into(), create_bool_value(true));
    data.insert("my_timestamp".into(), create_null_value());
    data.insert("my_bigint".into(), create_number_value(42.0));
    data.insert("my_money".into(), create_null_value());
    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };
    let response = post_table_data(&context.pool, request, &indexer_tx).await.unwrap();
    assert!(response.success);

    // Verify mixed null and valid data was stored correctly
    let query = format!(
        r#"SELECT my_text, my_bool, my_timestamp, my_bigint, my_money FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );
    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let stored_text: String = row.get("my_text");
    let stored_bool: bool = row.get("my_bool");
    let stored_timestamp: Option<DateTime<Utc>> = row.get("my_timestamp");
    // The Rust read type must match the column's defined type: "integer" maps
    // to i32, "bigint"/"biginteger" map to i64. my_bigint is read as i32 here
    // on the assumption that it is defined as "integer"; if it is defined as
    // "bigint"/"biginteger", read it as i64 instead.
    let stored_bigint: i32 = row.get("my_bigint");
    let stored_money: Option<Decimal> = row.get("my_money");

    assert_eq!(stored_text, "Mixed data test");
    assert_eq!(stored_bool, true);
    assert!(stored_timestamp.is_none());
    assert_eq!(stored_bigint, 42);
    assert!(stored_money.is_none());
}
@@ -1,264 +0,0 @@
// tests/tables_data/handlers/post_table_data_test4.rs

use rust_decimal::Decimal;
use rust_decimal_macros::dec;

// Helper to create a protobuf Value from a string
fn proto_string(s: &str) -> Value {
    Value {
        kind: Some(Kind::StringValue(s.to_string())),
    }
}

// Helper to create a protobuf Value from a number
fn proto_number(n: f64) -> Value {
    Value {
        kind: Some(Kind::NumberValue(n)),
    }
}

// Helper to create a protobuf Null Value
fn proto_null() -> Value {
    Value {
        kind: Some(Kind::NullValue(0)),
    }
}

// Helper function to create a table with various decimal types for testing
async fn create_financial_table(
    pool: &PgPool,
    table_name: &str,
    profile_name: &str,
) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition {
                name: "product_name".into(),
                field_type: "text".into(),
            },
            // Standard money column
            TableColumnDefinition {
                name: "price".into(),
                field_type: "decimal(19, 4)".into(),
            },
            // Column for things like exchange rates or other precise factors
            TableColumnDefinition {
                name: "rate".into(),
                field_type: "decimal(10, 5)".into(),
            },
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

// A test context fixture for the financial table
#[fixture]
async fn decimal_test_context() -> TestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("decimal_profile_{}", unique_id);
    let table_name = format!("invoices_{}", unique_id);

    create_financial_table(&pool, &table_name, &profile_name)
        .await
        .expect("Failed to create decimal test table");

    let (tx, _rx) = mpsc::channel(100);

    TestContext {
        pool,
        profile_name,
        table_name,
        indexer_tx: tx,
    }
}

// ========= DECIMAL/NUMERIC DATA TYPE TESTS =========

#[rstest]
#[tokio::test]
async fn test_insert_valid_decimal_string(#[future] decimal_test_context: TestContext) {
    let context = decimal_test_context.await;
    let mut data = HashMap::new();
    data.insert("product_name".into(), proto_string("Laptop"));
    data.insert("price".into(), proto_string("1499.99"));
    data.insert("rate".into(), proto_string("-0.12345"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();
    assert!(response.success);

    let query = format!(
        r#"SELECT price, rate FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    );
    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let price: Decimal = row.get("price");
    let rate: Decimal = row.get("rate");

    assert_eq!(price, dec!(1499.99));
    assert_eq!(rate, dec!(-0.12345));
}

#[rstest]
#[tokio::test]
async fn test_insert_decimal_from_number_fails(#[future] decimal_test_context: TestContext) {
    let context = decimal_test_context.await;
    let mut data = HashMap::new();
    data.insert("product_name".into(), proto_string("Mouse"));
    // The invalid part: a number Value for a decimal field (a string is expected).
    data.insert("price".into(), proto_number(75.50));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    // The operation should fail.
    let result = post_table_data(&context.pool, request, &context.indexer_tx).await;
    assert!(result.is_err());

    // Verify the error is correct.
    let status = result.unwrap_err();
    assert_eq!(status.code(), tonic::Code::InvalidArgument);
    assert!(status
        .message()
        .contains("Expected a string representation for decimal column 'price'"));
}

#[rstest]
#[tokio::test]
async fn test_decimal_rounding_behavior(#[future] decimal_test_context: TestContext) {
    let context = decimal_test_context.await;
    let mut data = HashMap::new();
    data.insert("product_name".into(), proto_string("Keyboard"));
    // price is NUMERIC(19, 4), so this should be rounded up by the database
    data.insert("price".into(), proto_string("99.12345"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();
    assert!(response.success);

    let price: Decimal = sqlx::query_scalar(&format!(
        r#"SELECT price FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(response.inserted_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();

    // PostgreSQL rounds half away from zero (0.5 rounds up)
    assert_eq!(price, dec!(99.1235));
}
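
// NOTE: the half-away-from-zero behavior asserted above is the database's.
// rust_decimal's own `round_dp` defaults to banker's rounding (half to even),
// so reproducing PostgreSQL's result client-side needs an explicit strategy.
// A small sketch of the difference:
#[test]
fn rounding_strategy_assumption() {
    use rust_decimal::RoundingStrategy;
    use rust_decimal_macros::dec;

    let v = dec!(99.12345);
    // Matches the PostgreSQL NUMERIC(19, 4) result asserted above.
    assert_eq!(
        v.round_dp_with_strategy(4, RoundingStrategy::MidpointAwayFromZero),
        dec!(99.1235)
    );
    // Banker's rounding (the round_dp default) rounds the tie to even instead.
    assert_eq!(v.round_dp(4), dec!(99.1234));
}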

#[rstest]
#[tokio::test]
async fn test_insert_null_and_empty_string_for_decimal(
    #[future] decimal_test_context: TestContext,
) {
    let context = decimal_test_context.await;
    let mut data = HashMap::new();
    data.insert("product_name".into(), proto_string("Monitor"));
    data.insert("price".into(), proto_string(" ")); // Whitespace-only string should become NULL
    data.insert("rate".into(), proto_null()); // Explicit NULL

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();
    assert!(response.success);

    let row = sqlx::query(&format!(
        r#"SELECT price, rate FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(response.inserted_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();

    let price: Option<Decimal> = row.get("price");
    let rate: Option<Decimal> = row.get("rate");

    assert!(price.is_none());
    assert!(rate.is_none());
}

#[rstest]
#[tokio::test]
async fn test_invalid_decimal_string_fails(#[future] decimal_test_context: TestContext) {
    let context = decimal_test_context.await;
    let mut data = HashMap::new();
    data.insert("product_name".into(), proto_string("Bad Data"));
    data.insert("price".into(), proto_string("not-a-number"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &context.indexer_tx).await;
    assert!(result.is_err());
    let status = result.unwrap_err();
    assert_eq!(status.code(), tonic::Code::InvalidArgument);
    assert!(status
        .message()
        .contains("Invalid decimal string format for column 'price'"));
}

#[rstest]
#[tokio::test]
async fn test_decimal_precision_overflow_fails(#[future] decimal_test_context: TestContext) {
    let context = decimal_test_context.await;
    let mut data = HashMap::new();
    data.insert("product_name".into(), proto_string("Too Expensive"));
    // rate is NUMERIC(10, 5), so it allows 5 digits before the decimal point.
    // 123456.1 has 6 digits before it, so it should fail at the database level.
    data.insert("rate".into(), proto_string("123456.1"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &context.indexer_tx).await;
    assert!(result.is_err());
    let status = result.unwrap_err();
    // This error comes from the database itself.
    assert_eq!(status.code(), tonic::Code::InvalidArgument);
    assert!(status.message().contains("Numeric field overflow"));
}
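
// NOTE: the NUMERIC(p, s) rule exercised above is that at most p - s digits
// may appear before the decimal point. A minimal sketch of that check,
// illustrative only (the real enforcement happens inside PostgreSQL):
fn fits_numeric(digits_before_point: u32, precision: u32, scale: u32) -> bool {
    digits_before_point <= precision - scale
}

#[test]
fn numeric_overflow_assumption() {
    // NUMERIC(10, 5): 5 integer digits allowed, so 123456.1 (6 digits) overflows.
    assert!(!fits_numeric(6, 10, 5));
    // NUMERIC(19, 4) comfortably holds 999999999999.99 (12 integer digits).
    assert!(fits_numeric(12, 19, 4));
}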
@@ -1,588 +0,0 @@
// ========================================================================
// COMPREHENSIVE INTEGER ROBUSTNESS TESTS
// ========================================================================

#[derive(Clone)]
struct IntegerRobustnessTestContext {
    pool: PgPool,
    profile_name: String,
    mixed_integer_table: String,
    bigint_only_table: String,
    integer_only_table: String,
}

// Create tables with different integer type combinations
async fn create_integer_robustness_tables(pool: &PgPool, profile_name: &str) -> Result<IntegerRobustnessTestContext, tonic::Status> {
    let unique_id = generate_unique_id();
    let mixed_table = format!("mixed_int_table_{}", unique_id);
    let bigint_table = format!("bigint_table_{}", unique_id);
    let integer_table = format!("integer_table_{}", unique_id);

    // Table with both INTEGER and BIGINT columns
    let mixed_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: mixed_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "small_int".into(), field_type: "integer".into() }, // i32
            TableColumnDefinition { name: "big_int".into(), field_type: "biginteger".into() }, // i64
            TableColumnDefinition { name: "another_int".into(), field_type: "int".into() }, // i32 (alias)
            TableColumnDefinition { name: "another_bigint".into(), field_type: "bigint".into() }, // i64 (alias)
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, mixed_def).await?;

    // Table with only BIGINT columns
    let bigint_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: bigint_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "value1".into(), field_type: "biginteger".into() },
            TableColumnDefinition { name: "value2".into(), field_type: "bigint".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, bigint_def).await?;

    // Table with only INTEGER columns
    let integer_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: integer_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "value1".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "value2".into(), field_type: "int".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, integer_def).await?;

    Ok(IntegerRobustnessTestContext {
        pool: pool.clone(),
        profile_name: profile_name.to_string(),
        mixed_integer_table: mixed_table,
        bigint_only_table: bigint_table,
        integer_only_table: integer_table,
    })
}

#[fixture]
async fn integer_robustness_context() -> IntegerRobustnessTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("int_robust_profile_{}", unique_id);

    create_integer_robustness_tables(&pool, &profile_name).await
        .expect("Failed to create integer robustness test tables")
}

// ========================================================================
// BOUNDARY AND OVERFLOW TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_integer_boundary_values_comprehensive(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Test i32 boundaries on INTEGER columns
    let i32_boundary_tests = vec![
        (2147483647.0, "i32::MAX"),
        (-2147483648.0, "i32::MIN"),
        (2147483646.0, "i32::MAX - 1"),
        (-2147483647.0, "i32::MIN + 1"),
        (0.0, "zero"),
        (1.0, "one"),
        (-1.0, "negative one"),
    ];

    for (value, description) in i32_boundary_tests {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("i32 test: {}", description)));
        data.insert("value1".into(), create_number_value(value));
        data.insert("value2".into(), create_number_value(value));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.integer_only_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Failed for i32 value {}: {}", value, description);

        // Verify correct storage
        let response = result.unwrap();
        let query = format!(
            r#"SELECT value1, value2 FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.integer_only_table
        );
        let row = sqlx::query(&query)
            .bind(response.inserted_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        let stored_val1: i32 = row.get("value1");
        let stored_val2: i32 = row.get("value2");
        assert_eq!(stored_val1, value as i32);
        assert_eq!(stored_val2, value as i32);
    }
}

#[rstest]
#[tokio::test]
async fn test_bigint_boundary_values_comprehensive(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Test i64 boundaries that can be precisely represented in an f64
    let i64_boundary_tests = vec![
        (9223372036854774784.0, "Close to i64::MAX (precisely representable)"),
        (-9223372036854774784.0, "Close to i64::MIN (precisely representable)"),
        (4611686018427387904.0, "i64::MAX / 2"),
        (-4611686018427387904.0, "i64::MIN / 2"),
        (2147483647.0, "i32::MAX in an i64 column"),
        (-2147483648.0, "i32::MIN in an i64 column"),
        (1000000000000.0, "One trillion"),
        (-1000000000000.0, "Negative one trillion"),
    ];

    for (value, description) in i64_boundary_tests {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("i64 test: {}", description)));
        data.insert("value1".into(), create_number_value(value));
        data.insert("value2".into(), create_number_value(value));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.bigint_only_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Failed for i64 value {}: {}", value, description);

        // Verify correct storage
        let response = result.unwrap();
        let query = format!(
            r#"SELECT value1, value2 FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.bigint_only_table
        );
        let row = sqlx::query(&query)
            .bind(response.inserted_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        let stored_val1: i64 = row.get("value1");
        let stored_val2: i64 = row.get("value2");
        assert_eq!(stored_val1, value as i64);
        assert_eq!(stored_val2, value as i64);
    }
}

#[rstest]
#[tokio::test]
async fn test_integer_overflow_rejection_i32(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Values that should be rejected for INTEGER columns
    let overflow_values = vec![
        (2147483648.0, "i32::MAX + 1"),
        (-2147483649.0, "i32::MIN - 1"),
        (3000000000.0, "3 billion"),
        (-3000000000.0, "negative 3 billion"),
        (4294967296.0, "2^32"),
        (9223372036854775807.0, "i64::MAX (should fail on i32)"),
    ];

    for (value, description) in overflow_values {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("Overflow test: {}", description)));
        data.insert("value1".into(), create_number_value(value));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.integer_only_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_err(), "Should have failed for i32 overflow value {}: {}", value, description);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            assert!(err.message().contains("Integer value out of range for INTEGER column"));
        }
    }
}

#[rstest]
#[tokio::test]
async fn test_bigint_overflow_rejection_i64(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Values that should be rejected for BIGINT columns. Only values that
    // genuinely fail the f64 -> i64 round trip are included; values such as
    // 9223372036854775808.0 round-trip successfully and are covered by
    // test_bigint_successful_roundtrip_values below.
    let overflow_values = vec![
        (f64::INFINITY, "Positive infinity"),
        (f64::NEG_INFINITY, "Negative infinity"),
        (1e20, "Very large number (100,000,000,000,000,000,000)"),
        (-1e20, "Very large negative number"),
        (1e25, "Extremely large number"),
        (-1e25, "Extremely large negative number"),
        (f64::MAX, "f64::MAX"),
        (f64::MIN, "f64::MIN"),
    ];

    for (value, description) in overflow_values {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("i64 Overflow test: {}", description)));
        data.insert("value1".into(), create_number_value(value));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.bigint_only_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;

        assert!(result.is_err(), "Should have failed for i64 overflow value {}: {}", value, description);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            // Accept either message format (the robust range check should catch these)
            let message = err.message();
            assert!(
                message.contains("Integer value out of range for BIGINT column") ||
                message.contains("Expected integer for column") ||
                message.contains("but got a float"),
                "Unexpected error message for {}: {}",
                description,
                message
            );
        }
    }
}
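
// NOTE: a minimal sketch of the round-trip check the tests above and below
// assume the handler performs before binding an f64 into a BIGINT column.
// Illustrative only, not the handler's actual code:
fn f64_fits_i64(value: f64) -> bool {
    // Rust's `as` cast saturates, so non-finite and out-of-range inputs
    // fail the equality check below.
    value.is_finite() && (value as i64) as f64 == value
}

#[test]
fn f64_to_i64_roundtrip_assumption() {
    assert!(f64_fits_i64(1000000000000.0));
    // 2^63 saturates to i64::MAX, which maps back to 2^63 exactly.
    assert!(f64_fits_i64(9223372036854775808.0));
    assert!(!f64_fits_i64(1e20));
    assert!(!f64_fits_i64(f64::INFINITY));
}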

#[rstest]
#[tokio::test]
async fn test_bigint_successful_roundtrip_values(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Values that SHOULD successfully round-trip and be accepted
    let successful_values = vec![
        (9223372036854775808.0, "i64::MAX as f64 (saturates back to i64::MAX)"),
        (-9223372036854775808.0, "i64::MIN as f64 (exactly representable)"),
        (9223372036854774784.0, "Large but precisely representable in f64"),
        (-9223372036854774784.0, "Large negative but precisely representable in f64"),
        (0.0, "Zero"),
        (1.0, "One"),
        (-1.0, "Negative one"),
        (2147483647.0, "i32::MAX as f64"),
        (-2147483648.0, "i32::MIN as f64"),
    ];

    for (value, description) in successful_values {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("Successful test: {}", description)));
        data.insert("value1".into(), create_number_value(value));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.bigint_only_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Should have succeeded for legitimate i64 value {}: {}", value, description);

        // Verify it was stored correctly
        if let Ok(response) = result {
            let query = format!(
                r#"SELECT value1 FROM "{}"."{}" WHERE id = $1"#,
                context.profile_name, context.bigint_only_table
            );
            let stored_value: i64 = sqlx::query_scalar(&query)
                .bind(response.inserted_id)
                .fetch_one(&context.pool)
                .await
                .unwrap();

            assert_eq!(stored_value, value as i64, "Stored value should match for {}", description);
        }
    }
}

#[rstest]
#[tokio::test]
async fn test_mixed_integer_types_in_same_table(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Test inserting different values into different integer types in the same table
    let test_cases = vec![
        (42.0, 1000000000000.0, "Small i32, large i64"),
        (2147483647.0, 9223372036854774784.0, "i32::MAX, near i64::MAX"),
        (-2147483648.0, -9223372036854774784.0, "i32::MIN, near i64::MIN"),
        (0.0, 0.0, "Both zero"),
        (-1.0, -1.0, "Both negative one"),
    ];

    for (i32_val, i64_val, description) in test_cases {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("Mixed test: {}", description)));
        data.insert("small_int".into(), create_number_value(i32_val));
        data.insert("big_int".into(), create_number_value(i64_val));
        data.insert("another_int".into(), create_number_value(i32_val));
        data.insert("another_bigint".into(), create_number_value(i64_val));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.mixed_integer_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Failed for mixed integer test: {}", description);

        // Verify correct storage with the correct types
        let response = result.unwrap();
        let query = format!(
            r#"SELECT small_int, big_int, another_int, another_bigint FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.mixed_integer_table
        );
        let row = sqlx::query(&query)
            .bind(response.inserted_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        let stored_small_int: i32 = row.get("small_int");
        let stored_big_int: i64 = row.get("big_int");
        let stored_another_int: i32 = row.get("another_int");
        let stored_another_bigint: i64 = row.get("another_bigint");

        assert_eq!(stored_small_int, i32_val as i32);
        assert_eq!(stored_big_int, i64_val as i64);
        assert_eq!(stored_another_int, i32_val as i32);
        assert_eq!(stored_another_bigint, i64_val as i64);
    }
}

#[rstest]
#[tokio::test]
async fn test_wrong_type_for_mixed_integer_columns(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Try to put an i64-sized value into an i32 column
    let mut data = HashMap::new();
    data.insert("name".into(), create_string_value("Wrong type test"));
    data.insert("small_int".into(), create_number_value(3000000000.0)); // Too big for i32
    data.insert("big_int".into(), create_number_value(42.0)); // This one is fine

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.mixed_integer_table.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_err(), "Should fail when putting an i64-sized value in an i32 column");

    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err.message().contains("Integer value out of range for INTEGER column"));
    }
}

#[rstest]
#[tokio::test]
async fn test_float_precision_edge_cases(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Values with fractional parts should be rejected for integer columns
    let fractional_values = vec![
        (42.1, "42.1"),
        (42.9, "42.9"),
        (42.000001, "42.000001"),
        (-42.5, "-42.5"),
        (0.1, "0.1"),
        (2147483646.5, "Near i32::MAX with a fraction"),
    ];

    for (value, description) in fractional_values {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("Float test: {}", description)));
        data.insert("value1".into(), create_number_value(value));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.integer_only_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_err(), "Should fail for fractional value {}: {}", value, description);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            assert!(err.message().contains("Expected integer for column") && err.message().contains("but got a float"));
        }
    }
}
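
// NOTE: a minimal sketch of the fractional-part check the test above assumes,
// complementing the range check sketched earlier. Illustrative only:
fn f64_is_integral(value: f64) -> bool {
    value.is_finite() && value.fract() == 0.0
}

#[test]
fn fractional_rejection_assumption() {
    assert!(f64_is_integral(42.0));
    assert!(!f64_is_integral(42.000001));
    assert!(!f64_is_integral(2147483646.5));
}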

#[rstest]
#[tokio::test]
async fn test_null_integer_handling_comprehensive(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Test null values in the mixed integer table
    let mut data = HashMap::new();
    data.insert("name".into(), create_string_value("Null integer test"));
    data.insert("small_int".into(), create_null_value());
    data.insert("big_int".into(), create_null_value());
    data.insert("another_int".into(), create_number_value(42.0));
    data.insert("another_bigint".into(), create_number_value(1000000000000.0));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.mixed_integer_table.clone(),
        data,
    };

    let result = post_table_data(&context.pool, request, &indexer_tx).await;
    assert!(result.is_ok(), "Should succeed with null integer values");

    // Verify null storage
    let response = result.unwrap();
    let query = format!(
        r#"SELECT small_int, big_int, another_int, another_bigint FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.mixed_integer_table
    );
    let row = sqlx::query(&query)
        .bind(response.inserted_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();

    let stored_small_int: Option<i32> = row.get("small_int");
    let stored_big_int: Option<i64> = row.get("big_int");
    let stored_another_int: i32 = row.get("another_int");
    let stored_another_bigint: i64 = row.get("another_bigint");

    assert!(stored_small_int.is_none());
    assert!(stored_big_int.is_none());
    assert_eq!(stored_another_int, 42);
    assert_eq!(stored_another_bigint, 1000000000000);
}

#[rstest]
#[tokio::test]
async fn test_concurrent_mixed_integer_inserts(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Test concurrent inserts with different integer types
    let tasks: Vec<_> = (0..10).map(|i| {
        let context = context.clone();
        let indexer_tx = indexer_tx.clone();

        tokio::spawn(async move {
            let mut data = HashMap::new();
            data.insert("name".into(), create_string_value(&format!("Concurrent test {}", i)));
            data.insert("small_int".into(), create_number_value((i * 1000) as f64));
            data.insert("big_int".into(), create_number_value((i as i64 * 1000000000000) as f64));
            data.insert("another_int".into(), create_number_value((i * -100) as f64));
            data.insert("another_bigint".into(), create_number_value((i as i64 * -1000000000000) as f64));

            let request = PostTableDataRequest {
                profile_name: context.profile_name.clone(),
                table_name: context.mixed_integer_table.clone(),
                data,
            };

            post_table_data(&context.pool, request, &indexer_tx).await
        })
    }).collect();

    // Wait for all tasks to complete
    let results = futures::future::join_all(tasks).await;

    // All should succeed
    for (i, result) in results.into_iter().enumerate() {
        let task_result = result.expect("Task should not panic");
        assert!(task_result.is_ok(), "Concurrent insert {} should succeed", i);
    }
}

// ========================================================================
// PERFORMANCE AND STRESS TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_rapid_integer_inserts_stress(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
    let context = integer_robustness_context.await;
    let indexer_tx = create_test_indexer_channel().await;

    // Rapid sequential inserts with alternating integer types
    let start = std::time::Instant::now();

    for i in 0..100 {
        let mut data = HashMap::new();
        data.insert("name".into(), create_string_value(&format!("Stress test {}", i)));

        // Alternate between different boundary values
        let small_val = match i % 4 {
            0 => 2147483647.0,  // i32::MAX
            1 => -2147483648.0, // i32::MIN
            2 => 0.0,
            _ => (i as f64) * 1000.0,
        };

        let big_val = match i % 4 {
            0 => 9223372036854774784.0,  // Near i64::MAX
            1 => -9223372036854774784.0, // Near i64::MIN
            2 => 0.0,
            _ => (i as f64) * 1000000000000.0,
        };

        data.insert("small_int".into(), create_number_value(small_val));
        data.insert("big_int".into(), create_number_value(big_val));
        data.insert("another_int".into(), create_number_value(small_val));
        data.insert("another_bigint".into(), create_number_value(big_val));

        let request = PostTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.mixed_integer_table.clone(),
            data,
        };

        let result = post_table_data(&context.pool, request, &indexer_tx).await;
        assert!(result.is_ok(), "Rapid insert {} should succeed", i);
    }

    let duration = start.elapsed();
    println!("100 mixed integer inserts took: {:?}", duration);

    // Should complete in a reasonable time (adjust the threshold as needed)
    assert!(duration.as_secs() < 10, "Stress test took too long: {:?}", duration);
}
@@ -1,544 +0,0 @@
// tests/tables_data/handlers/put_table_data_test.rs

use rstest::{fixture, rstest};
use sqlx::{PgPool, Row};
use std::collections::HashMap;
use prost_types::{value::Kind, Value};
use common::proto::multieko2::table_definition::{
    PostTableDefinitionRequest, ColumnDefinition as TableColumnDefinition, TableLink,
};
use common::proto::multieko2::tables_data::{
    PostTableDataRequest, PutTableDataRequest,
};
use server::table_definition::handlers::post_table_definition;
// The post_table_data handler is used in the "Arrange" step of each test to create initial data.
use server::tables_data::handlers::post_table_data;
// The put_table_data handler is the function under test.
use server::tables_data::handlers::put_table_data;
use rust_decimal_macros::dec;
use crate::common::setup_test_db;
use tokio::sync::mpsc;
use server::indexer::IndexCommand;
use rand::Rng;
use rand::distr::Alphanumeric;
use futures;

// ========= Test Helpers =========

fn generate_unique_id() -> String {
    rand::rng()
        .sample_iter(&Alphanumeric)
        .take(8)
        .map(char::from)
        .collect::<String>()
        .to_lowercase()
}

fn string_to_proto_value(s: &str) -> Value {
    Value {
        kind: Some(Kind::StringValue(s.to_string())),
    }
}

fn bool_to_proto_value(b: bool) -> Value {
    Value {
        kind: Some(Kind::BoolValue(b)),
    }
}

async fn create_adresar_table(
    pool: &PgPool,
    table_name: &str,
    profile_name: &str,
) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition { name: "firma".into(), field_type: "text".into() },
            TableColumnDefinition { name: "kz".into(), field_type: "text".into() },
            TableColumnDefinition { name: "ulica".into(), field_type: "text".into() },
            TableColumnDefinition { name: "mesto".into(), field_type: "text".into() },
            TableColumnDefinition { name: "telefon".into(), field_type: "text".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

// Helper to create a record and return its ID for tests
async fn create_initial_record(
    context: &TestContext,
    initial_data: HashMap<String, Value>,
) -> i64 {
    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data: initial_data,
    };
    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .expect("Setup: Failed to create initial record");
    response.inserted_id
}

// ========= Fixtures =========

#[derive(Clone)]
struct TestContext {
    pool: PgPool,
    profile_name: String,
    table_name: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
}

#[fixture]
async fn test_context() -> TestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("test_profile_{}", unique_id);
    let table_name = format!("adresar_test_{}", unique_id);
    create_adresar_table(&pool, &table_name, &profile_name)
        .await
        .expect("Failed to create test table");
    let (tx, mut rx) = mpsc::channel(100);
    // Drain the receiver so sends never block
    tokio::spawn(async move { while rx.recv().await.is_some() {} });
    TestContext { pool, profile_name, table_name, indexer_tx: tx }
}
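
// NOTE: the drain task above matters because the indexer channel is bounded.
// A minimal, self-contained sketch of why: without a consumer, the 101st send
// on a channel of capacity 100 would await forever. The capacity and message
// type here are illustrative only:
#[tokio::test]
async fn bounded_channel_drain_assumption() {
    let (tx, mut rx) = tokio::sync::mpsc::channel::<u32>(100);
    // A draining consumer keeps the channel from filling up.
    tokio::spawn(async move { while rx.recv().await.is_some() {} });
    for i in 0..1000 {
        tx.send(i).await.expect("send should not block forever with a drain task");
    }
}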

// ========= Update Tests (Converted from Post Tests) =========

#[rstest]
#[tokio::test]
async fn test_update_table_data_success(#[future] test_context: TestContext) {
    // Arrange
    let context = test_context.await;
    let mut initial_data = HashMap::new();
    initial_data.insert("firma".to_string(), string_to_proto_value("Original Company"));
    initial_data.insert("ulica".to_string(), string_to_proto_value("Original Street"));
    let record_id = create_initial_record(&context, initial_data).await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("firma".to_string(), string_to_proto_value("Updated Company"));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    let response = put_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();

    // Assert
    assert!(response.success);
    assert_eq!(response.updated_id, record_id);

    let row = sqlx::query(&format!(
        r#"SELECT firma, ulica FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();

    let firma: String = row.get("firma");
    let ulica: String = row.get("ulica");
    assert_eq!(firma, "Updated Company");
    assert_eq!(ulica, "Original Street"); // Should be unchanged
}

#[rstest]
#[tokio::test]
async fn test_update_table_data_whitespace_trimming(
    #[future] test_context: TestContext,
) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "firma".to_string(),
            string_to_proto_value("Original"),
        )]),
    )
    .await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("firma".to_string(), string_to_proto_value(" Trimmed Co. "));
    update_data.insert("telefon".to_string(), string_to_proto_value(" 12345 "));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    put_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();

    // Assert
    let row = sqlx::query(&format!(
        r#"SELECT firma, telefon FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();
    let firma: String = row.get("firma");
    let telefon: Option<String> = row.get("telefon");
    assert_eq!(firma, "Trimmed Co.");
    assert_eq!(telefon.unwrap(), "12345");
}

#[rstest]
#[tokio::test]
async fn test_update_field_to_null_with_empty_string(
    #[future] test_context: TestContext,
) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "telefon".to_string(),
            string_to_proto_value("555-1234"),
        )]),
    )
    .await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("telefon".to_string(), string_to_proto_value(" ")); // Update to a whitespace-only string (becomes NULL)
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    put_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();

    // Assert
    let telefon: Option<String> =
        sqlx::query_scalar(&format!(
            r#"SELECT telefon FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.table_name
        ))
        .bind(record_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert!(telefon.is_none());
}

#[rstest]
#[tokio::test]
async fn test_update_telefon_length_limit_error(
    #[future] test_context: TestContext,
) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "telefon".to_string(),
            string_to_proto_value("valid-number"),
        )]),
    )
    .await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("telefon".to_string(), string_to_proto_value("1".repeat(16).as_str()));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);

    // Verify the original data is untouched
    let telefon: String = sqlx::query_scalar(&format!(
        r#"SELECT telefon FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();
    assert_eq!(telefon, "valid-number");
}

#[rstest]
#[tokio::test]
async fn test_update_with_invalid_column_name(#[future] test_context: TestContext) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "firma".to_string(),
|
||||
string_to_proto_value("Original"),
|
||||
)]),
|
||||
)
|
||||
.await;
|
||||
|
||||
// Act
|
||||
let mut update_data = HashMap::new();
|
||||
update_data.insert("nonexistent_col".to_string(), string_to_proto_value("invalid"));
|
||||
let request = PutTableDataRequest {
|
||||
profile_name: context.profile_name.clone(),
|
||||
table_name: context.table_name.clone(),
|
||||
id: record_id,
|
||||
data: update_data,
|
||||
};
|
||||
let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
|
||||
|
||||
// Assert
|
||||
assert!(result.is_err());
|
||||
let err = result.unwrap_err();
|
||||
assert_eq!(err.code(), tonic::Code::InvalidArgument);
|
||||
assert!(err.message().contains("Invalid column: nonexistent_col"));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_update_with_empty_data_request(#[future] test_context: TestContext) {
|
||||
// Arrange
|
||||
let context = test_context.await;
|
||||
let record_id = create_initial_record(
|
||||
&context,
|
||||
HashMap::from([(
|
||||
"firma".to_string(),
|
||||
string_to_proto_value("Original"),
|
||||
)]),
|
||||
)
|
||||
.await;
|
||||
|
||||
// Act
|
||||
let request = PutTableDataRequest {
|
||||
profile_name: context.profile_name.clone(),
|
||||
table_name: context.table_name.clone(),
|
||||
id: record_id,
|
||||
data: HashMap::new(), // Empty data map
|
||||
};
|
||||
let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
|
||||
|
||||
// Assert: An update with no fields should be a no-op and succeed.
|
||||
assert!(result.is_ok());
|
||||
let response = result.unwrap();
|
||||
assert!(response.success);
|
||||
assert_eq!(response.updated_id, record_id);
|
||||
|
||||
// Verify original data is untouched
|
||||
let firma: String = sqlx::query_scalar(&format!(
|
||||
r#"SELECT firma FROM "{}"."{}" WHERE id = $1"#,
|
||||
context.profile_name, context.table_name
|
||||
))
|
||||
.bind(record_id)
|
||||
.fetch_one(&context.pool)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(firma, "Original");
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_update_sql_injection_protection(#[future] test_context: TestContext) {
|
||||
// Arrange
|
||||
let context = test_context.await;
|
||||
let injection_attempt = "admin'; UPDATE adresar SET firma='hacked' WHERE '1'='1";
|
||||
let record_id = create_initial_record(
|
||||
&context,
|
||||
HashMap::from([(
|
||||
"firma".to_string(),
|
||||
string_to_proto_value("Safe Company"),
|
||||
)]),
|
||||
)
|
||||
.await;
|
||||
|
||||
// Act
|
||||
let mut update_data = HashMap::new();
|
||||
update_data.insert("firma".to_string(), string_to_proto_value(injection_attempt));
|
||||
let request = PutTableDataRequest {
|
||||
profile_name: context.profile_name.clone(),
|
||||
table_name: context.table_name.clone(),
|
||||
id: record_id,
|
||||
data: update_data,
|
||||
};
|
||||
put_table_data(&context.pool, request, &context.indexer_tx)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Assert
|
||||
let firma: String = sqlx::query_scalar(&format!(
|
||||
r#"SELECT firma FROM "{}"."{}" WHERE id = $1"#,
|
||||
context.profile_name, context.table_name
|
||||
))
|
||||
.bind(record_id)
|
||||
.fetch_one(&context.pool)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(firma, injection_attempt); // Should be stored as a literal string
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_update_nonexistent_record_error(#[future] test_context: TestContext) {
|
||||
// Arrange
|
||||
let context = test_context.await;
|
||||
let nonexistent_id = 999999;
|
||||
|
||||
// Act
|
||||
let mut update_data = HashMap::new();
|
||||
update_data.insert("firma".to_string(), string_to_proto_value("No one to update"));
|
||||
let request = PutTableDataRequest {
|
||||
profile_name: context.profile_name.clone(),
|
||||
table_name: context.table_name.clone(),
|
||||
id: nonexistent_id,
|
||||
data: update_data,
|
||||
};
|
||||
let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
|
||||
|
||||
// Assert
|
||||
assert!(result.is_err());
|
||||
let err = result.unwrap_err();
|
||||
assert_eq!(err.code(), tonic::Code::NotFound);
|
||||
assert!(err.message().contains("Record not found"));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_concurrent_updates_different_records(
|
||||
#[future] test_context: TestContext,
|
||||
) {
|
||||
// Arrange
|
||||
let context = test_context.await;
|
||||
let mut record_ids = Vec::new();
|
||||
for i in 0..10 {
|
||||
let record_id = create_initial_record(
|
||||
&context,
|
||||
HashMap::from([(
|
||||
"firma".to_string(),
|
||||
string_to_proto_value(&format!("Concurrent-{}", i)),
|
||||
)]),
|
||||
)
|
||||
.await;
|
||||
record_ids.push(record_id);
|
||||
}
|
||||
|
||||
// Act
|
||||
let mut tasks = Vec::new();
|
||||
for (i, record_id) in record_ids.iter().enumerate() {
|
||||
let context = context.clone();
|
||||
let record_id = *record_id;
|
||||
let task = tokio::spawn(async move {
|
||||
let mut update_data = HashMap::new();
|
||||
update_data.insert(
|
||||
"mesto".to_string(),
|
||||
string_to_proto_value(&format!("City-{}", i)),
|
||||
);
|
||||
let request = PutTableDataRequest {
|
||||
profile_name: context.profile_name.clone(),
|
||||
table_name: context.table_name.clone(),
|
||||
id: record_id,
|
||||
data: update_data,
|
||||
};
|
||||
put_table_data(&context.pool, request, &context.indexer_tx).await
|
||||
});
|
||||
tasks.push(task);
|
||||
}
|
||||
let results = futures::future::join_all(tasks).await;
|
||||
|
||||
// Assert
|
||||
for result in results {
|
||||
assert!(result.unwrap().is_ok());
|
||||
}
|
||||
|
||||
let count: i64 = sqlx::query_scalar(&format!(
|
||||
r#"SELECT COUNT(*) FROM "{}"."{}" WHERE mesto LIKE 'City-%'"#,
|
||||
context.profile_name, context.table_name
|
||||
))
|
||||
.fetch_one(&context.pool)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(count, 10);
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_update_boolean_system_column_validation(
|
||||
#[future] test_context: TestContext,
|
||||
) {
|
||||
// Arrange
|
||||
let context = test_context.await;
|
||||
let record_id = create_initial_record(
|
||||
&context,
|
||||
HashMap::from([(
|
||||
"firma".to_string(),
|
||||
string_to_proto_value("To be deleted"),
|
||||
)]),
|
||||
)
|
||||
.await;
|
||||
|
||||
// Act: Try to update 'deleted' with a string, which is invalid
|
||||
let mut invalid_data = HashMap::new();
|
||||
invalid_data.insert("deleted".to_string(), string_to_proto_value("true"));
|
||||
let request = PutTableDataRequest {
|
||||
profile_name: context.profile_name.clone(),
|
||||
table_name: context.table_name.clone(),
|
||||
id: record_id,
|
||||
data: invalid_data,
|
||||
};
|
||||
let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
|
||||
|
||||
// Assert: The operation must fail
|
||||
assert!(result.is_err());
|
||||
let err = result.unwrap_err();
|
||||
assert_eq!(err.code(), tonic::Code::InvalidArgument);
|
||||
assert!(err.message().contains("Expected boolean for column 'deleted'"));
|
||||
|
||||
// Act: Try to update 'deleted' with a proper boolean
|
||||
let mut valid_data = HashMap::new();
|
||||
valid_data.insert("deleted".to_string(), bool_to_proto_value(true));
|
||||
let request = PutTableDataRequest {
|
||||
profile_name: context.profile_name.clone(),
|
||||
table_name: context.table_name.clone(),
|
||||
id: record_id,
|
||||
data: valid_data,
|
||||
};
|
||||
let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
|
||||
|
||||
// Assert: The operation must succeed
|
||||
assert!(result.is_ok());
|
||||
let deleted: bool = sqlx::query_scalar(&format!(
|
||||
r#"SELECT deleted FROM "{}"."{}" WHERE id = $1"#,
|
||||
context.profile_name, context.table_name
|
||||
))
|
||||
.bind(record_id)
|
||||
.fetch_one(&context.pool)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(deleted);
|
||||
}
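
// The companion files below are pulled in with include!, which splices their
// source into this module at compile time, so they share this file's imports,
// helpers, and fixtures rather than compiling as separate test crates.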

include!("put_table_data_test2.rs");
include!("put_table_data_test3.rs");
include!("put_table_data_test4.rs");
include!("put_table_data_test5.rs");

File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,907 +0,0 @@

// tests/tables_data/handlers/put_table_data_test4.rs

#[derive(Clone)]
struct ComprehensiveIntegerTestContext {
    pool: PgPool,
    profile_name: String,
    mixed_integer_table: String,
    bigint_only_table: String,
    integer_only_table: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
}

#[derive(Clone)]
struct AdvancedDecimalTestContext {
    pool: PgPool,
    profile_name: String,
    table_name: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
}

#[derive(Clone)]
struct PerformanceTestContext {
    pool: PgPool,
    profile_name: String,
    stress_table: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
}

// ========================================================================
// TABLE CREATION HELPERS FOR COMPREHENSIVE TESTING
// ========================================================================

async fn create_comprehensive_integer_tables(
    pool: &PgPool,
    profile_name: &str,
) -> Result<ComprehensiveIntegerTestContext, tonic::Status> {
    let unique_id = generate_unique_id();
    let mixed_table = format!("comprehensive_mixed_table_{}", unique_id);
    let bigint_table = format!("comprehensive_bigint_table_{}", unique_id);
    let integer_table = format!("comprehensive_integer_table_{}", unique_id);

    // Table with both INTEGER and BIGINT columns for comprehensive testing
    let mixed_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: mixed_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "small_int".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "big_int".into(), field_type: "biginteger".into() },
            TableColumnDefinition { name: "another_int".into(), field_type: "int".into() },
            TableColumnDefinition { name: "another_bigint".into(), field_type: "bigint".into() },
            TableColumnDefinition { name: "nullable_int".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "nullable_bigint".into(), field_type: "biginteger".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, mixed_def).await?;

    // Table with only BIGINT columns for edge case testing
    let bigint_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: bigint_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "value1".into(), field_type: "biginteger".into() },
            TableColumnDefinition { name: "value2".into(), field_type: "bigint".into() },
            TableColumnDefinition { name: "extreme_value".into(), field_type: "biginteger".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, bigint_def).await?;

    // Table with only INTEGER columns for boundary testing
    let integer_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: integer_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "value1".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "value2".into(), field_type: "int".into() },
            TableColumnDefinition { name: "boundary_test".into(), field_type: "integer".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, integer_def).await?;

    let (tx, mut rx) = mpsc::channel(100);
    tokio::spawn(async move { while rx.recv().await.is_some() {} });

    Ok(ComprehensiveIntegerTestContext {
        pool: pool.clone(),
        profile_name: profile_name.to_string(),
        mixed_integer_table: mixed_table,
        bigint_only_table: bigint_table,
        integer_only_table: integer_table,
        indexer_tx: tx,
    })
}
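
// Note: the column definitions above intentionally mix the "integer"/"int" and
// "biginteger"/"bigint" spellings; as exercised by these tests, the
// table-definition handler is expected to treat each pair as aliases for the
// INTEGER and BIGINT SQL types respectively.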

async fn create_advanced_decimal_table(
    pool: &PgPool,
    table_name: &str,
    profile_name: &str,
) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition { name: "product_name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "price".into(), field_type: "decimal(19, 4)".into() },
            TableColumnDefinition { name: "rate".into(), field_type: "decimal(10, 5)".into() },
            TableColumnDefinition { name: "discount".into(), field_type: "decimal(5, 3)".into() },
            TableColumnDefinition { name: "ultra_precise".into(), field_type: "decimal(28, 10)".into() },
            TableColumnDefinition { name: "percentage".into(), field_type: "decimal(5, 4)".into() },
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

async fn create_performance_stress_table(
    pool: &PgPool,
    table_name: &str,
    profile_name: &str,
) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition { name: "test_name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "int_val1".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "int_val2".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "bigint_val1".into(), field_type: "biginteger".into() },
            TableColumnDefinition { name: "bigint_val2".into(), field_type: "biginteger".into() },
            TableColumnDefinition { name: "decimal_val".into(), field_type: "decimal(10, 2)".into() },
            TableColumnDefinition { name: "bool_val".into(), field_type: "boolean".into() },
            TableColumnDefinition { name: "timestamp_val".into(), field_type: "timestamptz".into() },
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

// ========================================================================
// FIXTURES
// ========================================================================

#[fixture]
async fn comprehensive_integer_test_context() -> ComprehensiveIntegerTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("comp_int_profile_{}", unique_id);

    create_comprehensive_integer_tables(&pool, &profile_name).await
        .expect("Failed to create comprehensive integer test tables")
}

#[fixture]
async fn advanced_decimal_test_context() -> AdvancedDecimalTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("adv_decimal_profile_{}", unique_id);
    let table_name = format!("advanced_decimals_{}", unique_id);

    create_advanced_decimal_table(&pool, &table_name, &profile_name).await
        .expect("Failed to create advanced decimal test table");

    let (tx, mut rx) = mpsc::channel(100);
    tokio::spawn(async move { while rx.recv().await.is_some() {} });

    AdvancedDecimalTestContext { pool, profile_name, table_name, indexer_tx: tx }
}

#[fixture]
async fn performance_test_context() -> PerformanceTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("perf_profile_{}", unique_id);
    let stress_table = format!("stress_table_{}", unique_id);

    create_performance_stress_table(&pool, &stress_table, &profile_name).await
        .expect("Failed to create performance stress test table");

    let (tx, mut rx) = mpsc::channel(100);
    tokio::spawn(async move { while rx.recv().await.is_some() {} });

    PerformanceTestContext { pool, profile_name, stress_table, indexer_tx: tx }
}

// ========================================================================
// HELPER FUNCTIONS FOR CREATING INITIAL RECORDS
// ========================================================================

async fn create_initial_comprehensive_integer_record(
    context: &ComprehensiveIntegerTestContext,
    table_name: &str,
) -> i64 {
    let mut initial_data = HashMap::new();
    initial_data.insert("name".to_string(), string_to_proto_value("Initial Record"));

    match table_name {
        table if table.contains("mixed") => {
            initial_data.insert("small_int".to_string(), Value { kind: Some(Kind::NumberValue(100.0)) });
            initial_data.insert("big_int".to_string(), Value { kind: Some(Kind::NumberValue(1000000000000.0)) });
            initial_data.insert("another_int".to_string(), Value { kind: Some(Kind::NumberValue(200.0)) });
            initial_data.insert("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue(2000000000000.0)) });
            initial_data.insert("nullable_int".to_string(), Value { kind: Some(Kind::NumberValue(300.0)) });
            initial_data.insert("nullable_bigint".to_string(), Value { kind: Some(Kind::NumberValue(3000000000000.0)) });
        },
        table if table.contains("bigint") => {
            initial_data.insert("value1".to_string(), Value { kind: Some(Kind::NumberValue(1000000000000.0)) });
            initial_data.insert("value2".to_string(), Value { kind: Some(Kind::NumberValue(2000000000000.0)) });
            initial_data.insert("extreme_value".to_string(), Value { kind: Some(Kind::NumberValue(9223372036854774784.0)) });
        },
        table if table.contains("integer") => {
            initial_data.insert("value1".to_string(), Value { kind: Some(Kind::NumberValue(100.0)) });
            initial_data.insert("value2".to_string(), Value { kind: Some(Kind::NumberValue(200.0)) });
            initial_data.insert("boundary_test".to_string(), Value { kind: Some(Kind::NumberValue(300.0)) });
        },
        _ => {}
    }

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: table_name.to_string(),
        data: initial_data,
    };
    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .expect("Setup: Failed to create initial integer record");
    response.inserted_id
}

async fn create_initial_advanced_decimal_record(context: &AdvancedDecimalTestContext) -> i64 {
    let mut initial_data = HashMap::new();
    initial_data.insert("product_name".to_string(), string_to_proto_value("Initial Product"));
    initial_data.insert("price".to_string(), string_to_proto_value("100.0000"));
    initial_data.insert("rate".to_string(), string_to_proto_value("1.00000"));
    initial_data.insert("discount".to_string(), string_to_proto_value("0.100"));
    initial_data.insert("ultra_precise".to_string(), string_to_proto_value("123.4567890123"));
    initial_data.insert("percentage".to_string(), string_to_proto_value("0.9999"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data: initial_data,
    };
    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .expect("Setup: Failed to create initial decimal record");
    response.inserted_id
}

async fn create_initial_performance_record(context: &PerformanceTestContext) -> i64 {
    let mut initial_data = HashMap::new();
    initial_data.insert("test_name".to_string(), string_to_proto_value("Initial Performance Test"));
    initial_data.insert("int_val1".to_string(), Value { kind: Some(Kind::NumberValue(1.0)) });
    initial_data.insert("int_val2".to_string(), Value { kind: Some(Kind::NumberValue(2.0)) });
    initial_data.insert("bigint_val1".to_string(), Value { kind: Some(Kind::NumberValue(1000000000000.0)) });
    initial_data.insert("bigint_val2".to_string(), Value { kind: Some(Kind::NumberValue(2000000000000.0)) });
    initial_data.insert("decimal_val".to_string(), string_to_proto_value("123.45"));
    initial_data.insert("bool_val".to_string(), Value { kind: Some(Kind::BoolValue(false)) });
    initial_data.insert("timestamp_val".to_string(), string_to_proto_value("2024-01-01T00:00:00Z"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.stress_table.clone(),
        data: initial_data,
    };
    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .expect("Setup: Failed to create initial performance record");
    response.inserted_id
}

// ========================================================================
// BIGINT SUCCESSFUL ROUNDTRIP VALUE TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_bigint_successful_roundtrip_values(
    #[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
) {
    let context = comprehensive_integer_test_context.await;
    let record_id = create_initial_comprehensive_integer_record(&context, &context.bigint_only_table).await;

    // Values that SHOULD successfully round-trip and be accepted for updates
    let successful_values = vec![
        (9223372036854775808.0, "Exactly i64::MAX as f64 (legitimate value)"),
        (-9223372036854775808.0, "Exactly i64::MIN as f64 (legitimate value)"),
        (9223372036854774784.0, "Large but precisely representable in f64"),
        (-9223372036854774784.0, "Large negative but precisely representable in f64"),
        (0.0, "Zero"),
        (1.0, "One"),
        (-1.0, "Negative one"),
        (2147483647.0, "i32::MAX as f64"),
        (-2147483648.0, "i32::MIN as f64"),
        (4611686018427387904.0, "i64::MAX / 2"),
        (-4611686018427387904.0, "i64::MIN / 2"),
        (1000000000000.0, "One trillion"),
        (-1000000000000.0, "Negative one trillion"),
    ];
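
    // Why 9223372036854774784.0 is the "precisely representable" boundary: in
    // the range [2^62, 2^63) an f64's spacing (ulp) is 1024, so
    // 2^63 - 1024 = 9223372036854774784 is the largest f64 strictly below
    // 2^63 and converts to i64 without any precision loss.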

    for (value, description) in successful_values {
        let mut update_data = HashMap::new();
        update_data.insert("name".to_string(), string_to_proto_value(&format!("Roundtrip test: {}", description)));
        update_data.insert("value1".to_string(), Value { kind: Some(Kind::NumberValue(value)) });
        update_data.insert("extreme_value".to_string(), Value { kind: Some(Kind::NumberValue(value)) });

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.bigint_only_table.clone(),
            id: record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
        assert!(result.is_ok(), "Should have succeeded for legitimate i64 update value {}: {}", value, description);

        // Verify it was stored correctly
        if result.is_ok() {
            let query = format!(
                r#"SELECT value1, extreme_value FROM "{}"."{}" WHERE id = $1"#,
                context.profile_name, context.bigint_only_table
            );
            let row = sqlx::query(&query)
                .bind(record_id)
                .fetch_one(&context.pool)
                .await
                .unwrap();

            let stored_value1: i64 = row.get("value1");
            let stored_extreme_value: i64 = row.get("extreme_value");

            assert_eq!(stored_value1, value as i64, "Value1 should match for {}", description);
            assert_eq!(stored_extreme_value, value as i64, "Extreme value should match for {}", description);
        }
    }
}

#[rstest]
#[tokio::test]
async fn test_update_bigint_overflow_rejection_comprehensive(
    #[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
) {
    let context = comprehensive_integer_test_context.await;
    let record_id = create_initial_comprehensive_integer_record(&context, &context.bigint_only_table).await;

    // Values that should be rejected for BIGINT columns due to precision loss or overflow
    let overflow_values = vec![
        (f64::INFINITY, "Positive infinity"),
        (f64::NEG_INFINITY, "Negative infinity"),
        (1e20, "Very large number (100,000,000,000,000,000,000)"),
        (-1e20, "Very large negative number"),
        (1e25, "Extremely large number"),
        (-1e25, "Extremely large negative number"),
        (f64::MAX, "f64::MAX"),
        (f64::MIN, "f64::MIN"),
        (f64::NAN, "NaN"),
    ];
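
    // For scale: i64 spans roughly ±9.22e18, so 1e20 and 1e25 are orders of
    // magnitude out of range, and the non-finite values (infinities, NaN)
    // cannot map to any integer at all.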

    for (value, description) in overflow_values {
        let mut update_data = HashMap::new();
        update_data.insert("name".to_string(), string_to_proto_value(&format!("i64 Overflow update test: {}", description)));
        update_data.insert("extreme_value".to_string(), Value { kind: Some(Kind::NumberValue(value)) });

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.bigint_only_table.clone(),
            id: record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

        assert!(result.is_err(), "Should have failed for i64 overflow update value {}: {}", value, description);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            let message = err.message();
            assert!(
                message.contains("Integer value out of range for BIGINT column") ||
                message.contains("Expected integer for column") ||
                message.contains("but got a float") ||
                message.contains("Invalid number"),
                "Unexpected error message for {}: {}",
                description,
                message
            );
        }
    }
}

// ========================================================================
// WRONG TYPE FOR MIXED INTEGER COLUMNS TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_wrong_type_for_mixed_integer_columns(
    #[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
) {
    let context = comprehensive_integer_test_context.await;
    let record_id = create_initial_comprehensive_integer_record(&context, &context.mixed_integer_table).await;

    // Try to put i64 values into i32 columns (should fail)
    let wrong_type_tests = vec![
        ("small_int", 3000000000.0, "3 billion in i32 column"),
        ("another_int", -3000000000.0, "negative 3 billion in i32 column"),
        ("nullable_int", 2147483648.0, "i32::MAX + 1 in i32 column"),
        ("small_int", 9223372036854775807.0, "i64::MAX in i32 column"),
    ];

    for (column_name, value, description) in wrong_type_tests {
        let mut update_data = HashMap::new();
        update_data.insert("name".to_string(), string_to_proto_value(&format!("Wrong type test: {}", description)));
        update_data.insert(column_name.to_string(), Value { kind: Some(Kind::NumberValue(value)) });

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.mixed_integer_table.clone(),
            id: record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
        assert!(result.is_err(), "Should fail when putting i64 value {} in i32 column {}", value, column_name);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            assert!(err.message().contains("Integer value out of range for INTEGER column"));
        }
    }

    // Try fractional values in integer columns (should fail)
    let fractional_tests = vec![
        ("small_int", 42.5, "fractional in i32 column"),
        ("big_int", 1000000000000.1, "fractional in i64 column"),
        ("another_int", -42.9, "negative fractional in i32 column"),
        ("another_bigint", -1000000000000.9, "negative fractional in i64 column"),
    ];

    for (column_name, value, description) in fractional_tests {
        let mut update_data = HashMap::new();
        update_data.insert("name".to_string(), string_to_proto_value(&format!("Fractional test: {}", description)));
        update_data.insert(column_name.to_string(), Value { kind: Some(Kind::NumberValue(value)) });

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.mixed_integer_table.clone(),
            id: record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
        assert!(result.is_err(), "Should fail for fractional value {} in column {}", value, column_name);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            assert!(err.message().contains("Expected integer for column") && err.message().contains("but got a float"));
        }
    }
}

// ========================================================================
// CONCURRENT MIXED INTEGER UPDATES TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_concurrent_mixed_integer_updates(
    #[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
) {
    let context = comprehensive_integer_test_context.await;
    // Create multiple records for concurrent updating
    let mut record_ids = Vec::new();
    for _ in 0..10 {
        let record_id = create_initial_comprehensive_integer_record(&context, &context.mixed_integer_table).await;
        record_ids.push(record_id);
    }
    // Test concurrent updates with different integer types
    let tasks: Vec<_> = record_ids.into_iter().enumerate().map(|(i, record_id)| {
        let context = context.clone();
        tokio::spawn(async move {
            let mut update_data = HashMap::new();
            update_data.insert("name".to_string(), string_to_proto_value(&format!("Concurrent update test {}", i)));
            update_data.insert("small_int".to_string(), Value { kind: Some(Kind::NumberValue((i * 1000) as f64)) });
            update_data.insert("big_int".to_string(), Value { kind: Some(Kind::NumberValue((i as i64 * 1000000000000) as f64)) });
            // Cast i to i32 first, then multiply by the negative factor
            update_data.insert("another_int".to_string(), Value { kind: Some(Kind::NumberValue(((i as i32) * -100) as f64)) });
            update_data.insert("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue((i as i64 * -1000000000000) as f64)) });
            // Alternate between null and values for nullable columns
            if i % 2 == 0 {
                update_data.insert("nullable_int".to_string(), Value { kind: Some(Kind::NumberValue((i * 42) as f64)) });
                update_data.insert("nullable_bigint".to_string(), Value { kind: Some(Kind::NullValue(0)) });
            } else {
                update_data.insert("nullable_int".to_string(), Value { kind: Some(Kind::NullValue(0)) });
                update_data.insert("nullable_bigint".to_string(), Value { kind: Some(Kind::NumberValue((i as i64 * 9999999999) as f64)) });
            }
            let request = PutTableDataRequest {
                profile_name: context.profile_name.clone(),
                table_name: context.mixed_integer_table.clone(),
                id: record_id,
                data: update_data,
            };
            put_table_data(&context.pool, request, &context.indexer_tx).await
        })
    }).collect();
    // Wait for all tasks to complete
    let results = futures::future::join_all(tasks).await;
    // All should succeed
    for (i, result) in results.into_iter().enumerate() {
        let task_result = result.expect("Task should not panic");
        assert!(task_result.is_ok(), "Concurrent integer update {} should succeed", i);
    }
    // Verify all records were updated correctly
    let query = format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE name LIKE 'Concurrent update test%'"#,
        context.profile_name, context.mixed_integer_table
    );
    let count: i64 = sqlx::query_scalar(&query)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert_eq!(count, 10);
}

// ========================================================================
// ADVANCED DECIMAL PRECISION EDGE CASES
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_ultra_high_precision_decimals(
    #[future] advanced_decimal_test_context: AdvancedDecimalTestContext,
) {
    let context = advanced_decimal_test_context.await;
    let record_id = create_initial_advanced_decimal_record(&context).await;

    let ultra_precision_tests = vec![
        ("ultra_precise", "123456789.1234567890", dec!(123456789.1234567890)),
        ("ultra_precise", "-999999999.9999999999", dec!(-999999999.9999999999)),
        ("ultra_precise", "0.0000000001", dec!(0.0000000001)),
        ("percentage", "0.9999", dec!(0.9999)), // decimal(5, 4): largest value below 1
        ("percentage", "0.0001", dec!(0.0001)), // decimal(5, 4): smallest positive step
    ];

    for (field, value_str, expected_decimal) in ultra_precision_tests {
        let mut update_data = HashMap::new();
        update_data.insert("product_name".to_string(), string_to_proto_value("Ultra precision test"));
        update_data.insert(field.to_string(), string_to_proto_value(value_str));

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            id: record_id,
            data: update_data,
        };

        let response = put_table_data(&context.pool, request, &context.indexer_tx)
            .await
            .unwrap();
        assert!(response.success);

        // Verify ultra high precision was preserved
        let query = format!(
            r#"SELECT {} FROM "{}"."{}" WHERE id = $1"#,
            field, context.profile_name, context.table_name
        );
        let stored_value: rust_decimal::Decimal = sqlx::query_scalar(&query)
            .bind(record_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        assert_eq!(stored_value, expected_decimal, "Ultra precision mismatch for field {}", field);
    }
}

#[rstest]
#[tokio::test]
async fn test_update_decimal_edge_case_rounding(
    #[future] advanced_decimal_test_context: AdvancedDecimalTestContext,
) {
    let context = advanced_decimal_test_context.await;
    let record_id = create_initial_advanced_decimal_record(&context).await;

    // Test edge cases where rounding behavior is critical
    let edge_rounding_tests = vec![
        ("price", "12345.99995", dec!(12346.0000)), // Should round up at 5
        ("rate", "1.999995", dec!(2.00000)), // Should round up
        ("discount", "0.9995", dec!(1.000)), // Should round up to 1.000
        ("percentage", "0.99995", dec!(1.0000)), // decimal(5, 4) rounds to 1.0000
        ("ultra_precise", "1.99999999995", dec!(2.0000000000)), // Ultra precision rounding
    ];
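
    // Assumption behind these expectations: half-way cases round away from
    // zero (round half up), which is how PostgreSQL's NUMERIC type rounds a
    // value when coercing it to the column's declared scale.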

    for (field, input_value, expected_rounded) in edge_rounding_tests {
        let mut update_data = HashMap::new();
        update_data.insert("product_name".to_string(), string_to_proto_value("Edge rounding test"));
        update_data.insert(field.to_string(), string_to_proto_value(input_value));

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            id: record_id,
            data: update_data,
        };

        let response = put_table_data(&context.pool, request, &context.indexer_tx)
            .await
            .unwrap();
        assert!(response.success);

        // Verify edge case rounding was applied correctly
        let query = format!(
            r#"SELECT {} FROM "{}"."{}" WHERE id = $1"#,
            field, context.profile_name, context.table_name
        );
        let stored_value: rust_decimal::Decimal = sqlx::query_scalar(&query)
            .bind(record_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        assert_eq!(stored_value, expected_rounded, "Edge rounding mismatch for field {}", field);
    }
}

// ========================================================================
// PERFORMANCE AND STRESS TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_rapid_integer_updates_stress(
    #[future] performance_test_context: PerformanceTestContext,
) {
    let context = performance_test_context.await;

    // Create initial records for stress testing
    let mut record_ids = Vec::new();
    for _ in 0..100 {
        let record_id = create_initial_performance_record(&context).await;
        record_ids.push(record_id);
    }

    // Rapid sequential updates with alternating integer types and complex data
    let start = std::time::Instant::now();

    for (i, record_id) in record_ids.iter().enumerate() {
        let mut update_data = HashMap::new();
        update_data.insert("test_name".to_string(), string_to_proto_value(&format!("Stress update test {}", i)));

        // Alternate between different boundary values for stress testing
        let small_val = match i % 4 {
            0 => 2147483647.0, // i32::MAX
            1 => -2147483648.0, // i32::MIN
            2 => 0.0,
            _ => (i as f64) * 1000.0,
        };

        let big_val = match i % 4 {
            0 => 9223372036854774784.0, // Near i64::MAX
            1 => -9223372036854774784.0, // Near i64::MIN
            2 => 0.0,
            _ => (i as f64) * 1000000000000.0,
        };

        update_data.insert("int_val1".to_string(), Value { kind: Some(Kind::NumberValue(small_val)) });
        update_data.insert("int_val2".to_string(), Value { kind: Some(Kind::NumberValue(small_val)) });
        update_data.insert("bigint_val1".to_string(), Value { kind: Some(Kind::NumberValue(big_val)) });
        update_data.insert("bigint_val2".to_string(), Value { kind: Some(Kind::NumberValue(big_val)) });

        // Add some decimal and other type updates for a comprehensive stress test
        update_data.insert("decimal_val".to_string(), string_to_proto_value(&format!("{}.{:02}", i * 10, i % 100)));
        update_data.insert("bool_val".to_string(), Value { kind: Some(Kind::BoolValue(i % 2 == 0)) });
        update_data.insert("timestamp_val".to_string(), string_to_proto_value(&format!("2024-01-{:02}T{:02}:00:00Z", (i % 28) + 1, i % 24)));

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.stress_table.clone(),
            id: *record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
        assert!(result.is_ok(), "Rapid stress update {} should succeed", i);
    }

    let duration = start.elapsed();
    println!("100 mixed data type stress updates took: {:?}", duration);

    // Should complete in reasonable time (adjust threshold as needed)
    assert!(duration.as_secs() < 15, "Stress test took too long: {:?}", duration);

    // Verify all records were updated correctly
    let query = format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE test_name LIKE 'Stress update test%'"#,
        context.profile_name, context.stress_table
    );

    let count: i64 = sqlx::query_scalar(&query)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert_eq!(count, 100);
}

#[rstest]
#[tokio::test]
async fn test_update_concurrent_stress_mixed_data_types(
    #[future] performance_test_context: PerformanceTestContext,
) {
    let context = performance_test_context.await;

    // Create initial records
    let mut record_ids = Vec::new();
    for _ in 0..20 {
        let record_id = create_initial_performance_record(&context).await;
        record_ids.push(record_id);
    }

    // Concurrent stress test with mixed data types
    let tasks: Vec<_> = record_ids.into_iter().enumerate().map(|(i, record_id)| {
        let context = context.clone();

        tokio::spawn(async move {
            let mut update_data = HashMap::new();
            update_data.insert("test_name".to_string(), string_to_proto_value(&format!("Concurrent stress {}", i)));

            // Use complex values that stress different validation paths
            let complex_int = match i % 3 {
                0 => 2147483647.0 - (i as f64), // Near i32::MAX
                1 => -2147483648.0 + (i as f64), // Near i32::MIN
                _ => (i as f64) * 12345.0,
            };

            let complex_bigint = match i % 3 {
                0 => 9223372036854774784.0 - (i as f64 * 1000000000.0),
                1 => -9223372036854774784.0 + (i as f64 * 1000000000.0),
                _ => (i as f64) * 987654321012345.0,
            };

            update_data.insert("int_val1".to_string(), Value { kind: Some(Kind::NumberValue(complex_int)) });
            update_data.insert("int_val2".to_string(), Value { kind: Some(Kind::NumberValue(complex_int)) });
            update_data.insert("bigint_val1".to_string(), Value { kind: Some(Kind::NumberValue(complex_bigint)) });
            update_data.insert("bigint_val2".to_string(), Value { kind: Some(Kind::NumberValue(complex_bigint)) });
            update_data.insert("decimal_val".to_string(), string_to_proto_value(&format!("{}.{:02}", i * 33, (i * 7) % 100)));
            update_data.insert("bool_val".to_string(), Value { kind: Some(Kind::BoolValue((i * 3) % 2 == 0)) });

            let request = PutTableDataRequest {
                profile_name: context.profile_name.clone(),
                table_name: context.stress_table.clone(),
                id: record_id,
                data: update_data,
            };

            put_table_data(&context.pool, request, &context.indexer_tx).await
        })
    }).collect();

    // Wait for all concurrent updates to complete
    let results = futures::future::join_all(tasks).await;

    // All should succeed
    for (i, result) in results.into_iter().enumerate() {
        let task_result = result.expect("Task should not panic");
        assert!(task_result.is_ok(), "Concurrent stress update {} should succeed", i);
    }

    // Verify all records were updated
    let query = format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE test_name LIKE 'Concurrent stress%'"#,
        context.profile_name, context.stress_table
    );

    let count: i64 = sqlx::query_scalar(&query)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert_eq!(count, 20);
}

// ========================================================================
// EDGE CASE COMBINATION TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_complex_mixed_data_type_combinations(
    #[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
) {
    let context = comprehensive_integer_test_context.await;
    let record_id = create_initial_comprehensive_integer_record(&context, &context.mixed_integer_table).await;

    // Test complex combinations of data type updates that stress multiple validation paths
    let complex_combinations = vec![
        (
            "All boundary values",
            HashMap::from([
                ("small_int".to_string(), Value { kind: Some(Kind::NumberValue(2147483647.0)) }),
                ("big_int".to_string(), Value { kind: Some(Kind::NumberValue(9223372036854774784.0)) }),
                ("another_int".to_string(), Value { kind: Some(Kind::NumberValue(-2147483648.0)) }),
                ("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue(-9223372036854774784.0)) }),
                ("nullable_int".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
                ("nullable_bigint".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
            ])
        ),
        (
            "Mixed nulls and values",
            HashMap::from([
                ("small_int".to_string(), Value { kind: Some(Kind::NumberValue(42.0)) }),
                ("big_int".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
                ("another_int".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
                ("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue(1000000000000.0)) }),
                ("nullable_int".to_string(), Value { kind: Some(Kind::NumberValue(123.0)) }),
                ("nullable_bigint".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
            ])
        ),
        (
            "Zero and near-zero values",
            HashMap::from([
                ("small_int".to_string(), Value { kind: Some(Kind::NumberValue(0.0)) }),
                ("big_int".to_string(), Value { kind: Some(Kind::NumberValue(1.0)) }),
                ("another_int".to_string(), Value { kind: Some(Kind::NumberValue(-1.0)) }),
                ("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue(0.0)) }),
                ("nullable_int".to_string(), Value { kind: Some(Kind::NumberValue(1.0)) }),
                ("nullable_bigint".to_string(), Value { kind: Some(Kind::NumberValue(-1.0)) }),
            ])
        ),
    ];

    for (description, mut update_data) in complex_combinations {
        update_data.insert("name".to_string(), string_to_proto_value(&format!("Complex combo: {}", description)));

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.mixed_integer_table.clone(),
            id: record_id,
            data: update_data.clone(),
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
        assert!(result.is_ok(), "Complex combination should succeed: {}", description);

        // Verify the complex combination was stored correctly
        let query = format!(
            r#"SELECT small_int, big_int, another_int, another_bigint, nullable_int, nullable_bigint FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.mixed_integer_table
        );
        let row = sqlx::query(&query)
            .bind(record_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        // Verify each field based on what was set in update_data
        for (field_name, expected_value) in update_data.iter() {
            if field_name == "name" { continue; } // Skip text field

            match expected_value.kind.as_ref().unwrap() {
                Kind::NumberValue(num) => {
                    match field_name.as_str() {
                        "small_int" | "another_int" | "nullable_int" => {
                            let stored: Option<i32> = row.get(field_name.as_str());
                            if let Some(stored_val) = stored {
                                assert_eq!(stored_val, *num as i32, "Field {} mismatch in {}", field_name, description);
                            }
                        },
                        "big_int" | "another_bigint" | "nullable_bigint" => {
                            let stored: Option<i64> = row.get(field_name.as_str());
                            if let Some(stored_val) = stored {
                                assert_eq!(stored_val, *num as i64, "Field {} mismatch in {}", field_name, description);
                            }
                        },
                        _ => {}
                    }
                },
                Kind::NullValue(_) => {
                    match field_name.as_str() {
                        "small_int" | "another_int" | "nullable_int" => {
                            let stored: Option<i32> = row.get(field_name.as_str());
                            assert!(stored.is_none(), "Field {} should be null in {}", field_name, description);
                        },
                        "big_int" | "another_bigint" | "nullable_bigint" => {
                            let stored: Option<i64> = row.get(field_name.as_str());
                            assert!(stored.is_none(), "Field {} should be null in {}", field_name, description);
                        },
                        _ => {}
                    }
                },
                _ => {}
            }
        }
    }
}
@@ -1,259 +0,0 @@
|
||||
// tests/tables_data/handlers/put_table_data_test5.rs
|
||||
|
||||
// ========================================================================
|
||||
// MISSING TEST SCENARIOS REPLICATED FROM POST TESTS
|
||||
// ========================================================================
|
||||
|
||||
// Fixture to provide a closed database pool, simulating a connection error.
|
||||
// This is needed for the database error test.
|
||||
#[fixture]
|
||||
async fn closed_test_context() -> TestContext {
|
||||
let mut context = test_context().await;
|
||||
context.pool.close().await;
|
||||
context
|
||||
}
|
||||
|
||||
// Test 1: Ensure that an update fails gracefully when the database is unavailable.
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_update_table_data_database_error(
|
||||
#[future] closed_test_context: TestContext,
|
||||
) {
|
||||
// Arrange
|
||||
let context = closed_test_context.await;
|
||||
// The record ID doesn't matter as the connection is already closed.
|
||||
let record_id = 1;
|
||||
|
||||
// Act
|
||||
let mut update_data = HashMap::new();
|
||||
update_data.insert(
|
||||
"firma".to_string(),
|
||||
string_to_proto_value("This will fail"),
|
||||
);
|
||||
let request = PutTableDataRequest {
|
||||
profile_name: context.profile_name.clone(),
|
||||
table_name: context.table_name.clone(),
|
||||
id: record_id,
|
||||
data: update_data,
|
||||
};
|
||||
let result =
|
||||
put_table_data(&context.pool, request, &context.indexer_tx).await;
|
||||
|
||||
// Assert
|
||||
assert!(result.is_err());
|
||||
assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
|
||||
}

// Test 2: Ensure that updating a required foreign key to NULL is not allowed.
// This uses the `foreign_key_update_test_context` from `put_table_data_test3.rs`.
#[rstest]
#[tokio::test]
async fn test_update_required_foreign_key_to_null_fails(
    #[future] foreign_key_update_test_context: ForeignKeyUpdateTestContext,
) {
    let context = foreign_key_update_test_context.await;

    // Arrange: Create a category and a product linked to it.
    let mut category_data = HashMap::new();
    category_data
        .insert("name".to_string(), string_to_proto_value("Test Category"));
    let category_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_response = post_table_data(
        &context.pool,
        category_request,
        &context.indexer_tx,
    )
    .await
    .unwrap();
    let category_id = category_response.inserted_id;

    let mut product_data = HashMap::new();
    product_data
        .insert("name".to_string(), string_to_proto_value("Test Product"));
    product_data.insert(
        format!("{}_id", context.category_table),
        Value { kind: Some(Kind::NumberValue(category_id as f64)) },
    );
    let product_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };
    let product_response =
        post_table_data(&context.pool, product_request, &context.indexer_tx)
            .await
            .unwrap();
    let product_id = product_response.inserted_id;

    // Act: Attempt to update the product's required foreign key to NULL.
    let mut update_data = HashMap::new();
    update_data.insert(
        format!("{}_id", context.category_table),
        Value { kind: Some(Kind::NullValue(0)) },
    );

    let update_request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        id: product_id,
        data: update_data,
    };

    let result =
        put_table_data(&context.pool, update_request, &context.indexer_tx)
            .await;

    // Assert: The operation should fail due to a database constraint.
    assert!(
        result.is_err(),
        "Update of required foreign key to NULL should fail"
    );
    let err = result.unwrap_err();
    // The database will likely return a NOT NULL violation, which our handler
    // wraps as an Internal error.
    assert_eq!(err.code(), tonic::Code::Internal);
    assert!(err.message().contains("Update failed"));
}
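
// A minimal sketch of the `ForeignKeyUpdateTestContext` shape assumed by the
// test above, inferred from its call sites (the real fixture lives in
// `put_table_data_test3.rs`):
//
// struct ForeignKeyUpdateTestContext {
//     pool: PgPool,
//     profile_name: String,
//     category_table: String,
//     product_table: String,
//     indexer_tx: mpsc::Sender<IndexCommand>,
// }
//
// The fixture is assumed to create `category_table` and `product_table` via
// `post_table_definition`, with a required (NOT NULL) link from product to
// category; that is what makes the NULL update above violate a constraint.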

// tests/tables_data/handlers/put_table_data_test6.rs

// ========================================================================
// MISSING DATA TYPE VALIDATION TESTS FOR PUT HANDLER
// ========================================================================

// Note: These tests are replicated from post_table_data_test3.rs to ensure
// the PUT handler has the same level of type validation coverage as the
// POST handler.
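
// The fixture and helpers used below are not shown in this file. Sketches of
// the value-constructor helpers, inferred from their call sites (the real
// definitions are assumed to live in the shared POST test module):

fn create_string_value(s: &str) -> Value {
    // Wraps a Rust string in a protobuf `Value` with a `StringValue` kind.
    Value { kind: Some(Kind::StringValue(s.to_string())) }
}

fn create_number_value(n: f64) -> Value {
    // Wraps an f64 in a protobuf `Value` with a `NumberValue` kind.
    Value { kind: Some(Kind::NumberValue(n)) }
}

// `data_type_test_context` is assumed to build a table with typed columns
// such as `my_bigint`, `my_bool`, and `my_timestamp`, and
// `create_initial_data_type_record(&context)` is assumed to insert one valid
// row and return its id.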

#[rstest]
#[tokio::test]
async fn test_update_type_mismatch_string_for_integer(
    #[future] data_type_test_context: DataTypeTestContext,
) {
    // Arrange
    let context = data_type_test_context.await;
    let record_id = create_initial_data_type_record(&context).await;

    // Act: Attempt to update an integer column with a string value.
    let mut update_data = HashMap::new();
    update_data.insert(
        "my_bigint".to_string(),
        create_string_value("not-an-integer"),
    );

    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };

    let result =
        put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err
            .message()
            .contains("Expected number for column 'my_bigint'"));
    }
}

#[rstest]
#[tokio::test]
async fn test_update_type_mismatch_number_for_boolean(
    #[future] data_type_test_context: DataTypeTestContext,
) {
    // Arrange
    let context = data_type_test_context.await;
    let record_id = create_initial_data_type_record(&context).await;

    // Act: Attempt to update a boolean column with a number value.
    let mut update_data = HashMap::new();
    update_data.insert("my_bool".to_string(), create_number_value(1.0));

    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };

    let result =
        put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err
            .message()
            .contains("Expected boolean for column 'my_bool'"));
    }
}

#[rstest]
#[tokio::test]
async fn test_update_with_various_valid_timestamp_formats(
    #[future] data_type_test_context: DataTypeTestContext,
) {
    // Arrange
    let context = data_type_test_context.await;
    let record_id = create_initial_data_type_record(&context).await;

    let valid_timestamps = vec![
        "2025-06-24T18:30:00Z",
        "2023-01-01T00:00:00+00:00",
        "2024-02-29T12:00:00.123456Z",
        "1999-12-31T23:59:59.999Z",
    ];

    for timestamp_str in valid_timestamps {
        // Act
        let mut update_data = HashMap::new();
        update_data.insert(
            "my_timestamp".to_string(),
            create_string_value(timestamp_str),
        );

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            id: record_id,
            data: update_data,
        };

        let result =
            put_table_data(&context.pool, request, &context.indexer_tx).await;

        // Assert
        assert!(
            result.is_ok(),
            "Update should succeed for valid timestamp format: {}",
            timestamp_str
        );

        // Verify the value was stored correctly
        let stored_timestamp: chrono::DateTime<chrono::Utc> =
            sqlx::query_scalar(&format!(
                r#"SELECT my_timestamp FROM "{}"."{}" WHERE id = $1"#,
                context.profile_name, context.table_name
            ))
            .bind(record_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        let expected_timestamp =
            chrono::DateTime::parse_from_rfc3339(timestamp_str)
                .unwrap()
                .with_timezone(&chrono::Utc);
        assert_eq!(stored_timestamp, expected_timestamp);
    }
}
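
// All inputs in the loop above are RFC 3339 strings, so a single
// parse-and-normalize step yields the expected UTC value regardless of
// offset notation or fractional-second precision. A standalone sketch of
// that normalization (hypothetical helper, not part of the handler API):

fn expected_utc(ts: &str) -> chrono::DateTime<chrono::Utc> {
    // `parse_from_rfc3339` accepts `Z`, explicit offsets like `+00:00`,
    // and fractional seconds; `with_timezone` converts the result to UTC.
    chrono::DateTime::parse_from_rfc3339(ts)
        .expect("valid RFC 3339 input")
        .with_timezone(&chrono::Utc)
}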