ordering of the tests for tables data
server/tests/tables_data/put/mod.rs (new file, 3 lines)
@@ -0,0 +1,3 @@
// tests/tables_data/put/mod.rs

pub mod put_table_data_test;
server/tests/tables_data/put/put_table_data_test.rs (new file, 544 lines)
@@ -0,0 +1,544 @@
// tests/tables_data/put/put_table_data_test.rs
use rstest::{fixture, rstest};
use sqlx::{PgPool, Row};
use std::collections::HashMap;
use prost_types::{value::Kind, Value};
use common::proto::multieko2::table_definition::{
    PostTableDefinitionRequest, ColumnDefinition as TableColumnDefinition, TableLink,
};
use common::proto::multieko2::tables_data::{
    PostTableDataRequest, PutTableDataRequest,
};
use server::table_definition::handlers::post_table_definition;
// The post_table_data handler is used in the "Arrange" step of each test to create initial data.
use server::tables_data::handlers::post_table_data;
// The put_table_data handler is the function we are testing.
use server::tables_data::handlers::put_table_data;
use rust_decimal_macros::dec;
use crate::common::setup_test_db;
use tokio::sync::mpsc;
use server::indexer::IndexCommand;
use rand::Rng;
use rand::distr::Alphanumeric;
use futures;

// ========= Test Helpers =========

fn generate_unique_id() -> String {
    rand::rng()
        .sample_iter(&Alphanumeric)
        .take(8)
        .map(char::from)
        .collect::<String>()
        .to_lowercase()
}
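// Note: `rand::rng()` and `rand::distr::Alphanumeric` are the rand 0.9 names
// for what earlier versions exposed as `thread_rng()` and
// `distributions::Alphanumeric`.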

fn string_to_proto_value(s: &str) -> Value {
    Value {
        kind: Some(Kind::StringValue(s.to_string())),
    }
}

fn bool_to_proto_value(b: bool) -> Value {
    Value {
        kind: Some(Kind::BoolValue(b)),
    }
}

async fn create_adresar_table(
    pool: &PgPool,
    table_name: &str,
    profile_name: &str,
) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition { name: "firma".into(), field_type: "text".into() },
            TableColumnDefinition { name: "kz".into(), field_type: "text".into() },
            TableColumnDefinition { name: "ulica".into(), field_type: "text".into() },
            TableColumnDefinition { name: "mesto".into(), field_type: "text".into() },
            TableColumnDefinition { name: "telefon".into(), field_type: "text".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

// Helper to create a record and return its ID for tests
async fn create_initial_record(
    context: &TestContext,
    initial_data: HashMap<String, Value>,
) -> i64 {
    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data: initial_data,
    };
    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .expect("Setup: Failed to create initial record");
    response.inserted_id
}

// ========= Fixtures =========

#[derive(Clone)]
struct TestContext {
    pool: PgPool,
    profile_name: String,
    table_name: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
}

#[fixture]
async fn test_context() -> TestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("test_profile_{}", unique_id);
    let table_name = format!("adresar_test_{}", unique_id);
    create_adresar_table(&pool, &table_name, &profile_name)
        .await
        .expect("Failed to create test table");
    let (tx, mut rx) = mpsc::channel(100);
    // Drain the receiver so sends on the bounded channel (capacity 100) never
    // block the handlers under test; index commands are simply discarded.
    tokio::spawn(async move { while rx.recv().await.is_some() {} });
    TestContext { pool, profile_name, table_name, indexer_tx: tx }
}
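// rstest note: an `async fn` fixture is injected as a future when the test
// parameter is marked `#[future]`, which is why every test below first does
// `let context = test_context.await;`.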

// ========= Update Tests (Converted from Post Tests) =========

#[rstest]
#[tokio::test]
async fn test_update_table_data_success(#[future] test_context: TestContext) {
    // Arrange
    let context = test_context.await;
    let mut initial_data = HashMap::new();
    initial_data.insert("firma".to_string(), string_to_proto_value("Original Company"));
    initial_data.insert("ulica".to_string(), string_to_proto_value("Original Street"));
    let record_id = create_initial_record(&context, initial_data).await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("firma".to_string(), string_to_proto_value("Updated Company"));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    let response = put_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();

    // Assert
    assert!(response.success);
    assert_eq!(response.updated_id, record_id);

    let row = sqlx::query(&format!(
        r#"SELECT firma, ulica FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();

    let firma: String = row.get("firma");
    let ulica: String = row.get("ulica");
    assert_eq!(firma, "Updated Company");
    assert_eq!(ulica, "Original Street"); // Should be unchanged
}

#[rstest]
#[tokio::test]
async fn test_update_table_data_whitespace_trimming(
    #[future] test_context: TestContext,
) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "firma".to_string(),
            string_to_proto_value("Original"),
        )]),
    )
    .await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("firma".to_string(), string_to_proto_value(" Trimmed Co. "));
    update_data.insert("telefon".to_string(), string_to_proto_value(" 12345 "));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    put_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();

    // Assert
    let row = sqlx::query(&format!(
        r#"SELECT firma, telefon FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();
    let firma: String = row.get("firma");
    let telefon: Option<String> = row.get("telefon");
    assert_eq!(firma, "Trimmed Co.");
    assert_eq!(telefon.unwrap(), "12345");
}

#[rstest]
#[tokio::test]
async fn test_update_field_to_null_with_empty_string(
    #[future] test_context: TestContext,
) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "telefon".to_string(),
            string_to_proto_value("555-1234"),
        )]),
    )
    .await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("telefon".to_string(), string_to_proto_value(" ")); // Update to empty
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    put_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();

    // Assert
    let telefon: Option<String> =
        sqlx::query_scalar(&format!(
            r#"SELECT telefon FROM "{}"."{}" WHERE id = $1"#,
            context.profile_name, context.table_name
        ))
        .bind(record_id)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert!(telefon.is_none());
}

#[rstest]
#[tokio::test]
async fn test_update_telefon_length_limit_error(
    #[future] test_context: TestContext,
) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "telefon".to_string(),
            string_to_proto_value("valid-number"),
        )]),
    )
    .await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("telefon".to_string(), string_to_proto_value("1".repeat(16).as_str()));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);

    // Verify original data is untouched
    let telefon: String = sqlx::query_scalar(&format!(
        r#"SELECT telefon FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();
    assert_eq!(telefon, "valid-number");
}

#[rstest]
#[tokio::test]
async fn test_update_with_invalid_column_name(#[future] test_context: TestContext) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "firma".to_string(),
            string_to_proto_value("Original"),
        )]),
    )
    .await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("nonexistent_col".to_string(), string_to_proto_value("invalid"));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    let err = result.unwrap_err();
    assert_eq!(err.code(), tonic::Code::InvalidArgument);
    assert!(err.message().contains("Invalid column: nonexistent_col"));
}

#[rstest]
#[tokio::test]
async fn test_update_with_empty_data_request(#[future] test_context: TestContext) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "firma".to_string(),
            string_to_proto_value("Original"),
        )]),
    )
    .await;

    // Act
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: HashMap::new(), // Empty data map
    };
    let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert: An update with no fields should be a no-op and succeed.
    assert!(result.is_ok());
    let response = result.unwrap();
    assert!(response.success);
    assert_eq!(response.updated_id, record_id);

    // Verify original data is untouched
    let firma: String = sqlx::query_scalar(&format!(
        r#"SELECT firma FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();
    assert_eq!(firma, "Original");
}

#[rstest]
#[tokio::test]
async fn test_update_sql_injection_protection(#[future] test_context: TestContext) {
    // Arrange
    let context = test_context.await;
    let injection_attempt = "admin'; UPDATE adresar SET firma='hacked' WHERE '1'='1";
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "firma".to_string(),
            string_to_proto_value("Safe Company"),
        )]),
    )
    .await;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("firma".to_string(), string_to_proto_value(injection_attempt));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    put_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .unwrap();

    // Assert
    let firma: String = sqlx::query_scalar(&format!(
        r#"SELECT firma FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();
    assert_eq!(firma, injection_attempt); // Should be stored as a literal string
}
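// The payload above survives verbatim because values are evidently bound as
// query parameters rather than spliced into the SQL text: the same `$1`
// placeholder style used by the verification queries in these tests.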

#[rstest]
#[tokio::test]
async fn test_update_nonexistent_record_error(#[future] test_context: TestContext) {
    // Arrange
    let context = test_context.await;
    let nonexistent_id = 999999;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert("firma".to_string(), string_to_proto_value("No one to update"));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: nonexistent_id,
        data: update_data,
    };
    let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    let err = result.unwrap_err();
    assert_eq!(err.code(), tonic::Code::NotFound);
    assert!(err.message().contains("Record not found"));
}

#[rstest]
#[tokio::test]
async fn test_concurrent_updates_different_records(
    #[future] test_context: TestContext,
) {
    // Arrange
    let context = test_context.await;
    let mut record_ids = Vec::new();
    for i in 0..10 {
        let record_id = create_initial_record(
            &context,
            HashMap::from([(
                "firma".to_string(),
                string_to_proto_value(&format!("Concurrent-{}", i)),
            )]),
        )
        .await;
        record_ids.push(record_id);
    }

    // Act
    let mut tasks = Vec::new();
    for (i, record_id) in record_ids.iter().enumerate() {
        let context = context.clone();
        let record_id = *record_id;
        let task = tokio::spawn(async move {
            let mut update_data = HashMap::new();
            update_data.insert(
                "mesto".to_string(),
                string_to_proto_value(&format!("City-{}", i)),
            );
            let request = PutTableDataRequest {
                profile_name: context.profile_name.clone(),
                table_name: context.table_name.clone(),
                id: record_id,
                data: update_data,
            };
            put_table_data(&context.pool, request, &context.indexer_tx).await
        });
        tasks.push(task);
    }
    let results = futures::future::join_all(tasks).await;

    // Assert
    for result in results {
        assert!(result.unwrap().is_ok());
    }

    let count: i64 = sqlx::query_scalar(&format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE mesto LIKE 'City-%'"#,
        context.profile_name, context.table_name
    ))
    .fetch_one(&context.pool)
    .await
    .unwrap();
    assert_eq!(count, 10);
}
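// Note: cloning TestContext per task is cheap. sqlx's PgPool is internally
// reference-counted, so every spawned task shares one connection pool rather
// than opening new connections.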

#[rstest]
#[tokio::test]
async fn test_update_boolean_system_column_validation(
    #[future] test_context: TestContext,
) {
    // Arrange
    let context = test_context.await;
    let record_id = create_initial_record(
        &context,
        HashMap::from([(
            "firma".to_string(),
            string_to_proto_value("To be deleted"),
        )]),
    )
    .await;

    // Act: Try to update 'deleted' with a string, which is invalid
    let mut invalid_data = HashMap::new();
    invalid_data.insert("deleted".to_string(), string_to_proto_value("true"));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: invalid_data,
    };
    let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert: The operation must fail
    assert!(result.is_err());
    let err = result.unwrap_err();
    assert_eq!(err.code(), tonic::Code::InvalidArgument);
    assert!(err.message().contains("Expected boolean for column 'deleted'"));

    // Act: Try to update 'deleted' with a proper boolean
    let mut valid_data = HashMap::new();
    valid_data.insert("deleted".to_string(), bool_to_proto_value(true));
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: valid_data,
    };
    let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert: The operation must succeed
    assert!(result.is_ok());
    let deleted: bool = sqlx::query_scalar(&format!(
        r#"SELECT deleted FROM "{}"."{}" WHERE id = $1"#,
        context.profile_name, context.table_name
    ))
    .bind(record_id)
    .fetch_one(&context.pool)
    .await
    .unwrap();
    assert!(deleted);
}

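// The sibling test files below are pulled in with include!, which splices
// their source into this file at compile time. They therefore share this
// file's imports, helpers, and fixtures, and the whole suite compiles as a
// single integration-test binary.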
include!("put_table_data_test2.rs");
|
||||
include!("put_table_data_test3.rs");
|
||||
include!("put_table_data_test4.rs");
|
||||
include!("put_table_data_test5.rs");
|
||||
server/tests/tables_data/put/put_table_data_test2.rs (new file, 1293 lines)
File diff suppressed because it is too large.
server/tests/tables_data/put/put_table_data_test3.rs (new file, 1001 lines)
File diff suppressed because it is too large.
server/tests/tables_data/put/put_table_data_test4.rs (new file, 907 lines)
@@ -0,0 +1,907 @@
// tests/tables_data/put/put_table_data_test4.rs

#[derive(Clone)]
struct ComprehensiveIntegerTestContext {
    pool: PgPool,
    profile_name: String,
    mixed_integer_table: String,
    bigint_only_table: String,
    integer_only_table: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
}

#[derive(Clone)]
struct AdvancedDecimalTestContext {
    pool: PgPool,
    profile_name: String,
    table_name: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
}

#[derive(Clone)]
struct PerformanceTestContext {
    pool: PgPool,
    profile_name: String,
    stress_table: String,
    indexer_tx: mpsc::Sender<IndexCommand>,
}

// ========================================================================
// TABLE CREATION HELPERS FOR COMPREHENSIVE TESTING
// ========================================================================

async fn create_comprehensive_integer_tables(
    pool: &PgPool,
    profile_name: &str,
) -> Result<ComprehensiveIntegerTestContext, tonic::Status> {
    let unique_id = generate_unique_id();
    let mixed_table = format!("comprehensive_mixed_table_{}", unique_id);
    let bigint_table = format!("comprehensive_bigint_table_{}", unique_id);
    let integer_table = format!("comprehensive_integer_table_{}", unique_id);

    // Table with both INTEGER and BIGINT columns for comprehensive testing
    let mixed_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: mixed_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "small_int".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "big_int".into(), field_type: "biginteger".into() },
            TableColumnDefinition { name: "another_int".into(), field_type: "int".into() },
            TableColumnDefinition { name: "another_bigint".into(), field_type: "bigint".into() },
            TableColumnDefinition { name: "nullable_int".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "nullable_bigint".into(), field_type: "biginteger".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, mixed_def).await?;

    // Table with only BIGINT columns for edge case testing
    let bigint_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: bigint_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "value1".into(), field_type: "biginteger".into() },
            TableColumnDefinition { name: "value2".into(), field_type: "bigint".into() },
            TableColumnDefinition { name: "extreme_value".into(), field_type: "biginteger".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, bigint_def).await?;

    // Table with only INTEGER columns for boundary testing
    let integer_def = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: integer_table.clone(),
        columns: vec![
            TableColumnDefinition { name: "name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "value1".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "value2".into(), field_type: "int".into() },
            TableColumnDefinition { name: "boundary_test".into(), field_type: "integer".into() },
        ],
        indexes: vec![],
        links: vec![],
    };
    post_table_definition(pool, integer_def).await?;

    let (tx, mut rx) = mpsc::channel(100);
    tokio::spawn(async move { while rx.recv().await.is_some() {} });

    Ok(ComprehensiveIntegerTestContext {
        pool: pool.clone(),
        profile_name: profile_name.to_string(),
        mixed_integer_table: mixed_table,
        bigint_only_table: bigint_table,
        integer_only_table: integer_table,
        indexer_tx: tx,
    })
}

async fn create_advanced_decimal_table(
    pool: &PgPool,
    table_name: &str,
    profile_name: &str,
) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition { name: "product_name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "price".into(), field_type: "decimal(19, 4)".into() },
            TableColumnDefinition { name: "rate".into(), field_type: "decimal(10, 5)".into() },
            TableColumnDefinition { name: "discount".into(), field_type: "decimal(5, 3)".into() },
            TableColumnDefinition { name: "ultra_precise".into(), field_type: "decimal(28, 10)".into() },
            TableColumnDefinition { name: "percentage".into(), field_type: "decimal(5, 4)".into() },
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

async fn create_performance_stress_table(
    pool: &PgPool,
    table_name: &str,
    profile_name: &str,
) -> Result<(), tonic::Status> {
    let table_def_request = PostTableDefinitionRequest {
        profile_name: profile_name.into(),
        table_name: table_name.into(),
        columns: vec![
            TableColumnDefinition { name: "test_name".into(), field_type: "text".into() },
            TableColumnDefinition { name: "int_val1".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "int_val2".into(), field_type: "integer".into() },
            TableColumnDefinition { name: "bigint_val1".into(), field_type: "biginteger".into() },
            TableColumnDefinition { name: "bigint_val2".into(), field_type: "biginteger".into() },
            TableColumnDefinition { name: "decimal_val".into(), field_type: "decimal(10, 2)".into() },
            TableColumnDefinition { name: "bool_val".into(), field_type: "boolean".into() },
            TableColumnDefinition { name: "timestamp_val".into(), field_type: "timestamptz".into() },
        ],
        indexes: vec![],
        links: vec![],
    };

    post_table_definition(pool, table_def_request).await?;
    Ok(())
}

// ========================================================================
// FIXTURES
// ========================================================================

#[fixture]
async fn comprehensive_integer_test_context() -> ComprehensiveIntegerTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("comp_int_profile_{}", unique_id);

    create_comprehensive_integer_tables(&pool, &profile_name).await
        .expect("Failed to create comprehensive integer test tables")
}

#[fixture]
async fn advanced_decimal_test_context() -> AdvancedDecimalTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("adv_decimal_profile_{}", unique_id);
    let table_name = format!("advanced_decimals_{}", unique_id);

    create_advanced_decimal_table(&pool, &table_name, &profile_name).await
        .expect("Failed to create advanced decimal test table");

    let (tx, mut rx) = mpsc::channel(100);
    tokio::spawn(async move { while rx.recv().await.is_some() {} });

    AdvancedDecimalTestContext { pool, profile_name, table_name, indexer_tx: tx }
}

#[fixture]
async fn performance_test_context() -> PerformanceTestContext {
    let pool = setup_test_db().await;
    let unique_id = generate_unique_id();
    let profile_name = format!("perf_profile_{}", unique_id);
    let stress_table = format!("stress_table_{}", unique_id);

    create_performance_stress_table(&pool, &stress_table, &profile_name).await
        .expect("Failed to create performance stress test table");

    let (tx, mut rx) = mpsc::channel(100);
    tokio::spawn(async move { while rx.recv().await.is_some() {} });

    PerformanceTestContext { pool, profile_name, stress_table, indexer_tx: tx }
}

// ========================================================================
// HELPER FUNCTIONS FOR CREATING INITIAL RECORDS
// ========================================================================

async fn create_initial_comprehensive_integer_record(
    context: &ComprehensiveIntegerTestContext,
    table_name: &str,
) -> i64 {
    let mut initial_data = HashMap::new();
    initial_data.insert("name".to_string(), string_to_proto_value("Initial Record"));

    match table_name {
        table if table.contains("mixed") => {
            initial_data.insert("small_int".to_string(), Value { kind: Some(Kind::NumberValue(100.0)) });
            initial_data.insert("big_int".to_string(), Value { kind: Some(Kind::NumberValue(1000000000000.0)) });
            initial_data.insert("another_int".to_string(), Value { kind: Some(Kind::NumberValue(200.0)) });
            initial_data.insert("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue(2000000000000.0)) });
            initial_data.insert("nullable_int".to_string(), Value { kind: Some(Kind::NumberValue(300.0)) });
            initial_data.insert("nullable_bigint".to_string(), Value { kind: Some(Kind::NumberValue(3000000000000.0)) });
        },
        table if table.contains("bigint") => {
            initial_data.insert("value1".to_string(), Value { kind: Some(Kind::NumberValue(1000000000000.0)) });
            initial_data.insert("value2".to_string(), Value { kind: Some(Kind::NumberValue(2000000000000.0)) });
            initial_data.insert("extreme_value".to_string(), Value { kind: Some(Kind::NumberValue(9223372036854774784.0)) });
        },
        table if table.contains("integer") => {
            initial_data.insert("value1".to_string(), Value { kind: Some(Kind::NumberValue(100.0)) });
            initial_data.insert("value2".to_string(), Value { kind: Some(Kind::NumberValue(200.0)) });
            initial_data.insert("boundary_test".to_string(), Value { kind: Some(Kind::NumberValue(300.0)) });
        },
        _ => {}
    }

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: table_name.to_string(),
        data: initial_data,
    };
    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .expect("Setup: Failed to create initial integer record");
    response.inserted_id
}

async fn create_initial_advanced_decimal_record(context: &AdvancedDecimalTestContext) -> i64 {
    let mut initial_data = HashMap::new();
    initial_data.insert("product_name".to_string(), string_to_proto_value("Initial Product"));
    initial_data.insert("price".to_string(), string_to_proto_value("100.0000"));
    initial_data.insert("rate".to_string(), string_to_proto_value("1.00000"));
    initial_data.insert("discount".to_string(), string_to_proto_value("0.100"));
    initial_data.insert("ultra_precise".to_string(), string_to_proto_value("123.4567890123"));
    initial_data.insert("percentage".to_string(), string_to_proto_value("0.9999"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        data: initial_data,
    };
    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .expect("Setup: Failed to create initial decimal record");
    response.inserted_id
}

async fn create_initial_performance_record(context: &PerformanceTestContext) -> i64 {
    let mut initial_data = HashMap::new();
    initial_data.insert("test_name".to_string(), string_to_proto_value("Initial Performance Test"));
    initial_data.insert("int_val1".to_string(), Value { kind: Some(Kind::NumberValue(1.0)) });
    initial_data.insert("int_val2".to_string(), Value { kind: Some(Kind::NumberValue(2.0)) });
    initial_data.insert("bigint_val1".to_string(), Value { kind: Some(Kind::NumberValue(1000000000000.0)) });
    initial_data.insert("bigint_val2".to_string(), Value { kind: Some(Kind::NumberValue(2000000000000.0)) });
    initial_data.insert("decimal_val".to_string(), string_to_proto_value("123.45"));
    initial_data.insert("bool_val".to_string(), Value { kind: Some(Kind::BoolValue(false)) });
    initial_data.insert("timestamp_val".to_string(), string_to_proto_value("2024-01-01T00:00:00Z"));

    let request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.stress_table.clone(),
        data: initial_data,
    };
    let response = post_table_data(&context.pool, request, &context.indexer_tx)
        .await
        .expect("Setup: Failed to create initial performance record");
    response.inserted_id
}

// ========================================================================
// BIGINT SUCCESSFUL ROUNDTRIP VALUE TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_bigint_successful_roundtrip_values(
    #[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
) {
    let context = comprehensive_integer_test_context.await;
    let record_id = create_initial_comprehensive_integer_record(&context, &context.bigint_only_table).await;

    // Values that SHOULD successfully round-trip and be accepted for updates
    let successful_values = vec![
        (9223372036854775808.0, "2^63 as f64 (the value i64::MAX rounds to)"),
        (-9223372036854775808.0, "Exactly i64::MIN as f64 (legitimate value)"),
        (9223372036854774784.0, "Large but precisely representable in f64"),
        (-9223372036854774784.0, "Large negative but precisely representable in f64"),
        (0.0, "Zero"),
        (1.0, "One"),
        (-1.0, "Negative one"),
        (2147483647.0, "i32::MAX as f64"),
        (-2147483648.0, "i32::MIN as f64"),
        (4611686018427387904.0, "2^62 (approximately i64::MAX / 2)"),
        (-4611686018427387904.0, "i64::MIN / 2"),
        (1000000000000.0, "One trillion"),
        (-1000000000000.0, "Negative one trillion"),
    ];

    for (value, description) in successful_values {
        let mut update_data = HashMap::new();
        update_data.insert("name".to_string(), string_to_proto_value(&format!("Roundtrip test: {}", description)));
        update_data.insert("value1".to_string(), Value { kind: Some(Kind::NumberValue(value)) });
        update_data.insert("extreme_value".to_string(), Value { kind: Some(Kind::NumberValue(value)) });

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.bigint_only_table.clone(),
            id: record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
        assert!(result.is_ok(), "Should have succeeded for legitimate i64 update value {}: {}", value, description);

        // Verify it was stored correctly
        if result.is_ok() {
            let query = format!(
                r#"SELECT value1, extreme_value FROM "{}"."{}" WHERE id = $1"#,
                context.profile_name, context.bigint_only_table
            );
            let row = sqlx::query(&query)
                .bind(record_id)
                .fetch_one(&context.pool)
                .await
                .unwrap();

            let stored_value1: i64 = row.get("value1");
            let stored_extreme_value: i64 = row.get("extreme_value");

            assert_eq!(stored_value1, value as i64, "Value1 should match for {}", description);
            assert_eq!(stored_extreme_value, value as i64, "Extreme value should match for {}", description);
        }
    }
}
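// Editor's note: the "precisely representable" boundary values above follow
// from f64's 53-bit mantissa; near 2^63 the spacing between adjacent f64
// values is 1024. A minimal, handler-independent sketch of that reasoning
// (this test function is illustrative, not part of the original suite):
#[test]
fn f64_i64_roundtrip_sketch() {
    // Largest f64 strictly below 2^63: 2^63 - 1024. It fits in i64 and
    // converts back to exactly the same f64.
    let exact = 9_223_372_036_854_774_784_f64;
    assert_eq!(exact as i64 as f64, exact);

    // i64::MAX itself is NOT representable: as f64 it rounds up to 2^63.
    assert_eq!(i64::MAX as f64, 9_223_372_036_854_775_808_f64);

    // Values like 1e20 exceed the i64 range entirely and cannot round-trip.
    assert!(1e20_f64 > i64::MAX as f64);
}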

#[rstest]
#[tokio::test]
async fn test_update_bigint_overflow_rejection_comprehensive(
    #[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
) {
    let context = comprehensive_integer_test_context.await;
    let record_id = create_initial_comprehensive_integer_record(&context, &context.bigint_only_table).await;

    // Values that should be rejected for BIGINT columns due to precision loss or overflow
    let overflow_values = vec![
        (f64::INFINITY, "Positive infinity"),
        (f64::NEG_INFINITY, "Negative infinity"),
        (1e20, "Very large number (100,000,000,000,000,000,000)"),
        (-1e20, "Very large negative number"),
        (1e25, "Extremely large number"),
        (-1e25, "Extremely large negative number"),
        (f64::MAX, "f64::MAX"),
        (f64::MIN, "f64::MIN"),
        (f64::NAN, "NaN"),
    ];

    for (value, description) in overflow_values {
        let mut update_data = HashMap::new();
        update_data.insert("name".to_string(), string_to_proto_value(&format!("i64 Overflow update test: {}", description)));
        update_data.insert("extreme_value".to_string(), Value { kind: Some(Kind::NumberValue(value)) });

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.bigint_only_table.clone(),
            id: record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;

        assert!(result.is_err(), "Should have failed for i64 overflow update value {}: {}", value, description);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            let message = err.message();
            assert!(
                message.contains("Integer value out of range for BIGINT column") ||
                message.contains("Expected integer for column") ||
                message.contains("but got a float") ||
                message.contains("Invalid number"),
                "Unexpected error message for {}: {}",
                description,
                message
            );
        }
    }
}

// ========================================================================
// WRONG TYPE FOR MIXED INTEGER COLUMNS TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_wrong_type_for_mixed_integer_columns(
    #[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
) {
    let context = comprehensive_integer_test_context.await;
    let record_id = create_initial_comprehensive_integer_record(&context, &context.mixed_integer_table).await;

    // Try to put i64-range values into i32 columns (should fail)
    let wrong_type_tests = vec![
        ("small_int", 3000000000.0, "3 billion in i32 column"),
        ("another_int", -3000000000.0, "negative 3 billion in i32 column"),
        ("nullable_int", 2147483648.0, "i32::MAX + 1 in i32 column"),
        ("small_int", 9223372036854775807.0, "i64::MAX in i32 column"),
    ];

    for (column_name, value, description) in wrong_type_tests {
        let mut update_data = HashMap::new();
        update_data.insert("name".to_string(), string_to_proto_value(&format!("Wrong type test: {}", description)));
        update_data.insert(column_name.to_string(), Value { kind: Some(Kind::NumberValue(value)) });

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.mixed_integer_table.clone(),
            id: record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
        assert!(result.is_err(), "Should fail when putting i64 value {} in i32 column {}", value, column_name);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            assert!(err.message().contains("Integer value out of range for INTEGER column"));
        }
    }

    // Try fractional values in integer columns (should fail)
    let fractional_tests = vec![
        ("small_int", 42.5, "fractional in i32 column"),
        ("big_int", 1000000000000.1, "fractional in i64 column"),
        ("another_int", -42.9, "negative fractional in i32 column"),
        ("another_bigint", -1000000000000.9, "negative fractional in i64 column"),
    ];

    for (column_name, value, description) in fractional_tests {
        let mut update_data = HashMap::new();
        update_data.insert("name".to_string(), string_to_proto_value(&format!("Fractional test: {}", description)));
        update_data.insert(column_name.to_string(), Value { kind: Some(Kind::NumberValue(value)) });

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.mixed_integer_table.clone(),
            id: record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
        assert!(result.is_err(), "Should fail for fractional value {} in column {}", value, column_name);

        if let Err(err) = result {
            assert_eq!(err.code(), tonic::Code::InvalidArgument);
            assert!(err.message().contains("Expected integer for column") && err.message().contains("but got a float"));
        }
    }
}
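// Editor's note: the two rejection paths exercised above amount to a range
// check plus an integrality check. The handler's real validation is not shown
// in this diff; a minimal equivalent for an INTEGER (i32) column might look
// like this (hypothetical helper, named here for illustration only):
#[allow(dead_code)]
fn fits_integer_column_sketch(v: f64) -> bool {
    // Finite, whole-numbered, and within i32 bounds.
    v.is_finite() && v.fract() == 0.0 && v >= i32::MIN as f64 && v <= i32::MAX as f64
}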

// ========================================================================
// CONCURRENT MIXED INTEGER UPDATES TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_concurrent_mixed_integer_updates(
    #[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
) {
    let context = comprehensive_integer_test_context.await;
    // Create multiple records for concurrent updating
    let mut record_ids = Vec::new();
    for _ in 0..10 {
        let record_id = create_initial_comprehensive_integer_record(&context, &context.mixed_integer_table).await;
        record_ids.push(record_id);
    }
    // Test concurrent updates with different integer types
    let tasks: Vec<_> = record_ids.into_iter().enumerate().map(|(i, record_id)| {
        let context = context.clone();
        tokio::spawn(async move {
            let mut update_data = HashMap::new();
            update_data.insert("name".to_string(), string_to_proto_value(&format!("Concurrent update test {}", i)));
            update_data.insert("small_int".to_string(), Value { kind: Some(Kind::NumberValue((i * 1000) as f64)) });
            update_data.insert("big_int".to_string(), Value { kind: Some(Kind::NumberValue((i as i64 * 1000000000000) as f64)) });
            // Cast i to i32 before multiplying by a negative factor (usize cannot go negative)
            update_data.insert("another_int".to_string(), Value { kind: Some(Kind::NumberValue(((i as i32) * -100) as f64)) });
            update_data.insert("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue((i as i64 * -1000000000000) as f64)) });
            // Alternate between null and values for nullable columns
            if i % 2 == 0 {
                update_data.insert("nullable_int".to_string(), Value { kind: Some(Kind::NumberValue((i * 42) as f64)) });
                update_data.insert("nullable_bigint".to_string(), Value { kind: Some(Kind::NullValue(0)) });
            } else {
                update_data.insert("nullable_int".to_string(), Value { kind: Some(Kind::NullValue(0)) });
                update_data.insert("nullable_bigint".to_string(), Value { kind: Some(Kind::NumberValue((i as i64 * 9999999999) as f64)) });
            }
            let request = PutTableDataRequest {
                profile_name: context.profile_name.clone(),
                table_name: context.mixed_integer_table.clone(),
                id: record_id,
                data: update_data,
            };
            put_table_data(&context.pool, request, &context.indexer_tx).await
        })
    }).collect();
    // Wait for all tasks to complete
    let results = futures::future::join_all(tasks).await;
    // All should succeed
    for (i, result) in results.into_iter().enumerate() {
        let task_result = result.expect("Task should not panic");
        assert!(task_result.is_ok(), "Concurrent integer update {} should succeed", i);
    }
    // Verify all records were updated correctly
    let query = format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE name LIKE 'Concurrent update test%'"#,
        context.profile_name, context.mixed_integer_table
    );
    let count: i64 = sqlx::query_scalar(&query)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert_eq!(count, 10);
}

// ========================================================================
// ADVANCED DECIMAL PRECISION EDGE CASES
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_ultra_high_precision_decimals(
    #[future] advanced_decimal_test_context: AdvancedDecimalTestContext,
) {
    let context = advanced_decimal_test_context.await;
    let record_id = create_initial_advanced_decimal_record(&context).await;

    let ultra_precision_tests = vec![
        ("ultra_precise", "123456789.1234567890", dec!(123456789.1234567890)),
        ("ultra_precise", "-999999999.9999999999", dec!(-999999999.9999999999)),
        ("ultra_precise", "0.0000000001", dec!(0.0000000001)),
        ("percentage", "0.9999", dec!(0.9999)), // decimal(5, 4): largest 4-dp value below 1
        ("percentage", "0.0001", dec!(0.0001)), // decimal(5, 4): smallest positive step
    ];

    for (field, value_str, expected_decimal) in ultra_precision_tests {
        let mut update_data = HashMap::new();
        update_data.insert("product_name".to_string(), string_to_proto_value("Ultra precision test"));
        update_data.insert(field.to_string(), string_to_proto_value(value_str));

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            id: record_id,
            data: update_data,
        };

        let response = put_table_data(&context.pool, request, &context.indexer_tx)
            .await
            .unwrap();
        assert!(response.success);

        // Verify ultra high precision was preserved
        let query = format!(
            r#"SELECT {} FROM "{}"."{}" WHERE id = $1"#,
            field, context.profile_name, context.table_name
        );
        let stored_value: rust_decimal::Decimal = sqlx::query_scalar(&query)
            .bind(record_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        assert_eq!(stored_value, expected_decimal, "Ultra precision mismatch for field {}", field);
    }
}

#[rstest]
#[tokio::test]
async fn test_update_decimal_edge_case_rounding(
    #[future] advanced_decimal_test_context: AdvancedDecimalTestContext,
) {
    let context = advanced_decimal_test_context.await;
    let record_id = create_initial_advanced_decimal_record(&context).await;

    // Test edge cases where rounding behavior is critical
    let edge_rounding_tests = vec![
        ("price", "12345.99995", dec!(12346.0000)),              // Should round up at 5
        ("rate", "1.999995", dec!(2.00000)),                     // Should round up
        ("discount", "0.9995", dec!(1.000)),                     // Should round up to 1.000
        ("percentage", "0.99995", dec!(1.0000)),                 // decimal(5, 4) rounds to 1.0000
        ("ultra_precise", "1.99999999995", dec!(2.0000000000)),  // Ultra precision rounding
    ];

    for (field, input_value, expected_rounded) in edge_rounding_tests {
        let mut update_data = HashMap::new();
        update_data.insert("product_name".to_string(), string_to_proto_value("Edge rounding test"));
        update_data.insert(field.to_string(), string_to_proto_value(input_value));

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            id: record_id,
            data: update_data,
        };

        let response = put_table_data(&context.pool, request, &context.indexer_tx)
            .await
            .unwrap();
        assert!(response.success);

        // Verify edge case rounding was applied correctly
        let query = format!(
            r#"SELECT {} FROM "{}"."{}" WHERE id = $1"#,
            field, context.profile_name, context.table_name
        );
        let stored_value: rust_decimal::Decimal = sqlx::query_scalar(&query)
            .bind(record_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        assert_eq!(stored_value, expected_rounded, "Edge rounding mismatch for field {}", field);
    }
}
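// Editor's note: the expectations above assume half-up rounding (ties away
// from zero) at the column's scale, which is how PostgreSQL rounds NUMERIC
// values. rust_decimal's `round_dp` defaults to banker's rounding, which can
// differ on ties, so reproducing the server-side result client-side needs an
// explicit strategy. A sketch only, not part of the original suite:
#[test]
fn half_up_rounding_sketch() {
    use rust_decimal::RoundingStrategy;
    assert_eq!(
        dec!(12345.99995).round_dp_with_strategy(4, RoundingStrategy::MidpointAwayFromZero),
        dec!(12346.0000)
    );
}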

// ========================================================================
// PERFORMANCE AND STRESS TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_update_rapid_integer_updates_stress(
    #[future] performance_test_context: PerformanceTestContext,
) {
    let context = performance_test_context.await;

    // Create initial records for stress testing
    let mut record_ids = Vec::new();
    for _ in 0..100 {
        let record_id = create_initial_performance_record(&context).await;
        record_ids.push(record_id);
    }

    // Rapid sequential updates with alternating integer types and complex data
    let start = std::time::Instant::now();

    for (i, record_id) in record_ids.iter().enumerate() {
        let mut update_data = HashMap::new();
        update_data.insert("test_name".to_string(), string_to_proto_value(&format!("Stress update test {}", i)));

        // Alternate between different boundary values for stress testing
        let small_val = match i % 4 {
            0 => 2147483647.0,   // i32::MAX
            1 => -2147483648.0,  // i32::MIN
            2 => 0.0,
            _ => (i as f64) * 1000.0,
        };

        let big_val = match i % 4 {
            0 => 9223372036854774784.0,  // Near i64::MAX
            1 => -9223372036854774784.0, // Near i64::MIN
            2 => 0.0,
            _ => (i as f64) * 1000000000000.0,
        };

        update_data.insert("int_val1".to_string(), Value { kind: Some(Kind::NumberValue(small_val)) });
        update_data.insert("int_val2".to_string(), Value { kind: Some(Kind::NumberValue(small_val)) });
        update_data.insert("bigint_val1".to_string(), Value { kind: Some(Kind::NumberValue(big_val)) });
        update_data.insert("bigint_val2".to_string(), Value { kind: Some(Kind::NumberValue(big_val)) });

        // Add some decimal and other type updates for a comprehensive stress test
        update_data.insert("decimal_val".to_string(), string_to_proto_value(&format!("{}.{:02}", i * 10, i % 100)));
        update_data.insert("bool_val".to_string(), Value { kind: Some(Kind::BoolValue(i % 2 == 0)) });
        update_data.insert("timestamp_val".to_string(), string_to_proto_value(&format!("2024-01-{:02}T{:02}:00:00Z", (i % 28) + 1, i % 24)));

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.stress_table.clone(),
            id: *record_id,
            data: update_data,
        };

        let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
        assert!(result.is_ok(), "Rapid stress update {} should succeed", i);
    }

    let duration = start.elapsed();
    println!("100 mixed data type stress updates took: {:?}", duration);

    // Should complete in reasonable time (adjust threshold as needed)
    assert!(duration.as_secs() < 15, "Stress test took too long: {:?}", duration);

    // Verify all records were updated correctly
    let query = format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE test_name LIKE 'Stress update test%'"#,
        context.profile_name, context.stress_table
    );

    let count: i64 = sqlx::query_scalar(&query)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert_eq!(count, 100);
}

#[rstest]
#[tokio::test]
async fn test_update_concurrent_stress_mixed_data_types(
    #[future] performance_test_context: PerformanceTestContext,
) {
    let context = performance_test_context.await;

    // Create initial records
    let mut record_ids = Vec::new();
    for _ in 0..20 {
        let record_id = create_initial_performance_record(&context).await;
        record_ids.push(record_id);
    }

    // Concurrent stress test with mixed data types
    let tasks: Vec<_> = record_ids.into_iter().enumerate().map(|(i, record_id)| {
        let context = context.clone();

        tokio::spawn(async move {
            let mut update_data = HashMap::new();
            update_data.insert("test_name".to_string(), string_to_proto_value(&format!("Concurrent stress {}", i)));

            // Use complex values that stress different validation paths
            let complex_int = match i % 3 {
                0 => 2147483647.0 - (i as f64),  // Near i32::MAX
                1 => -2147483648.0 + (i as f64), // Near i32::MIN
                _ => (i as f64) * 12345.0,
            };

            let complex_bigint = match i % 3 {
                0 => 9223372036854774784.0 - (i as f64 * 1000000000.0),
                1 => -9223372036854774784.0 + (i as f64 * 1000000000.0),
                _ => (i as f64) * 987654321012345.0,
            };

            update_data.insert("int_val1".to_string(), Value { kind: Some(Kind::NumberValue(complex_int)) });
            update_data.insert("int_val2".to_string(), Value { kind: Some(Kind::NumberValue(complex_int)) });
            update_data.insert("bigint_val1".to_string(), Value { kind: Some(Kind::NumberValue(complex_bigint)) });
            update_data.insert("bigint_val2".to_string(), Value { kind: Some(Kind::NumberValue(complex_bigint)) });
            update_data.insert("decimal_val".to_string(), string_to_proto_value(&format!("{}.{:02}", i * 33, (i * 7) % 100)));
            update_data.insert("bool_val".to_string(), Value { kind: Some(Kind::BoolValue((i * 3) % 2 == 0)) });

            let request = PutTableDataRequest {
                profile_name: context.profile_name.clone(),
                table_name: context.stress_table.clone(),
                id: record_id,
                data: update_data,
            };

            put_table_data(&context.pool, request, &context.indexer_tx).await
        })
    }).collect();

    // Wait for all concurrent updates to complete
    let results = futures::future::join_all(tasks).await;

    // All should succeed
    for (i, result) in results.into_iter().enumerate() {
        let task_result = result.expect("Task should not panic");
        assert!(task_result.is_ok(), "Concurrent stress update {} should succeed", i);
    }

    // Verify all records were updated
    let query = format!(
        r#"SELECT COUNT(*) FROM "{}"."{}" WHERE test_name LIKE 'Concurrent stress%'"#,
        context.profile_name, context.stress_table
    );

    let count: i64 = sqlx::query_scalar(&query)
        .fetch_one(&context.pool)
        .await
        .unwrap();
    assert_eq!(count, 20);
}

// ========================================================================
|
||||
// EDGE CASE COMBINATION TESTS
|
||||
// ========================================================================
|
||||
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_update_complex_mixed_data_type_combinations(
|
||||
#[future] comprehensive_integer_test_context: ComprehensiveIntegerTestContext,
|
||||
) {
|
||||
let context = comprehensive_integer_test_context.await;
|
||||
let record_id = create_initial_comprehensive_integer_record(&context, &context.mixed_integer_table).await;
|
||||
|
||||
// Test complex combinations of data type updates that stress multiple validation paths
|
||||
let complex_combinations = vec![
|
||||
(
|
||||
"All boundary values",
|
||||
HashMap::from([
|
||||
("small_int".to_string(), Value { kind: Some(Kind::NumberValue(2147483647.0)) }),
|
||||
("big_int".to_string(), Value { kind: Some(Kind::NumberValue(9223372036854774784.0)) }),
|
||||
("another_int".to_string(), Value { kind: Some(Kind::NumberValue(-2147483648.0)) }),
|
||||
("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue(-9223372036854774784.0)) }),
|
||||
("nullable_int".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
|
||||
("nullable_bigint".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
|
||||
])
|
||||
),
|
||||
(
|
||||
"Mixed nulls and values",
|
||||
HashMap::from([
|
||||
("small_int".to_string(), Value { kind: Some(Kind::NumberValue(42.0)) }),
|
||||
("big_int".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
|
||||
("another_int".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
|
||||
("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue(1000000000000.0)) }),
|
||||
("nullable_int".to_string(), Value { kind: Some(Kind::NumberValue(123.0)) }),
|
||||
("nullable_bigint".to_string(), Value { kind: Some(Kind::NullValue(0)) }),
|
||||
])
|
||||
),
|
||||
(
|
||||
"Zero and near-zero values",
|
||||
HashMap::from([
|
||||
("small_int".to_string(), Value { kind: Some(Kind::NumberValue(0.0)) }),
|
||||
("big_int".to_string(), Value { kind: Some(Kind::NumberValue(1.0)) }),
|
||||
("another_int".to_string(), Value { kind: Some(Kind::NumberValue(-1.0)) }),
|
||||
("another_bigint".to_string(), Value { kind: Some(Kind::NumberValue(0.0)) }),
|
||||
("nullable_int".to_string(), Value { kind: Some(Kind::NumberValue(1.0)) }),
|
||||
("nullable_bigint".to_string(), Value { kind: Some(Kind::NumberValue(-1.0)) }),
|
||||
])
|
||||
),
|
||||
];
|
||||
|
||||
for (description, mut update_data) in complex_combinations {
|
||||
update_data.insert("name".to_string(), string_to_proto_value(&format!("Complex combo: {}", description)));
|
||||
|
||||
let request = PutTableDataRequest {
|
||||
profile_name: context.profile_name.clone(),
|
||||
table_name: context.mixed_integer_table.clone(),
|
||||
id: record_id,
|
||||
data: update_data.clone(),
|
||||
};
|
||||
|
||||
let result = put_table_data(&context.pool, request, &context.indexer_tx).await;
|
||||
assert!(result.is_ok(), "Complex combination should succeed: {}", description);
|
||||
|
||||
// Verify the complex combination was stored correctly
|
||||
let query = format!(
|
||||
r#"SELECT small_int, big_int, another_int, another_bigint, nullable_int, nullable_bigint FROM "{}"."{}" WHERE id = $1"#,
|
||||
context.profile_name, context.mixed_integer_table
|
||||
);
|
||||
let row = sqlx::query(&query)
|
||||
.bind(record_id)
|
||||
.fetch_one(&context.pool)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Verify each field based on what was set in update_data
|
||||
for (field_name, expected_value) in update_data.iter() {
|
||||
if field_name == "name" { continue; } // Skip text field
|
||||
|
||||
match expected_value.kind.as_ref().unwrap() {
|
||||
Kind::NumberValue(num) => {
|
||||
match field_name.as_str() {
|
||||
"small_int" | "another_int" | "nullable_int" => {
|
||||
let stored: Option<i32> = row.get(field_name.as_str());
|
||||
if let Some(stored_val) = stored {
|
||||
assert_eq!(stored_val, *num as i32, "Field {} mismatch in {}", field_name, description);
|
||||
}
|
||||
},
|
||||
"big_int" | "another_bigint" | "nullable_bigint" => {
|
||||
let stored: Option<i64> = row.get(field_name.as_str());
|
||||
if let Some(stored_val) = stored {
|
||||
assert_eq!(stored_val, *num as i64, "Field {} mismatch in {}", field_name, description);
|
||||
}
|
||||
},
|
||||
_ => {}
|
||||
}
|
||||
},
|
||||
Kind::NullValue(_) => {
|
||||
match field_name.as_str() {
|
||||
"small_int" | "another_int" | "nullable_int" => {
|
||||
let stored: Option<i32> = row.get(field_name.as_str());
|
||||
assert!(stored.is_none(), "Field {} should be null in {}", field_name, description);
|
||||
},
|
||||
"big_int" | "another_bigint" | "nullable_bigint" => {
|
||||
let stored: Option<i64> = row.get(field_name.as_str());
|
||||
assert!(stored.is_none(), "Field {} should be null in {}", field_name, description);
|
||||
},
|
||||
_ => {}
|
||||
}
|
||||
},
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
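
// Why the boundary cases above use 9_223_372_036_854_774_784 instead
// of i64::MAX: protobuf's NumberValue is an f64, whose 53-bit mantissa
// cannot represent 9_223_372_036_854_775_807 exactly. The value used
// is the largest f64 that still converts to an in-range i64. A minimal
// illustration (hypothetical helper, not part of the original tests):
#[allow(dead_code)]
fn f64_i64_boundary_demo() {
    // i64::MAX rounds up to 2^63 when converted to f64...
    assert_eq!(i64::MAX as f64, 9_223_372_036_854_775_808.0);
    // ...so the tests stay 1024 below it, at 2^63 - 2^10, which f64
    // represents exactly and which round-trips through i64.
    let largest_safe = 9_223_372_036_854_774_784.0_f64;
    assert_eq!(largest_safe as i64, 9_223_372_036_854_774_784_i64);
}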
259
server/tests/tables_data/put/put_table_data_test5.rs
Normal file
259
server/tests/tables_data/put/put_table_data_test5.rs
Normal file
@@ -0,0 +1,259 @@
// tests/tables_data/handlers/put_table_data_test5.rs

// ========================================================================
// MISSING TEST SCENARIOS REPLICATED FROM POST TESTS
// ========================================================================

// Fixture to provide a closed database pool, simulating a connection error.
// This is needed for the database error test.
#[fixture]
async fn closed_test_context() -> TestContext {
    let context = test_context().await;
    context.pool.close().await;
    context
}

// Test 1: Ensure that an update fails gracefully when the database is unavailable.
#[rstest]
#[tokio::test]
async fn test_update_table_data_database_error(
    #[future] closed_test_context: TestContext,
) {
    // Arrange
    let context = closed_test_context.await;
    // The record ID doesn't matter, as the connection is already closed.
    let record_id = 1;

    // Act
    let mut update_data = HashMap::new();
    update_data.insert(
        "firma".to_string(),
        string_to_proto_value("This will fail"),
    );
    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };
    let result =
        put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
}
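
// A minimal sketch of the failure mode exercised above, assuming the
// handler surfaces sqlx errors as tonic::Code::Internal: once
// `PgPool::close()` has resolved, every attempt to check out a
// connection fails immediately with `sqlx::Error::PoolClosed`, so no
// SQL ever reaches the server. (Hypothetical helper for illustration.)
#[allow(dead_code)]
async fn closed_pool_demo(pool: &sqlx::PgPool) {
    pool.close().await;
    let err = pool
        .acquire()
        .await
        .expect_err("a closed pool must refuse connections");
    assert!(matches!(err, sqlx::Error::PoolClosed));
}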

// Test 2: Ensure that updating a required foreign key to NULL is not allowed.
// This uses the `foreign_key_update_test_context` from `put_table_data_test3.rs`.
#[rstest]
#[tokio::test]
async fn test_update_required_foreign_key_to_null_fails(
    #[future] foreign_key_update_test_context: ForeignKeyUpdateTestContext,
) {
    let context = foreign_key_update_test_context.await;

    // Arrange: Create a category and a product linked to it.
    let mut category_data = HashMap::new();
    category_data
        .insert("name".to_string(), string_to_proto_value("Test Category"));
    let category_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.category_table.clone(),
        data: category_data,
    };
    let category_response = post_table_data(
        &context.pool,
        category_request,
        &context.indexer_tx,
    )
    .await
    .unwrap();
    let category_id = category_response.inserted_id;

    let mut product_data = HashMap::new();
    product_data
        .insert("name".to_string(), string_to_proto_value("Test Product"));
    product_data.insert(
        format!("{}_id", context.category_table),
        Value { kind: Some(Kind::NumberValue(category_id as f64)) },
    );
    let product_request = PostTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        data: product_data,
    };
    let product_response =
        post_table_data(&context.pool, product_request, &context.indexer_tx)
            .await
            .unwrap();
    let product_id = product_response.inserted_id;

    // Act: Attempt to update the product's required foreign key to NULL.
    let mut update_data = HashMap::new();
    update_data.insert(
        format!("{}_id", context.category_table),
        Value { kind: Some(Kind::NullValue(0)) },
    );

    let update_request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.product_table.clone(),
        id: product_id,
        data: update_data,
    };

    let result =
        put_table_data(&context.pool, update_request, &context.indexer_tx)
            .await;

    // Assert: The operation should fail due to a database constraint.
    assert!(
        result.is_err(),
        "Update of required foreign key to NULL should fail"
    );
    let err = result.unwrap_err();
    // The database will likely return a NOT NULL violation, which our handler
    // wraps as an Internal error.
    assert_eq!(err.code(), tonic::Code::Internal);
    assert!(err.message().contains("Update failed"));
}
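
// Why the NULL update above fails: we assume the table_definition
// handler marks a required TableLink's column NOT NULL. The generated
// DDL is not shown in this commit, but under that assumption it would
// resemble the following (illustrative SQL; the `<...>` placeholders
// follow the fixture's `<linked_table>_id` naming convention):
//
//     CREATE TABLE "<profile>"."<product_table>" (
//         id BIGSERIAL PRIMARY KEY,
//         name TEXT,
//         "<category_table>_id" BIGINT NOT NULL
//             REFERENCES "<profile>"."<category_table>" (id)
//     );
//
// An UPDATE setting that column to NULL then violates the NOT NULL
// constraint, and the PUT handler reports the database error as
// tonic::Code::Internal with an "Update failed" message.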

// tests/tables_data/handlers/put_table_data_test6.rs

// ========================================================================
// MISSING DATA TYPE VALIDATION TESTS FOR PUT HANDLER
// ========================================================================

// Note: These tests are replicated from post_table_data_test3.rs to ensure
// the PUT handler has the same level of type validation coverage as the
// POST handler.

#[rstest]
#[tokio::test]
async fn test_update_type_mismatch_string_for_integer(
    #[future] data_type_test_context: DataTypeTestContext,
) {
    // Arrange
    let context = data_type_test_context.await;
    let record_id = create_initial_data_type_record(&context).await;

    // Act: Attempt to update an integer column with a string value.
    let mut update_data = HashMap::new();
    update_data.insert(
        "my_bigint".to_string(),
        create_string_value("not-an-integer"),
    );

    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };

    let result =
        put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err
            .message()
            .contains("Expected number for column 'my_bigint'"));
    }
}

#[rstest]
#[tokio::test]
async fn test_update_type_mismatch_number_for_boolean(
    #[future] data_type_test_context: DataTypeTestContext,
) {
    // Arrange
    let context = data_type_test_context.await;
    let record_id = create_initial_data_type_record(&context).await;

    // Act: Attempt to update a boolean column with a number value.
    let mut update_data = HashMap::new();
    update_data.insert("my_bool".to_string(), create_number_value(1.0));

    let request = PutTableDataRequest {
        profile_name: context.profile_name.clone(),
        table_name: context.table_name.clone(),
        id: record_id,
        data: update_data,
    };

    let result =
        put_table_data(&context.pool, request, &context.indexer_tx).await;

    // Assert
    assert!(result.is_err());
    if let Err(err) = result {
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
        assert!(err
            .message()
            .contains("Expected boolean for column 'my_bool'"));
    }
}
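
// Both mismatch tests assume the PUT handler validates each prost
// `Kind` against the target column's SQL type before binding, rather
// than letting Postgres reject the value. The handler's internals are
// not part of this commit; a minimal sketch of such a check under that
// assumption (hypothetical helper and type names):
#[allow(dead_code)]
fn kind_matches_sql_type(kind: &Kind, sql_type: &str) -> bool {
    match (kind, sql_type) {
        (Kind::NumberValue(_), "INTEGER" | "BIGINT") => true,
        (Kind::BoolValue(_), "BOOLEAN") => true,
        (Kind::StringValue(_), "TEXT" | "TIMESTAMPTZ") => true,
        // NULL is type-compatible with any column; nullability itself
        // is enforced by the database, not by this check.
        (Kind::NullValue(_), _) => true,
        _ => false,
    }
}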

#[rstest]
#[tokio::test]
async fn test_update_with_various_valid_timestamp_formats(
    #[future] data_type_test_context: DataTypeTestContext,
) {
    // Arrange
    let context = data_type_test_context.await;
    let record_id = create_initial_data_type_record(&context).await;

    let valid_timestamps = vec![
        "2025-06-24T18:30:00Z",
        "2023-01-01T00:00:00+00:00",
        "2024-02-29T12:00:00.123456Z",
        "1999-12-31T23:59:59.999Z",
    ];

    for timestamp_str in valid_timestamps {
        // Act
        let mut update_data = HashMap::new();
        update_data.insert(
            "my_timestamp".to_string(),
            create_string_value(timestamp_str),
        );

        let request = PutTableDataRequest {
            profile_name: context.profile_name.clone(),
            table_name: context.table_name.clone(),
            id: record_id,
            data: update_data,
        };

        let result =
            put_table_data(&context.pool, request, &context.indexer_tx).await;

        // Assert
        assert!(
            result.is_ok(),
            "Update should succeed for valid timestamp format: {}",
            timestamp_str
        );

        // Verify the value was stored correctly
        let stored_timestamp: chrono::DateTime<chrono::Utc> =
            sqlx::query_scalar(&format!(
                r#"SELECT my_timestamp FROM "{}"."{}" WHERE id = $1"#,
                context.profile_name, context.table_name
            ))
            .bind(record_id)
            .fetch_one(&context.pool)
            .await
            .unwrap();

        let expected_timestamp =
            chrono::DateTime::parse_from_rfc3339(timestamp_str)
                .unwrap()
                .with_timezone(&chrono::Utc);
        assert_eq!(stored_timestamp, expected_timestamp);
    }
}
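
// All four timestamp strings above are valid RFC 3339, which is why a
// single parse path covers them; a minimal demonstration using the
// same chrono API the verification step relies on:
#[allow(dead_code)]
fn rfc3339_formats_demo() {
    for ts in [
        "2025-06-24T18:30:00Z",        // 'Z' UTC designator
        "2023-01-01T00:00:00+00:00",   // explicit numeric offset
        "2024-02-29T12:00:00.123456Z", // leap day with microseconds
        "1999-12-31T23:59:59.999Z",    // millisecond precision
    ] {
        assert!(chrono::DateTime::parse_from_rfc3339(ts).is_ok());
    }
}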