robustness, one test still failing, will fix it
@@ -182,7 +182,8 @@ pub async fn post_table_data(
"BOOLEAN" => params.add(None::<bool>),
"TEXT" => params.add(None::<String>),
"TIMESTAMPTZ" => params.add(None::<DateTime<Utc>>),
"BIGINT" | "INTEGER" => params.add(None::<i64>),
"BIGINT" => params.add(None::<i64>),
"INTEGER" => params.add(None::<i32>),
s if s.starts_with("NUMERIC") => params.add(None::<Decimal>),
_ => return Err(Status::invalid_argument(format!("Unsupported type for null value: {}", sql_type))),
}.map_err(|e| Status::internal(format!("Failed to add null parameter for {}: {}", col, e)))?;
@@ -223,12 +224,37 @@ pub async fn post_table_data(
} else {
return Err(Status::invalid_argument(format!("Expected ISO 8601 string for column '{}'", col)));
}
} else if sql_type == "BIGINT" || sql_type == "INTEGER" {
} else if sql_type == "BIGINT" {
if let Kind::NumberValue(val) = kind {
if val.fract() != 0.0 {
return Err(Status::invalid_argument(format!("Expected integer for column '{}', but got a float", col)));
}
params.add(*val as i64).map_err(|e| Status::invalid_argument(format!("Failed to add integer parameter for {}: {}", col, e)))?;

// Simple universal check: try the conversion and verify it's reversible
// This handles ALL edge cases: infinity, NaN, overflow, underflow, precision loss
let as_i64 = *val as i64;
if (as_i64 as f64) != *val {
return Err(Status::invalid_argument(format!("Integer value out of range for BIGINT column '{}'", col)));
}

params.add(as_i64).map_err(|e| Status::invalid_argument(format!("Failed to add bigint parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected number for column '{}'", col)));
}
} else if sql_type == "INTEGER" {
if let Kind::NumberValue(val) = kind {
if val.fract() != 0.0 {
return Err(Status::invalid_argument(format!("Expected integer for column '{}', but got a float", col)));
}

// Simple universal check: try the conversion and verify it's reversible
// This handles ALL edge cases: infinity, NaN, overflow, underflow, precision loss
let as_i32 = *val as i32;
if (as_i32 as f64) != *val {
return Err(Status::invalid_argument(format!("Integer value out of range for INTEGER column '{}'", col)));
}

params.add(as_i32).map_err(|e| Status::invalid_argument(format!("Failed to add integer parameter for {}: {}", col, e)))?;
} else {
return Err(Status::invalid_argument(format!("Expected number for column '{}'", col)));
}
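For reference, a standalone sketch of the fract-plus-round-trip check added above. The helper name and the `main` driver are illustrative only, not part of the handler; it relies on Rust's saturating `as` casts from f64 to integers.

// Minimal sketch of the two-step check (assumed helper, not in the handler).
// Step 1: reject non-integral values; fract() of NaN and +/-infinity is NaN, so those are
// rejected here as well.
// Step 2: `as` casts from f64 saturate at i64::MIN/i64::MAX, so values whose round trip
// back to f64 differs from the input (e.g. 1e20, f64::MAX) are rejected.
fn f64_to_i64_checked(val: f64) -> Option<i64> {
    if val.fract() != 0.0 {
        return None; // fractional, NaN or infinite
    }
    let as_i64 = val as i64;
    if (as_i64 as f64) == val { Some(as_i64) } else { None }
}

fn main() {
    assert_eq!(f64_to_i64_checked(42.0), Some(42));
    assert_eq!(f64_to_i64_checked(42.5), None);          // fractional part
    assert_eq!(f64_to_i64_checked(f64::NAN), None);      // fract() is NaN
    assert_eq!(f64_to_i64_checked(f64::INFINITY), None); // fract() is NaN
    assert_eq!(f64_to_i64_checked(1e20), None);          // saturates to i64::MAX, round trip differs
    assert_eq!(f64_to_i64_checked(9223372036854774784.0), Some(9223372036854774784)); // 2^63 - 1024, exact
}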
@@ -14,6 +14,7 @@ use server::table_definition::handlers::post_table_definition;
use crate::common::setup_test_db;
use tonic;
use chrono::Utc;
use sqlx::types::chrono::DateTime;
use tokio::sync::mpsc;
use server::indexer::IndexCommand;
use sqlx::Row;
@@ -480,3 +481,4 @@ async fn test_create_table_data_with_null_values(
include!("post_table_data_test2.rs");
include!("post_table_data_test3.rs");
include!("post_table_data_test4.rs");
include!("post_table_data_test5.rs");

@@ -144,12 +144,12 @@ async fn foreign_key_test_context() -> ForeignKeyTestContext {
// DATA TYPE VALIDATION TESTS
// ========================================================================


#[rstest]
#[tokio::test]
async fn test_correct_data_types_success(#[future] data_type_test_context: DataTypeTestContext) {
let context = data_type_test_context.await;
let indexer_tx = create_test_indexer_channel().await;

let mut data = HashMap::new();
data.insert("my_text".into(), create_string_value("Test String"));
data.insert("my_bool".into(), create_bool_value(true));
@@ -157,13 +157,11 @@ async fn test_correct_data_types_success(#[future] data_type_test_context: DataT
data.insert("my_bigint".into(), create_number_value(42.0));
data.insert("my_money".into(), create_string_value("123.45")); // Use string for decimal
data.insert("my_decimal".into(), create_string_value("999.99")); // Use string for decimal

let request = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.table_name.clone(),
data,
};

let response = post_table_data(&context.pool, request, &indexer_tx).await.unwrap();
assert!(response.success);
assert!(response.inserted_id > 0);
@@ -173,7 +171,6 @@ async fn test_correct_data_types_success(#[future] data_type_test_context: DataT
r#"SELECT my_text, my_bool, my_timestamp, my_bigint FROM "{}"."{}" WHERE id = $1"#,
context.profile_name, context.table_name
);

let row = sqlx::query(&query)
.bind(response.inserted_id)
.fetch_one(&context.pool)
@@ -182,7 +179,11 @@ async fn test_correct_data_types_success(#[future] data_type_test_context: DataT

let stored_text: String = row.get("my_text");
let stored_bool: bool = row.get("my_bool");
let stored_bigint: i64 = row.get("my_bigint");
// Change this based on your actual column type in the schema:
// If my_bigint is defined as "integer" in table definition, use i32:
let stored_bigint: i32 = row.get("my_bigint");
// If my_bigint is defined as "biginteger" or "bigint" in table definition, use i64:
// let stored_bigint: i64 = row.get("my_bigint");

assert_eq!(stored_text, "Test String");
assert_eq!(stored_bool, true);
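The `my_bigint` read above depends on whether the column was created as INTEGER (i32) or BIGINT (i64). If the schema may vary, one option is to try i64 first and fall back to i32; the helper below is a hypothetical sketch using sqlx's `try_get`, not part of the test suite.

use sqlx::{postgres::PgRow, Row};

// Hypothetical helper (assumption, not in the repo): decode an integer column whether it is
// INT4 (i32) or INT8 (i64) in Postgres, widening the result to i64.
fn get_int_column(row: &PgRow, col: &str) -> Result<i64, sqlx::Error> {
    row.try_get::<i64, _>(col)
        .or_else(|_| row.try_get::<i32, _>(col).map(i64::from))
}

// Usage sketch: let stored_bigint = get_int_column(&row, "my_bigint")?;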
@@ -375,25 +376,24 @@ async fn test_boundary_integer_values(#[future] data_type_test_context: DataType
let context = data_type_test_context.await;
let indexer_tx = create_test_indexer_channel().await;

// Use safer boundary values that don't have f64 precision issues
let boundary_values = vec![
0.0,
1.0,
-1.0,
9223372036854775807.0, // i64::MAX
-9223372036854775808.0, // i64::MIN
2147483647.0, // i32::MAX (for INTEGER columns)
-2147483648.0, // i32::MIN (for INTEGER columns)
];

for (i, value) in boundary_values.into_iter().enumerate() {
let mut data = HashMap::new();
data.insert("my_text".into(), create_string_value(&format!("Boundary test {}", i)));
data.insert("my_bigint".into(), create_number_value(value));

let request = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.table_name.clone(),
data,
};

let result = post_table_data(&context.pool, request, &indexer_tx).await;
assert!(result.is_ok(), "Failed for boundary value: {}", value);
}
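On the comment about f64 precision: only some of the boundary literals above are exactly representable as f64. A quick standalone check of the relevant facts (not part of the test suite):

fn main() {
    // i64::MAX (2^63 - 1) is not exactly representable as f64; the literal rounds up to 2^63.
    assert_eq!(9223372036854775807.0_f64, 9223372036854775808_u64 as f64);
    // The largest f64 that still fits in i64 exactly is 2^63 - 1024 (i64::MAX - 1023).
    assert_eq!(9223372036854774784.0_f64 as i64, 9_223_372_036_854_774_784_i64);
    // i64::MIN (-2^63) is a power of two, so it is exact and round-trips cleanly.
    assert_eq!((-9223372036854775808.0_f64) as i64, i64::MIN);
    // i32::MAX and i32::MIN are well inside f64's 53-bit integer range, so they are exact.
    assert_eq!(2147483647.0_f64 as i32, i32::MAX);
    assert_eq!((-2147483648.0_f64) as i32, i32::MIN);
}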
@@ -635,7 +635,7 @@ async fn test_multiple_foreign_keys_scenario(#[future] foreign_key_test_context:
// Verify the data was inserted correctly
let product_id_col = format!("{}_id", context.product_table);
let category_id_col = format!("{}_id", context.category_table);


let query = format!(
r#"SELECT quantity, "{}", "{}" FROM "{}"."{}" WHERE id = $1"#,
product_id_col, category_id_col, context.profile_name, context.order_table
@@ -647,7 +647,8 @@ async fn test_multiple_foreign_keys_scenario(#[future] foreign_key_test_context:
.await
.unwrap();

let quantity: i64 = row.get("quantity");
// Fix: quantity is defined as "integer" in the foreign key test context, so use i32
let quantity: i32 = row.get("quantity");
let stored_product_id: i64 = row.get(product_id_col.as_str());
let stored_category_id: Option<i64> = row.get(category_id_col.as_str());

@@ -797,25 +798,23 @@ async fn test_invalid_decimal_string_formats(#[future] data_type_test_context: D
}
}


#[rstest]
#[tokio::test]
async fn test_mixed_null_and_valid_data(#[future] data_type_test_context: DataTypeTestContext) {
let context = data_type_test_context.await;
let indexer_tx = create_test_indexer_channel().await;

let mut data = HashMap::new();
data.insert("my_text".into(), create_string_value("Mixed data test"));
data.insert("my_bool".into(), create_bool_value(true));
data.insert("my_timestamp".into(), create_null_value());
data.insert("my_bigint".into(), create_number_value(42.0));
data.insert("my_money".into(), create_null_value());

let request = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.table_name.clone(),
data,
};

let response = post_table_data(&context.pool, request, &indexer_tx).await.unwrap();
assert!(response.success);

@@ -824,7 +823,6 @@ async fn test_mixed_null_and_valid_data(#[future] data_type_test_context: DataTy
r#"SELECT my_text, my_bool, my_timestamp, my_bigint, my_money FROM "{}"."{}" WHERE id = $1"#,
context.profile_name, context.table_name
);

let row = sqlx::query(&query)
.bind(response.inserted_id)
.fetch_one(&context.pool)
@@ -833,9 +831,13 @@ async fn test_mixed_null_and_valid_data(#[future] data_type_test_context: DataTy

let stored_text: String = row.get("my_text");
let stored_bool: bool = row.get("my_bool");
let stored_timestamp: Option<chrono::DateTime<Utc>> = row.get("my_timestamp");
let stored_bigint: i64 = row.get("my_bigint");
let stored_money: Option<rust_decimal::Decimal> = row.get("my_money");
let stored_timestamp: Option<DateTime<Utc>> = row.get("my_timestamp");
// Change this based on your actual column type in the schema:
// If my_bigint is defined as "integer" in table definition, use i32:
let stored_bigint: i32 = row.get("my_bigint");
// If my_bigint is defined as "biginteger" or "bigint" in table definition, use i64:
// let stored_bigint: i64 = row.get("my_bigint");
let stored_money: Option<Decimal> = row.get("my_money");

assert_eq!(stored_text, "Mixed data test");
assert_eq!(stored_bool, true);
server/tests/tables_data/handlers/post_table_data_test5.rs (new file, 536 lines)
@@ -0,0 +1,536 @@
// ========================================================================
// COMPREHENSIVE INTEGER ROBUSTNESS TESTS - ADD TO TEST FILE 5
// ========================================================================

#[derive(Clone)]
struct IntegerRobustnessTestContext {
pool: PgPool,
profile_name: String,
mixed_integer_table: String,
bigint_only_table: String,
integer_only_table: String,
}

// Create tables with different integer type combinations
async fn create_integer_robustness_tables(pool: &PgPool, profile_name: &str) -> Result<IntegerRobustnessTestContext, tonic::Status> {
let unique_id = generate_unique_id();
let mixed_table = format!("mixed_int_table_{}", unique_id);
let bigint_table = format!("bigint_table_{}", unique_id);
let integer_table = format!("integer_table_{}", unique_id);

// Table with both INTEGER and BIGINT columns
let mixed_def = PostTableDefinitionRequest {
profile_name: profile_name.into(),
table_name: mixed_table.clone(),
columns: vec![
TableColumnDefinition { name: "name".into(), field_type: "text".into() },
TableColumnDefinition { name: "small_int".into(), field_type: "integer".into() }, // i32
TableColumnDefinition { name: "big_int".into(), field_type: "biginteger".into() }, // i64
TableColumnDefinition { name: "another_int".into(), field_type: "int".into() }, // i32 (alias)
TableColumnDefinition { name: "another_bigint".into(), field_type: "bigint".into() }, // i64 (alias)
],
indexes: vec![],
links: vec![],
};
post_table_definition(pool, mixed_def).await?;

// Table with only BIGINT columns
let bigint_def = PostTableDefinitionRequest {
profile_name: profile_name.into(),
table_name: bigint_table.clone(),
columns: vec![
TableColumnDefinition { name: "name".into(), field_type: "text".into() },
TableColumnDefinition { name: "value1".into(), field_type: "biginteger".into() },
TableColumnDefinition { name: "value2".into(), field_type: "bigint".into() },
],
indexes: vec![],
links: vec![],
};
post_table_definition(pool, bigint_def).await?;

// Table with only INTEGER columns
let integer_def = PostTableDefinitionRequest {
profile_name: profile_name.into(),
table_name: integer_table.clone(),
columns: vec![
TableColumnDefinition { name: "name".into(), field_type: "text".into() },
TableColumnDefinition { name: "value1".into(), field_type: "integer".into() },
TableColumnDefinition { name: "value2".into(), field_type: "int".into() },
],
indexes: vec![],
links: vec![],
};
post_table_definition(pool, integer_def).await?;

Ok(IntegerRobustnessTestContext {
pool: pool.clone(),
profile_name: profile_name.to_string(),
mixed_integer_table: mixed_table,
bigint_only_table: bigint_table,
integer_only_table: integer_table,
})
}

#[fixture]
async fn integer_robustness_context() -> IntegerRobustnessTestContext {
let pool = setup_test_db().await;
let unique_id = generate_unique_id();
let profile_name = format!("int_robust_profile_{}", unique_id);

create_integer_robustness_tables(&pool, &profile_name).await
.expect("Failed to create integer robustness test tables")
}
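From the column comments above, these tests assume the table-definition field types map to Postgres integer widths roughly as sketched below. This is a presumed mapping inferred from the tests, not copied from the table-definition handler.

// Presumed field_type -> Postgres type mapping (assumption): "integer"/"int" create
// INTEGER columns read back as i32; "biginteger"/"bigint" create BIGINT columns read as i64.
fn sql_int_type(field_type: &str) -> Option<&'static str> {
    match field_type {
        "integer" | "int" => Some("INTEGER"),
        "biginteger" | "bigint" => Some("BIGINT"),
        _ => None,
    }
}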
// ========================================================================
// BOUNDARY AND OVERFLOW TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_integer_boundary_values_comprehensive(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
let context = integer_robustness_context.await;
let indexer_tx = create_test_indexer_channel().await;

// Test i32 boundaries on INTEGER columns
let i32_boundary_tests = vec![
(2147483647.0, "i32::MAX"),
(-2147483648.0, "i32::MIN"),
(2147483646.0, "i32::MAX - 1"),
(-2147483647.0, "i32::MIN + 1"),
(0.0, "zero"),
(1.0, "one"),
(-1.0, "negative one"),
];

for (value, description) in i32_boundary_tests {
let mut data = HashMap::new();
data.insert("name".into(), create_string_value(&format!("i32 test: {}", description)));
data.insert("value1".into(), create_number_value(value));
data.insert("value2".into(), create_number_value(value));

let request = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.integer_only_table.clone(),
data,
};

let result = post_table_data(&context.pool, request, &indexer_tx).await;
assert!(result.is_ok(), "Failed for i32 value {}: {}", value, description);

// Verify correct storage
let response = result.unwrap();
let query = format!(
r#"SELECT value1, value2 FROM "{}"."{}" WHERE id = $1"#,
context.profile_name, context.integer_only_table
);
let row = sqlx::query(&query)
.bind(response.inserted_id)
.fetch_one(&context.pool)
.await
.unwrap();

let stored_val1: i32 = row.get("value1");
let stored_val2: i32 = row.get("value2");
assert_eq!(stored_val1, value as i32);
assert_eq!(stored_val2, value as i32);
}
}

#[rstest]
#[tokio::test]
async fn test_bigint_boundary_values_comprehensive(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
let context = integer_robustness_context.await;
let indexer_tx = create_test_indexer_channel().await;

// Test i64 boundaries that can be precisely represented in f64
let i64_boundary_tests = vec![
(9223372036854774784.0, "Close to i64::MAX (precisely representable)"),
(-9223372036854774784.0, "Close to i64::MIN (precisely representable)"),
(4611686018427387904.0, "i64::MAX / 2"),
(-4611686018427387904.0, "i64::MIN / 2"),
(2147483647.0, "i32::MAX in i64 column"),
(-2147483648.0, "i32::MIN in i64 column"),
(1000000000000.0, "One trillion"),
(-1000000000000.0, "Negative one trillion"),
];

for (value, description) in i64_boundary_tests {
let mut data = HashMap::new();
data.insert("name".into(), create_string_value(&format!("i64 test: {}", description)));
data.insert("value1".into(), create_number_value(value));
data.insert("value2".into(), create_number_value(value));

let request = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.bigint_only_table.clone(),
data,
};

let result = post_table_data(&context.pool, request, &indexer_tx).await;
assert!(result.is_ok(), "Failed for i64 value {}: {}", value, description);

// Verify correct storage
let response = result.unwrap();
let query = format!(
r#"SELECT value1, value2 FROM "{}"."{}" WHERE id = $1"#,
context.profile_name, context.bigint_only_table
);
let row = sqlx::query(&query)
.bind(response.inserted_id)
.fetch_one(&context.pool)
.await
.unwrap();

let stored_val1: i64 = row.get("value1");
let stored_val2: i64 = row.get("value2");
assert_eq!(stored_val1, value as i64);
assert_eq!(stored_val2, value as i64);
}
}

#[rstest]
#[tokio::test]
async fn test_integer_overflow_rejection_i32(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
let context = integer_robustness_context.await;
let indexer_tx = create_test_indexer_channel().await;

// Values that should be rejected for INTEGER columns
let overflow_values = vec![
(2147483648.0, "i32::MAX + 1"),
(-2147483649.0, "i32::MIN - 1"),
(3000000000.0, "3 billion"),
(-3000000000.0, "negative 3 billion"),
(4294967296.0, "2^32"),
(9223372036854775807.0, "i64::MAX (should fail on i32)"),
];

for (value, description) in overflow_values {
let mut data = HashMap::new();
data.insert("name".into(), create_string_value(&format!("Overflow test: {}", description)));
data.insert("value1".into(), create_number_value(value));

let request = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.integer_only_table.clone(),
data,
};

let result = post_table_data(&context.pool, request, &indexer_tx).await;
assert!(result.is_err(), "Should have failed for i32 overflow value {}: {}", value, description);

if let Err(err) = result {
assert_eq!(err.code(), tonic::Code::InvalidArgument);
assert!(err.message().contains("Integer value out of range for INTEGER column"));
}
}
}

#[rstest]
#[tokio::test]
async fn test_bigint_overflow_rejection_i64(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
let context = integer_robustness_context.await;
let indexer_tx = create_test_indexer_channel().await;

// Values that should be rejected for BIGINT columns
let overflow_values = vec![
(f64::INFINITY, "Positive infinity"),
(f64::NEG_INFINITY, "Negative infinity"),
(1e20, "Very large number (100,000,000,000,000,000,000)"),
(-1e20, "Very large negative number"),
(1e25, "Extremely large number"),
(-1e25, "Extremely large negative number"),
(9223372036854775808.0, "Just above i64 safe range"),
(-9223372036854775808.0, "Just below i64 safe range"),
(f64::MAX, "f64::MAX"),
(f64::MIN, "f64::MIN"),
];

for (value, description) in overflow_values {
let mut data = HashMap::new();
data.insert("name".into(), create_string_value(&format!("i64 Overflow test: {}", description)));
data.insert("value1".into(), create_number_value(value));

let request = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.bigint_only_table.clone(),
data,
};

let result = post_table_data(&context.pool, request, &indexer_tx).await;

assert!(result.is_err(), "Should have failed for i64 overflow value {}: {}", value, description);

if let Err(err) = result {
assert_eq!(err.code(), tonic::Code::InvalidArgument);
// Check for either message format (the new robust check should catch these)
let message = err.message();
assert!(
message.contains("Integer value out of range for BIGINT column") ||
message.contains("Expected integer for column") ||
message.contains("but got a float"),
"Unexpected error message for {}: {}",
description,
message
);
}
}
}

#[rstest]
#[tokio::test]
async fn test_mixed_integer_types_in_same_table(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
let context = integer_robustness_context.await;
let indexer_tx = create_test_indexer_channel().await;

// Test inserting different values into different integer types in the same table
let test_cases = vec![
(42.0, 1000000000000.0, "Small i32, large i64"),
(2147483647.0, 9223372036854774784.0, "i32::MAX, near i64::MAX"),
(-2147483648.0, -9223372036854774784.0, "i32::MIN, near i64::MIN"),
(0.0, 0.0, "Both zero"),
(-1.0, -1.0, "Both negative one"),
];

for (i32_val, i64_val, description) in test_cases {
let mut data = HashMap::new();
data.insert("name".into(), create_string_value(&format!("Mixed test: {}", description)));
data.insert("small_int".into(), create_number_value(i32_val));
data.insert("big_int".into(), create_number_value(i64_val));
data.insert("another_int".into(), create_number_value(i32_val));
data.insert("another_bigint".into(), create_number_value(i64_val));

let request = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.mixed_integer_table.clone(),
data,
};

let result = post_table_data(&context.pool, request, &indexer_tx).await;
assert!(result.is_ok(), "Failed for mixed integer test: {}", description);

// Verify correct storage with correct types
let response = result.unwrap();
let query = format!(
r#"SELECT small_int, big_int, another_int, another_bigint FROM "{}"."{}" WHERE id = $1"#,
context.profile_name, context.mixed_integer_table
);
let row = sqlx::query(&query)
.bind(response.inserted_id)
.fetch_one(&context.pool)
.await
.unwrap();

let stored_small_int: i32 = row.get("small_int");
let stored_big_int: i64 = row.get("big_int");
let stored_another_int: i32 = row.get("another_int");
let stored_another_bigint: i64 = row.get("another_bigint");

assert_eq!(stored_small_int, i32_val as i32);
assert_eq!(stored_big_int, i64_val as i64);
assert_eq!(stored_another_int, i32_val as i32);
assert_eq!(stored_another_bigint, i64_val as i64);
}
}

#[rstest]
#[tokio::test]
async fn test_wrong_type_for_mixed_integer_columns(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
let context = integer_robustness_context.await;
let indexer_tx = create_test_indexer_channel().await;

// Try to put i64 values into i32 columns
let mut data = HashMap::new();
data.insert("name".into(), create_string_value("Wrong type test"));
data.insert("small_int".into(), create_number_value(3000000000.0)); // Too big for i32
data.insert("big_int".into(), create_number_value(42.0)); // This should be fine

let request = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.mixed_integer_table.clone(),
data,
};

let result = post_table_data(&context.pool, request, &indexer_tx).await;
assert!(result.is_err(), "Should fail when putting i64 value in i32 column");

if let Err(err) = result {
assert_eq!(err.code(), tonic::Code::InvalidArgument);
assert!(err.message().contains("Integer value out of range for INTEGER column"));
}
}

#[rstest]
#[tokio::test]
async fn test_float_precision_edge_cases(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
let context = integer_robustness_context.await;
let indexer_tx = create_test_indexer_channel().await;

// Test values that have fractional parts (should be rejected)
let fractional_values = vec![
(42.1, "42.1"),
(42.9, "42.9"),
(42.000001, "42.000001"),
(-42.5, "-42.5"),
(0.1, "0.1"),
(2147483646.5, "Near i32::MAX with fraction"),
];

for (value, description) in fractional_values {
let mut data = HashMap::new();
data.insert("name".into(), create_string_value(&format!("Float test: {}", description)));
data.insert("value1".into(), create_number_value(value));

let request = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.integer_only_table.clone(),
data,
};

let result = post_table_data(&context.pool, request, &indexer_tx).await;
assert!(result.is_err(), "Should fail for fractional value {}: {}", value, description);

if let Err(err) = result {
assert_eq!(err.code(), tonic::Code::InvalidArgument);
assert!(err.message().contains("Expected integer for column") && err.message().contains("but got a float"));
}
}
}

#[rstest]
#[tokio::test]
async fn test_null_integer_handling_comprehensive(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
let context = integer_robustness_context.await;
let indexer_tx = create_test_indexer_channel().await;

// Test null values in mixed integer table
let mut data = HashMap::new();
data.insert("name".into(), create_string_value("Null integer test"));
data.insert("small_int".into(), create_null_value());
data.insert("big_int".into(), create_null_value());
data.insert("another_int".into(), create_number_value(42.0));
data.insert("another_bigint".into(), create_number_value(1000000000000.0));

let request = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.mixed_integer_table.clone(),
data,
};

let result = post_table_data(&context.pool, request, &indexer_tx).await;
assert!(result.is_ok(), "Should succeed with null integer values");

// Verify null storage
let response = result.unwrap();
let query = format!(
r#"SELECT small_int, big_int, another_int, another_bigint FROM "{}"."{}" WHERE id = $1"#,
context.profile_name, context.mixed_integer_table
);
let row = sqlx::query(&query)
.bind(response.inserted_id)
.fetch_one(&context.pool)
.await
.unwrap();

let stored_small_int: Option<i32> = row.get("small_int");
let stored_big_int: Option<i64> = row.get("big_int");
let stored_another_int: i32 = row.get("another_int");
let stored_another_bigint: i64 = row.get("another_bigint");

assert!(stored_small_int.is_none());
assert!(stored_big_int.is_none());
assert_eq!(stored_another_int, 42);
assert_eq!(stored_another_bigint, 1000000000000);
}

#[rstest]
#[tokio::test]
async fn test_concurrent_mixed_integer_inserts(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
let context = integer_robustness_context.await;
let indexer_tx = create_test_indexer_channel().await;

// Test concurrent inserts with different integer types
let tasks: Vec<_> = (0..10).map(|i| {
let context = context.clone();
let indexer_tx = indexer_tx.clone();

tokio::spawn(async move {
let mut data = HashMap::new();
data.insert("name".into(), create_string_value(&format!("Concurrent test {}", i)));
data.insert("small_int".into(), create_number_value((i * 1000) as f64));
data.insert("big_int".into(), create_number_value((i as i64 * 1000000000000) as f64));
data.insert("another_int".into(), create_number_value((i * -100) as f64));
data.insert("another_bigint".into(), create_number_value((i as i64 * -1000000000000) as f64));

let request = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.mixed_integer_table.clone(),
data,
};

post_table_data(&context.pool, request, &indexer_tx).await
})
}).collect();

// Wait for all tasks to complete
let results = futures::future::join_all(tasks).await;

// All should succeed
for (i, result) in results.into_iter().enumerate() {
let task_result = result.expect("Task should not panic");
assert!(task_result.is_ok(), "Concurrent insert {} should succeed", i);
}
}

// ========================================================================
// PERFORMANCE AND STRESS TESTS
// ========================================================================

#[rstest]
#[tokio::test]
async fn test_rapid_integer_inserts_stress(#[future] integer_robustness_context: IntegerRobustnessTestContext) {
let context = integer_robustness_context.await;
let indexer_tx = create_test_indexer_channel().await;

// Rapid sequential inserts with alternating integer types
let start = std::time::Instant::now();

for i in 0..100 {
let mut data = HashMap::new();
data.insert("name".into(), create_string_value(&format!("Stress test {}", i)));

// Alternate between different boundary values
let small_val = match i % 4 {
0 => 2147483647.0, // i32::MAX
1 => -2147483648.0, // i32::MIN
2 => 0.0,
_ => (i as f64) * 1000.0,
};

let big_val = match i % 4 {
0 => 9223372036854774784.0, // Near i64::MAX
1 => -9223372036854774784.0, // Near i64::MIN
2 => 0.0,
_ => (i as f64) * 1000000000000.0,
};

data.insert("small_int".into(), create_number_value(small_val));
data.insert("big_int".into(), create_number_value(big_val));
data.insert("another_int".into(), create_number_value(small_val));
data.insert("another_bigint".into(), create_number_value(big_val));

let request = PostTableDataRequest {
profile_name: context.profile_name.clone(),
table_name: context.mixed_integer_table.clone(),
data,
};

let result = post_table_data(&context.pool, request, &indexer_tx).await;
assert!(result.is_ok(), "Rapid insert {} should succeed", i);
}

let duration = start.elapsed();
println!("100 mixed integer inserts took: {:?}", duration);

// Should complete in reasonable time (adjust threshold as needed)
assert!(duration.as_secs() < 10, "Stress test took too long: {:?}", duration);
}