tests are now passing fully
@@ -120,8 +120,8 @@ async fn test_unicode_in_schema_names_rejected(#[future] pool: PgPool) {
 #[tokio::test]
 async fn test_fk_column_name_uniqueness_collision(#[future] pool: PgPool) {
     let pool = pool.await;
 
-    // Create tables that would cause FK column name collisions
+    // Create tables with similar suffixes
     let req1 = PostTableDefinitionRequest {
         profile_name: "default".into(),
         table_name: "customers_146053".into(),
@@ -130,7 +130,7 @@ async fn test_fk_column_name_uniqueness_collision(#[future] pool: PgPool) {
         links: vec![],
     };
     post_table_definition(&pool, req1).await.unwrap();
 
     let req2 = PostTableDefinitionRequest {
         profile_name: "default".into(),
         table_name: "suppliers_146053".into(),
@@ -139,11 +139,11 @@ async fn test_fk_column_name_uniqueness_collision(#[future] pool: PgPool) {
         links: vec![],
     };
     post_table_definition(&pool, req2).await.unwrap();
 
-    // Try to create a table linking to both - this should reveal the FK naming bug
+    // Create a table linking to both - should succeed with full table names
     let request = PostTableDefinitionRequest {
         profile_name: "default".into(),
-        table_name: "orders".into(),
+        table_name: "orders_test".into(), // Use unique name to avoid conflicts
         columns: vec![],
         indexes: vec![],
         links: vec![
@@ -157,23 +157,20 @@ async fn test_fk_column_name_uniqueness_collision(#[future] pool: PgPool) {
             },
         ],
     };
 
     let result = post_table_definition(&pool, request).await;
 
-    // This test documents the current bug - both tables create "146053_id" columns
-    if result.is_err() {
-        let err = result.unwrap_err();
-        if err.message().contains("specified more than once") {
-            // This confirms the FK naming collision bug described in the analysis
-            assert!(err.message().contains("146053_id"));
-        } else {
-            // If it's a different error, let it fail normally
-            panic!("Unexpected error: {:?}", err);
-        }
-    } else {
-        // If this passes, the bug has been fixed
-        assert!(result.is_ok());
-    }
+    // Should succeed - no collision with full table names
+    assert!(result.is_ok());
+    let response = result.unwrap();
+
+    // Verify both FK columns are created with full table names
+    assert!(response.sql.contains("\"customers_146053_id\""));
+    assert!(response.sql.contains("\"suppliers_146053_id\""));
+
+    // Verify both are NOT NULL (required = true)
+    assert!(response.sql.contains("\"customers_146053_id\" BIGINT NOT NULL"));
+    assert!(response.sql.contains("\"suppliers_146053_id\" BIGINT NOT NULL"));
 }
 
 #[rstest]
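The reworked assertions above pin down the FK naming rule the handler is now expected to follow: each link contributes a column named after the full referenced table, typed BIGINT NOT NULL when the link is required. A minimal illustration of that rule, using a hypothetical helper name (fk_column_name is not part of this codebase):

// Hypothetical sketch: the FK column embeds the full referenced table
// name, so links to "customers_146053" and "suppliers_146053" no longer
// both collapse into a colliding "146053_id" column.
fn fk_column_name(referenced_table: &str) -> String {
    format!("{referenced_table}_id")
}

fn main() {
    assert_eq!(fk_column_name("customers_146053"), "customers_146053_id");
    assert_eq!(fk_column_name("suppliers_146053"), "suppliers_146053_id");
}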
@@ -183,7 +180,7 @@ async fn test_cross_schema_references_prevented(#[future] pool: PgPool) {
 
     // Create table in schema A
     let req_a = PostTableDefinitionRequest {
-        profile_name: "A".into(),
+        profile_name: "schema_a".into(),
         table_name: "users".into(),
         columns: vec![],
         indexes: vec![],
@@ -193,7 +190,7 @@ async fn test_cross_schema_references_prevented(#[future] pool: PgPool) {
 
     // Try to link from schema B to schema A's table
     let req_b = PostTableDefinitionRequest {
-        profile_name: "B".into(),
+        profile_name: "schema_b".into(),
         table_name: "orders".into(),
         columns: vec![],
         indexes: vec![],
@@ -309,31 +306,54 @@ async fn test_empty_schema_and_table_names_rejected(#[future] pool: PgPool) {
 #[tokio::test]
 async fn test_schema_name_case_sensitivity(#[future] pool: PgPool) {
     let pool = pool.await;
 
-    // Test that schema names are properly case-sensitive
+    // First, verify that uppercase letters are rejected
+    let invalid_request = PostTableDefinitionRequest {
+        profile_name: "TestSchema".into(), // Contains uppercase - should be rejected
+        table_name: "test_table".into(),
+        columns: vec![],
+        indexes: vec![],
+        links: vec![],
+    };
+    let result = post_table_definition(&pool, invalid_request).await;
+    assert!(result.is_err());
+    let err = result.unwrap_err();
+    assert_eq!(err.code(), Code::InvalidArgument);
+    assert!(err.message().contains("contains invalid characters"));
+
+    // Now test with valid lowercase names - create first schema
     let request1 = PostTableDefinitionRequest {
-        profile_name: "TestSchema".into(),
+        profile_name: "test_schema_a".into(),
         table_name: "test_table".into(),
         columns: vec![],
         indexes: vec![],
         links: vec![],
     };
     post_table_definition(&pool, request1).await.unwrap();
 
-    // Different case should be treated as different schema
+    // Different lowercase schema should work fine
     let request2 = PostTableDefinitionRequest {
-        profile_name: "testschema".into(),
-        table_name: "test_table".into(),
+        profile_name: "test_schema_b".into(),
+        table_name: "test_table".into(), // Same table name, different schema
         columns: vec![],
         indexes: vec![],
         links: vec![],
     };
-    let result = post_table_definition(&pool, request2).await;
-    // Under case-insensitive profiles this must collide
-    assert!(result.is_err(), "Expected duplicate‐schema error");
-    let err = result.unwrap_err();
-    // pick the right code for “already exists” in your handler
-    assert_eq!(err.code(), Code::AlreadyExists, "{:?}", err);
+    let result2 = post_table_definition(&pool, request2).await;
+    assert!(result2.is_ok(), "Different schemas should allow same table names");
+
+    // Same schema name should cause table collision
+    let request3 = PostTableDefinitionRequest {
+        profile_name: "test_schema_a".into(), // Same schema as request1
+        table_name: "test_table".into(), // Same table name as request1
+        columns: vec![],
+        indexes: vec![],
+        links: vec![],
+    };
+    let result3 = post_table_definition(&pool, request3).await;
+    assert!(result3.is_err(), "Same schema + table should cause collision");
+    let err3 = result3.unwrap_err();
+    assert_eq!(err3.code(), Code::AlreadyExists);
 }
 
 #[rstest]
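The final hunk reworks the case-sensitivity test to match the validation the handler actually performs: an uppercase character in a schema (profile) name is rejected with Code::InvalidArgument and a "contains invalid characters" message, distinct lowercase schemas may reuse the same table name, and only a same-schema, same-table request fails with Code::AlreadyExists. A minimal sketch of that name check; the exact accepted character set here is an assumption rather than something taken from the handler:

// Hypothetical validator mirroring what the test observes: lowercase
// ASCII letters, digits, and underscores pass; "TestSchema" fails
// because of the uppercase letters.
fn schema_name_is_valid(name: &str) -> bool {
    !name.is_empty()
        && name
            .chars()
            .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '_')
}

fn main() {
    assert!(!schema_name_is_valid("TestSchema")); // rejected: contains uppercase
    assert!(schema_name_is_valid("test_schema_a")); // accepted lowercase name
}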