more changes and more fixes, 3 more tests to go
Cargo.lock (generated): 17 changes
@@ -967,6 +967,21 @@ dependencies = [
  "windows-sys 0.52.0",
 ]
 
+[[package]]
+name = "futures"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
 [[package]]
 name = "futures-channel"
 version = "0.3.31"
@@ -1046,6 +1061,7 @@ version = "0.3.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
 dependencies = [
+ "futures-channel",
  "futures-core",
  "futures-io",
  "futures-macro",
@@ -2842,6 +2858,7 @@ dependencies = [
  "common",
  "dashmap",
  "dotenvy",
+ "futures",
  "jsonwebtoken",
  "lazy_static",
  "prost",
@@ -43,3 +43,4 @@ tokio = { version = "1.44", features = ["full", "test-util"] }
 rstest = "0.25.0"
 lazy_static = "1.5.0"
 rand = "0.9.1"
+futures = "0.3.31"
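The futures crate comes in for futures::future::join_all, which the new concurrency test in post_table_definition_test5.rs (further below) uses to issue ten requests at once and await them together. A minimal, self-contained sketch of that pattern, using plain numeric jobs rather than the server's request types:

    use futures::future::join_all;

    #[tokio::main]
    async fn main() {
        // Build ten independent futures, then await them together; join_all
        // preserves input order and returns a Vec of the individual results.
        let jobs = (0..10).map(|i| async move { i * 2 });
        let results: Vec<i32> = join_all(jobs).await;
        assert_eq!(results, (0..10).map(|i| i * 2).collect::<Vec<_>>());
    }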
@@ -332,9 +332,7 @@ async fn generate_table_sql(
     indexes: &[String],
     links: &[(i64, bool)],
 ) -> Result<(String, Vec<String>), Status> {
-    // Quote the schema name
     let qualified_table = format!("\"{}\".\"{}\"", profile_name, table_name);
-
     let mut system_columns = vec![
         "id BIGSERIAL PRIMARY KEY".to_string(),
         "deleted BOOLEAN NOT NULL DEFAULT FALSE".to_string(),
@@ -342,17 +340,13 @@ async fn generate_table_sql(
 
     for (linked_id, required) in links {
         let linked_table = get_table_name_by_id(tx, *linked_id).await?;
-        // Quote the schema name here too
         let qualified_linked_table = format!("\"{}\".\"{}\"", profile_name, linked_table);
-        let base_name = linked_table.split_once('_')
-            .map(|(_, rest)| rest)
-            .unwrap_or(&linked_table)
-            .to_string();
-        let null_clause = if *required { "NOT NULL" } else { "" };
 
+        // Simply use the full table name - no truncation!
+        let null_clause = if *required { "NOT NULL" } else { "" };
         system_columns.push(
-            format!("\"{0}_id\" BIGINT {1} REFERENCES {2}(id)",
-                base_name, null_clause, qualified_linked_table
+            format!("\"{}_id\" BIGINT {} REFERENCES {}(id)",
+                linked_table, null_clause, qualified_linked_table
             )
         );
     }
@@ -372,13 +366,9 @@ async fn generate_table_sql(
     let mut all_indexes = Vec::new();
     for (linked_id, _) in links {
        let linked_table = get_table_name_by_id(tx, *linked_id).await?;
-        let base_name = linked_table.split_once('_')
-            .map(|(_, rest)| rest)
-            .unwrap_or(&linked_table)
-            .to_string();
         all_indexes.push(format!(
             "CREATE INDEX \"idx_{}_{}_fk\" ON {} (\"{}_id\")",
-            table_name, base_name, qualified_table, base_name
+            table_name, linked_table, qualified_table, linked_table
         ));
     }
 
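The net effect of the generate_table_sql change: foreign-key column and index names are now derived from the full linked table name instead of the old base name (the part after the first underscore), which is what previously let links to team1_users and team2_users collapse into a single users_id column. A standalone sketch of the new naming scheme, illustrative only and not the server's actual helper, with values borrowed from the tests below:

    // Illustrative sketch of the naming scheme after this change.
    fn fk_names(schema: &str, table: &str, linked_table: &str) -> (String, String) {
        // FK column embeds the full linked table name, e.g. "team1_users_id".
        let column = format!("\"{}_id\"", linked_table);
        // Matching index: idx_<table>_<linked_table>_fk on the schema-qualified table.
        let index = format!(
            "CREATE INDEX \"idx_{}_{}_fk\" ON \"{}\".\"{}\" (\"{}_id\")",
            table, linked_table, schema, table, linked_table
        );
        (column, index)
    }

    // fk_names("default", "tasks", "team1_users") yields
    //   "\"team1_users_id\"" and
    //   CREATE INDEX "idx_tasks_team1_users_fk" ON "default"."tasks" ("team1_users_id")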
@@ -597,3 +597,4 @@ async fn test_sql_injection_attempts_are_rejected(#[future] pool: PgPool) {
 include!("post_table_definition_test2.rs");
 include!("post_table_definition_test3.rs");
 include!("post_table_definition_test4.rs");
+include!("post_table_definition_test5.rs");
@@ -7,9 +7,8 @@
 #[rstest]
 #[tokio::test]
 async fn test_fail_on_fk_base_name_collision(#[future] pool: PgPool) {
-    // Scenario: Link to two tables (`team1_users`, `team2_users`) that both have a
-    // base name of "users". This should cause a duplicate "users_id" column in the
-    // generated SQL.
+    // Scenario: Link to two tables (`team1_users`, `team2_users`) that use full table names
+    // for FK columns, so no collision occurs - this should succeed.
     let pool = pool.await;
 
     // Arrange: Create the two prerequisite tables
@@ -27,8 +26,8 @@ async fn test_fail_on_fk_base_name_collision(#[future] pool: PgPool) {
     };
     post_table_definition(&pool, req2).await.unwrap();
 
-    // Arrange: A request that links to both, causing the collision
-    let colliding_req = PostTableDefinitionRequest {
+    // Arrange: A request that links to both - should succeed with full table names
+    let linking_req = PostTableDefinitionRequest {
         profile_name: "default".into(),
         table_name: "tasks".into(),
         links: vec![
@@ -45,18 +44,27 @@ async fn test_fail_on_fk_base_name_collision(#[future] pool: PgPool) {
     };
 
     // Act
-    let result = post_table_definition(&pool, colliding_req).await;
+    let result = post_table_definition(&pool, linking_req).await;
 
-    // Assert
-    let err = result.unwrap_err();
-    assert_eq!(
-        err.code(),
-        Code::Internal,
-        "Expected Internal error from duplicate column in CREATE TABLE"
-    );
+    // Assert - should succeed
+    let response = result.unwrap();
+    assert!(response.success);
+    // Verify the SQL contains both full table name FK columns
+    assert!(response.sql.contains("\"team1_users_id\""),
+        "SQL should contain team1_users_id column");
+    assert!(response.sql.contains("\"team2_users_id\""),
+        "SQL should contain team2_users_id column");
+
+    // Verify the references are correct
+    assert!(response.sql.contains("REFERENCES \"default\".\"team1_users\"(id)"));
+    assert!(response.sql.contains("REFERENCES \"default\".\"team2_users\"(id)"));
+
+    // Verify one is NOT NULL and one is nullable
+    assert!(response.sql.contains("\"team1_users_id\" BIGINT NOT NULL"));
+    assert!(response.sql.contains("\"team2_users_id\" BIGINT REFERENCES"));
 }
 
 
 #[rstest]
 #[tokio::test]
 async fn test_sql_reserved_keywords_as_identifiers_are_allowed(#[future] pool: PgPool) {
server/tests/table_definition/post_table_definition_test5.rs (new file, 382 lines added)
@@ -0,0 +1,382 @@
+// tests/table_definition/post_table_definition_test5.rs
+
+// NOTE: All 'use' statements are inherited from the parent file that includes this one.
+
+// ========= Category 7: Schema Validation and Edge Cases =========
+
+#[rstest]
+#[tokio::test]
+async fn test_schema_name_validation_reserved_names(#[future] pool: PgPool) {
+    let pool = pool.await;
+
+    let reserved_names = vec![
+        "pg_catalog",
+        "information_schema",
+        "pg_toast",
+        "public", // May be reserved depending on implementation
+    ];
+
+    for reserved_name in reserved_names {
+        let request = PostTableDefinitionRequest {
+            profile_name: reserved_name.into(),
+            table_name: "test_table".into(),
+            columns: vec![],
+            indexes: vec![],
+            links: vec![],
+        };
+
+        let result = post_table_definition(&pool, request).await;
+        assert!(result.is_err(), "Reserved schema name '{}' should be rejected", reserved_name);
+        if let Err(status) = result {
+            assert_eq!(status.code(), Code::InvalidArgument);
+        }
+    }
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_schema_name_validation_sql_injection(#[future] pool: PgPool) {
+    let pool = pool.await;
+
+    let malicious_names = vec![
+        "test; DROP SCHEMA public",
+        "test'; DROP TABLE users; --",
+        "test\"; CREATE TABLE evil; --",
+    ];
+
+    for malicious_name in malicious_names {
+        let request = PostTableDefinitionRequest {
+            profile_name: malicious_name.into(),
+            table_name: "test_table".into(),
+            columns: vec![],
+            indexes: vec![],
+            links: vec![],
+        };
+
+        let result = post_table_definition(&pool, request).await;
+        assert!(result.is_err(), "Malicious schema name '{}' should be rejected", malicious_name);
+        if let Err(status) = result {
+            assert_eq!(status.code(), Code::InvalidArgument);
+            assert!(status.message().contains("contains invalid characters"));
+        }
+    }
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_schema_name_length_limits(#[future] pool: PgPool) {
+    let pool = pool.await;
+
+    // Test schema name length limits (63 chars in PostgreSQL)
+    let long_name = "a".repeat(64);
+    let request = PostTableDefinitionRequest {
+        profile_name: long_name,
+        table_name: "test_table".into(),
+        columns: vec![],
+        indexes: vec![],
+        links: vec![],
+    };
+
+    let result = post_table_definition(&pool, request).await;
+    assert!(result.is_err(), "Schema names longer than 63 characters should be rejected");
+    if let Err(status) = result {
+        assert_eq!(status.code(), Code::InvalidArgument);
+    }
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_unicode_in_schema_names_rejected(#[future] pool: PgPool) {
+    let pool = pool.await;
+
+    let unicode_names = vec![
+        "test_😀",
+        "schéma",
+        "тест",
+        "测试",
+    ];
+
+    for unicode_name in unicode_names {
+        let request = PostTableDefinitionRequest {
+            profile_name: unicode_name.into(),
+            table_name: "test_table".into(),
+            columns: vec![],
+            indexes: vec![],
+            links: vec![],
+        };
+
+        let result = post_table_definition(&pool, request).await;
+        assert!(result.is_err(), "Unicode schema name '{}' should be rejected", unicode_name);
+        if let Err(status) = result {
+            assert_eq!(status.code(), Code::InvalidArgument);
+            assert!(status.message().contains("contains invalid characters"));
+        }
+    }
+}
+
+// ========= Category 8: Foreign Key Edge Cases =========
+
+#[rstest]
+#[tokio::test]
+async fn test_fk_column_name_uniqueness_collision(#[future] pool: PgPool) {
+    let pool = pool.await;
+
+    // Create tables that would cause FK column name collisions
+    let req1 = PostTableDefinitionRequest {
+        profile_name: "default".into(),
+        table_name: "customers_146053".into(),
+        columns: vec![],
+        indexes: vec![],
+        links: vec![],
+    };
+    post_table_definition(&pool, req1).await.unwrap();
+
+    let req2 = PostTableDefinitionRequest {
+        profile_name: "default".into(),
+        table_name: "suppliers_146053".into(),
+        columns: vec![],
+        indexes: vec![],
+        links: vec![],
+    };
+    post_table_definition(&pool, req2).await.unwrap();
+
+    // Try to create a table linking to both - this should reveal the FK naming bug
+    let request = PostTableDefinitionRequest {
+        profile_name: "default".into(),
+        table_name: "orders".into(),
+        columns: vec![],
+        indexes: vec![],
+        links: vec![
+            TableLink {
+                linked_table_name: "customers_146053".into(),
+                required: true,
+            },
+            TableLink {
+                linked_table_name: "suppliers_146053".into(),
+                required: true,
+            },
+        ],
+    };
+
+    let result = post_table_definition(&pool, request).await;
+
+    // This test documents the current bug - both tables create "146053_id" columns
+    if result.is_err() {
+        let err = result.unwrap_err();
+        if err.message().contains("specified more than once") {
+            // This confirms the FK naming collision bug described in the analysis
+            assert!(err.message().contains("146053_id"));
+        } else {
+            // If it's a different error, let it fail normally
+            panic!("Unexpected error: {:?}", err);
+        }
+    } else {
+        // If this passes, the bug has been fixed
+        assert!(result.is_ok());
+    }
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_cross_schema_references_prevented(#[future] pool: PgPool) {
+    let pool = pool.await;
+
+    // Create table in schema A
+    let req_a = PostTableDefinitionRequest {
+        profile_name: "A".into(),
+        table_name: "users".into(),
+        columns: vec![],
+        indexes: vec![],
+        links: vec![],
+    };
+    post_table_definition(&pool, req_a).await.unwrap();
+
+    // Try to link from schema B to schema A's table
+    let req_b = PostTableDefinitionRequest {
+        profile_name: "B".into(),
+        table_name: "orders".into(),
+        columns: vec![],
+        indexes: vec![],
+        links: vec![TableLink {
+            linked_table_name: "users".into(), // This should not find A.users
+            required: true,
+        }],
+    };
+
+    let result = post_table_definition(&pool, req_b).await;
+    assert!(result.is_err());
+    assert!(result.unwrap_err().message().contains("not found"));
+}
+
+// ========= Category 9: Concurrent Operations =========
+
+#[rstest]
+#[tokio::test]
+async fn test_concurrent_schema_creation(#[future] pool: PgPool) {
+    let pool = pool.await;
+
+    use futures::future::join_all;
+
+    let futures = (0..10).map(|i| {
+        let pool = pool.clone();
+        async move {
+            let request = PostTableDefinitionRequest {
+                profile_name: format!("concurrent_schema_{}", i),
+                table_name: "test_table".into(),
+                columns: vec![ColumnDefinition {
+                    name: "test_column".into(),
+                    field_type: "text".into(),
+                }],
+                indexes: vec![],
+                links: vec![],
+            };
+            post_table_definition(&pool, request).await
+        }
+    });
+
+    let results = join_all(futures).await;
+    assert!(results.iter().all(|r| r.is_ok()));
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_table_creation_with_many_foreign_keys(#[future] pool: PgPool) {
+    let pool = pool.await;
+
+    // Create several tables to link to
+    for i in 0..5 {
+        let request = PostTableDefinitionRequest {
+            profile_name: "default".into(),
+            table_name: format!("target_table_{}", i),
+            columns: vec![],
+            indexes: vec![],
+            links: vec![],
+        };
+        post_table_definition(&pool, request).await.unwrap();
+    }
+
+    // Create a table that links to all of them
+    let links = (0..5).map(|i| TableLink {
+        linked_table_name: format!("target_table_{}", i),
+        required: false,
+    }).collect();
+
+    let request = PostTableDefinitionRequest {
+        profile_name: "default".into(),
+        table_name: "many_links_table".into(),
+        columns: vec![],
+        indexes: vec![],
+        links,
+    };
+
+    let result = post_table_definition(&pool, request).await;
+    assert!(result.is_ok());
+}
+
+// ========= Category 10: Empty and Boundary Cases =========
+
+#[rstest]
+#[tokio::test]
+async fn test_empty_schema_and_table_names_rejected(#[future] pool: PgPool) {
+    let pool = pool.await;
+
+    // Test empty schema name
+    let request = PostTableDefinitionRequest {
+        profile_name: "".into(),
+        table_name: "valid_table".into(),
+        columns: vec![],
+        indexes: vec![],
+        links: vec![],
+    };
+    let result = post_table_definition(&pool, request).await;
+    assert!(result.is_err());
+    assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
+
+    // Test empty table name
+    let request = PostTableDefinitionRequest {
+        profile_name: "default".into(),
+        table_name: "".into(),
+        columns: vec![],
+        indexes: vec![],
+        links: vec![],
+    };
+    let result = post_table_definition(&pool, request).await;
+    assert!(result.is_err());
+    assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_schema_name_case_sensitivity(#[future] pool: PgPool) {
+    let pool = pool.await;
+
+    // Create a schema with a mixed-case name
+    let request1 = PostTableDefinitionRequest {
+        profile_name: "TestSchema".into(),
+        table_name: "test_table".into(),
+        columns: vec![],
+        indexes: vec![],
+        links: vec![],
+    };
+    post_table_definition(&pool, request1).await.unwrap();
+
+    // Profile names are matched case-insensitively, so a lowercased variant
+    // must collide with the schema created above
+    let request2 = PostTableDefinitionRequest {
+        profile_name: "testschema".into(),
+        table_name: "test_table".into(),
+        columns: vec![],
+        indexes: vec![],
+        links: vec![],
+    };
+    let result = post_table_definition(&pool, request2).await;
+    assert!(result.is_err(), "Expected duplicate-schema error");
+    let err = result.unwrap_err();
+    // The duplicate should surface as AlreadyExists
+    assert_eq!(err.code(), Code::AlreadyExists, "{:?}", err);
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_whitespace_in_identifiers_rejected(#[future] pool: PgPool) {
+    let pool = pool.await;
+
+    // Test schema name with whitespace
+    let request = PostTableDefinitionRequest {
+        profile_name: "test schema".into(),
+        table_name: "test_table".into(),
+        columns: vec![],
+        indexes: vec![],
+        links: vec![],
+    };
+    let result = post_table_definition(&pool, request).await;
+    assert!(result.is_err());
+    assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
+
+    // Test table name with whitespace
+    let request = PostTableDefinitionRequest {
+        profile_name: "default".into(),
+        table_name: "test table".into(),
+        columns: vec![],
+        indexes: vec![],
+        links: vec![],
+    };
+    let result = post_table_definition(&pool, request).await;
+    assert!(result.is_err());
+    assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
+
+    // Test column name with whitespace
+    let request = PostTableDefinitionRequest {
+        profile_name: "default".into(),
+        table_name: "test_table".into(),
+        columns: vec![ColumnDefinition {
+            name: "test column".into(),
+            field_type: "text".into(),
+        }],
+        indexes: vec![],
+        links: vec![],
+    };
+    let result = post_table_definition(&pool, request).await;
+    assert!(result.is_err());
+    assert_eq!(result.unwrap_err().code(), Code::InvalidArgument);
+}
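Taken together, these validation tests pin down the identifier rules the handler is expected to enforce: non-empty, at most 63 characters (PostgreSQL's identifier limit), ASCII letters, digits, and underscores only (which also rules out whitespace and the injection payloads above), and not a reserved schema name. A rough sketch of a check consistent with those assertions; the function name and exact rule set are assumptions for illustration, not the server's implementation:

    // Hypothetical validator mirroring what the tests assert; not the real handler code.
    fn validate_identifier(name: &str) -> Result<(), String> {
        // Reserved schema names exercised by test_schema_name_validation_reserved_names.
        const RESERVED: &[&str] = &["pg_catalog", "information_schema", "pg_toast", "public"];
        if name.is_empty() {
            return Err("identifier must not be empty".into());
        }
        if name.len() > 63 {
            return Err("identifier exceeds PostgreSQL's 63-character limit".into());
        }
        if RESERVED.contains(&name) {
            return Err("identifier is a reserved schema name".into());
        }
        if !name.chars().all(|c| c.is_ascii_alphanumeric() || c == '_') {
            return Err("identifier contains invalid characters".into());
        }
        Ok(())
    }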