tests for delete endpoint: all tests now passing
@@ -62,7 +62,7 @@ async fn closed_pool(#[future] pool: PgPool) -> PgPool {
 #[fixture]
 async fn existing_profile(#[future] pool: PgPool) -> (PgPool, String, i64) {
     let pool = pool.await;
-    let profile_name = format!("TestProfile_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
+    let profile_name = format!("testprofile_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
 
     // FIX: The table is `schemas`, not `profiles`.
     let profile = sqlx::query!(
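Note on the hunk above: the only change is lowercasing the generated profile name. PostgreSQL folds unquoted identifiers to lowercase, so a schema named `TestProfile_…` can only be referenced when quoted, while an all-lowercase name behaves identically whether quoted or not. A minimal sketch of such a naming helper, assuming only the `chrono` crate; the helper name is illustrative and not part of this repository:

```rust
use chrono::Utc;

// Illustrative helper (not from this repo): builds a unique, all-lowercase
// profile/schema name so it survives Postgres identifier folding unchanged.
fn unique_profile_name() -> String {
    // timestamp_nanos_opt() returns None for dates past ~2262; fall back to 0.
    let nanos = Utc::now().timestamp_nanos_opt().unwrap_or_default();
    format!("testprofile_{nanos}")
}

fn main() {
    let name = unique_profile_name();
    assert!(name.starts_with("testprofile_"));
    assert_eq!(name, name.to_lowercase());
    println!("{name}");
}
```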
@@ -79,36 +79,24 @@ async fn existing_profile(#[future] pool: PgPool) -> (PgPool, String, i64) {
 async fn existing_table(
     #[future] existing_profile: (PgPool, String, i64),
 ) -> (PgPool, String, i64, String) {
-    let (pool, profile_name, schema_id) = existing_profile.await; // Renamed for clarity
+    let (pool, profile_name, schema_id) = existing_profile.await;
     let table_name = format!("test_table_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
 
-    let columns = json!([
-        { "name": "id", "type": "BIGSERIAL", "primary_key": true },
-        { "name": "deleted", "type": "BOOLEAN", "default": false }
-    ]);
-    let indexes = json!([]);
-
-    // FIX: The column is `schema_id`, not `profile_id`.
-    sqlx::query!(
-        "INSERT INTO table_definitions (schema_id, table_name, columns, indexes) VALUES ($1, $2, $3, $4)",
-        schema_id,
-        table_name,
-        columns,
-        indexes
-    )
-    .execute(&pool)
-    .await
-    .unwrap();
-
-    // Create the physical schema and table
-    sqlx::query(&format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", profile_name))
-        .execute(&pool).await.unwrap();
-    let create_table = format!(
-        r#"CREATE TABLE "{}"."{}" (id BIGSERIAL PRIMARY KEY, deleted BOOLEAN NOT NULL DEFAULT false)"#,
-        profile_name, table_name
-    );
-    sqlx::query(&create_table).execute(&pool).await.unwrap();
-
+    // Use post_table_definition instead of manual table creation
+    let table_def_request = PostTableDefinitionRequest {
+        profile_name: profile_name.clone(),
+        table_name: table_name.clone(),
+        columns: vec![
+            TableColumnDefinition {
+                name: "test_data".into(),
+                field_type: "text".into(),
+            }
+        ],
+        indexes: vec![],
+        links: vec![],
+    };
+
+    post_table_definition(&pool, table_def_request).await.unwrap();
     (pool, profile_name, schema_id, table_name)
 }
 
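The hunk above replaces hand-written `INSERT INTO table_definitions` and `CREATE SCHEMA`/`CREATE TABLE` statements with a call to the `post_table_definition` handler, so the fixture exercises the same code path the API uses. It also leans on rstest's fixture chaining: `existing_table` receives `existing_profile` via `#[future]` and awaits it itself. A self-contained sketch of that chaining pattern, assuming the `rstest` and `tokio` crates; the fixture names and values below are placeholders, not this project's API:

```rust
use rstest::{fixture, rstest};

// Stand-in for an async resource such as a database pool.
#[fixture]
async fn base() -> i64 {
    42
}

// A fixture can depend on another async fixture; `#[future]` hands it over
// un-awaited so the consumer decides when to await it.
#[fixture]
async fn derived(#[future] base: i64) -> (i64, String) {
    let v = base.await;
    (v, format!("profile_{v}"))
}

#[rstest]
#[tokio::test]
async fn uses_chained_fixtures(#[future] derived: (i64, String)) {
    let (v, name) = derived.await;
    assert_eq!(v, 42);
    assert_eq!(name, "profile_42");
}
```

rstest awaits nothing on its own here; each consumer decides when to await, which is why the fixtures in this diff all start with `existing_profile.await` or `existing_table.await`.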
@@ -117,13 +105,20 @@ async fn existing_record(
     #[future] existing_table: (PgPool, String, i64, String),
 ) -> (PgPool, String, String, i64) {
     let (pool, profile_name, _schema_id, table_name) = existing_table.await;
-    let query = format!(
-        "INSERT INTO \"{}\".\"{}\" (deleted) VALUES (false) RETURNING id",
-        profile_name, table_name
-    );
-    let row = sqlx::query(&query).fetch_one(&pool).await.unwrap();
-    let id: i64 = row.get("id");
-    (pool, profile_name, table_name, id)
+    let mut data = HashMap::new();
+    data.insert("test_data".to_string(), string_to_proto_value("Test Record"));
+
+    let post_req = PostTableDataRequest {
+        profile_name: profile_name.clone(),
+        table_name: table_name.clone(),
+        data,
+    };
+
+    let (indexer_tx, _indexer_rx) = mpsc::channel(1);
+    let response = post_table_data(&pool, post_req, &indexer_tx).await.unwrap();
+
+    (pool, profile_name, table_name, response.inserted_id)
 }
 
 #[fixture]
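In the hunk above the row payload is now a `HashMap` from column name to a protobuf-style value built with `string_to_proto_value`, and the record is inserted through `post_table_data` rather than raw SQL. The helper itself is not shown in this diff; a plausible sketch of what such a conversion could look like, assuming the `prost-types` crate, with the body being an assumption rather than the project's actual implementation:

```rust
use std::collections::HashMap;
use prost_types::{value::Kind, Value};

// Assumed shape of the helper used in the fixture: wrap a string in a
// protobuf `Value` whose kind is StringValue.
fn string_to_proto_value(s: &str) -> Value {
    Value {
        kind: Some(Kind::StringValue(s.to_string())),
    }
}

fn main() {
    let mut data: HashMap<String, Value> = HashMap::new();
    data.insert("test_data".to_string(), string_to_proto_value("Test Record"));

    match data["test_data"].kind {
        Some(Kind::StringValue(ref v)) => println!("stored string: {v}"),
        _ => println!("unexpected kind"),
    }
}
```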
@@ -131,13 +126,30 @@ async fn existing_deleted_record(
     #[future] existing_table: (PgPool, String, i64, String),
 ) -> (PgPool, String, String, i64) {
     let (pool, profile_name, _schema_id, table_name) = existing_table.await;
-    let query = format!(
-        "INSERT INTO \"{}\".\"{}\" (deleted) VALUES (true) RETURNING id",
-        profile_name, table_name
-    );
-    let row = sqlx::query(&query).fetch_one(&pool).await.unwrap();
-    let id: i64 = row.get("id");
-    (pool, profile_name, table_name, id)
+    // First create a record
+    let mut data = HashMap::new();
+    data.insert("test_data".to_string(), string_to_proto_value("Test Deleted Record"));
+
+    let post_req = PostTableDataRequest {
+        profile_name: profile_name.clone(),
+        table_name: table_name.clone(),
+        data,
+    };
+
+    let (indexer_tx, _indexer_rx) = mpsc::channel(1);
+    let response = post_table_data(&pool, post_req, &indexer_tx).await.unwrap();
+    let record_id = response.inserted_id;
+
+    // Then delete it
+    let delete_req = DeleteTableDataRequest {
+        profile_name: profile_name.clone(),
+        table_name: table_name.clone(),
+        record_id,
+    };
+    delete_table_data(&pool, delete_req).await.unwrap();
+
+    (pool, profile_name, table_name, record_id)
 }
 
 // New fixture for advanced tests
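The deleted-record fixture above now creates the row through `post_table_data` and then removes it with `delete_table_data`, instead of inserting a pre-flagged row with raw SQL. Both data fixtures also open a throwaway `tokio::sync::mpsc` channel: the handler gets a live sender while the receiver is bound to `_indexer_rx` so it stays alive but is never drained. A small self-contained sketch of that test pattern, assuming `tokio`; `notify_indexer` is illustrative and not from this repository:

```rust
use tokio::sync::mpsc;

// Illustrative stand-in for a handler that reports inserted ids to an indexer.
async fn notify_indexer(indexer_tx: &mpsc::Sender<i64>, inserted_id: i64) {
    // In a test nobody drains the channel, so ignore a full or closed channel
    // instead of blocking or panicking.
    let _ = indexer_tx.try_send(inserted_id);
}

#[tokio::main]
async fn main() {
    // Capacity 1 is enough: the test only needs a live sender, not the messages.
    // Binding the receiver to a named `_rx` (not `_`) keeps the channel open.
    let (indexer_tx, _indexer_rx) = mpsc::channel(1);
    notify_indexer(&indexer_tx, 7).await;
}
```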
@@ -153,7 +153,10 @@ async fn test_delete_fails_if_physical_table_is_missing(
     // Arrange: Create definitions, then manually drop the physical table to create a state mismatch.
     let context = advanced_delete_context.await;
     let qualified_table = format!("\"{}\".\"{}\"", context.profile_name, context.category_table);
-    sqlx::query(&format!("DROP TABLE {}", qualified_table)).execute(&context.pool).await.unwrap();
+    sqlx::query(&format!("DROP TABLE {} CASCADE", qualified_table))
+        .execute(&context.pool)
+        .await
+        .unwrap();
 
     // Act: Attempt to delete a record from the logically-defined but physically-absent table.
     let delete_req = DeleteTableDataRequest {
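The only behavioural change in the hunk above is `CASCADE`: PostgreSQL rejects a plain `DROP TABLE` when other objects, such as foreign-key constraints from other tables or views, still depend on the table, and definitions created through `post_table_definition` (for example via `links`) may introduce such dependencies. A hedged sketch of that arrange step, assuming `sqlx` with a Postgres pool; the function and its arguments are placeholders:

```rust
use sqlx::PgPool;

// Drop a schema-qualified table together with anything that depends on it,
// e.g. foreign keys pointing at it from other tables. Identifiers are quoted
// because the fixtures generate dynamic schema and table names.
async fn drop_table_cascade(pool: &PgPool, schema: &str, table: &str) -> Result<(), sqlx::Error> {
    let qualified_table = format!("\"{schema}\".\"{table}\"");
    sqlx::query(&format!("DROP TABLE {qualified_table} CASCADE"))
        .execute(pool)
        .await?;
    Ok(())
}
```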
@@ -24,7 +24,7 @@ async fn closed_pool(#[future] pool: PgPool) -> PgPool {
 #[fixture]
 async fn profile(#[future] pool: PgPool) -> (PgPool, String, i64) {
     let pool = pool.await;
-    let profile_name = format!("TestProfile_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
+    let profile_name = format!("testprofile_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
 
     let profile = sqlx::query!(
         "INSERT INTO profiles (name) VALUES ($1) RETURNING id",