get test updated, working now

Author: filipriec
Date: 2025-06-25 09:16:32 +02:00
Parent: d346670839
Commit: f87e3c03cb
2 changed files with 142 additions and 124 deletions

tests/tables_data/handlers/get_table_data_test.rs

@@ -1,7 +1,7 @@
 // tests/tables_data/handlers/get_table_data_test.rs
 use rstest::{fixture, rstest};
 use server::tables_data::handlers::get_table_data;
-use common::proto::multieko2::tables_data::{GetTableDataRequest, GetTableDataResponse};
+use common::proto::multieko2::tables_data::GetTableDataRequest;
 use crate::common::setup_test_db;
 use sqlx::{PgPool, Row};
 use tonic;
@@ -22,60 +22,70 @@ async fn closed_pool(#[future] pool: PgPool) -> PgPool {
 }
 #[fixture]
-async fn profile(#[future] pool: PgPool) -> (PgPool, String, i64) {
+async fn schema(#[future] pool: PgPool) -> (PgPool, String, i64) {
     let pool = pool.await;
-    let profile_name = format!("testprofile_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
-    let profile = sqlx::query!(
-        "INSERT INTO profiles (name) VALUES ($1) RETURNING id",
-        profile_name
+    let schema_name = format!("testschema_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
+    // Insert into schemas table instead of profiles
+    let schema = sqlx::query!(
+        "INSERT INTO schemas (name) VALUES ($1) RETURNING id",
+        schema_name
     )
     .fetch_one(&pool)
     .await
     .unwrap();
-    (pool, profile_name, profile.id)
+    // Create the actual PostgreSQL schema
+    let create_schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", schema_name);
+    sqlx::query(&create_schema_sql)
+        .execute(&pool)
+        .await
+        .unwrap();
+    (pool, schema_name, schema.id)
 }
 #[fixture]
-async fn table_definition(#[future] profile: (PgPool, String, i64)) -> (PgPool, String, String, i64) {
-    let (pool, profile_name, profile_id) = profile.await;
+async fn table_definition(#[future] schema: (PgPool, String, i64)) -> (PgPool, String, String, i64) {
+    let (pool, schema_name, schema_id) = schema.await;
     let table_name = format!("test_table_{}", Utc::now().timestamp_nanos_opt().unwrap_or_default());
     // Define columns and indexes for the table
     let columns = json!([
-        "\"name\" VARCHAR(255)",
+        "\"name\" TEXT",
         "\"age\" INTEGER",
-        "\"email\" VARCHAR(100)",
+        "\"email\" TEXT",
         "\"is_active\" BOOLEAN"
     ]);
-    let indexes = json!([]); // Add empty indexes array
+    let indexes = json!([]);
+    // Use schema_id instead of profile_id
     let table_def = sqlx::query!(
-        "INSERT INTO table_definitions (profile_id, table_name, columns, indexes) VALUES ($1, $2, $3, $4) RETURNING id",
-        profile_id,
+        "INSERT INTO table_definitions (schema_id, table_name, columns, indexes) VALUES ($1, $2, $3, $4) RETURNING id",
+        schema_id,
         table_name,
         columns,
-        indexes // Add indexes to the insert
+        indexes
     )
     .fetch_one(&pool)
     .await
     .unwrap();
-    // Create actual table
+    // Create actual table in the schema
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let create_table = format!(
         r#"
-        CREATE TABLE "{}" (
+        CREATE TABLE {} (
             id BIGSERIAL PRIMARY KEY,
-            deleted BOOLEAN NOT NULL DEFAULT false,
-            firma TEXT NOT NULL,
-            name VARCHAR(255),
+            deleted BOOLEAN NOT NULL DEFAULT FALSE,
+            name TEXT,
             age INTEGER,
-            email VARCHAR(100),
-            is_active BOOLEAN
+            email TEXT,
+            is_active BOOLEAN,
+            created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
         )
         "#,
-        table_name
+        qualified_table
     );
     sqlx::query(&create_table)
@@ -83,23 +93,23 @@ async fn table_definition(#[future] profile: (PgPool, String, i64)) -> (PgPool,
     .await
     .unwrap();
-    (pool, profile_name, table_name, table_def.id)
+    (pool, schema_name, table_name, table_def.id)
 }
 #[fixture]
 async fn regular_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;
     // Insert a record with all fields
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma, name, age, email, is_active)
-        VALUES ($1, $2, $3, $4, $5)
+        r#"INSERT INTO {} (name, age, email, is_active)
+        VALUES ($1, $2, $3, $4)
         RETURNING id"#,
-        table_name
+        qualified_table
     );
     let record = sqlx::query(&query)
-        .bind("Test Company")
         .bind("John Doe")
         .bind(30)
         .bind("john@example.com")
@@ -109,59 +119,58 @@ async fn regular_record(#[future] table_definition: (PgPool, String, String, i64
     .unwrap();
     let id: i64 = record.get("id");
-    (pool, profile_name, table_name, id)
+    (pool, schema_name, table_name, id)
 }
 #[fixture]
 async fn null_fields_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
-    // Insert a record with only required fields
+    let (pool, schema_name, table_name, _) = table_definition.await;
+    // Insert a record with only basic fields (all others will be NULL)
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma)
-        VALUES ($1)
+        r#"INSERT INTO {} DEFAULT VALUES
        RETURNING id"#,
-        table_name
+        qualified_table
     );
     let record = sqlx::query(&query)
-        .bind("Null Fields Company")
         .fetch_one(&pool)
         .await
         .unwrap();
     let id: i64 = record.get("id");
-    (pool, profile_name, table_name, id)
+    (pool, schema_name, table_name, id)
 }
 #[fixture]
 async fn deleted_record(#[future] table_definition: (PgPool, String, String, i64)) -> (PgPool, String, String, i64) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;
     // Insert a deleted record
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma, deleted)
-        VALUES ($1, true)
+        r#"INSERT INTO {} (deleted)
+        VALUES (true)
        RETURNING id"#,
-        table_name
+        qualified_table
     );
     let record = sqlx::query(&query)
-        .bind("Deleted Company")
+        .bind(true)
         .fetch_one(&pool)
         .await
         .unwrap();
     let id: i64 = record.get("id");
-    (pool, profile_name, table_name, id)
+    (pool, schema_name, table_name, id)
 }
-async fn assert_response_matches(pool: &PgPool, table_name: &str, id: i64, response: &HashMap<String, String>) {
-    let columns = format!(
-        "id, deleted, firma, name, age, email, is_active"
-    );
-    let query = format!(r#"SELECT {} FROM "{}" WHERE id = $1"#, columns, table_name);
+async fn assert_response_matches(pool: &PgPool, schema_name: &str, table_name: &str, id: i64, response: &HashMap<String, String>) {
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
+    let columns = "id, deleted, name, age, email, is_active";
+    let query = format!(r#"SELECT {} FROM {} WHERE id = $1"#, columns, qualified_table);
     let row = sqlx::query(&query)
         .bind(id)
         .fetch_one(pool)
@@ -170,30 +179,38 @@ async fn assert_response_matches(pool: &PgPool, table_name: &str, id: i64, respo
     assert_eq!(row.get::<i64, _>("id").to_string(), response["id"]);
     assert_eq!(row.get::<bool, _>("deleted").to_string(), response["deleted"]);
-    assert_eq!(row.get::<String, _>("firma"), response["firma"]);
     // Check optional fields
     let name: Option<String> = row.try_get("name").unwrap_or(None);
     assert_eq!(name.unwrap_or_default(), response["name"]);
     let age: Option<i32> = row.try_get("age").unwrap_or(None);
     assert_eq!(age.map(|v| v.to_string()).unwrap_or_default(), response["age"]);
     let email: Option<String> = row.try_get("email").unwrap_or(None);
     assert_eq!(email.unwrap_or_default(), response["email"]);
     let is_active: Option<bool> = row.try_get("is_active").unwrap_or(None);
     assert_eq!(is_active.map(|v| v.to_string()).unwrap_or_default(), response["is_active"]);
 }
-async fn cleanup_test_data(pool: &PgPool, table_name: &str) {
-    let _ = sqlx::query(&format!(r#"DROP TABLE IF EXISTS "{}" CASCADE"#, table_name))
+async fn cleanup_test_data(pool: &PgPool, schema_name: &str, table_name: &str) {
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
+    let _ = sqlx::query(&format!(r#"DROP TABLE IF EXISTS {} CASCADE"#, qualified_table))
         .execute(pool)
         .await;
     let _ = sqlx::query!("DELETE FROM table_definitions WHERE table_name = $1", table_name)
         .execute(pool)
         .await;
+    let _ = sqlx::query(&format!(r#"DROP SCHEMA IF EXISTS "{}" CASCADE"#, schema_name))
+        .execute(pool)
+        .await;
+    let _ = sqlx::query!("DELETE FROM schemas WHERE name = $1", schema_name)
+        .execute(pool)
+        .await;
 }
 #[rstest]
@@ -201,27 +218,26 @@ async fn cleanup_test_data(pool: &PgPool, table_name: &str) {
 async fn test_get_table_data_success(
     #[future] regular_record: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, id) = regular_record.await;
+    let (pool, schema_name, table_name, id) = regular_record.await;
     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
     let response = get_table_data(&pool, request).await.unwrap();
     assert_eq!(response.data["id"], id.to_string());
-    assert_eq!(response.data["firma"], "Test Company");
     assert_eq!(response.data["name"], "John Doe");
     assert_eq!(response.data["age"], "30");
     assert_eq!(response.data["email"], "john@example.com");
     assert_eq!(response.data["is_active"], "true");
     assert_eq!(response.data["deleted"], "false");
-    assert_response_matches(&pool, &table_name, id, &response.data).await;
-    cleanup_test_data(&pool, &table_name).await;
+    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
 #[rstest]
@@ -229,10 +245,10 @@ async fn test_get_table_data_success(
 async fn test_get_optional_fields_null(
     #[future] null_fields_record: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, id) = null_fields_record.await;
+    let (pool, schema_name, table_name, id) = null_fields_record.await;
     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
@@ -244,10 +260,10 @@ async fn test_get_optional_fields_null(
     assert_eq!(response.data["email"], "");
     assert_eq!(response.data["is_active"], "");
     assert_eq!(response.data["deleted"], "false");
-    assert_response_matches(&pool, &table_name, id, &response.data).await;
-    cleanup_test_data(&pool, &table_name).await;
+    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
 #[rstest]
@@ -255,20 +271,20 @@ async fn test_get_optional_fields_null(
 async fn test_get_nonexistent_id(
     #[future] table_definition: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;
     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id: 9999,
     };
     let result = get_table_data(&pool, request).await;
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
-    cleanup_test_data(&pool, &table_name).await;
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
 #[rstest]
@@ -276,20 +292,20 @@ async fn test_get_nonexistent_id(
 async fn test_get_deleted_record(
     #[future] deleted_record: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, id) = deleted_record.await;
+    let (pool, schema_name, table_name, id) = deleted_record.await;
     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
     let result = get_table_data(&pool, request).await;
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
-    cleanup_test_data(&pool, &table_name).await;
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
 #[rstest]
@@ -298,7 +314,7 @@ async fn test_get_database_error(
     #[future] closed_pool: PgPool,
 ) {
     let closed_pool = closed_pool.await;
     let request = GetTableDataRequest {
         profile_name: "test".into(),
         table_name: "test".into(),
@@ -306,7 +322,7 @@ async fn test_get_database_error(
     };
     let result = get_table_data(&closed_pool, request).await;
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::Internal);
 }
@@ -316,17 +332,17 @@ async fn test_get_database_error(
 async fn test_get_special_characters(
     #[future] table_definition: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma, name, email)
-        VALUES ($1, $2, $3)
+        r#"INSERT INTO {} (name, email)
+        VALUES ($1, $2)
         RETURNING id"#,
-        table_name
+        qualified_table
     );
     let record = sqlx::query(&query)
-        .bind("Test Company")
         .bind("Náměstí ČR")
         .bind("čšěř@example.com")
         .fetch_one(&pool)
@@ -336,7 +352,7 @@ async fn test_get_special_characters(
     let id: i64 = record.get("id");
     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
@@ -345,10 +361,10 @@ async fn test_get_special_characters(
     assert_eq!(response.data["name"], "Náměstí ČR");
     assert_eq!(response.data["email"], "čšěř@example.com");
-    assert_response_matches(&pool, &table_name, id, &response.data).await;
-    cleanup_test_data(&pool, &table_name).await;
+    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
 #[rstest]
@@ -356,18 +372,18 @@ async fn test_get_special_characters(
 async fn test_get_max_length_fields(
     #[future] table_definition: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, _) = table_definition.await;
+    let (pool, schema_name, table_name, _) = table_definition.await;
     let long_name = "a".repeat(255);
+    let qualified_table = format!("\"{}\".\"{}\"", schema_name, table_name);
     let query = format!(
-        r#"INSERT INTO "{}" (firma, name)
-        VALUES ($1, $2)
+        r#"INSERT INTO {} (name)
+        VALUES ($1)
         RETURNING id"#,
-        table_name
+        qualified_table
     );
     let record = sqlx::query(&query)
-        .bind("Test Company")
         .bind(&long_name)
         .fetch_one(&pool)
         .await
@@ -376,7 +392,7 @@ async fn test_get_max_length_fields(
     let id: i64 = record.get("id");
     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
@@ -385,10 +401,10 @@ async fn test_get_max_length_fields(
     assert_eq!(response.data["name"], long_name);
     assert_eq!(response.data["name"].len(), 255);
-    assert_response_matches(&pool, &table_name, id, &response.data).await;
-    cleanup_test_data(&pool, &table_name).await;
+    assert_response_matches(&pool, &schema_name, &table_name, id, &response.data).await;
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }
 #[rstest]
@@ -397,7 +413,7 @@ async fn test_get_invalid_profile(
     #[future] pool: PgPool,
 ) {
     let pool = pool.await;
     let request = GetTableDataRequest {
         profile_name: "non_existent_profile".into(),
         table_name: "test_table".into(),
@@ -405,7 +421,7 @@ async fn test_get_invalid_profile(
     };
     let result = get_table_data(&pool, request).await;
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
 }
@@ -413,20 +429,22 @@ async fn test_get_invalid_profile(
 #[rstest]
 #[tokio::test]
 async fn test_get_invalid_table(
-    #[future] profile: (PgPool, String, i64),
+    #[future] schema: (PgPool, String, i64),
 ) {
-    let (pool, profile_name, _) = profile.await;
+    let (pool, schema_name, _) = schema.await;
     let request = GetTableDataRequest {
-        profile_name,
+        profile_name: schema_name.clone(),
         table_name: "non_existent_table".into(),
         id: 1,
     };
     let result = get_table_data(&pool, request).await;
     assert!(result.is_err());
     assert_eq!(result.unwrap_err().code(), tonic::Code::NotFound);
+    cleanup_test_data(&pool, &schema_name, "non_existent_table").await;
 }
 #[rstest]
@@ -434,17 +452,17 @@ async fn test_get_invalid_table(
 async fn test_get_invalid_column(
     #[future] regular_record: (PgPool, String, String, i64),
 ) {
-    let (pool, profile_name, table_name, id) = regular_record.await;
+    let (pool, schema_name, table_name, id) = regular_record.await;
     let request = GetTableDataRequest {
-        profile_name: profile_name.clone(),
+        profile_name: schema_name.clone(),
         table_name: table_name.clone(),
         id,
     };
     let result = get_table_data(&pool, request).await;
     assert!(result.is_ok()); // Should still succeed as we're not filtering columns
-    cleanup_test_data(&pool, &table_name).await;
+    cleanup_test_data(&pool, &schema_name, &table_name).await;
 }

tests/tables_data/mod.rs

@@ -1,7 +1,7 @@
 // tests/tables_data/mod.rs
 // pub mod post_table_data_test;
 // pub mod put_table_data_test;
-pub mod delete_table_data_test;
-// pub mod get_table_data_test;
+// pub mod delete_table_data_test;
+pub mod get_table_data_test;
 // pub mod get_table_data_count_test;
 // pub mod get_table_data_by_position_test;