Move search into the common module; fix the layer-mixing issue by sharing the tokenizer registration.

This commit is contained in:
filipriec
2025-06-10 13:47:18 +02:00
parent 350c522d19
commit 679bb3b6ab
6 changed files with 96 additions and 119 deletions

View File

@@ -15,6 +15,7 @@ use common::proto::multieko2::search::{
};
pub use common::proto::multieko2::search::searcher_server::SearcherServer;
use common::proto::multieko2::search::searcher_server::Searcher;
use common::search::register_slovak_tokenizers;
use tantivy::schema::Value;
pub struct SearcherService;
@@ -217,41 +218,3 @@ impl Searcher for SearcherService {
}
}
/// This function is now an exact mirror of the one in `server/src/search_schema.rs`
fn register_slovak_tokenizers(index: &Index) -> tantivy::Result<()> {
use tantivy::tokenizer::*;
let tokenizer_manager = index.tokenizers();
// TOKENIZER for `prefix_edge`: Edge N-gram (1-15 chars)
if tokenizer_manager.get("slovak_prefix_edge").is_none() {
let tokenizer = TextAnalyzer::builder(NgramTokenizer::new(1, 15, true)?)
.filter(RemoveLongFilter::limit(40))
.filter(LowerCaser)
.filter(AsciiFoldingFilter)
.build();
tokenizer_manager.register("slovak_prefix_edge", tokenizer);
}
// TOKENIZER for `prefix_full`: Simple word tokenizer
if tokenizer_manager.get("slovak_prefix_full").is_none() {
let tokenizer = TextAnalyzer::builder(SimpleTokenizer::default())
.filter(RemoveLongFilter::limit(40))
.filter(LowerCaser)
.filter(AsciiFoldingFilter)
.build();
tokenizer_manager.register("slovak_prefix_full", tokenizer);
}
// NGRAM TOKENIZER: For substring matching.
if tokenizer_manager.get("slovak_ngram").is_none() {
let tokenizer = TextAnalyzer::builder(NgramTokenizer::new(3, 3, false)?)
.filter(RemoveLongFilter::limit(40))
.filter(LowerCaser)
.filter(AsciiFoldingFilter)
.build();
tokenizer_manager.register("slovak_ngram", tokenizer);
}
Ok(())
}