mito2/sst/
index.rs

1// Copyright 2023 Greptime Team
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15pub(crate) mod bloom_filter;
16pub(crate) mod fulltext_index;
17mod indexer;
18pub mod intermediate;
19pub(crate) mod inverted_index;
20pub mod puffin_manager;
21mod statistics;
22pub(crate) mod store;
23#[cfg(feature = "vector_index")]
24pub(crate) mod vector_index;
25
26use std::cmp::Ordering;
27use std::collections::{BinaryHeap, HashMap, HashSet};
28use std::num::NonZeroUsize;
29use std::sync::Arc;
30
31use bloom_filter::creator::BloomFilterIndexer;
32use common_telemetry::{debug, error, info, warn};
33use datatypes::arrow::array::BinaryArray;
34use datatypes::arrow::record_batch::RecordBatch;
35use mito_codec::index::IndexValuesCodec;
36use mito_codec::row_converter::CompositeValues;
37use object_store::ObjectStore;
38use puffin_manager::SstPuffinManager;
39use smallvec::{SmallVec, smallvec};
40use snafu::{OptionExt, ResultExt};
41use statistics::{ByteCount, RowCount};
42use store_api::metadata::RegionMetadataRef;
43use store_api::storage::{ColumnId, FileId, RegionId};
44use strum::IntoStaticStr;
45use tokio::sync::mpsc::Sender;
46#[cfg(feature = "vector_index")]
47use vector_index::creator::VectorIndexer;
48
49use crate::access_layer::{AccessLayerRef, FilePathProvider, OperationType, RegionFilePathFactory};
50use crate::cache::file_cache::{FileCacheRef, FileType, IndexKey};
51use crate::cache::write_cache::{UploadTracker, WriteCacheRef};
52#[cfg(feature = "vector_index")]
53use crate::config::VectorIndexConfig;
54use crate::config::{BloomFilterConfig, FulltextIndexConfig, InvertedIndexConfig};
55use crate::error::{
56    BuildIndexAsyncSnafu, DecodeSnafu, Error, InvalidRecordBatchSnafu, RegionClosedSnafu,
57    RegionDroppedSnafu, RegionTruncatedSnafu, Result,
58};
59use crate::manifest::action::{RegionEdit, RegionMetaAction, RegionMetaActionList};
60use crate::metrics::INDEX_CREATE_MEMORY_USAGE;
61use crate::read::{Batch, BatchReader};
62use crate::region::options::IndexOptions;
63use crate::region::version::VersionControlRef;
64use crate::region::{ManifestContextRef, RegionLeaderState};
65use crate::request::{
66    BackgroundNotify, IndexBuildFailed, IndexBuildFinished, IndexBuildStopped, WorkerRequest,
67    WorkerRequestWithTime,
68};
69use crate::schedule::scheduler::{Job, SchedulerRef};
70use crate::sst::file::{
71    ColumnIndexMetadata, FileHandle, FileMeta, IndexType, IndexTypes, RegionFileId, RegionIndexId,
72};
73use crate::sst::file_purger::FilePurgerRef;
74use crate::sst::index::fulltext_index::creator::FulltextIndexer;
75use crate::sst::index::intermediate::IntermediateManager;
76use crate::sst::index::inverted_index::creator::InvertedIndexer;
77use crate::sst::parquet::SstInfo;
78use crate::sst::parquet::flat_format::primary_key_column_index;
79use crate::sst::parquet::format::PrimaryKeyArray;
80use crate::worker::WorkerListener;
81
82pub(crate) const TYPE_INVERTED_INDEX: &str = "inverted_index";
83pub(crate) const TYPE_FULLTEXT_INDEX: &str = "fulltext_index";
84pub(crate) const TYPE_BLOOM_FILTER_INDEX: &str = "bloom_filter_index";
85#[cfg(feature = "vector_index")]
86pub(crate) const TYPE_VECTOR_INDEX: &str = "vector_index";
87
88/// Triggers background download of an index file to the local cache.
89pub(crate) fn trigger_index_background_download(
90    file_cache: Option<&FileCacheRef>,
91    file_id: &RegionIndexId,
92    file_size_hint: Option<u64>,
93    path_factory: &RegionFilePathFactory,
94    object_store: &ObjectStore,
95) {
96    if let (Some(file_cache), Some(file_size)) = (file_cache, file_size_hint) {
97        let index_key = IndexKey::new(
98            file_id.region_id(),
99            file_id.file_id(),
100            FileType::Puffin(file_id.version),
101        );
102        let remote_path = path_factory.build_index_file_path(file_id.file_id);
103        file_cache.maybe_download_background(
104            index_key,
105            remote_path,
106            object_store.clone(),
107            file_size,
108        );
109    }
110}
111
112/// Output of the index creation.
113#[derive(Debug, Clone, Default)]
114pub struct IndexOutput {
115    /// Size of the file.
116    pub file_size: u64,
117    /// Index version.
118    pub version: u64,
119    /// Inverted index output.
120    pub inverted_index: InvertedIndexOutput,
121    /// Fulltext index output.
122    pub fulltext_index: FulltextIndexOutput,
123    /// Bloom filter output.
124    pub bloom_filter: BloomFilterOutput,
125    /// Vector index output.
126    #[cfg(feature = "vector_index")]
127    pub vector_index: VectorIndexOutput,
128}
129
130impl IndexOutput {
131    pub fn build_available_indexes(&self) -> SmallVec<[IndexType; 4]> {
132        let mut indexes = SmallVec::new();
133        if self.inverted_index.is_available() {
134            indexes.push(IndexType::InvertedIndex);
135        }
136        if self.fulltext_index.is_available() {
137            indexes.push(IndexType::FulltextIndex);
138        }
139        if self.bloom_filter.is_available() {
140            indexes.push(IndexType::BloomFilterIndex);
141        }
142        #[cfg(feature = "vector_index")]
143        if self.vector_index.is_available() {
144            indexes.push(IndexType::VectorIndex);
145        }
146        indexes
147    }
148
149    pub fn build_indexes(&self) -> Vec<ColumnIndexMetadata> {
150        let mut map: HashMap<ColumnId, IndexTypes> = HashMap::new();
151
152        if self.inverted_index.is_available() {
153            for &col in &self.inverted_index.columns {
154                map.entry(col).or_default().push(IndexType::InvertedIndex);
155            }
156        }
157        if self.fulltext_index.is_available() {
158            for &col in &self.fulltext_index.columns {
159                map.entry(col).or_default().push(IndexType::FulltextIndex);
160            }
161        }
162        if self.bloom_filter.is_available() {
163            for &col in &self.bloom_filter.columns {
164                map.entry(col)
165                    .or_default()
166                    .push(IndexType::BloomFilterIndex);
167            }
168        }
169        #[cfg(feature = "vector_index")]
170        if self.vector_index.is_available() {
171            for &col in &self.vector_index.columns {
172                map.entry(col).or_default().push(IndexType::VectorIndex);
173            }
174        }
175
176        map.into_iter()
177            .map(|(column_id, created_indexes)| ColumnIndexMetadata {
178                column_id,
179                created_indexes,
180            })
181            .collect::<Vec<_>>()
182    }
183}
184
185/// Base output of the index creation.
186#[derive(Debug, Clone, Default)]
187pub struct IndexBaseOutput {
188    /// Size of the index.
189    pub index_size: ByteCount,
190    /// Number of rows in the index.
191    pub row_count: RowCount,
192    /// Available columns in the index.
193    pub columns: Vec<ColumnId>,
194}
195
196impl IndexBaseOutput {
197    pub fn is_available(&self) -> bool {
198        self.index_size > 0
199    }
200}
201
202/// Output of the inverted index creation.
203pub type InvertedIndexOutput = IndexBaseOutput;
204/// Output of the fulltext index creation.
205pub type FulltextIndexOutput = IndexBaseOutput;
206/// Output of the bloom filter creation.
207pub type BloomFilterOutput = IndexBaseOutput;
208/// Output of the vector index creation.
209#[cfg(feature = "vector_index")]
210pub type VectorIndexOutput = IndexBaseOutput;
211
212/// The index creator that hides the error handling details.
213#[derive(Default)]
214pub struct Indexer {
215    file_id: FileId,
216    region_id: RegionId,
217    index_version: u64,
218    puffin_manager: Option<SstPuffinManager>,
219    write_cache_enabled: bool,
220    inverted_indexer: Option<InvertedIndexer>,
221    last_mem_inverted_index: usize,
222    fulltext_indexer: Option<FulltextIndexer>,
223    last_mem_fulltext_index: usize,
224    bloom_filter_indexer: Option<BloomFilterIndexer>,
225    last_mem_bloom_filter: usize,
226    #[cfg(feature = "vector_index")]
227    vector_indexer: Option<VectorIndexer>,
228    #[cfg(feature = "vector_index")]
229    last_mem_vector_index: usize,
230    intermediate_manager: Option<IntermediateManager>,
231}
232
233impl Indexer {
234    /// Updates the index with the given batch.
235    pub async fn update(&mut self, batch: &mut Batch) {
236        self.do_update(batch).await;
237
238        self.flush_mem_metrics();
239    }
240
241    /// Updates the index with the given flat format RecordBatch.
242    pub async fn update_flat(&mut self, batch: &RecordBatch) {
243        self.do_update_flat(batch).await;
244
245        self.flush_mem_metrics();
246    }
247
248    /// Finalizes the index creation.
249    pub async fn finish(&mut self) -> IndexOutput {
250        let output = self.do_finish().await;
251
252        self.flush_mem_metrics();
253        output
254    }
255
256    /// Aborts the index creation.
257    pub async fn abort(&mut self) {
258        self.do_abort().await;
259
260        self.flush_mem_metrics();
261    }
262
263    fn flush_mem_metrics(&mut self) {
264        let inverted_mem = self
265            .inverted_indexer
266            .as_ref()
267            .map_or(0, |creator| creator.memory_usage());
268        INDEX_CREATE_MEMORY_USAGE
269            .with_label_values(&[TYPE_INVERTED_INDEX])
270            .add(inverted_mem as i64 - self.last_mem_inverted_index as i64);
271        self.last_mem_inverted_index = inverted_mem;
272
273        let fulltext_mem = self
274            .fulltext_indexer
275            .as_ref()
276            .map_or(0, |creator| creator.memory_usage());
277        INDEX_CREATE_MEMORY_USAGE
278            .with_label_values(&[TYPE_FULLTEXT_INDEX])
279            .add(fulltext_mem as i64 - self.last_mem_fulltext_index as i64);
280        self.last_mem_fulltext_index = fulltext_mem;
281
282        let bloom_filter_mem = self
283            .bloom_filter_indexer
284            .as_ref()
285            .map_or(0, |creator| creator.memory_usage());
286        INDEX_CREATE_MEMORY_USAGE
287            .with_label_values(&[TYPE_BLOOM_FILTER_INDEX])
288            .add(bloom_filter_mem as i64 - self.last_mem_bloom_filter as i64);
289        self.last_mem_bloom_filter = bloom_filter_mem;
290
291        #[cfg(feature = "vector_index")]
292        {
293            let vector_mem = self
294                .vector_indexer
295                .as_ref()
296                .map_or(0, |creator| creator.memory_usage());
297            INDEX_CREATE_MEMORY_USAGE
298                .with_label_values(&[TYPE_VECTOR_INDEX])
299                .add(vector_mem as i64 - self.last_mem_vector_index as i64);
300            self.last_mem_vector_index = vector_mem;
301        }
302    }
303}
304
305#[async_trait::async_trait]
306pub trait IndexerBuilder {
307    /// Builds indexer of given file id to [index_file_path].
308    async fn build(&self, file_id: FileId, index_version: u64) -> Indexer;
309}
310#[derive(Clone)]
311pub(crate) struct IndexerBuilderImpl {
312    pub(crate) build_type: IndexBuildType,
313    pub(crate) metadata: RegionMetadataRef,
314    pub(crate) row_group_size: usize,
315    pub(crate) puffin_manager: SstPuffinManager,
316    pub(crate) write_cache_enabled: bool,
317    pub(crate) intermediate_manager: IntermediateManager,
318    pub(crate) index_options: IndexOptions,
319    pub(crate) inverted_index_config: InvertedIndexConfig,
320    pub(crate) fulltext_index_config: FulltextIndexConfig,
321    pub(crate) bloom_filter_index_config: BloomFilterConfig,
322    #[cfg(feature = "vector_index")]
323    pub(crate) vector_index_config: VectorIndexConfig,
324}
325
326#[async_trait::async_trait]
327impl IndexerBuilder for IndexerBuilderImpl {
328    /// Sanity check for arguments and create a new [Indexer] if arguments are valid.
329    async fn build(&self, file_id: FileId, index_version: u64) -> Indexer {
330        let mut indexer = Indexer {
331            file_id,
332            region_id: self.metadata.region_id,
333            index_version,
334            write_cache_enabled: self.write_cache_enabled,
335            ..Default::default()
336        };
337
338        indexer.inverted_indexer = self.build_inverted_indexer(file_id);
339        indexer.fulltext_indexer = self.build_fulltext_indexer(file_id).await;
340        indexer.bloom_filter_indexer = self.build_bloom_filter_indexer(file_id);
341        #[cfg(feature = "vector_index")]
342        {
343            indexer.vector_indexer = self.build_vector_indexer(file_id);
344        }
345        indexer.intermediate_manager = Some(self.intermediate_manager.clone());
346
347        #[cfg(feature = "vector_index")]
348        let has_any_indexer = indexer.inverted_indexer.is_some()
349            || indexer.fulltext_indexer.is_some()
350            || indexer.bloom_filter_indexer.is_some()
351            || indexer.vector_indexer.is_some();
352        #[cfg(not(feature = "vector_index"))]
353        let has_any_indexer = indexer.inverted_indexer.is_some()
354            || indexer.fulltext_indexer.is_some()
355            || indexer.bloom_filter_indexer.is_some();
356
357        if !has_any_indexer {
358            indexer.abort().await;
359            return Indexer::default();
360        }
361
362        indexer.puffin_manager = Some(self.puffin_manager.clone());
363        indexer
364    }
365}
366
367impl IndexerBuilderImpl {
368    fn build_inverted_indexer(&self, file_id: FileId) -> Option<InvertedIndexer> {
369        let create = match self.build_type {
370            IndexBuildType::Flush => self.inverted_index_config.create_on_flush.auto(),
371            IndexBuildType::Compact => self.inverted_index_config.create_on_compaction.auto(),
372            _ => true,
373        };
374
375        if !create {
376            debug!(
377                "Skip creating inverted index due to config, region_id: {}, file_id: {}",
378                self.metadata.region_id, file_id,
379            );
380            return None;
381        }
382
383        let indexed_column_ids = self.metadata.inverted_indexed_column_ids(
384            self.index_options.inverted_index.ignore_column_ids.iter(),
385        );
386        if indexed_column_ids.is_empty() {
387            debug!(
388                "No columns to be indexed, skip creating inverted index, region_id: {}, file_id: {}",
389                self.metadata.region_id, file_id,
390            );
391            return None;
392        }
393
394        let Some(mut segment_row_count) =
395            NonZeroUsize::new(self.index_options.inverted_index.segment_row_count)
396        else {
397            warn!(
398                "Segment row count is 0, skip creating index, region_id: {}, file_id: {}",
399                self.metadata.region_id, file_id,
400            );
401            return None;
402        };
403
404        let Some(row_group_size) = NonZeroUsize::new(self.row_group_size) else {
405            warn!(
406                "Row group size is 0, skip creating index, region_id: {}, file_id: {}",
407                self.metadata.region_id, file_id,
408            );
409            return None;
410        };
411
412        // if segment row count not aligned with row group size, adjust it to be aligned.
413        if row_group_size.get() % segment_row_count.get() != 0 {
414            segment_row_count = row_group_size;
415        }
416
417        let indexer = InvertedIndexer::new(
418            file_id,
419            &self.metadata,
420            self.intermediate_manager.clone(),
421            self.inverted_index_config.mem_threshold_on_create(),
422            segment_row_count,
423            indexed_column_ids,
424        );
425
426        Some(indexer)
427    }
428
429    async fn build_fulltext_indexer(&self, file_id: FileId) -> Option<FulltextIndexer> {
430        let create = match self.build_type {
431            IndexBuildType::Flush => self.fulltext_index_config.create_on_flush.auto(),
432            IndexBuildType::Compact => self.fulltext_index_config.create_on_compaction.auto(),
433            _ => true,
434        };
435
436        if !create {
437            debug!(
438                "Skip creating full-text index due to config, region_id: {}, file_id: {}",
439                self.metadata.region_id, file_id,
440            );
441            return None;
442        }
443
444        let mem_limit = self.fulltext_index_config.mem_threshold_on_create();
445        let creator = FulltextIndexer::new(
446            &self.metadata.region_id,
447            &file_id,
448            &self.intermediate_manager,
449            &self.metadata,
450            self.fulltext_index_config.compress,
451            mem_limit,
452        )
453        .await;
454
455        let err = match creator {
456            Ok(creator) => {
457                if creator.is_none() {
458                    debug!(
459                        "Skip creating full-text index due to no columns require indexing, region_id: {}, file_id: {}",
460                        self.metadata.region_id, file_id,
461                    );
462                }
463                return creator;
464            }
465            Err(err) => err,
466        };
467
468        if cfg!(any(test, feature = "test")) {
469            panic!(
470                "Failed to create full-text indexer, region_id: {}, file_id: {}, err: {:?}",
471                self.metadata.region_id, file_id, err
472            );
473        } else {
474            warn!(
475                err; "Failed to create full-text indexer, region_id: {}, file_id: {}",
476                self.metadata.region_id, file_id,
477            );
478        }
479
480        None
481    }
482
483    fn build_bloom_filter_indexer(&self, file_id: FileId) -> Option<BloomFilterIndexer> {
484        let create = match self.build_type {
485            IndexBuildType::Flush => self.bloom_filter_index_config.create_on_flush.auto(),
486            IndexBuildType::Compact => self.bloom_filter_index_config.create_on_compaction.auto(),
487            _ => true,
488        };
489
490        if !create {
491            debug!(
492                "Skip creating bloom filter due to config, region_id: {}, file_id: {}",
493                self.metadata.region_id, file_id,
494            );
495            return None;
496        }
497
498        let mem_limit = self.bloom_filter_index_config.mem_threshold_on_create();
499        let indexer = BloomFilterIndexer::new(
500            file_id,
501            &self.metadata,
502            self.intermediate_manager.clone(),
503            mem_limit,
504        );
505
506        let err = match indexer {
507            Ok(indexer) => {
508                if indexer.is_none() {
509                    debug!(
510                        "Skip creating bloom filter due to no columns require indexing, region_id: {}, file_id: {}",
511                        self.metadata.region_id, file_id,
512                    );
513                }
514                return indexer;
515            }
516            Err(err) => err,
517        };
518
519        if cfg!(any(test, feature = "test")) {
520            panic!(
521                "Failed to create bloom filter, region_id: {}, file_id: {}, err: {:?}",
522                self.metadata.region_id, file_id, err
523            );
524        } else {
525            warn!(
526                err; "Failed to create bloom filter, region_id: {}, file_id: {}",
527                self.metadata.region_id, file_id,
528            );
529        }
530
531        None
532    }
533
534    #[cfg(feature = "vector_index")]
535    fn build_vector_indexer(&self, file_id: FileId) -> Option<VectorIndexer> {
536        let create = match self.build_type {
537            IndexBuildType::Flush => self.vector_index_config.create_on_flush.auto(),
538            IndexBuildType::Compact => self.vector_index_config.create_on_compaction.auto(),
539            _ => true,
540        };
541
542        if !create {
543            debug!(
544                "Skip creating vector index due to config, region_id: {}, file_id: {}",
545                self.metadata.region_id, file_id,
546            );
547            return None;
548        }
549
550        // Get vector index column IDs and options from metadata
551        let vector_index_options = self.metadata.vector_indexed_column_ids();
552        if vector_index_options.is_empty() {
553            debug!(
554                "No vector columns to index, skip creating vector index, region_id: {}, file_id: {}",
555                self.metadata.region_id, file_id,
556            );
557            return None;
558        }
559
560        let mem_limit = self.vector_index_config.mem_threshold_on_create();
561        let indexer = VectorIndexer::new(
562            file_id,
563            &self.metadata,
564            self.intermediate_manager.clone(),
565            mem_limit,
566            &vector_index_options,
567        );
568
569        let err = match indexer {
570            Ok(indexer) => {
571                if indexer.is_none() {
572                    debug!(
573                        "Skip creating vector index due to no columns require indexing, region_id: {}, file_id: {}",
574                        self.metadata.region_id, file_id,
575                    );
576                }
577                return indexer;
578            }
579            Err(err) => err,
580        };
581
582        if cfg!(any(test, feature = "test")) {
583            panic!(
584                "Failed to create vector index, region_id: {}, file_id: {}, err: {:?}",
585                self.metadata.region_id, file_id, err
586            );
587        } else {
588            warn!(
589                err; "Failed to create vector index, region_id: {}, file_id: {}",
590                self.metadata.region_id, file_id,
591            );
592        }
593
594        None
595    }
596}
597
598/// Type of an index build task.
599#[derive(Debug, Clone, IntoStaticStr, PartialEq)]
600pub enum IndexBuildType {
601    /// Build index when schema change.
602    SchemaChange,
603    /// Create or update index after flush.
604    Flush,
605    /// Create or update index after compact.
606    Compact,
607    /// Manually build index.
608    Manual,
609}
610
611impl IndexBuildType {
612    fn as_str(&self) -> &'static str {
613        self.into()
614    }
615
616    // Higher value means higher priority.
617    fn priority(&self) -> u8 {
618        match self {
619            IndexBuildType::Manual => 3,
620            IndexBuildType::SchemaChange => 2,
621            IndexBuildType::Flush => 1,
622            IndexBuildType::Compact => 0,
623        }
624    }
625}
626
627impl From<OperationType> for IndexBuildType {
628    fn from(op_type: OperationType) -> Self {
629        match op_type {
630            OperationType::Flush => IndexBuildType::Flush,
631            OperationType::Compact => IndexBuildType::Compact,
632        }
633    }
634}
635
636/// Outcome of an index build task.
637#[derive(Debug, Clone, PartialEq, Eq, Hash)]
638pub enum IndexBuildOutcome {
639    Finished,
640    Aborted(String),
641}
642
643/// Mpsc output result sender.
644pub type ResultMpscSender = Sender<Result<IndexBuildOutcome>>;
645
646#[derive(Clone)]
647pub struct IndexBuildTask {
648    /// The SST file handle to build index for.
649    pub file: FileHandle,
650    /// The file meta to build index for.
651    pub file_meta: FileMeta,
652    pub reason: IndexBuildType,
653    pub access_layer: AccessLayerRef,
654    pub(crate) listener: WorkerListener,
655    pub(crate) manifest_ctx: ManifestContextRef,
656    pub write_cache: Option<WriteCacheRef>,
657    pub file_purger: FilePurgerRef,
658    /// When write cache is enabled, the indexer builder should be built from the write cache.
659    /// Otherwise, it should be built from the access layer.
660    pub indexer_builder: Arc<dyn IndexerBuilder + Send + Sync>,
661    /// Request sender to notify the region worker.
662    pub(crate) request_sender: Sender<WorkerRequestWithTime>,
663    /// Index build result sender.
664    pub(crate) result_sender: ResultMpscSender,
665}
666
667impl std::fmt::Debug for IndexBuildTask {
668    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
669        f.debug_struct("IndexBuildTask")
670            .field("region_id", &self.file_meta.region_id)
671            .field("file_id", &self.file_meta.file_id)
672            .field("reason", &self.reason)
673            .finish()
674    }
675}
676
677impl IndexBuildTask {
678    /// Notify the caller the job is success.
679    pub async fn on_success(&self, outcome: IndexBuildOutcome) {
680        let _ = self.result_sender.send(Ok(outcome)).await;
681    }
682
683    /// Send index build error to waiter.
684    pub async fn on_failure(&self, err: Arc<Error>) {
685        let _ = self
686            .result_sender
687            .send(Err(err.clone()).context(BuildIndexAsyncSnafu {
688                region_id: self.file_meta.region_id,
689            }))
690            .await;
691    }
692
693    fn into_index_build_job(mut self, version_control: VersionControlRef) -> Job {
694        Box::pin(async move {
695            self.do_index_build(version_control).await;
696        })
697    }
698
699    async fn do_index_build(&mut self, version_control: VersionControlRef) {
700        self.listener
701            .on_index_build_begin(RegionFileId::new(
702                self.file_meta.region_id,
703                self.file_meta.file_id,
704            ))
705            .await;
706        match self.index_build(version_control).await {
707            Ok(outcome) => self.on_success(outcome).await,
708            Err(e) => {
709                warn!(
710                    e; "Index build task failed, region: {}, file_id: {}",
711                    self.file_meta.region_id, self.file_meta.file_id,
712                );
713                self.on_failure(e.into()).await
714            }
715        }
716        let worker_request = WorkerRequest::Background {
717            region_id: self.file_meta.region_id,
718            notify: BackgroundNotify::IndexBuildStopped(IndexBuildStopped {
719                region_id: self.file_meta.region_id,
720                file_id: self.file_meta.file_id,
721            }),
722        };
723        let _ = self
724            .request_sender
725            .send(WorkerRequestWithTime::new(worker_request))
726            .await;
727    }
728
729    // Checks if the SST file still exists in object store and version to avoid conflict with compaction.
730    async fn check_sst_file_exists(&self, version_control: &VersionControlRef) -> bool {
731        let file_id = self.file_meta.file_id;
732        let level = self.file_meta.level;
733        // We should check current version instead of the version when the job is created.
734        let version = version_control.current().version;
735
736        let Some(level_files) = version.ssts.levels().get(level as usize) else {
737            warn!(
738                "File id {} not found in level {} for index build, region: {}",
739                file_id, level, self.file_meta.region_id
740            );
741            return false;
742        };
743
744        match level_files.files.get(&file_id) {
745            Some(handle) if !handle.is_deleted() && !handle.compacting() => {
746                // If the file's metadata is present in the current version, the physical SST file
747                // is guaranteed to exist on object store. The file purger removes the physical
748                // file only after its metadata is removed from the version.
749                true
750            }
751            _ => {
752                warn!(
753                    "File id {} not found in region version for index build, region: {}",
754                    file_id, self.file_meta.region_id
755                );
756                false
757            }
758        }
759    }
760
761    async fn index_build(
762        &mut self,
763        version_control: VersionControlRef,
764    ) -> Result<IndexBuildOutcome> {
765        // Determine the new index version
766        let new_index_version = if self.file_meta.index_file_size > 0 {
767            // Increment version if index file exists to avoid overwrite.
768            self.file_meta.index_version + 1
769        } else {
770            0 // Default version for new index files
771        };
772
773        // Use the same file_id but with new version for index file
774        let index_file_id = self.file_meta.file_id;
775        let mut indexer = self
776            .indexer_builder
777            .build(index_file_id, new_index_version)
778            .await;
779
780        // Check SST file existence before building index to avoid failure of parquet reader.
781        if !self.check_sst_file_exists(&version_control).await {
782            // Calls abort to clean up index files.
783            indexer.abort().await;
784            self.listener
785                .on_index_build_abort(RegionFileId::new(
786                    self.file_meta.region_id,
787                    self.file_meta.file_id,
788                ))
789                .await;
790            return Ok(IndexBuildOutcome::Aborted(format!(
791                "SST file not found during index build, region: {}, file_id: {}",
792                self.file_meta.region_id, self.file_meta.file_id
793            )));
794        }
795
796        let mut parquet_reader = self
797            .access_layer
798            .read_sst(self.file.clone()) // use the latest file handle instead of creating a new one
799            .build()
800            .await?;
801
802        // TODO(SNC123): optimize index batch
803        loop {
804            match parquet_reader.next_batch().await {
805                Ok(Some(mut batch)) => {
806                    indexer.update(&mut batch).await;
807                }
808                Ok(None) => break,
809                Err(e) => {
810                    indexer.abort().await;
811                    return Err(e);
812                }
813            }
814        }
815        let index_output = indexer.finish().await;
816
817        if index_output.file_size > 0 {
818            // Check SST file existence again after building index.
819            if !self.check_sst_file_exists(&version_control).await {
820                // Calls abort to clean up index files.
821                indexer.abort().await;
822                self.listener
823                    .on_index_build_abort(RegionFileId::new(
824                        self.file_meta.region_id,
825                        self.file_meta.file_id,
826                    ))
827                    .await;
828                return Ok(IndexBuildOutcome::Aborted(format!(
829                    "SST file not found during index build, region: {}, file_id: {}",
830                    self.file_meta.region_id, self.file_meta.file_id
831                )));
832            }
833
834            // Upload index file if write cache is enabled.
835            self.maybe_upload_index_file(index_output.clone(), index_file_id, new_index_version)
836                .await?;
837
838            let worker_request = match self.update_manifest(index_output, new_index_version).await {
839                Ok(edit) => {
840                    let index_build_finished = IndexBuildFinished {
841                        region_id: self.file_meta.region_id,
842                        edit,
843                    };
844                    WorkerRequest::Background {
845                        region_id: self.file_meta.region_id,
846                        notify: BackgroundNotify::IndexBuildFinished(index_build_finished),
847                    }
848                }
849                Err(e) => {
850                    let err = Arc::new(e);
851                    WorkerRequest::Background {
852                        region_id: self.file_meta.region_id,
853                        notify: BackgroundNotify::IndexBuildFailed(IndexBuildFailed { err }),
854                    }
855                }
856            };
857
858            let _ = self
859                .request_sender
860                .send(WorkerRequestWithTime::new(worker_request))
861                .await;
862        }
863        Ok(IndexBuildOutcome::Finished)
864    }
865
866    async fn maybe_upload_index_file(
867        &self,
868        output: IndexOutput,
869        index_file_id: FileId,
870        index_version: u64,
871    ) -> Result<()> {
872        if let Some(write_cache) = &self.write_cache {
873            let file_id = self.file_meta.file_id;
874            let region_id = self.file_meta.region_id;
875            let remote_store = self.access_layer.object_store();
876            let mut upload_tracker = UploadTracker::new(region_id);
877            let mut err = None;
878            let puffin_key =
879                IndexKey::new(region_id, index_file_id, FileType::Puffin(output.version));
880            let index_id = RegionIndexId::new(RegionFileId::new(region_id, file_id), index_version);
881            let puffin_path = RegionFilePathFactory::new(
882                self.access_layer.table_dir().to_string(),
883                self.access_layer.path_type(),
884            )
885            .build_index_file_path_with_version(index_id);
886            if let Err(e) = write_cache
887                .upload(puffin_key, &puffin_path, remote_store)
888                .await
889            {
890                err = Some(e);
891            }
892            upload_tracker.push_uploaded_file(puffin_path);
893            if let Some(err) = err {
894                // Cleans index files on failure.
895                upload_tracker
896                    .clean(
897                        &smallvec![SstInfo {
898                            file_id,
899                            index_metadata: output,
900                            ..Default::default()
901                        }],
902                        &write_cache.file_cache(),
903                        remote_store,
904                    )
905                    .await;
906                return Err(err);
907            }
908        } else {
909            debug!("write cache is not available, skip uploading index file");
910        }
911        Ok(())
912    }
913
914    async fn update_manifest(
915        &mut self,
916        output: IndexOutput,
917        new_index_version: u64,
918    ) -> Result<RegionEdit> {
919        self.file_meta.available_indexes = output.build_available_indexes();
920        self.file_meta.indexes = output.build_indexes();
921        self.file_meta.index_file_size = output.file_size;
922        self.file_meta.index_version = new_index_version;
923        let edit = RegionEdit {
924            files_to_add: vec![self.file_meta.clone()],
925            files_to_remove: vec![],
926            timestamp_ms: Some(chrono::Utc::now().timestamp_millis()),
927            flushed_sequence: None,
928            flushed_entry_id: None,
929            committed_sequence: None,
930            compaction_time_window: None,
931        };
932        let version = self
933            .manifest_ctx
934            .update_manifest(
935                RegionLeaderState::Writable,
936                RegionMetaActionList::with_action(RegionMetaAction::Edit(edit.clone())),
937                false,
938            )
939            .await?;
940        info!(
941            "Successfully update manifest version to {version}, region: {}, reason: {}",
942            self.file_meta.region_id,
943            self.reason.as_str()
944        );
945        Ok(edit)
946    }
947}
948
949impl PartialEq for IndexBuildTask {
950    fn eq(&self, other: &Self) -> bool {
951        self.reason.priority() == other.reason.priority()
952    }
953}
954
955impl Eq for IndexBuildTask {}
956
957impl PartialOrd for IndexBuildTask {
958    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
959        Some(self.cmp(other))
960    }
961}
962
963impl Ord for IndexBuildTask {
964    fn cmp(&self, other: &Self) -> Ordering {
965        self.reason.priority().cmp(&other.reason.priority())
966    }
967}
968
969/// Tracks the index build status of a region scheduled by the [IndexBuildScheduler].
970pub struct IndexBuildStatus {
971    pub region_id: RegionId,
972    pub building_files: HashSet<FileId>,
973    pub pending_tasks: BinaryHeap<IndexBuildTask>,
974}
975
976impl IndexBuildStatus {
977    pub fn new(region_id: RegionId) -> Self {
978        IndexBuildStatus {
979            region_id,
980            building_files: HashSet::new(),
981            pending_tasks: BinaryHeap::new(),
982        }
983    }
984
985    async fn on_failure(self, err: Arc<Error>) {
986        for task in self.pending_tasks {
987            task.on_failure(err.clone()).await;
988        }
989    }
990}
991
992pub struct IndexBuildScheduler {
993    /// Background job scheduler.
994    scheduler: SchedulerRef,
995    /// Tracks regions need to build index.
996    region_status: HashMap<RegionId, IndexBuildStatus>,
997    /// Limit of files allowed to build index concurrently for a region.
998    files_limit: usize,
999}
1000
1001/// Manager background index build tasks of a worker.
1002impl IndexBuildScheduler {
1003    pub fn new(scheduler: SchedulerRef, files_limit: usize) -> Self {
1004        IndexBuildScheduler {
1005            scheduler,
1006            region_status: HashMap::new(),
1007            files_limit,
1008        }
1009    }
1010
1011    pub(crate) async fn schedule_build(
1012        &mut self,
1013        version_control: &VersionControlRef,
1014        task: IndexBuildTask,
1015    ) -> Result<()> {
1016        let status = self
1017            .region_status
1018            .entry(task.file_meta.region_id)
1019            .or_insert_with(|| IndexBuildStatus::new(task.file_meta.region_id));
1020
1021        if status.building_files.contains(&task.file_meta.file_id) {
1022            let region_file_id =
1023                RegionFileId::new(task.file_meta.region_id, task.file_meta.file_id);
1024            debug!(
1025                "Aborting index build task since index is already being built for region file {:?}",
1026                region_file_id
1027            );
1028            task.on_success(IndexBuildOutcome::Aborted(format!(
1029                "Index is already being built for region file {:?}",
1030                region_file_id
1031            )))
1032            .await;
1033            task.listener.on_index_build_abort(region_file_id).await;
1034            return Ok(());
1035        }
1036
1037        status.pending_tasks.push(task);
1038
1039        self.schedule_next_build_batch(version_control);
1040        Ok(())
1041    }
1042
1043    /// Schedule tasks until reaching the files limit or no more tasks.
1044    fn schedule_next_build_batch(&mut self, version_control: &VersionControlRef) {
1045        let mut building_count = 0;
1046        for status in self.region_status.values() {
1047            building_count += status.building_files.len();
1048        }
1049
1050        while building_count < self.files_limit {
1051            if let Some(task) = self.find_next_task() {
1052                let region_id = task.file_meta.region_id;
1053                let file_id = task.file_meta.file_id;
1054                let job = task.into_index_build_job(version_control.clone());
1055                if self.scheduler.schedule(job).is_ok() {
1056                    if let Some(status) = self.region_status.get_mut(&region_id) {
1057                        status.building_files.insert(file_id);
1058                        building_count += 1;
1059                        status
1060                            .pending_tasks
1061                            .retain(|t| t.file_meta.file_id != file_id);
1062                    } else {
1063                        error!(
1064                            "Region status not found when scheduling index build task, region: {}",
1065                            region_id
1066                        );
1067                    }
1068                } else {
1069                    error!(
1070                        "Failed to schedule index build job, region: {}, file_id: {}",
1071                        region_id, file_id
1072                    );
1073                }
1074            } else {
1075                // No more tasks to schedule.
1076                break;
1077            }
1078        }
1079    }
1080
1081    /// Find the next task which has the highest priority to run.
1082    fn find_next_task(&self) -> Option<IndexBuildTask> {
1083        self.region_status
1084            .iter()
1085            .filter_map(|(_, status)| status.pending_tasks.peek())
1086            .max()
1087            .cloned()
1088    }
1089
1090    pub(crate) fn on_task_stopped(
1091        &mut self,
1092        region_id: RegionId,
1093        file_id: FileId,
1094        version_control: &VersionControlRef,
1095    ) {
1096        if let Some(status) = self.region_status.get_mut(&region_id) {
1097            status.building_files.remove(&file_id);
1098            if status.building_files.is_empty() && status.pending_tasks.is_empty() {
1099                // No more tasks for this region, remove it.
1100                self.region_status.remove(&region_id);
1101            }
1102        }
1103
1104        self.schedule_next_build_batch(version_control);
1105    }
1106
1107    pub(crate) async fn on_failure(&mut self, region_id: RegionId, err: Arc<Error>) {
1108        error!(
1109            err; "Index build scheduler encountered failure for region {}, removing all pending tasks.",
1110            region_id
1111        );
1112        let Some(status) = self.region_status.remove(&region_id) else {
1113            return;
1114        };
1115        status.on_failure(err).await;
1116    }
1117
1118    /// Notifies the scheduler that the region is dropped.
1119    pub(crate) async fn on_region_dropped(&mut self, region_id: RegionId) {
1120        self.remove_region_on_failure(
1121            region_id,
1122            Arc::new(RegionDroppedSnafu { region_id }.build()),
1123        )
1124        .await;
1125    }
1126
1127    /// Notifies the scheduler that the region is closed.
1128    pub(crate) async fn on_region_closed(&mut self, region_id: RegionId) {
1129        self.remove_region_on_failure(region_id, Arc::new(RegionClosedSnafu { region_id }.build()))
1130            .await;
1131    }
1132
1133    /// Notifies the scheduler that the region is truncated.
1134    pub(crate) async fn on_region_truncated(&mut self, region_id: RegionId) {
1135        self.remove_region_on_failure(
1136            region_id,
1137            Arc::new(RegionTruncatedSnafu { region_id }.build()),
1138        )
1139        .await;
1140    }
1141
1142    async fn remove_region_on_failure(&mut self, region_id: RegionId, err: Arc<Error>) {
1143        let Some(status) = self.region_status.remove(&region_id) else {
1144            return;
1145        };
1146        status.on_failure(err).await;
1147    }
1148}
1149
1150/// Decodes primary keys from a flat format RecordBatch.
1151/// Returns a list of (decoded_pk_value, count) tuples where count is the number of occurrences.
1152pub(crate) fn decode_primary_keys_with_counts(
1153    batch: &RecordBatch,
1154    codec: &IndexValuesCodec,
1155) -> Result<Vec<(CompositeValues, usize)>> {
1156    let primary_key_index = primary_key_column_index(batch.num_columns());
1157    let pk_dict_array = batch
1158        .column(primary_key_index)
1159        .as_any()
1160        .downcast_ref::<PrimaryKeyArray>()
1161        .context(InvalidRecordBatchSnafu {
1162            reason: "Primary key column is not a dictionary array",
1163        })?;
1164    let pk_values_array = pk_dict_array
1165        .values()
1166        .as_any()
1167        .downcast_ref::<BinaryArray>()
1168        .context(InvalidRecordBatchSnafu {
1169            reason: "Primary key values are not binary array",
1170        })?;
1171    let keys = pk_dict_array.keys();
1172
1173    // Decodes primary keys and count consecutive occurrences
1174    let mut result: Vec<(CompositeValues, usize)> = Vec::new();
1175    let mut prev_key: Option<u32> = None;
1176
1177    let pk_indices = keys.values();
1178    for &current_key in pk_indices.iter().take(keys.len()) {
1179        // Checks if current key is the same as previous key
1180        if let Some(prev) = prev_key
1181            && prev == current_key
1182        {
1183            // Safety: We already have a key in the result vector.
1184            result.last_mut().unwrap().1 += 1;
1185            continue;
1186        }
1187
1188        // New key, decodes it.
1189        let pk_bytes = pk_values_array.value(current_key as usize);
1190        let decoded_value = codec.decoder().decode(pk_bytes).context(DecodeSnafu)?;
1191
1192        result.push((decoded_value, 1));
1193        prev_key = Some(current_key);
1194    }
1195
1196    Ok(result)
1197}
1198
1199#[cfg(test)]
1200mod tests {
1201    use std::sync::Arc;
1202
1203    use api::v1::SemanticType;
1204    use common_base::readable_size::ReadableSize;
1205    use datafusion_common::HashMap;
1206    use datatypes::data_type::ConcreteDataType;
1207    use datatypes::schema::{
1208        ColumnSchema, FulltextOptions, SkippingIndexOptions, SkippingIndexType,
1209    };
1210    use object_store::ObjectStore;
1211    use object_store::services::Memory;
1212    use puffin_manager::PuffinManagerFactory;
1213    use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder};
1214    use tokio::sync::mpsc;
1215
1216    use super::*;
1217    use crate::access_layer::{FilePathProvider, Metrics, SstWriteRequest, WriteType};
1218    use crate::cache::write_cache::WriteCache;
1219    use crate::config::{FulltextIndexConfig, IndexBuildMode, MitoConfig, Mode};
1220    use crate::memtable::time_partition::TimePartitions;
1221    use crate::region::version::{VersionBuilder, VersionControl};
1222    use crate::sst::file::RegionFileId;
1223    use crate::sst::file_purger::NoopFilePurger;
1224    use crate::sst::location;
1225    use crate::sst::parquet::WriteOptions;
1226    use crate::test_util::memtable_util::EmptyMemtableBuilder;
1227    use crate::test_util::scheduler_util::SchedulerEnv;
1228    use crate::test_util::sst_util::{new_batch_by_range, new_source, sst_region_metadata};
1229
1230    struct MetaConfig {
1231        with_inverted: bool,
1232        with_fulltext: bool,
1233        with_skipping_bloom: bool,
1234        #[cfg(feature = "vector_index")]
1235        with_vector: bool,
1236    }
1237
1238    fn mock_region_metadata(
1239        MetaConfig {
1240            with_inverted,
1241            with_fulltext,
1242            with_skipping_bloom,
1243            #[cfg(feature = "vector_index")]
1244            with_vector,
1245        }: MetaConfig,
1246    ) -> RegionMetadataRef {
1247        let mut builder = RegionMetadataBuilder::new(RegionId::new(1, 2));
1248        let mut column_schema = ColumnSchema::new("a", ConcreteDataType::int64_datatype(), false);
1249        if with_inverted {
1250            column_schema = column_schema.with_inverted_index(true);
1251        }
1252        builder
1253            .push_column_metadata(ColumnMetadata {
1254                column_schema,
1255                semantic_type: SemanticType::Field,
1256                column_id: 1,
1257            })
1258            .push_column_metadata(ColumnMetadata {
1259                column_schema: ColumnSchema::new("b", ConcreteDataType::float64_datatype(), false),
1260                semantic_type: SemanticType::Field,
1261                column_id: 2,
1262            })
1263            .push_column_metadata(ColumnMetadata {
1264                column_schema: ColumnSchema::new(
1265                    "c",
1266                    ConcreteDataType::timestamp_millisecond_datatype(),
1267                    false,
1268                ),
1269                semantic_type: SemanticType::Timestamp,
1270                column_id: 3,
1271            });
1272
1273        if with_fulltext {
1274            let column_schema =
1275                ColumnSchema::new("text", ConcreteDataType::string_datatype(), true)
1276                    .with_fulltext_options(FulltextOptions {
1277                        enable: true,
1278                        ..Default::default()
1279                    })
1280                    .unwrap();
1281
1282            let column = ColumnMetadata {
1283                column_schema,
1284                semantic_type: SemanticType::Field,
1285                column_id: 4,
1286            };
1287
1288            builder.push_column_metadata(column);
1289        }
1290
1291        if with_skipping_bloom {
1292            let column_schema =
1293                ColumnSchema::new("bloom", ConcreteDataType::string_datatype(), false)
1294                    .with_skipping_options(SkippingIndexOptions::new_unchecked(
1295                        42,
1296                        0.01,
1297                        SkippingIndexType::BloomFilter,
1298                    ))
1299                    .unwrap();
1300
1301            let column = ColumnMetadata {
1302                column_schema,
1303                semantic_type: SemanticType::Field,
1304                column_id: 5,
1305            };
1306
1307            builder.push_column_metadata(column);
1308        }
1309
1310        #[cfg(feature = "vector_index")]
1311        if with_vector {
1312            use index::vector::VectorIndexOptions;
1313
1314            let options = VectorIndexOptions::default();
1315            let column_schema =
1316                ColumnSchema::new("vec", ConcreteDataType::vector_datatype(4), true)
1317                    .with_vector_index_options(&options)
1318                    .unwrap();
1319            let column = ColumnMetadata {
1320                column_schema,
1321                semantic_type: SemanticType::Field,
1322                column_id: 6,
1323            };
1324
1325            builder.push_column_metadata(column);
1326        }
1327
1328        Arc::new(builder.build().unwrap())
1329    }
1330
1331    fn mock_object_store() -> ObjectStore {
1332        ObjectStore::new(Memory::default()).unwrap().finish()
1333    }
1334
1335    async fn mock_intm_mgr(path: impl AsRef<str>) -> IntermediateManager {
1336        IntermediateManager::init_fs(path).await.unwrap()
1337    }
1338    struct NoopPathProvider;
1339
1340    impl FilePathProvider for NoopPathProvider {
1341        fn build_index_file_path(&self, _file_id: RegionFileId) -> String {
1342            unreachable!()
1343        }
1344
1345        fn build_index_file_path_with_version(&self, _index_id: RegionIndexId) -> String {
1346            unreachable!()
1347        }
1348
1349        fn build_sst_file_path(&self, _file_id: RegionFileId) -> String {
1350            unreachable!()
1351        }
1352    }
1353
1354    async fn mock_sst_file(
1355        metadata: RegionMetadataRef,
1356        env: &SchedulerEnv,
1357        build_mode: IndexBuildMode,
1358    ) -> SstInfo {
1359        let source = new_source(&[
1360            new_batch_by_range(&["a", "d"], 0, 60),
1361            new_batch_by_range(&["b", "f"], 0, 40),
1362            new_batch_by_range(&["b", "h"], 100, 200),
1363        ]);
1364        let mut index_config = MitoConfig::default().index;
1365        index_config.build_mode = build_mode;
1366        let write_request = SstWriteRequest {
1367            op_type: OperationType::Flush,
1368            metadata: metadata.clone(),
1369            source: either::Left(source),
1370            storage: None,
1371            max_sequence: None,
1372            cache_manager: Default::default(),
1373            index_options: IndexOptions::default(),
1374            index_config,
1375            inverted_index_config: Default::default(),
1376            fulltext_index_config: Default::default(),
1377            bloom_filter_index_config: Default::default(),
1378            #[cfg(feature = "vector_index")]
1379            vector_index_config: Default::default(),
1380        };
1381        let mut metrics = Metrics::new(WriteType::Flush);
1382        env.access_layer
1383            .write_sst(write_request, &WriteOptions::default(), &mut metrics)
1384            .await
1385            .unwrap()
1386            .remove(0)
1387    }
1388
1389    async fn mock_version_control(
1390        metadata: RegionMetadataRef,
1391        file_purger: FilePurgerRef,
1392        files: HashMap<FileId, FileMeta>,
1393    ) -> VersionControlRef {
1394        let mutable = Arc::new(TimePartitions::new(
1395            metadata.clone(),
1396            Arc::new(EmptyMemtableBuilder::default()),
1397            0,
1398            None,
1399        ));
1400        let version_builder = VersionBuilder::new(metadata, mutable)
1401            .add_files(file_purger, files.values().cloned())
1402            .build();
1403        Arc::new(VersionControl::new(version_builder))
1404    }
1405
1406    async fn mock_indexer_builder(
1407        metadata: RegionMetadataRef,
1408        env: &SchedulerEnv,
1409    ) -> Arc<dyn IndexerBuilder + Send + Sync> {
1410        let (dir, factory) = PuffinManagerFactory::new_for_test_async("mock_indexer_builder").await;
1411        let intm_manager = mock_intm_mgr(dir.path().to_string_lossy()).await;
1412        let puffin_manager = factory.build(
1413            env.access_layer.object_store().clone(),
1414            RegionFilePathFactory::new(
1415                env.access_layer.table_dir().to_string(),
1416                env.access_layer.path_type(),
1417            ),
1418        );
1419        Arc::new(IndexerBuilderImpl {
1420            build_type: IndexBuildType::Flush,
1421            metadata,
1422            row_group_size: 1024,
1423            puffin_manager,
1424            write_cache_enabled: false,
1425            intermediate_manager: intm_manager,
1426            index_options: IndexOptions::default(),
1427            inverted_index_config: InvertedIndexConfig::default(),
1428            fulltext_index_config: FulltextIndexConfig::default(),
1429            bloom_filter_index_config: BloomFilterConfig::default(),
1430            #[cfg(feature = "vector_index")]
1431            vector_index_config: Default::default(),
1432        })
1433    }
1434
1435    #[tokio::test]
1436    async fn test_build_indexer_basic() {
1437        let (dir, factory) =
1438            PuffinManagerFactory::new_for_test_async("test_build_indexer_basic_").await;
1439        let intm_manager = mock_intm_mgr(dir.path().to_string_lossy()).await;
1440
1441        let metadata = mock_region_metadata(MetaConfig {
1442            with_inverted: true,
1443            with_fulltext: true,
1444            with_skipping_bloom: true,
1445            #[cfg(feature = "vector_index")]
1446            with_vector: false,
1447        });
1448        let indexer = IndexerBuilderImpl {
1449            build_type: IndexBuildType::Flush,
1450            metadata,
1451            row_group_size: 1024,
1452            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
1453            write_cache_enabled: false,
1454            intermediate_manager: intm_manager,
1455            index_options: IndexOptions::default(),
1456            inverted_index_config: InvertedIndexConfig::default(),
1457            fulltext_index_config: FulltextIndexConfig::default(),
1458            bloom_filter_index_config: BloomFilterConfig::default(),
1459            #[cfg(feature = "vector_index")]
1460            vector_index_config: Default::default(),
1461        }
1462        .build(FileId::random(), 0)
1463        .await;
1464
1465        assert!(indexer.inverted_indexer.is_some());
1466        assert!(indexer.fulltext_indexer.is_some());
1467        assert!(indexer.bloom_filter_indexer.is_some());
1468    }
1469
1470    #[tokio::test]
1471    async fn test_build_indexer_disable_create() {
1472        let (dir, factory) =
1473            PuffinManagerFactory::new_for_test_async("test_build_indexer_disable_create_").await;
1474        let intm_manager = mock_intm_mgr(dir.path().to_string_lossy()).await;
1475
1476        let metadata = mock_region_metadata(MetaConfig {
1477            with_inverted: true,
1478            with_fulltext: true,
1479            with_skipping_bloom: true,
1480            #[cfg(feature = "vector_index")]
1481            with_vector: false,
1482        });
1483        let indexer = IndexerBuilderImpl {
1484            build_type: IndexBuildType::Flush,
1485            metadata: metadata.clone(),
1486            row_group_size: 1024,
1487            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
1488            write_cache_enabled: false,
1489            intermediate_manager: intm_manager.clone(),
1490            index_options: IndexOptions::default(),
1491            inverted_index_config: InvertedIndexConfig {
1492                create_on_flush: Mode::Disable,
1493                ..Default::default()
1494            },
1495            fulltext_index_config: FulltextIndexConfig::default(),
1496            bloom_filter_index_config: BloomFilterConfig::default(),
1497            #[cfg(feature = "vector_index")]
1498            vector_index_config: Default::default(),
1499        }
1500        .build(FileId::random(), 0)
1501        .await;
1502
1503        assert!(indexer.inverted_indexer.is_none());
1504        assert!(indexer.fulltext_indexer.is_some());
1505        assert!(indexer.bloom_filter_indexer.is_some());
1506
1507        let indexer = IndexerBuilderImpl {
1508            build_type: IndexBuildType::Compact,
1509            metadata: metadata.clone(),
1510            row_group_size: 1024,
1511            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
1512            write_cache_enabled: false,
1513            intermediate_manager: intm_manager.clone(),
1514            index_options: IndexOptions::default(),
1515            inverted_index_config: InvertedIndexConfig::default(),
1516            fulltext_index_config: FulltextIndexConfig {
1517                create_on_compaction: Mode::Disable,
1518                ..Default::default()
1519            },
1520            bloom_filter_index_config: BloomFilterConfig::default(),
1521            #[cfg(feature = "vector_index")]
1522            vector_index_config: Default::default(),
1523        }
1524        .build(FileId::random(), 0)
1525        .await;
1526
1527        assert!(indexer.inverted_indexer.is_some());
1528        assert!(indexer.fulltext_indexer.is_none());
1529        assert!(indexer.bloom_filter_indexer.is_some());
1530
1531        let indexer = IndexerBuilderImpl {
1532            build_type: IndexBuildType::Compact,
1533            metadata,
1534            row_group_size: 1024,
1535            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
1536            write_cache_enabled: false,
1537            intermediate_manager: intm_manager,
1538            index_options: IndexOptions::default(),
1539            inverted_index_config: InvertedIndexConfig::default(),
1540            fulltext_index_config: FulltextIndexConfig::default(),
1541            bloom_filter_index_config: BloomFilterConfig {
1542                create_on_compaction: Mode::Disable,
1543                ..Default::default()
1544            },
1545            #[cfg(feature = "vector_index")]
1546            vector_index_config: Default::default(),
1547        }
1548        .build(FileId::random(), 0)
1549        .await;
1550
1551        assert!(indexer.inverted_indexer.is_some());
1552        assert!(indexer.fulltext_indexer.is_some());
1553        assert!(indexer.bloom_filter_indexer.is_none());
1554    }
1555
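    // When the metadata declares no column for a given index type, the
    // corresponding indexer should not be created.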
1556    #[tokio::test]
1557    async fn test_build_indexer_no_required() {
1558        let (dir, factory) =
1559            PuffinManagerFactory::new_for_test_async("test_build_indexer_no_required_").await;
1560        let intm_manager = mock_intm_mgr(dir.path().to_string_lossy()).await;
1561
1562        let metadata = mock_region_metadata(MetaConfig {
1563            with_inverted: false,
1564            with_fulltext: true,
1565            with_skipping_bloom: true,
1566            #[cfg(feature = "vector_index")]
1567            with_vector: false,
1568        });
1569        let indexer = IndexerBuilderImpl {
1570            build_type: IndexBuildType::Flush,
1571            metadata: metadata.clone(),
1572            row_group_size: 1024,
1573            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
1574            write_cache_enabled: false,
1575            intermediate_manager: intm_manager.clone(),
1576            index_options: IndexOptions::default(),
1577            inverted_index_config: InvertedIndexConfig::default(),
1578            fulltext_index_config: FulltextIndexConfig::default(),
1579            bloom_filter_index_config: BloomFilterConfig::default(),
1580            #[cfg(feature = "vector_index")]
1581            vector_index_config: Default::default(),
1582        }
1583        .build(FileId::random(), 0)
1584        .await;
1585
1586        assert!(indexer.inverted_indexer.is_none());
1587        assert!(indexer.fulltext_indexer.is_some());
1588        assert!(indexer.bloom_filter_indexer.is_some());
1589
1590        let metadata = mock_region_metadata(MetaConfig {
1591            with_inverted: true,
1592            with_fulltext: false,
1593            with_skipping_bloom: true,
1594            #[cfg(feature = "vector_index")]
1595            with_vector: false,
1596        });
1597        let indexer = IndexerBuilderImpl {
1598            build_type: IndexBuildType::Flush,
1599            metadata: metadata.clone(),
1600            row_group_size: 1024,
1601            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
1602            write_cache_enabled: false,
1603            intermediate_manager: intm_manager.clone(),
1604            index_options: IndexOptions::default(),
1605            inverted_index_config: InvertedIndexConfig::default(),
1606            fulltext_index_config: FulltextIndexConfig::default(),
1607            bloom_filter_index_config: BloomFilterConfig::default(),
1608            #[cfg(feature = "vector_index")]
1609            vector_index_config: Default::default(),
1610        }
1611        .build(FileId::random(), 0)
1612        .await;
1613
1614        assert!(indexer.inverted_indexer.is_some());
1615        assert!(indexer.fulltext_indexer.is_none());
1616        assert!(indexer.bloom_filter_indexer.is_some());
1617
1618        let metadata = mock_region_metadata(MetaConfig {
1619            with_inverted: true,
1620            with_fulltext: true,
1621            with_skipping_bloom: false,
1622            #[cfg(feature = "vector_index")]
1623            with_vector: false,
1624        });
1625        let indexer = IndexerBuilderImpl {
1626            build_type: IndexBuildType::Flush,
1627            metadata: metadata.clone(),
1628            row_group_size: 1024,
1629            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
1630            write_cache_enabled: false,
1631            intermediate_manager: intm_manager,
1632            index_options: IndexOptions::default(),
1633            inverted_index_config: InvertedIndexConfig::default(),
1634            fulltext_index_config: FulltextIndexConfig::default(),
1635            bloom_filter_index_config: BloomFilterConfig::default(),
1636            #[cfg(feature = "vector_index")]
1637            vector_index_config: Default::default(),
1638        }
1639        .build(FileId::random(), 0)
1640        .await;
1641
1642        assert!(indexer.inverted_indexer.is_some());
1643        assert!(indexer.fulltext_indexer.is_some());
1644        assert!(indexer.bloom_filter_indexer.is_none());
1645    }
1646
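    // A zero row group size is invalid for building the inverted index, so the
    // inverted indexer should not be created.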
1647    #[tokio::test]
1648    async fn test_build_indexer_zero_row_group() {
1649        let (dir, factory) =
1650            PuffinManagerFactory::new_for_test_async("test_build_indexer_zero_row_group_").await;
1651        let intm_manager = mock_intm_mgr(dir.path().to_string_lossy()).await;
1652
1653        let metadata = mock_region_metadata(MetaConfig {
1654            with_inverted: true,
1655            with_fulltext: true,
1656            with_skipping_bloom: true,
1657            #[cfg(feature = "vector_index")]
1658            with_vector: false,
1659        });
1660        let indexer = IndexerBuilderImpl {
1661            build_type: IndexBuildType::Flush,
1662            metadata,
1663            row_group_size: 0,
1664            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
1665            write_cache_enabled: false,
1666            intermediate_manager: intm_manager,
1667            index_options: IndexOptions::default(),
1668            inverted_index_config: InvertedIndexConfig::default(),
1669            fulltext_index_config: FulltextIndexConfig::default(),
1670            bloom_filter_index_config: BloomFilterConfig::default(),
1671            #[cfg(feature = "vector_index")]
1672            vector_index_config: Default::default(),
1673        }
1674        .build(FileId::random(), 0)
1675        .await;
1676
1677        assert!(indexer.inverted_indexer.is_none());
1678    }
1679
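    // Feeds a flat `RecordBatch` containing two little-endian f32 vectors into the
    // indexer and checks that a vector index is produced for the vector column.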
1680    #[cfg(feature = "vector_index")]
1681    #[tokio::test]
1682    async fn test_update_flat_builds_vector_index() {
1683        use datatypes::arrow::array::BinaryBuilder;
1684        use datatypes::arrow::datatypes::{DataType, Field, Schema};
1685
1686        struct TestPathProvider;
1687
1688        impl FilePathProvider for TestPathProvider {
1689            fn build_index_file_path(&self, file_id: RegionFileId) -> String {
1690                format!("index/{}.puffin", file_id)
1691            }
1692
1693            fn build_index_file_path_with_version(&self, index_id: RegionIndexId) -> String {
1694                format!("index/{}.puffin", index_id)
1695            }
1696
1697            fn build_sst_file_path(&self, file_id: RegionFileId) -> String {
1698                format!("sst/{}.parquet", file_id)
1699            }
1700        }
1701
1702        fn f32s_to_bytes(values: &[f32]) -> Vec<u8> {
1703            let mut bytes = Vec::with_capacity(values.len() * 4);
1704            for v in values {
1705                bytes.extend_from_slice(&v.to_le_bytes());
1706            }
1707            bytes
1708        }
1709
1710        let (dir, factory) =
1711            PuffinManagerFactory::new_for_test_async("test_update_flat_builds_vector_index_").await;
1712        let intm_manager = mock_intm_mgr(dir.path().to_string_lossy()).await;
1713
1714        let metadata = mock_region_metadata(MetaConfig {
1715            with_inverted: false,
1716            with_fulltext: false,
1717            with_skipping_bloom: false,
1718            with_vector: true,
1719        });
1720
1721        let mut indexer = IndexerBuilderImpl {
1722            build_type: IndexBuildType::Flush,
1723            metadata,
1724            row_group_size: 1024,
1725            puffin_manager: factory.build(mock_object_store(), TestPathProvider),
1726            write_cache_enabled: false,
1727            intermediate_manager: intm_manager,
1728            index_options: IndexOptions::default(),
1729            inverted_index_config: InvertedIndexConfig::default(),
1730            fulltext_index_config: FulltextIndexConfig::default(),
1731            bloom_filter_index_config: BloomFilterConfig::default(),
1732            vector_index_config: Default::default(),
1733        }
1734        .build(FileId::random(), 0)
1735        .await;
1736
1737        assert!(indexer.vector_indexer.is_some());
1738
1739        let vec1 = f32s_to_bytes(&[1.0, 0.0, 0.0, 0.0]);
1740        let vec2 = f32s_to_bytes(&[0.0, 1.0, 0.0, 0.0]);
1741
1742        let mut builder = BinaryBuilder::with_capacity(2, vec1.len() + vec2.len());
1743        builder.append_value(&vec1);
1744        builder.append_value(&vec2);
1745
1746        let schema = Arc::new(Schema::new(vec![Field::new("vec", DataType::Binary, true)]));
1747        let batch = RecordBatch::try_new(schema, vec![Arc::new(builder.finish())]).unwrap();
1748
1749        indexer.update_flat(&batch).await;
1750        let output = indexer.finish().await;
1751
1752        assert!(output.vector_index.is_available());
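        // Column id 6 is the mocked vector column in the test metadata.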
1753        assert!(output.vector_index.columns.contains(&6));
1754    }
1755
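    // Scheduling a build task for an SST file that does not exist in the object
    // store should abort the task instead of finishing it.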
1756    #[tokio::test]
1757    async fn test_index_build_task_sst_not_exist() {
1758        let env = SchedulerEnv::new().await;
1759        let (tx, _rx) = mpsc::channel(4);
1760        let (result_tx, mut result_rx) = mpsc::channel::<Result<IndexBuildOutcome>>(4);
1761        let mut scheduler = env.mock_index_build_scheduler(4);
1762        let metadata = Arc::new(sst_region_metadata());
1763        let manifest_ctx = env.mock_manifest_context(metadata.clone()).await;
1764        let file_purger = Arc::new(NoopFilePurger {});
1765        let files = HashMap::new();
1766        let version_control =
1767            mock_version_control(metadata.clone(), file_purger.clone(), files).await;
1768        let region_id = metadata.region_id;
1769        let indexer_builder = mock_indexer_builder(metadata, &env).await;
1770
1771        let file_meta = FileMeta {
1772            region_id,
1773            file_id: FileId::random(),
1774            file_size: 100,
1775            ..Default::default()
1776        };
1777
1778        let file = FileHandle::new(file_meta.clone(), file_purger.clone());
1779
1780        // Create mock task.
1781        let task = IndexBuildTask {
1782            file,
1783            file_meta,
1784            reason: IndexBuildType::Flush,
1785            access_layer: env.access_layer.clone(),
1786            listener: WorkerListener::default(),
1787            manifest_ctx,
1788            write_cache: None,
1789            file_purger,
1790            indexer_builder,
1791            request_sender: tx,
1792            result_sender: result_tx,
1793        };
1794
1795        // Schedule the build task and check result.
1796        scheduler
1797            .schedule_build(&version_control, task)
1798            .await
1799            .unwrap();
1800        match result_rx.recv().await.unwrap() {
1801            Ok(outcome) if outcome != IndexBuildOutcome::Finished => {}
1802            _ => panic!("Expect aborted result due to missing SST file"),
1803        }
1808    }
1809
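    // When the SST file exists, the build task should finish and notify the worker
    // with a region edit carrying the rebuilt index metadata.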
1810    #[tokio::test]
1811    async fn test_index_build_task_sst_exist() {
1812        let env = SchedulerEnv::new().await;
1813        let mut scheduler = env.mock_index_build_scheduler(4);
1814        let metadata = Arc::new(sst_region_metadata());
1815        let manifest_ctx = env.mock_manifest_context(metadata.clone()).await;
1816        let region_id = metadata.region_id;
1817        let file_purger = Arc::new(NoopFilePurger {});
1818        let sst_info = mock_sst_file(metadata.clone(), &env, IndexBuildMode::Async).await;
1819        let file_meta = FileMeta {
1820            region_id,
1821            file_id: sst_info.file_id,
1822            file_size: sst_info.file_size,
1823            max_row_group_uncompressed_size: sst_info.max_row_group_uncompressed_size,
1824            index_file_size: sst_info.index_metadata.file_size,
1825            num_rows: sst_info.num_rows as u64,
1826            num_row_groups: sst_info.num_row_groups,
1827            ..Default::default()
1828        };
1829        let files = HashMap::from([(file_meta.file_id, file_meta.clone())]);
1830        let version_control =
1831            mock_version_control(metadata.clone(), file_purger.clone(), files).await;
1832        let indexer_builder = mock_indexer_builder(metadata.clone(), &env).await;
1833
1834        let file = FileHandle::new(file_meta.clone(), file_purger.clone());
1835
1836        // Create mock task.
1837        let (tx, mut rx) = mpsc::channel(4);
1838        let (result_tx, mut result_rx) = mpsc::channel::<Result<IndexBuildOutcome>>(4);
1839        let task = IndexBuildTask {
1840            file,
1841            file_meta: file_meta.clone(),
1842            reason: IndexBuildType::Flush,
1843            access_layer: env.access_layer.clone(),
1844            listener: WorkerListener::default(),
1845            manifest_ctx,
1846            write_cache: None,
1847            file_purger,
1848            indexer_builder,
1849            request_sender: tx,
1850            result_sender: result_tx,
1851        };
1852
1853        scheduler
1854            .schedule_build(&version_control, task)
1855            .await
1856            .unwrap();
1857
1858        // The task should finish successfully.
1859        match result_rx.recv().await.unwrap() {
1860            Ok(outcome) => {
1861                assert_eq!(outcome, IndexBuildOutcome::Finished);
1862            }
1863            _ => panic!("Expect finished result"),
1864        }
1865
1866        // A notification should be sent to the worker to update the manifest.
1867        let worker_req = rx.recv().await.unwrap().request;
1868        match worker_req {
1869            WorkerRequest::Background {
1870                region_id: req_region_id,
1871                notify: BackgroundNotify::IndexBuildFinished(finished),
1872            } => {
1873                assert_eq!(req_region_id, region_id);
1874                assert_eq!(finished.edit.files_to_add.len(), 1);
1875                let updated_meta = &finished.edit.files_to_add[0];
1876
1877                // The mock indexer builder creates all index types.
1878                assert!(!updated_meta.available_indexes.is_empty());
1879                assert!(updated_meta.index_file_size > 0);
1880                assert_eq!(updated_meta.file_id, file_meta.file_id);
1881            }
1882            _ => panic!("Unexpected worker request: {:?}", worker_req),
1883        }
1884    }
1885
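    // Schedules an index build task over an SST written with the given build mode
    // and checks when the puffin index file becomes visible.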
1886    async fn schedule_index_build_task_with_mode(build_mode: IndexBuildMode) {
1887        let env = SchedulerEnv::new().await;
1888        let mut scheduler = env.mock_index_build_scheduler(4);
1889        let metadata = Arc::new(sst_region_metadata());
1890        let manifest_ctx = env.mock_manifest_context(metadata.clone()).await;
1891        let file_purger = Arc::new(NoopFilePurger {});
1892        let region_id = metadata.region_id;
1893        let sst_info = mock_sst_file(metadata.clone(), &env, build_mode.clone()).await;
1894        let file_meta = FileMeta {
1895            region_id,
1896            file_id: sst_info.file_id,
1897            file_size: sst_info.file_size,
1898            max_row_group_uncompressed_size: sst_info.max_row_group_uncompressed_size,
1899            index_file_size: sst_info.index_metadata.file_size,
1900            num_rows: sst_info.num_rows as u64,
1901            num_row_groups: sst_info.num_row_groups,
1902            ..Default::default()
1903        };
1904        let files = HashMap::from([(file_meta.file_id, file_meta.clone())]);
1905        let version_control =
1906            mock_version_control(metadata.clone(), file_purger.clone(), files).await;
1907        let indexer_builder = mock_indexer_builder(metadata.clone(), &env).await;
1908
1909        let file = FileHandle::new(file_meta.clone(), file_purger.clone());
1910
1911        // Create mock task.
1912        let (tx, _rx) = mpsc::channel(4);
1913        let (result_tx, mut result_rx) = mpsc::channel::<Result<IndexBuildOutcome>>(4);
1914        let task = IndexBuildTask {
1915            file,
1916            file_meta: file_meta.clone(),
1917            reason: IndexBuildType::Flush,
1918            access_layer: env.access_layer.clone(),
1919            listener: WorkerListener::default(),
1920            manifest_ctx,
1921            write_cache: None,
1922            file_purger,
1923            indexer_builder,
1924            request_sender: tx,
1925            result_sender: result_tx,
1926        };
1927
1928        scheduler
1929            .schedule_build(&version_control, task)
1930            .await
1931            .unwrap();
1932
1933        let puffin_path = location::index_file_path(
1934            env.access_layer.table_dir(),
1935            RegionIndexId::new(RegionFileId::new(region_id, file_meta.file_id), 0),
1936            env.access_layer.path_type(),
1937        );
1938
1939        if build_mode == IndexBuildMode::Async {
1940            // In async mode the index is built by this task, so the file should not exist yet.
1941            assert!(
1942                !env.access_layer
1943                    .object_store()
1944                    .exists(&puffin_path)
1945                    .await
1946                    .unwrap()
1947            );
1948        } else {
1949            // In sync mode the index was built with the SST, so the file should already exist.
1950            assert!(
1951                env.access_layer
1952                    .object_store()
1953                    .exists(&puffin_path)
1954                    .await
1955                    .unwrap()
1956            );
1957        }
1958
1959        // The task should finish successfully.
1960        match result_rx.recv().await.unwrap() {
1961            Ok(outcome) => {
1962                assert_eq!(outcome, IndexBuildOutcome::Finished);
1963            }
1964            _ => panic!("Expect finished result"),
1965        }
1966
1967        // The index file should exist after the task finishes.
1968        assert!(
1969            env.access_layer
1970                .object_store()
1971                .exists(&puffin_path)
1972                .await
1973                .unwrap()
1974        );
1975    }
1976
1977    #[tokio::test]
1978    async fn test_index_build_task_build_mode() {
1979        schedule_index_build_task_with_mode(IndexBuildMode::Async).await;
1980        schedule_index_build_task_with_mode(IndexBuildMode::Sync).await;
1981    }
1982
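    // When no column requires an index, the task finishes without building anything
    // and no notification is sent to the worker.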
1983    #[tokio::test]
1984    async fn test_index_build_task_no_index() {
1985        let env = SchedulerEnv::new().await;
1986        let mut scheduler = env.mock_index_build_scheduler(4);
1987        let mut metadata = sst_region_metadata();
1988        // Unset the indexes in the metadata to simulate a region without any index.
1989        metadata.column_metadatas.iter_mut().for_each(|col| {
1990            col.column_schema.set_inverted_index(false);
1991            let _ = col.column_schema.unset_skipping_options();
1992        });
1993        let region_id = metadata.region_id;
1994        let metadata = Arc::new(metadata);
1995        let manifest_ctx = env.mock_manifest_context(metadata.clone()).await;
1996        let file_purger = Arc::new(NoopFilePurger {});
1997        let sst_info = mock_sst_file(metadata.clone(), &env, IndexBuildMode::Async).await;
1998        let file_meta = FileMeta {
1999            region_id,
2000            file_id: sst_info.file_id,
2001            file_size: sst_info.file_size,
2002            max_row_group_uncompressed_size: sst_info.max_row_group_uncompressed_size,
2003            index_file_size: sst_info.index_metadata.file_size,
2004            num_rows: sst_info.num_rows as u64,
2005            num_row_groups: sst_info.num_row_groups,
2006            ..Default::default()
2007        };
2008        let files = HashMap::from([(file_meta.file_id, file_meta.clone())]);
2009        let version_control =
2010            mock_version_control(metadata.clone(), file_purger.clone(), files).await;
2011        let indexer_builder = mock_indexer_builder(metadata.clone(), &env).await;
2012
2013        let file = FileHandle::new(file_meta.clone(), file_purger.clone());
2014
2015        // Create mock task.
2016        let (tx, mut rx) = mpsc::channel(4);
2017        let (result_tx, mut result_rx) = mpsc::channel::<Result<IndexBuildOutcome>>(4);
2018        let task = IndexBuildTask {
2019            file,
2020            file_meta: file_meta.clone(),
2021            reason: IndexBuildType::Flush,
2022            access_layer: env.access_layer.clone(),
2023            listener: WorkerListener::default(),
2024            manifest_ctx,
2025            write_cache: None,
2026            file_purger,
2027            indexer_builder,
2028            request_sender: tx,
2029            result_sender: result_tx,
2030        };
2031
2032        scheduler
2033            .schedule_build(&version_control, task)
2034            .await
2035            .unwrap();
2036
2037        // The task should finish successfully.
2038        match result_rx.recv().await.unwrap() {
2039            Ok(outcome) => {
2040                assert_eq!(outcome, IndexBuildOutcome::Finished);
2041            }
2042            _ => panic!("Expect finished result"),
2043        }
2044
2045        // No index is built, so no notification should be sent to the worker.
2046        assert!(rx.recv().await.is_none());
2047    }
2048
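    // With a write cache attached, the built index file should also be put into the
    // write cache's file cache.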
2049    #[tokio::test]
2050    async fn test_index_build_task_with_write_cache() {
2051        let env = SchedulerEnv::new().await;
2052        let mut scheduler = env.mock_index_build_scheduler(4);
2053        let metadata = Arc::new(sst_region_metadata());
2054        let manifest_ctx = env.mock_manifest_context(metadata.clone()).await;
2055        let file_purger = Arc::new(NoopFilePurger {});
2056        let region_id = metadata.region_id;
2057
2058        let (dir, factory) = PuffinManagerFactory::new_for_test_async("test_write_cache").await;
2059        let intm_manager = mock_intm_mgr(dir.path().to_string_lossy()).await;
2060
2061        // Create mock write cache
2062        let write_cache = Arc::new(
2063            WriteCache::new_fs(
2064                dir.path().to_str().unwrap(),
2065                ReadableSize::mb(10),
2066                None,
2067                None,
2068                true, // enable_background_worker
2069                factory,
2070                intm_manager,
2071                ReadableSize::mb(10),
2072            )
2073            .await
2074            .unwrap(),
2075        );
2076        // Indexer builder backed by the write cache's puffin and intermediate managers.
2077        let indexer_builder = Arc::new(IndexerBuilderImpl {
2078            build_type: IndexBuildType::Flush,
2079            metadata: metadata.clone(),
2080            row_group_size: 1024,
2081            puffin_manager: write_cache.build_puffin_manager().clone(),
2082            write_cache_enabled: true,
2083            intermediate_manager: write_cache.intermediate_manager().clone(),
2084            index_options: IndexOptions::default(),
2085            inverted_index_config: InvertedIndexConfig::default(),
2086            fulltext_index_config: FulltextIndexConfig::default(),
2087            bloom_filter_index_config: BloomFilterConfig::default(),
2088            #[cfg(feature = "vector_index")]
2089            vector_index_config: Default::default(),
2090        });
2091
2092        let sst_info = mock_sst_file(metadata.clone(), &env, IndexBuildMode::Async).await;
2093        let file_meta = FileMeta {
2094            region_id,
2095            file_id: sst_info.file_id,
2096            file_size: sst_info.file_size,
2097            index_file_size: sst_info.index_metadata.file_size,
2098            num_rows: sst_info.num_rows as u64,
2099            num_row_groups: sst_info.num_row_groups,
2100            ..Default::default()
2101        };
2102        let files = HashMap::from([(file_meta.file_id, file_meta.clone())]);
2103        let version_control =
2104            mock_version_control(metadata.clone(), file_purger.clone(), files).await;
2105
2106        let file = FileHandle::new(file_meta.clone(), file_purger.clone());
2107
2108        // Create mock task.
2109        let (tx, _rx) = mpsc::channel(4);
2110        let (result_tx, mut result_rx) = mpsc::channel::<Result<IndexBuildOutcome>>(4);
2111        let task = IndexBuildTask {
2112            file,
2113            file_meta: file_meta.clone(),
2114            reason: IndexBuildType::Flush,
2115            access_layer: env.access_layer.clone(),
2116            listener: WorkerListener::default(),
2117            manifest_ctx,
2118            write_cache: Some(write_cache.clone()),
2119            file_purger,
2120            indexer_builder,
2121            request_sender: tx,
2122            result_sender: result_tx,
2123        };
2124
2125        scheduler
2126            .schedule_build(&version_control, task)
2127            .await
2128            .unwrap();
2129
2130        // The task should finish successfully.
2131        match result_rx.recv().await.unwrap() {
2132            Ok(outcome) => {
2133                assert_eq!(outcome, IndexBuildOutcome::Finished);
2134            }
2135            _ => panic!("Expect finished result"),
2136        }
2137
2138        // The write cache should contain the uploaded index file.
2139        let index_key = IndexKey::new(
2140            region_id,
2141            file_meta.file_id,
2142            FileType::Puffin(sst_info.index_metadata.version),
2143        );
2144        assert!(write_cache.file_cache().contains_key(&index_key));
2145    }
2146
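    // Builds a minimal `IndexBuildTask` for the scheduler tests; the referenced SST
    // data is never actually written to the object store.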
2147    async fn create_mock_task_for_schedule(
2148        env: &SchedulerEnv,
2149        file_id: FileId,
2150        region_id: RegionId,
2151        reason: IndexBuildType,
2152    ) -> IndexBuildTask {
2153        let metadata = Arc::new(sst_region_metadata());
2154        let manifest_ctx = env.mock_manifest_context(metadata.clone()).await;
2155        let file_purger = Arc::new(NoopFilePurger {});
2156        let indexer_builder = mock_indexer_builder(metadata, env).await;
2157        let (tx, _rx) = mpsc::channel(4);
2158        let (result_tx, _result_rx) = mpsc::channel::<Result<IndexBuildOutcome>>(4);
2159
2160        let file_meta = FileMeta {
2161            region_id,
2162            file_id,
2163            file_size: 100,
2164            ..Default::default()
2165        };
2166
2167        let file = FileHandle::new(file_meta.clone(), file_purger.clone());
2168
2169        IndexBuildTask {
2170            file,
2171            file_meta,
2172            reason,
2173            access_layer: env.access_layer.clone(),
2174            listener: WorkerListener::default(),
2175            manifest_ctx,
2176            write_cache: None,
2177            file_purger,
2178            indexer_builder,
2179            request_sender: tx,
2180            result_sender: result_tx,
2181        }
2182    }
2183
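    // Exercises the scheduler end to end: deduplication, the per-region build limit,
    // priority ordering of pending tasks, and cleanup when the region is dropped.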
2184    #[tokio::test]
2185    async fn test_scheduler_comprehensive() {
2186        let env = SchedulerEnv::new().await;
2187        let mut scheduler = env.mock_index_build_scheduler(2);
2188        let metadata = Arc::new(sst_region_metadata());
2189        let region_id = metadata.region_id;
2190        let file_purger = Arc::new(NoopFilePurger {});
2191
2192        // Prepare multiple files for testing
2193        let file_id1 = FileId::random();
2194        let file_id2 = FileId::random();
2195        let file_id3 = FileId::random();
2196        let file_id4 = FileId::random();
2197        let file_id5 = FileId::random();
2198
2199        let mut files = HashMap::new();
2200        for file_id in [file_id1, file_id2, file_id3, file_id4, file_id5] {
2201            files.insert(
2202                file_id,
2203                FileMeta {
2204                    region_id,
2205                    file_id,
2206                    file_size: 100,
2207                    ..Default::default()
2208                },
2209            );
2210        }
2211
2212        let version_control = mock_version_control(metadata, file_purger, files).await;
2213
2214        // Test 1: Basic scheduling
2215        let task1 =
2216            create_mock_task_for_schedule(&env, file_id1, region_id, IndexBuildType::Flush).await;
2217        assert!(
2218            scheduler
2219                .schedule_build(&version_control, task1)
2220                .await
2221                .is_ok()
2222        );
2223        assert!(scheduler.region_status.contains_key(&region_id));
2224        let status = scheduler.region_status.get(&region_id).unwrap();
2225        assert_eq!(status.building_files.len(), 1);
2226        assert!(status.building_files.contains(&file_id1));
2227
2228        // Test 2: Duplicate file scheduling (should be skipped)
2229        let task1_dup =
2230            create_mock_task_for_schedule(&env, file_id1, region_id, IndexBuildType::Flush).await;
2231        scheduler
2232            .schedule_build(&version_control, task1_dup)
2233            .await
2234            .unwrap();
2235        let status = scheduler.region_status.get(&region_id).unwrap();
2236        assert_eq!(status.building_files.len(), 1); // Still only one
2237
2238        // Test 3: Fill up to limit (2 building tasks)
2239        let task2 =
2240            create_mock_task_for_schedule(&env, file_id2, region_id, IndexBuildType::Flush).await;
2241        scheduler
2242            .schedule_build(&version_control, task2)
2243            .await
2244            .unwrap();
2245        let status = scheduler.region_status.get(&region_id).unwrap();
2246        assert_eq!(status.building_files.len(), 2); // Reached limit
2247        assert_eq!(status.pending_tasks.len(), 0);
2248
2249        // Test 4: Add tasks with different priorities to pending queue
2250        // Now all new tasks will be pending since we reached the limit
2251        let task3 =
2252            create_mock_task_for_schedule(&env, file_id3, region_id, IndexBuildType::Compact).await;
2253        let task4 =
2254            create_mock_task_for_schedule(&env, file_id4, region_id, IndexBuildType::SchemaChange)
2255                .await;
2256        let task5 =
2257            create_mock_task_for_schedule(&env, file_id5, region_id, IndexBuildType::Manual).await;
2258
2259        scheduler
2260            .schedule_build(&version_control, task3)
2261            .await
2262            .unwrap();
2263        scheduler
2264            .schedule_build(&version_control, task4)
2265            .await
2266            .unwrap();
2267        scheduler
2268            .schedule_build(&version_control, task5)
2269            .await
2270            .unwrap();
2271
2272        let status = scheduler.region_status.get(&region_id).unwrap();
2273        assert_eq!(status.building_files.len(), 2); // Still at limit
2274        assert_eq!(status.pending_tasks.len(), 3); // Three pending
2275
2276        // Test 5: Task completion triggers scheduling next highest priority task (Manual)
2277        scheduler.on_task_stopped(region_id, file_id1, &version_control);
2278        let status = scheduler.region_status.get(&region_id).unwrap();
2279        assert!(!status.building_files.contains(&file_id1));
2280        assert_eq!(status.building_files.len(), 2); // Should schedule next task
2281        assert_eq!(status.pending_tasks.len(), 2); // One less pending
2282        // The highest priority task (Manual) should now be building
2283        assert!(status.building_files.contains(&file_id5));
2284
2285        // Test 6: Complete another task, should schedule SchemaChange (second highest priority)
2286        scheduler.on_task_stopped(region_id, file_id2, &version_control);
2287        let status = scheduler.region_status.get(&region_id).unwrap();
2288        assert_eq!(status.building_files.len(), 2);
2289        assert_eq!(status.pending_tasks.len(), 1); // One less pending
2290        assert!(status.building_files.contains(&file_id4)); // SchemaChange should be building
2291
2292        // Test 7: Complete remaining tasks and cleanup
2293        scheduler.on_task_stopped(region_id, file_id5, &version_control);
2294        scheduler.on_task_stopped(region_id, file_id4, &version_control);
2295
2296        let status = scheduler.region_status.get(&region_id).unwrap();
2297        assert_eq!(status.building_files.len(), 1); // Last task (Compact) should be building
2298        assert_eq!(status.pending_tasks.len(), 0);
2299        assert!(status.building_files.contains(&file_id3));
2300
2301        scheduler.on_task_stopped(region_id, file_id3, &version_control);
2302
2303        // Region should be removed when all tasks complete
2304        assert!(!scheduler.region_status.contains_key(&region_id));
2305
2306        // Test 8: Region dropped with pending tasks
2307        let task6 =
2308            create_mock_task_for_schedule(&env, file_id1, region_id, IndexBuildType::Flush).await;
2309        let task7 =
2310            create_mock_task_for_schedule(&env, file_id2, region_id, IndexBuildType::Flush).await;
2311        let task8 =
2312            create_mock_task_for_schedule(&env, file_id3, region_id, IndexBuildType::Manual).await;
2313
2314        scheduler
2315            .schedule_build(&version_control, task6)
2316            .await
2317            .unwrap();
2318        scheduler
2319            .schedule_build(&version_control, task7)
2320            .await
2321            .unwrap();
2322        scheduler
2323            .schedule_build(&version_control, task8)
2324            .await
2325            .unwrap();
2326
2327        assert!(scheduler.region_status.contains_key(&region_id));
2328        let status = scheduler.region_status.get(&region_id).unwrap();
2329        assert_eq!(status.building_files.len(), 2);
2330        assert_eq!(status.pending_tasks.len(), 1);
2331
2332        scheduler.on_region_dropped(region_id).await;
2333        assert!(!scheduler.region_status.contains_key(&region_id));
2334    }
2335}