mito2/sst/index.rs

// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub(crate) mod bloom_filter;
pub(crate) mod fulltext_index;
mod indexer;
pub mod intermediate;
pub(crate) mod inverted_index;
pub mod puffin_manager;
mod statistics;
pub(crate) mod store;
#[cfg(feature = "vector_index")]
pub(crate) mod vector_index;

use std::cmp::Ordering;
use std::collections::{BinaryHeap, HashMap, HashSet};
use std::num::NonZeroUsize;
use std::sync::Arc;

use bloom_filter::creator::BloomFilterIndexer;
use common_telemetry::{debug, error, info, warn};
use datatypes::arrow::array::BinaryArray;
use datatypes::arrow::record_batch::RecordBatch;
use mito_codec::index::IndexValuesCodec;
use mito_codec::row_converter::CompositeValues;
use object_store::ObjectStore;
use puffin_manager::SstPuffinManager;
use smallvec::{SmallVec, smallvec};
use snafu::{OptionExt, ResultExt};
use statistics::{ByteCount, RowCount};
use store_api::metadata::RegionMetadataRef;
use store_api::storage::{ColumnId, FileId, RegionId};
use strum::IntoStaticStr;
use tokio::sync::mpsc::Sender;
#[cfg(feature = "vector_index")]
use vector_index::creator::VectorIndexer;

use crate::access_layer::{AccessLayerRef, FilePathProvider, OperationType, RegionFilePathFactory};
use crate::cache::file_cache::{FileCacheRef, FileType, IndexKey};
use crate::cache::write_cache::{UploadTracker, WriteCacheRef};
#[cfg(feature = "vector_index")]
use crate::config::VectorIndexConfig;
use crate::config::{BloomFilterConfig, FulltextIndexConfig, InvertedIndexConfig};
use crate::error::{
    BuildIndexAsyncSnafu, DecodeSnafu, Error, InvalidRecordBatchSnafu, RegionClosedSnafu,
    RegionDroppedSnafu, RegionTruncatedSnafu, Result,
};
use crate::manifest::action::{RegionEdit, RegionMetaAction, RegionMetaActionList};
use crate::metrics::INDEX_CREATE_MEMORY_USAGE;
use crate::read::{Batch, BatchReader};
use crate::region::options::IndexOptions;
use crate::region::version::VersionControlRef;
use crate::region::{ManifestContextRef, RegionLeaderState};
use crate::request::{
    BackgroundNotify, IndexBuildFailed, IndexBuildFinished, IndexBuildStopped, WorkerRequest,
    WorkerRequestWithTime,
};
use crate::schedule::scheduler::{Job, SchedulerRef};
use crate::sst::file::{
    ColumnIndexMetadata, FileHandle, FileMeta, IndexType, IndexTypes, RegionFileId, RegionIndexId,
};
use crate::sst::file_purger::FilePurgerRef;
use crate::sst::index::fulltext_index::creator::FulltextIndexer;
use crate::sst::index::intermediate::IntermediateManager;
use crate::sst::index::inverted_index::creator::InvertedIndexer;
use crate::sst::parquet::SstInfo;
use crate::sst::parquet::flat_format::primary_key_column_index;
use crate::sst::parquet::format::PrimaryKeyArray;
use crate::worker::WorkerListener;

pub(crate) const TYPE_INVERTED_INDEX: &str = "inverted_index";
pub(crate) const TYPE_FULLTEXT_INDEX: &str = "fulltext_index";
pub(crate) const TYPE_BLOOM_FILTER_INDEX: &str = "bloom_filter_index";
#[cfg(feature = "vector_index")]
pub(crate) const TYPE_VECTOR_INDEX: &str = "vector_index";

/// Triggers background download of an index file to the local cache.
pub(crate) fn trigger_index_background_download(
    file_cache: Option<&FileCacheRef>,
    file_id: &RegionIndexId,
    file_size_hint: Option<u64>,
    path_factory: &RegionFilePathFactory,
    object_store: &ObjectStore,
) {
    if let (Some(file_cache), Some(file_size)) = (file_cache, file_size_hint) {
        let index_key = IndexKey::new(
            file_id.region_id(),
            file_id.file_id(),
            FileType::Puffin(file_id.version),
        );
        let remote_path = path_factory.build_index_file_path(file_id.file_id);
        file_cache.maybe_download_background(
            index_key,
            remote_path,
            object_store.clone(),
            file_size,
        );
    }
}

/// Output of the index creation.
#[derive(Debug, Clone, Default)]
pub struct IndexOutput {
    /// Size of the index file.
    pub file_size: u64,
    /// Index version.
    pub version: u64,
    /// Inverted index output.
    pub inverted_index: InvertedIndexOutput,
    /// Fulltext index output.
    pub fulltext_index: FulltextIndexOutput,
    /// Bloom filter output.
    pub bloom_filter: BloomFilterOutput,
    /// Vector index output.
    #[cfg(feature = "vector_index")]
    pub vector_index: VectorIndexOutput,
}

impl IndexOutput {
    pub fn build_available_indexes(&self) -> SmallVec<[IndexType; 4]> {
        let mut indexes = SmallVec::new();
        if self.inverted_index.is_available() {
            indexes.push(IndexType::InvertedIndex);
        }
        if self.fulltext_index.is_available() {
            indexes.push(IndexType::FulltextIndex);
        }
        if self.bloom_filter.is_available() {
            indexes.push(IndexType::BloomFilterIndex);
        }
        #[cfg(feature = "vector_index")]
        if self.vector_index.is_available() {
            indexes.push(IndexType::VectorIndex);
        }
        indexes
    }

    pub fn build_indexes(&self) -> Vec<ColumnIndexMetadata> {
        let mut map: HashMap<ColumnId, IndexTypes> = HashMap::new();

        if self.inverted_index.is_available() {
            for &col in &self.inverted_index.columns {
                map.entry(col).or_default().push(IndexType::InvertedIndex);
            }
        }
        if self.fulltext_index.is_available() {
            for &col in &self.fulltext_index.columns {
                map.entry(col).or_default().push(IndexType::FulltextIndex);
            }
        }
        if self.bloom_filter.is_available() {
            for &col in &self.bloom_filter.columns {
                map.entry(col)
                    .or_default()
                    .push(IndexType::BloomFilterIndex);
            }
        }
        #[cfg(feature = "vector_index")]
        if self.vector_index.is_available() {
            for &col in &self.vector_index.columns {
                map.entry(col).or_default().push(IndexType::VectorIndex);
            }
        }

        map.into_iter()
            .map(|(column_id, created_indexes)| ColumnIndexMetadata {
                column_id,
                created_indexes,
            })
            .collect::<Vec<_>>()
    }
}

/// Base output of the index creation.
#[derive(Debug, Clone, Default)]
pub struct IndexBaseOutput {
    /// Size of the index.
    pub index_size: ByteCount,
    /// Number of rows in the index.
    pub row_count: RowCount,
    /// Available columns in the index.
    pub columns: Vec<ColumnId>,
}

impl IndexBaseOutput {
    pub fn is_available(&self) -> bool {
        self.index_size > 0
    }
}

/// Output of the inverted index creation.
pub type InvertedIndexOutput = IndexBaseOutput;
/// Output of the fulltext index creation.
pub type FulltextIndexOutput = IndexBaseOutput;
/// Output of the bloom filter creation.
pub type BloomFilterOutput = IndexBaseOutput;
/// Output of the vector index creation.
#[cfg(feature = "vector_index")]
pub type VectorIndexOutput = IndexBaseOutput;

/// The index creator that hides the error handling details.
#[derive(Default)]
pub struct Indexer {
    file_id: FileId,
    region_id: RegionId,
    index_version: u64,
    puffin_manager: Option<SstPuffinManager>,
    write_cache_enabled: bool,
    inverted_indexer: Option<InvertedIndexer>,
    last_mem_inverted_index: usize,
    fulltext_indexer: Option<FulltextIndexer>,
    last_mem_fulltext_index: usize,
    bloom_filter_indexer: Option<BloomFilterIndexer>,
    last_mem_bloom_filter: usize,
    #[cfg(feature = "vector_index")]
    vector_indexer: Option<VectorIndexer>,
    #[cfg(feature = "vector_index")]
    last_mem_vector_index: usize,
    intermediate_manager: Option<IntermediateManager>,
}

impl Indexer {
    /// Updates the index with the given batch.
    pub async fn update(&mut self, batch: &mut Batch) {
        self.do_update(batch).await;

        self.flush_mem_metrics();
    }

    /// Updates the index with the given flat format RecordBatch.
    pub async fn update_flat(&mut self, batch: &RecordBatch) {
        self.do_update_flat(batch).await;

        self.flush_mem_metrics();
    }

    /// Finalizes the index creation.
    pub async fn finish(&mut self) -> IndexOutput {
        let output = self.do_finish().await;

        self.flush_mem_metrics();
        output
    }

    /// Aborts the index creation.
    pub async fn abort(&mut self) {
        self.do_abort().await;

        self.flush_mem_metrics();
    }

    fn flush_mem_metrics(&mut self) {
        let inverted_mem = self
            .inverted_indexer
            .as_ref()
            .map_or(0, |creator| creator.memory_usage());
        INDEX_CREATE_MEMORY_USAGE
            .with_label_values(&[TYPE_INVERTED_INDEX])
            .add(inverted_mem as i64 - self.last_mem_inverted_index as i64);
        self.last_mem_inverted_index = inverted_mem;

        let fulltext_mem = self
            .fulltext_indexer
            .as_ref()
            .map_or(0, |creator| creator.memory_usage());
        INDEX_CREATE_MEMORY_USAGE
            .with_label_values(&[TYPE_FULLTEXT_INDEX])
            .add(fulltext_mem as i64 - self.last_mem_fulltext_index as i64);
        self.last_mem_fulltext_index = fulltext_mem;

        let bloom_filter_mem = self
            .bloom_filter_indexer
            .as_ref()
            .map_or(0, |creator| creator.memory_usage());
        INDEX_CREATE_MEMORY_USAGE
            .with_label_values(&[TYPE_BLOOM_FILTER_INDEX])
            .add(bloom_filter_mem as i64 - self.last_mem_bloom_filter as i64);
        self.last_mem_bloom_filter = bloom_filter_mem;

        #[cfg(feature = "vector_index")]
        {
            let vector_mem = self
                .vector_indexer
                .as_ref()
                .map_or(0, |creator| creator.memory_usage());
            INDEX_CREATE_MEMORY_USAGE
                .with_label_values(&[TYPE_VECTOR_INDEX])
                .add(vector_mem as i64 - self.last_mem_vector_index as i64);
            self.last_mem_vector_index = vector_mem;
        }
    }
}

#[async_trait::async_trait]
pub trait IndexerBuilder {
    /// Builds an indexer for the given file id and index version.
    async fn build(&self, file_id: FileId, index_version: u64) -> Indexer;
}
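
// Typical lifecycle of an `Indexer` produced by an [IndexerBuilder] (an
// illustrative sketch, not part of the original code; `builder` and `reader`
// are assumed handles):
//
//     let mut indexer = builder.build(file_id, /* index_version */ 0).await;
//     while let Some(mut batch) = reader.next_batch().await? {
//         indexer.update(&mut batch).await;
//     }
//     let output = indexer.finish().await; // or `indexer.abort().await` on error
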
#[derive(Clone)]
pub(crate) struct IndexerBuilderImpl {
    pub(crate) build_type: IndexBuildType,
    pub(crate) metadata: RegionMetadataRef,
    pub(crate) row_group_size: usize,
    pub(crate) puffin_manager: SstPuffinManager,
    pub(crate) write_cache_enabled: bool,
    pub(crate) intermediate_manager: IntermediateManager,
    pub(crate) index_options: IndexOptions,
    pub(crate) inverted_index_config: InvertedIndexConfig,
    pub(crate) fulltext_index_config: FulltextIndexConfig,
    pub(crate) bloom_filter_index_config: BloomFilterConfig,
    #[cfg(feature = "vector_index")]
    pub(crate) vector_index_config: VectorIndexConfig,
}

#[async_trait::async_trait]
impl IndexerBuilder for IndexerBuilderImpl {
    /// Sanity checks the arguments and creates a new [Indexer] if they are valid.
    async fn build(&self, file_id: FileId, index_version: u64) -> Indexer {
        let mut indexer = Indexer {
            file_id,
            region_id: self.metadata.region_id,
            index_version,
            write_cache_enabled: self.write_cache_enabled,
            ..Default::default()
        };

        indexer.inverted_indexer = self.build_inverted_indexer(file_id);
        indexer.fulltext_indexer = self.build_fulltext_indexer(file_id).await;
        indexer.bloom_filter_indexer = self.build_bloom_filter_indexer(file_id);
        #[cfg(feature = "vector_index")]
        {
            indexer.vector_indexer = self.build_vector_indexer(file_id);
        }
        indexer.intermediate_manager = Some(self.intermediate_manager.clone());

        #[cfg(feature = "vector_index")]
        let has_any_indexer = indexer.inverted_indexer.is_some()
            || indexer.fulltext_indexer.is_some()
            || indexer.bloom_filter_indexer.is_some()
            || indexer.vector_indexer.is_some();
        #[cfg(not(feature = "vector_index"))]
        let has_any_indexer = indexer.inverted_indexer.is_some()
            || indexer.fulltext_indexer.is_some()
            || indexer.bloom_filter_indexer.is_some();

        if !has_any_indexer {
            indexer.abort().await;
            return Indexer::default();
        }

        indexer.puffin_manager = Some(self.puffin_manager.clone());
        indexer
    }
}

impl IndexerBuilderImpl {
    fn build_inverted_indexer(&self, file_id: FileId) -> Option<InvertedIndexer> {
        let create = match self.build_type {
            IndexBuildType::Flush => self.inverted_index_config.create_on_flush.auto(),
            IndexBuildType::Compact => self.inverted_index_config.create_on_compaction.auto(),
            _ => true,
        };

        if !create {
            debug!(
                "Skip creating inverted index due to config, region_id: {}, file_id: {}",
                self.metadata.region_id, file_id,
            );
            return None;
        }

        let indexed_column_ids = self.metadata.inverted_indexed_column_ids(
            self.index_options.inverted_index.ignore_column_ids.iter(),
        );
        if indexed_column_ids.is_empty() {
            debug!(
                "No columns to be indexed, skip creating inverted index, region_id: {}, file_id: {}",
                self.metadata.region_id, file_id,
            );
            return None;
        }

        let Some(mut segment_row_count) =
            NonZeroUsize::new(self.index_options.inverted_index.segment_row_count)
        else {
            warn!(
                "Segment row count is 0, skip creating index, region_id: {}, file_id: {}",
                self.metadata.region_id, file_id,
            );
            return None;
        };

        let Some(row_group_size) = NonZeroUsize::new(self.row_group_size) else {
            warn!(
                "Row group size is 0, skip creating index, region_id: {}, file_id: {}",
                self.metadata.region_id, file_id,
            );
            return None;
        };

        // If the segment row count is not aligned with the row group size, adjust it to be aligned.
        if row_group_size.get() % segment_row_count.get() != 0 {
            segment_row_count = row_group_size;
        }

        let indexer = InvertedIndexer::new(
            file_id,
            &self.metadata,
            self.intermediate_manager.clone(),
            self.inverted_index_config.mem_threshold_on_create(),
            segment_row_count,
            indexed_column_ids,
        );

        Some(indexer)
    }

    async fn build_fulltext_indexer(&self, file_id: FileId) -> Option<FulltextIndexer> {
        let create = match self.build_type {
            IndexBuildType::Flush => self.fulltext_index_config.create_on_flush.auto(),
            IndexBuildType::Compact => self.fulltext_index_config.create_on_compaction.auto(),
            _ => true,
        };

        if !create {
            debug!(
                "Skip creating full-text index due to config, region_id: {}, file_id: {}",
                self.metadata.region_id, file_id,
            );
            return None;
        }

        let mem_limit = self.fulltext_index_config.mem_threshold_on_create();
        let creator = FulltextIndexer::new(
            &self.metadata.region_id,
            &file_id,
            &self.intermediate_manager,
            &self.metadata,
            self.fulltext_index_config.compress,
            mem_limit,
        )
        .await;

        let err = match creator {
            Ok(creator) => {
                if creator.is_none() {
                    debug!(
                        "Skip creating full-text index because no columns require indexing, region_id: {}, file_id: {}",
                        self.metadata.region_id, file_id,
                    );
                }
                return creator;
            }
            Err(err) => err,
        };

        if cfg!(any(test, feature = "test")) {
            panic!(
                "Failed to create full-text indexer, region_id: {}, file_id: {}, err: {:?}",
                self.metadata.region_id, file_id, err
            );
        } else {
            warn!(
                err; "Failed to create full-text indexer, region_id: {}, file_id: {}",
                self.metadata.region_id, file_id,
            );
        }

        None
    }

    fn build_bloom_filter_indexer(&self, file_id: FileId) -> Option<BloomFilterIndexer> {
        let create = match self.build_type {
            IndexBuildType::Flush => self.bloom_filter_index_config.create_on_flush.auto(),
            IndexBuildType::Compact => self.bloom_filter_index_config.create_on_compaction.auto(),
            _ => true,
        };

        if !create {
            debug!(
                "Skip creating bloom filter due to config, region_id: {}, file_id: {}",
                self.metadata.region_id, file_id,
            );
            return None;
        }

        let mem_limit = self.bloom_filter_index_config.mem_threshold_on_create();
        let indexer = BloomFilterIndexer::new(
            file_id,
            &self.metadata,
            self.intermediate_manager.clone(),
            mem_limit,
        );

        let err = match indexer {
            Ok(indexer) => {
                if indexer.is_none() {
                    debug!(
                        "Skip creating bloom filter because no columns require indexing, region_id: {}, file_id: {}",
                        self.metadata.region_id, file_id,
                    );
                }
                return indexer;
            }
            Err(err) => err,
        };

        if cfg!(any(test, feature = "test")) {
            panic!(
                "Failed to create bloom filter, region_id: {}, file_id: {}, err: {:?}",
                self.metadata.region_id, file_id, err
            );
        } else {
            warn!(
                err; "Failed to create bloom filter, region_id: {}, file_id: {}",
                self.metadata.region_id, file_id,
            );
        }

        None
    }

    #[cfg(feature = "vector_index")]
    fn build_vector_indexer(&self, file_id: FileId) -> Option<VectorIndexer> {
        let create = match self.build_type {
            IndexBuildType::Flush => self.vector_index_config.create_on_flush.auto(),
            IndexBuildType::Compact => self.vector_index_config.create_on_compaction.auto(),
            _ => true,
        };

        if !create {
            debug!(
                "Skip creating vector index due to config, region_id: {}, file_id: {}",
                self.metadata.region_id, file_id,
            );
            return None;
        }

        // Get vector index column IDs and options from metadata
        let vector_index_options = self.metadata.vector_indexed_column_ids();
        if vector_index_options.is_empty() {
            debug!(
                "No vector columns to index, skip creating vector index, region_id: {}, file_id: {}",
                self.metadata.region_id, file_id,
            );
            return None;
        }

        let mem_limit = self.vector_index_config.mem_threshold_on_create();
        let indexer = VectorIndexer::new(
            file_id,
            &self.metadata,
            self.intermediate_manager.clone(),
            mem_limit,
            &vector_index_options,
        );

        let err = match indexer {
            Ok(indexer) => {
                if indexer.is_none() {
                    debug!(
                        "Skip creating vector index because no columns require indexing, region_id: {}, file_id: {}",
                        self.metadata.region_id, file_id,
                    );
                }
                return indexer;
            }
            Err(err) => err,
        };

        if cfg!(any(test, feature = "test")) {
            panic!(
                "Failed to create vector index, region_id: {}, file_id: {}, err: {:?}",
                self.metadata.region_id, file_id, err
            );
        } else {
            warn!(
                err; "Failed to create vector index, region_id: {}, file_id: {}",
                self.metadata.region_id, file_id,
            );
        }

        None
    }
}

/// Type of an index build task.
#[derive(Debug, Clone, IntoStaticStr, PartialEq)]
pub enum IndexBuildType {
    /// Build index when the schema changes.
    SchemaChange,
    /// Create or update index after flush.
    Flush,
    /// Create or update index after compaction.
    Compact,
    /// Manually build index.
    Manual,
}

impl IndexBuildType {
    fn as_str(&self) -> &'static str {
        self.into()
    }

    // Higher value means higher priority.
    fn priority(&self) -> u8 {
        match self {
            IndexBuildType::Manual => 3,
            IndexBuildType::SchemaChange => 2,
            IndexBuildType::Flush => 1,
            IndexBuildType::Compact => 0,
        }
    }
}

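// Illustrative example (not part of the original test suite): this priority is
// what `IndexBuildTask`'s `Ord` implementation below compares, so the
// scheduler's `BinaryHeap` pops manual rebuilds before flush- or
// compaction-triggered ones.
#[cfg(test)]
mod build_type_priority_example {
    use super::IndexBuildType;

    #[test]
    fn manual_outranks_flush_and_compact() {
        assert!(IndexBuildType::Manual.priority() > IndexBuildType::Flush.priority());
        assert!(IndexBuildType::Flush.priority() > IndexBuildType::Compact.priority());
    }
}
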
impl From<OperationType> for IndexBuildType {
    fn from(op_type: OperationType) -> Self {
        match op_type {
            OperationType::Flush => IndexBuildType::Flush,
            OperationType::Compact => IndexBuildType::Compact,
        }
    }
}

/// Outcome of an index build task.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum IndexBuildOutcome {
    Finished,
    Aborted(String),
}

/// Mpsc sender for the index build result.
pub type ResultMpscSender = Sender<Result<IndexBuildOutcome>>;

#[derive(Clone)]
pub struct IndexBuildTask {
    /// The SST file handle to build index for.
    pub file: FileHandle,
    /// The file meta to build index for.
    pub file_meta: FileMeta,
    pub reason: IndexBuildType,
    pub access_layer: AccessLayerRef,
    pub(crate) listener: WorkerListener,
    pub(crate) manifest_ctx: ManifestContextRef,
    pub write_cache: Option<WriteCacheRef>,
    pub file_purger: FilePurgerRef,
    /// When write cache is enabled, the indexer builder should be built from the write cache.
    /// Otherwise, it should be built from the access layer.
    pub indexer_builder: Arc<dyn IndexerBuilder + Send + Sync>,
    /// Request sender to notify the region worker.
    pub(crate) request_sender: Sender<WorkerRequestWithTime>,
    /// Index build result sender.
    pub(crate) result_sender: ResultMpscSender,
}

impl std::fmt::Debug for IndexBuildTask {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("IndexBuildTask")
            .field("region_id", &self.file_meta.region_id)
            .field("file_id", &self.file_meta.file_id)
            .field("reason", &self.reason)
            .finish()
    }
}

impl IndexBuildTask {
    /// Notifies the caller that the job succeeded.
    pub async fn on_success(&self, outcome: IndexBuildOutcome) {
        let _ = self.result_sender.send(Ok(outcome)).await;
    }

    /// Sends the index build error to the waiter.
    pub async fn on_failure(&self, err: Arc<Error>) {
        let _ = self
            .result_sender
            .send(Err(err.clone()).context(BuildIndexAsyncSnafu {
                region_id: self.file_meta.region_id,
            }))
            .await;
    }

    fn into_index_build_job(mut self, version_control: VersionControlRef) -> Job {
        Box::pin(async move {
            self.do_index_build(version_control).await;
        })
    }

    async fn do_index_build(&mut self, version_control: VersionControlRef) {
        self.listener
            .on_index_build_begin(RegionFileId::new(
                self.file_meta.region_id,
                self.file_meta.file_id,
            ))
            .await;
        match self.index_build(version_control).await {
            Ok(outcome) => self.on_success(outcome).await,
            Err(e) => {
                warn!(
                    e; "Index build task failed, region: {}, file_id: {}",
                    self.file_meta.region_id, self.file_meta.file_id,
                );
                self.on_failure(e.into()).await
            }
        }
        let worker_request = WorkerRequest::Background {
            region_id: self.file_meta.region_id,
            notify: BackgroundNotify::IndexBuildStopped(IndexBuildStopped {
                region_id: self.file_meta.region_id,
                file_id: self.file_meta.file_id,
            }),
        };
        let _ = self
            .request_sender
            .send(WorkerRequestWithTime::new(worker_request))
            .await;
    }

    // Checks whether the SST file still exists in the object store and the current version
    // to avoid conflicts with compaction.
    async fn check_sst_file_exists(&self, version_control: &VersionControlRef) -> bool {
        let file_id = self.file_meta.file_id;
        let level = self.file_meta.level;
        // We should check the current version instead of the version captured when the job was created.
        let version = version_control.current().version;

        let Some(level_files) = version.ssts.levels().get(level as usize) else {
            warn!(
                "File id {} not found in level {} for index build, region: {}",
                file_id, level, self.file_meta.region_id
            );
            return false;
        };

        match level_files.files.get(&file_id) {
            Some(handle) if !handle.is_deleted() && !handle.compacting() => {
                // If the file's metadata is present in the current version, the physical SST file
                // is guaranteed to exist on object store. The file purger removes the physical
                // file only after its metadata is removed from the version.
                true
            }
            _ => {
                warn!(
                    "File id {} not found in region version for index build, region: {}",
                    file_id, self.file_meta.region_id
                );
                false
            }
        }
    }

    async fn index_build(
        &mut self,
        version_control: VersionControlRef,
    ) -> Result<IndexBuildOutcome> {
        // Determine the new index version
        let new_index_version = if self.file_meta.index_file_size > 0 {
            // Increment version if index file exists to avoid overwrite.
            self.file_meta.index_version + 1
        } else {
            0 // Default version for new index files
        };

        // Use the same file_id but with new version for index file
        let index_file_id = self.file_meta.file_id;
        let mut indexer = self
            .indexer_builder
            .build(index_file_id, new_index_version)
            .await;

        // Check SST file existence before building index to avoid failure of parquet reader.
        if !self.check_sst_file_exists(&version_control).await {
            // Calls abort to clean up index files.
            indexer.abort().await;
            self.listener
                .on_index_build_abort(RegionFileId::new(
                    self.file_meta.region_id,
                    self.file_meta.file_id,
                ))
                .await;
            return Ok(IndexBuildOutcome::Aborted(format!(
                "SST file not found during index build, region: {}, file_id: {}",
                self.file_meta.region_id, self.file_meta.file_id
            )));
        }

        let mut parquet_reader = self
            .access_layer
            .read_sst(self.file.clone()) // use the latest file handle instead of creating a new one
            .build()
            .await?;

        // TODO(SNC123): optimize index batch
        loop {
            match parquet_reader.next_batch().await {
                Ok(Some(mut batch)) => {
                    indexer.update(&mut batch).await;
                }
                Ok(None) => break,
                Err(e) => {
                    indexer.abort().await;
                    return Err(e);
                }
            }
        }
        let index_output = indexer.finish().await;

        if index_output.file_size > 0 {
            // Check SST file existence again after building index.
            if !self.check_sst_file_exists(&version_control).await {
                // Calls abort to clean up index files.
                indexer.abort().await;
                self.listener
                    .on_index_build_abort(RegionFileId::new(
                        self.file_meta.region_id,
                        self.file_meta.file_id,
                    ))
                    .await;
                return Ok(IndexBuildOutcome::Aborted(format!(
                    "SST file not found during index build, region: {}, file_id: {}",
                    self.file_meta.region_id, self.file_meta.file_id
                )));
            }

            // Upload index file if write cache is enabled.
            self.maybe_upload_index_file(index_output.clone(), index_file_id, new_index_version)
                .await?;

            let worker_request = match self.update_manifest(index_output, new_index_version).await {
                Ok(edit) => {
                    let index_build_finished = IndexBuildFinished {
                        region_id: self.file_meta.region_id,
                        edit,
                    };
                    WorkerRequest::Background {
                        region_id: self.file_meta.region_id,
                        notify: BackgroundNotify::IndexBuildFinished(index_build_finished),
                    }
                }
                Err(e) => {
                    let err = Arc::new(e);
                    WorkerRequest::Background {
                        region_id: self.file_meta.region_id,
                        notify: BackgroundNotify::IndexBuildFailed(IndexBuildFailed { err }),
                    }
                }
            };

            let _ = self
                .request_sender
                .send(WorkerRequestWithTime::new(worker_request))
                .await;
        }
        Ok(IndexBuildOutcome::Finished)
    }

    async fn maybe_upload_index_file(
        &self,
        output: IndexOutput,
        index_file_id: FileId,
        index_version: u64,
    ) -> Result<()> {
        if let Some(write_cache) = &self.write_cache {
            let file_id = self.file_meta.file_id;
            let region_id = self.file_meta.region_id;
            let remote_store = self.access_layer.object_store();
            let mut upload_tracker = UploadTracker::new(region_id);
            let mut err = None;
            let puffin_key =
                IndexKey::new(region_id, index_file_id, FileType::Puffin(output.version));
            let index_id = RegionIndexId::new(RegionFileId::new(region_id, file_id), index_version);
            let puffin_path = RegionFilePathFactory::new(
                self.access_layer.table_dir().to_string(),
                self.access_layer.path_type(),
            )
            .build_index_file_path_with_version(index_id);
            if let Err(e) = write_cache
                .upload(puffin_key, &puffin_path, remote_store)
                .await
            {
                err = Some(e);
            }
            upload_tracker.push_uploaded_file(puffin_path);
            if let Some(err) = err {
                // Cleans index files on failure.
                upload_tracker
                    .clean(
                        &smallvec![SstInfo {
                            file_id,
                            index_metadata: output,
                            ..Default::default()
                        }],
                        &write_cache.file_cache(),
                        remote_store,
                    )
                    .await;
                return Err(err);
            }
        } else {
            debug!("write cache is not available, skip uploading index file");
        }
        Ok(())
    }

    async fn update_manifest(
        &mut self,
        output: IndexOutput,
        new_index_version: u64,
    ) -> Result<RegionEdit> {
        self.file_meta.available_indexes = output.build_available_indexes();
        self.file_meta.indexes = output.build_indexes();
        self.file_meta.index_file_size = output.file_size;
        self.file_meta.index_version = new_index_version;
        let edit = RegionEdit {
            files_to_add: vec![self.file_meta.clone()],
            files_to_remove: vec![],
            timestamp_ms: Some(chrono::Utc::now().timestamp_millis()),
            flushed_sequence: None,
            flushed_entry_id: None,
            committed_sequence: None,
            compaction_time_window: None,
        };
        let version = self
            .manifest_ctx
            .update_manifest(
                RegionLeaderState::Writable,
                RegionMetaActionList::with_action(RegionMetaAction::Edit(edit.clone())),
                false,
            )
            .await?;
        info!(
            "Successfully updated manifest version to {version}, region: {}, reason: {}",
            self.file_meta.region_id,
            self.reason.as_str()
        );
        Ok(edit)
    }
}

impl PartialEq for IndexBuildTask {
    fn eq(&self, other: &Self) -> bool {
        self.reason.priority() == other.reason.priority()
    }
}

impl Eq for IndexBuildTask {}

impl PartialOrd for IndexBuildTask {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for IndexBuildTask {
    fn cmp(&self, other: &Self) -> Ordering {
        self.reason.priority().cmp(&other.reason.priority())
    }
}

/// Tracks the index build status of a region scheduled by the [IndexBuildScheduler].
pub struct IndexBuildStatus {
    pub region_id: RegionId,
    pub building_files: HashSet<FileId>,
    pub pending_tasks: BinaryHeap<IndexBuildTask>,
}

impl IndexBuildStatus {
    pub fn new(region_id: RegionId) -> Self {
        IndexBuildStatus {
            region_id,
            building_files: HashSet::new(),
            pending_tasks: BinaryHeap::new(),
        }
    }

    async fn on_failure(self, err: Arc<Error>) {
        for task in self.pending_tasks {
            task.on_failure(err.clone()).await;
        }
    }
}

pub struct IndexBuildScheduler {
    /// Background job scheduler.
    scheduler: SchedulerRef,
    /// Tracks regions that need to build indexes.
    region_status: HashMap<RegionId, IndexBuildStatus>,
    /// Limit on the number of files allowed to build indexes concurrently across
    /// all regions of this scheduler.
    files_limit: usize,
}

/// Manages background index build tasks of a worker.
impl IndexBuildScheduler {
    pub fn new(scheduler: SchedulerRef, files_limit: usize) -> Self {
        IndexBuildScheduler {
            scheduler,
            region_status: HashMap::new(),
            files_limit,
        }
    }

    pub(crate) async fn schedule_build(
        &mut self,
        version_control: &VersionControlRef,
        task: IndexBuildTask,
    ) -> Result<()> {
        let status = self
            .region_status
            .entry(task.file_meta.region_id)
            .or_insert_with(|| IndexBuildStatus::new(task.file_meta.region_id));

        if status.building_files.contains(&task.file_meta.file_id) {
            let region_file_id =
                RegionFileId::new(task.file_meta.region_id, task.file_meta.file_id);
            debug!(
                "Aborting index build task since index is already being built for region file {:?}",
                region_file_id
            );
            task.on_success(IndexBuildOutcome::Aborted(format!(
                "Index is already being built for region file {:?}",
                region_file_id
            )))
            .await;
            task.listener.on_index_build_abort(region_file_id).await;
            return Ok(());
        }

        status.pending_tasks.push(task);

        self.schedule_next_build_batch(version_control);
        Ok(())
    }

    /// Schedules tasks until the files limit is reached or no more tasks are pending.
    fn schedule_next_build_batch(&mut self, version_control: &VersionControlRef) {
        let mut building_count = 0;
        for status in self.region_status.values() {
            building_count += status.building_files.len();
        }

        while building_count < self.files_limit {
            if let Some(task) = self.find_next_task() {
                let region_id = task.file_meta.region_id;
                let file_id = task.file_meta.file_id;
                let job = task.into_index_build_job(version_control.clone());
                if self.scheduler.schedule(job).is_ok() {
                    if let Some(status) = self.region_status.get_mut(&region_id) {
                        status.building_files.insert(file_id);
                        building_count += 1;
                        status
                            .pending_tasks
                            .retain(|t| t.file_meta.file_id != file_id);
                    } else {
                        error!(
                            "Region status not found when scheduling index build task, region: {}",
                            region_id
                        );
                    }
                } else {
                    error!(
                        "Failed to schedule index build job, region: {}, file_id: {}",
                        region_id, file_id
                    );
                }
            } else {
                // No more tasks to schedule.
                break;
            }
        }
    }

    /// Finds the pending task with the highest priority to run.
    fn find_next_task(&self) -> Option<IndexBuildTask> {
        self.region_status
            .iter()
            .filter_map(|(_, status)| status.pending_tasks.peek())
            .max()
            .cloned()
    }

    pub(crate) fn on_task_stopped(
        &mut self,
        region_id: RegionId,
        file_id: FileId,
        version_control: &VersionControlRef,
    ) {
        if let Some(status) = self.region_status.get_mut(&region_id) {
            status.building_files.remove(&file_id);
            if status.building_files.is_empty() && status.pending_tasks.is_empty() {
                // No more tasks for this region, remove it.
                self.region_status.remove(&region_id);
            }
        }

        self.schedule_next_build_batch(version_control);
    }

    pub(crate) async fn on_failure(&mut self, region_id: RegionId, err: Arc<Error>) {
        error!(
            err; "Index build scheduler encountered failure for region {}, removing all pending tasks.",
            region_id
        );
        let Some(status) = self.region_status.remove(&region_id) else {
            return;
        };
        status.on_failure(err).await;
    }

    /// Notifies the scheduler that the region is dropped.
    pub(crate) async fn on_region_dropped(&mut self, region_id: RegionId) {
        self.remove_region_on_failure(
            region_id,
            Arc::new(RegionDroppedSnafu { region_id }.build()),
        )
        .await;
    }

    /// Notifies the scheduler that the region is closed.
    pub(crate) async fn on_region_closed(&mut self, region_id: RegionId) {
        self.remove_region_on_failure(region_id, Arc::new(RegionClosedSnafu { region_id }.build()))
            .await;
    }

    /// Notifies the scheduler that the region is truncated.
    pub(crate) async fn on_region_truncated(&mut self, region_id: RegionId) {
        self.remove_region_on_failure(
            region_id,
            Arc::new(RegionTruncatedSnafu { region_id }.build()),
        )
        .await;
    }

    async fn remove_region_on_failure(&mut self, region_id: RegionId, err: Arc<Error>) {
        let Some(status) = self.region_status.remove(&region_id) else {
            return;
        };
        status.on_failure(err).await;
    }
}

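// Sketch of the scheduling flow driven by `IndexBuildScheduler` (illustrative
// only; `scheduler`, `vc`, and `task` are assumed handles, not defined here):
//
//     scheduler.schedule_build(&vc, task).await?;          // queue or start a build
//     // ... later, when the worker handles `IndexBuildStopped`:
//     scheduler.on_task_stopped(region_id, file_id, &vc);  // frees a slot and schedules
//                                                          // the next pending tasks
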
/// Decodes primary keys from a flat format RecordBatch.
/// Returns a list of (decoded_pk_value, count) tuples, where count is the number of
/// consecutive occurrences.
pub(crate) fn decode_primary_keys_with_counts(
    batch: &RecordBatch,
    codec: &IndexValuesCodec,
) -> Result<Vec<(CompositeValues, usize)>> {
    let primary_key_index = primary_key_column_index(batch.num_columns());
    let pk_dict_array = batch
        .column(primary_key_index)
        .as_any()
        .downcast_ref::<PrimaryKeyArray>()
        .context(InvalidRecordBatchSnafu {
            reason: "Primary key column is not a dictionary array",
        })?;
    let pk_values_array = pk_dict_array
        .values()
        .as_any()
        .downcast_ref::<BinaryArray>()
        .context(InvalidRecordBatchSnafu {
            reason: "Primary key values are not binary array",
        })?;
    let keys = pk_dict_array.keys();

    // Decodes primary keys and counts consecutive occurrences.
    let mut result: Vec<(CompositeValues, usize)> = Vec::new();
    let mut prev_key: Option<u32> = None;

    for i in 0..keys.len() {
        let current_key = keys.value(i);

        // Checks if the current key is the same as the previous key.
        if let Some(prev) = prev_key
            && prev == current_key
        {
            // Safety: We already have a key in the result vector.
            result.last_mut().unwrap().1 += 1;
            continue;
        }

        // New key, decodes it.
        let pk_bytes = pk_values_array.value(current_key as usize);
        let decoded_value = codec.decoder().decode(pk_bytes).context(DecodeSnafu)?;

        result.push((decoded_value, 1));
        prev_key = Some(current_key);
    }

    Ok(result)
}

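// Worked example for `decode_primary_keys_with_counts` (illustrative, with
// assumed values): if the dictionary keys of the primary key column are
// [0, 0, 0, 1, 1, 0], the function returns [(pk_0, 3), (pk_1, 2), (pk_0, 1)],
// i.e. one entry per consecutive run of the same primary key together with
// its run length.
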
#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use api::v1::SemanticType;
    use common_base::readable_size::ReadableSize;
    use datafusion_common::HashMap;
    use datatypes::data_type::ConcreteDataType;
    use datatypes::schema::{
        ColumnSchema, FulltextOptions, SkippingIndexOptions, SkippingIndexType,
    };
    use object_store::ObjectStore;
    use object_store::services::Memory;
    use puffin_manager::PuffinManagerFactory;
    use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder};
    use tokio::sync::mpsc;

    use super::*;
    use crate::access_layer::{FilePathProvider, Metrics, SstWriteRequest, WriteType};
    use crate::cache::write_cache::WriteCache;
    use crate::config::{FulltextIndexConfig, IndexBuildMode, MitoConfig, Mode};
    use crate::memtable::time_partition::TimePartitions;
    use crate::region::version::{VersionBuilder, VersionControl};
    use crate::sst::file::RegionFileId;
    use crate::sst::file_purger::NoopFilePurger;
    use crate::sst::location;
    use crate::sst::parquet::WriteOptions;
    use crate::test_util::memtable_util::EmptyMemtableBuilder;
    use crate::test_util::scheduler_util::SchedulerEnv;
    use crate::test_util::sst_util::{new_batch_by_range, new_source, sst_region_metadata};

    struct MetaConfig {
        with_inverted: bool,
        with_fulltext: bool,
        with_skipping_bloom: bool,
        #[cfg(feature = "vector_index")]
        with_vector: bool,
    }

    fn mock_region_metadata(
        MetaConfig {
            with_inverted,
            with_fulltext,
            with_skipping_bloom,
            #[cfg(feature = "vector_index")]
            with_vector,
        }: MetaConfig,
    ) -> RegionMetadataRef {
        let mut builder = RegionMetadataBuilder::new(RegionId::new(1, 2));
        let mut column_schema = ColumnSchema::new("a", ConcreteDataType::int64_datatype(), false);
        if with_inverted {
            column_schema = column_schema.with_inverted_index(true);
        }
        builder
            .push_column_metadata(ColumnMetadata {
                column_schema,
                semantic_type: SemanticType::Field,
                column_id: 1,
            })
            .push_column_metadata(ColumnMetadata {
                column_schema: ColumnSchema::new("b", ConcreteDataType::float64_datatype(), false),
                semantic_type: SemanticType::Field,
                column_id: 2,
            })
            .push_column_metadata(ColumnMetadata {
                column_schema: ColumnSchema::new(
                    "c",
                    ConcreteDataType::timestamp_millisecond_datatype(),
                    false,
                ),
                semantic_type: SemanticType::Timestamp,
                column_id: 3,
            });

        if with_fulltext {
            let column_schema =
                ColumnSchema::new("text", ConcreteDataType::string_datatype(), true)
                    .with_fulltext_options(FulltextOptions {
                        enable: true,
                        ..Default::default()
                    })
                    .unwrap();

            let column = ColumnMetadata {
                column_schema,
                semantic_type: SemanticType::Field,
                column_id: 4,
            };

            builder.push_column_metadata(column);
        }

        if with_skipping_bloom {
            let column_schema =
                ColumnSchema::new("bloom", ConcreteDataType::string_datatype(), false)
                    .with_skipping_options(SkippingIndexOptions::new_unchecked(
                        42,
                        0.01,
                        SkippingIndexType::BloomFilter,
                    ))
                    .unwrap();

            let column = ColumnMetadata {
                column_schema,
                semantic_type: SemanticType::Field,
                column_id: 5,
            };

            builder.push_column_metadata(column);
        }

        #[cfg(feature = "vector_index")]
        if with_vector {
            use index::vector::VectorIndexOptions;

            let options = VectorIndexOptions::default();
            let column_schema =
                ColumnSchema::new("vec", ConcreteDataType::vector_datatype(4), true)
                    .with_vector_index_options(&options)
                    .unwrap();
            let column = ColumnMetadata {
                column_schema,
                semantic_type: SemanticType::Field,
                column_id: 6,
            };

            builder.push_column_metadata(column);
        }

        Arc::new(builder.build().unwrap())
    }

    fn mock_object_store() -> ObjectStore {
        ObjectStore::new(Memory::default()).unwrap().finish()
    }

    async fn mock_intm_mgr(path: impl AsRef<str>) -> IntermediateManager {
        IntermediateManager::init_fs(path).await.unwrap()
    }

    struct NoopPathProvider;

    impl FilePathProvider for NoopPathProvider {
        fn build_index_file_path(&self, _file_id: RegionFileId) -> String {
            unreachable!()
        }

        fn build_index_file_path_with_version(&self, _index_id: RegionIndexId) -> String {
            unreachable!()
        }

        fn build_sst_file_path(&self, _file_id: RegionFileId) -> String {
            unreachable!()
        }
    }

    async fn mock_sst_file(
        metadata: RegionMetadataRef,
        env: &SchedulerEnv,
        build_mode: IndexBuildMode,
    ) -> SstInfo {
        let source = new_source(&[
            new_batch_by_range(&["a", "d"], 0, 60),
            new_batch_by_range(&["b", "f"], 0, 40),
            new_batch_by_range(&["b", "h"], 100, 200),
        ]);
        let mut index_config = MitoConfig::default().index;
        index_config.build_mode = build_mode;
        let write_request = SstWriteRequest {
            op_type: OperationType::Flush,
            metadata: metadata.clone(),
            source: either::Left(source),
            storage: None,
            max_sequence: None,
            cache_manager: Default::default(),
            index_options: IndexOptions::default(),
            index_config,
            inverted_index_config: Default::default(),
            fulltext_index_config: Default::default(),
            bloom_filter_index_config: Default::default(),
            #[cfg(feature = "vector_index")]
            vector_index_config: Default::default(),
        };
        let mut metrics = Metrics::new(WriteType::Flush);
        env.access_layer
            .write_sst(write_request, &WriteOptions::default(), &mut metrics)
            .await
            .unwrap()
            .remove(0)
    }

    async fn mock_version_control(
        metadata: RegionMetadataRef,
        file_purger: FilePurgerRef,
        files: HashMap<FileId, FileMeta>,
    ) -> VersionControlRef {
        let mutable = Arc::new(TimePartitions::new(
            metadata.clone(),
            Arc::new(EmptyMemtableBuilder::default()),
            0,
            None,
        ));
        let version_builder = VersionBuilder::new(metadata, mutable)
            .add_files(file_purger, files.values().cloned())
            .build();
        Arc::new(VersionControl::new(version_builder))
    }

    async fn mock_indexer_builder(
        metadata: RegionMetadataRef,
        env: &SchedulerEnv,
    ) -> Arc<dyn IndexerBuilder + Send + Sync> {
        let (dir, factory) = PuffinManagerFactory::new_for_test_async("mock_indexer_builder").await;
        let intm_manager = mock_intm_mgr(dir.path().to_string_lossy()).await;
        let puffin_manager = factory.build(
            env.access_layer.object_store().clone(),
            RegionFilePathFactory::new(
                env.access_layer.table_dir().to_string(),
                env.access_layer.path_type(),
            ),
        );
        Arc::new(IndexerBuilderImpl {
            build_type: IndexBuildType::Flush,
            metadata,
            row_group_size: 1024,
            puffin_manager,
            write_cache_enabled: false,
            intermediate_manager: intm_manager,
            index_options: IndexOptions::default(),
            inverted_index_config: InvertedIndexConfig::default(),
            fulltext_index_config: FulltextIndexConfig::default(),
            bloom_filter_index_config: BloomFilterConfig::default(),
            #[cfg(feature = "vector_index")]
            vector_index_config: Default::default(),
        })
    }

    #[tokio::test]
    async fn test_build_indexer_basic() {
        let (dir, factory) =
            PuffinManagerFactory::new_for_test_async("test_build_indexer_basic_").await;
        let intm_manager = mock_intm_mgr(dir.path().to_string_lossy()).await;

        let metadata = mock_region_metadata(MetaConfig {
            with_inverted: true,
            with_fulltext: true,
            with_skipping_bloom: true,
            #[cfg(feature = "vector_index")]
            with_vector: false,
        });
        let indexer = IndexerBuilderImpl {
            build_type: IndexBuildType::Flush,
            metadata,
            row_group_size: 1024,
            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
            write_cache_enabled: false,
            intermediate_manager: intm_manager,
            index_options: IndexOptions::default(),
            inverted_index_config: InvertedIndexConfig::default(),
            fulltext_index_config: FulltextIndexConfig::default(),
            bloom_filter_index_config: BloomFilterConfig::default(),
            #[cfg(feature = "vector_index")]
            vector_index_config: Default::default(),
        }
        .build(FileId::random(), 0)
        .await;

        assert!(indexer.inverted_indexer.is_some());
        assert!(indexer.fulltext_indexer.is_some());
        assert!(indexer.bloom_filter_indexer.is_some());
    }

    #[tokio::test]
    async fn test_build_indexer_disable_create() {
        let (dir, factory) =
            PuffinManagerFactory::new_for_test_async("test_build_indexer_disable_create_").await;
        let intm_manager = mock_intm_mgr(dir.path().to_string_lossy()).await;

        let metadata = mock_region_metadata(MetaConfig {
            with_inverted: true,
            with_fulltext: true,
            with_skipping_bloom: true,
            #[cfg(feature = "vector_index")]
            with_vector: false,
        });
        let indexer = IndexerBuilderImpl {
            build_type: IndexBuildType::Flush,
            metadata: metadata.clone(),
            row_group_size: 1024,
            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
            write_cache_enabled: false,
            intermediate_manager: intm_manager.clone(),
            index_options: IndexOptions::default(),
            inverted_index_config: InvertedIndexConfig {
                create_on_flush: Mode::Disable,
                ..Default::default()
            },
            fulltext_index_config: FulltextIndexConfig::default(),
            bloom_filter_index_config: BloomFilterConfig::default(),
            #[cfg(feature = "vector_index")]
            vector_index_config: Default::default(),
        }
        .build(FileId::random(), 0)
        .await;

        assert!(indexer.inverted_indexer.is_none());
        assert!(indexer.fulltext_indexer.is_some());
        assert!(indexer.bloom_filter_indexer.is_some());

        let indexer = IndexerBuilderImpl {
            build_type: IndexBuildType::Compact,
            metadata: metadata.clone(),
            row_group_size: 1024,
            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
            write_cache_enabled: false,
            intermediate_manager: intm_manager.clone(),
            index_options: IndexOptions::default(),
            inverted_index_config: InvertedIndexConfig::default(),
            fulltext_index_config: FulltextIndexConfig {
                create_on_compaction: Mode::Disable,
                ..Default::default()
            },
            bloom_filter_index_config: BloomFilterConfig::default(),
            #[cfg(feature = "vector_index")]
            vector_index_config: Default::default(),
        }
        .build(FileId::random(), 0)
        .await;

        assert!(indexer.inverted_indexer.is_some());
        assert!(indexer.fulltext_indexer.is_none());
        assert!(indexer.bloom_filter_indexer.is_some());

        let indexer = IndexerBuilderImpl {
            build_type: IndexBuildType::Compact,
            metadata,
            row_group_size: 1024,
            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
            write_cache_enabled: false,
            intermediate_manager: intm_manager,
            index_options: IndexOptions::default(),
            inverted_index_config: InvertedIndexConfig::default(),
            fulltext_index_config: FulltextIndexConfig::default(),
            bloom_filter_index_config: BloomFilterConfig {
                create_on_compaction: Mode::Disable,
                ..Default::default()
            },
            #[cfg(feature = "vector_index")]
            vector_index_config: Default::default(),
        }
1549        .build(FileId::random(), 0)
1550        .await;
1551
1552        assert!(indexer.inverted_indexer.is_some());
1553        assert!(indexer.fulltext_indexer.is_some());
1554        assert!(indexer.bloom_filter_indexer.is_none());
1555    }
1556
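    // If the metadata declares no column for a given index type, the corresponding
    // indexer should not be built even with default configs.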
1557    #[tokio::test]
1558    async fn test_build_indexer_no_required() {
1559        let (dir, factory) =
1560            PuffinManagerFactory::new_for_test_async("test_build_indexer_no_required_").await;
1561        let intm_manager = mock_intm_mgr(dir.path().to_string_lossy()).await;
1562
1563        let metadata = mock_region_metadata(MetaConfig {
1564            with_inverted: false,
1565            with_fulltext: true,
1566            with_skipping_bloom: true,
1567            #[cfg(feature = "vector_index")]
1568            with_vector: false,
1569        });
1570        let indexer = IndexerBuilderImpl {
1571            build_type: IndexBuildType::Flush,
1572            metadata: metadata.clone(),
1573            row_group_size: 1024,
1574            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
1575            write_cache_enabled: false,
1576            intermediate_manager: intm_manager.clone(),
1577            index_options: IndexOptions::default(),
1578            inverted_index_config: InvertedIndexConfig::default(),
1579            fulltext_index_config: FulltextIndexConfig::default(),
1580            bloom_filter_index_config: BloomFilterConfig::default(),
1581            #[cfg(feature = "vector_index")]
1582            vector_index_config: Default::default(),
1583        }
1584        .build(FileId::random(), 0)
1585        .await;
1586
1587        assert!(indexer.inverted_indexer.is_none());
1588        assert!(indexer.fulltext_indexer.is_some());
1589        assert!(indexer.bloom_filter_indexer.is_some());
1590
1591        let metadata = mock_region_metadata(MetaConfig {
1592            with_inverted: true,
1593            with_fulltext: false,
1594            with_skipping_bloom: true,
1595            #[cfg(feature = "vector_index")]
1596            with_vector: false,
1597        });
1598        let indexer = IndexerBuilderImpl {
1599            build_type: IndexBuildType::Flush,
1600            metadata: metadata.clone(),
1601            row_group_size: 1024,
1602            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
1603            write_cache_enabled: false,
1604            intermediate_manager: intm_manager.clone(),
1605            index_options: IndexOptions::default(),
1606            inverted_index_config: InvertedIndexConfig::default(),
1607            fulltext_index_config: FulltextIndexConfig::default(),
1608            bloom_filter_index_config: BloomFilterConfig::default(),
1609            #[cfg(feature = "vector_index")]
1610            vector_index_config: Default::default(),
1611        }
1612        .build(FileId::random(), 0)
1613        .await;
1614
1615        assert!(indexer.inverted_indexer.is_some());
1616        assert!(indexer.fulltext_indexer.is_none());
1617        assert!(indexer.bloom_filter_indexer.is_some());
1618
1619        let metadata = mock_region_metadata(MetaConfig {
1620            with_inverted: true,
1621            with_fulltext: true,
1622            with_skipping_bloom: false,
1623            #[cfg(feature = "vector_index")]
1624            with_vector: false,
1625        });
1626        let indexer = IndexerBuilderImpl {
1627            build_type: IndexBuildType::Flush,
1628            metadata: metadata.clone(),
1629            row_group_size: 1024,
1630            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
1631            write_cache_enabled: false,
1632            intermediate_manager: intm_manager,
1633            index_options: IndexOptions::default(),
1634            inverted_index_config: InvertedIndexConfig::default(),
1635            fulltext_index_config: FulltextIndexConfig::default(),
1636            bloom_filter_index_config: BloomFilterConfig::default(),
1637            #[cfg(feature = "vector_index")]
1638            vector_index_config: Default::default(),
1639        }
1640        .build(FileId::random(), 0)
1641        .await;
1642
1643        assert!(indexer.inverted_indexer.is_some());
1644        assert!(indexer.fulltext_indexer.is_some());
1645        assert!(indexer.bloom_filter_indexer.is_none());
1646    }
1647
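    // A `row_group_size` of 0 is not a valid segmentation for the inverted index,
    // so the inverted indexer should not be created.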
1648    #[tokio::test]
1649    async fn test_build_indexer_zero_row_group() {
1650        let (dir, factory) =
1651            PuffinManagerFactory::new_for_test_async("test_build_indexer_zero_row_group_").await;
1652        let intm_manager = mock_intm_mgr(dir.path().to_string_lossy()).await;
1653
1654        let metadata = mock_region_metadata(MetaConfig {
1655            with_inverted: true,
1656            with_fulltext: true,
1657            with_skipping_bloom: true,
1658            #[cfg(feature = "vector_index")]
1659            with_vector: false,
1660        });
1661        let indexer = IndexerBuilderImpl {
1662            build_type: IndexBuildType::Flush,
1663            metadata,
1664            row_group_size: 0,
1665            puffin_manager: factory.build(mock_object_store(), NoopPathProvider),
1666            write_cache_enabled: false,
1667            intermediate_manager: intm_manager,
1668            index_options: IndexOptions::default(),
1669            inverted_index_config: InvertedIndexConfig::default(),
1670            fulltext_index_config: FulltextIndexConfig::default(),
1671            bloom_filter_index_config: BloomFilterConfig::default(),
1672            #[cfg(feature = "vector_index")]
1673            vector_index_config: Default::default(),
1674        }
1675        .build(FileId::random(), 0)
1676        .await;
1677
1678        assert!(indexer.inverted_indexer.is_none());
1679    }
1680
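    // Feeds a flat `RecordBatch` of little-endian f32 vectors through `update_flat`
    // and expects the output to report an available vector index for the vector
    // column (id 6 in the mock metadata).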
1681    #[cfg(feature = "vector_index")]
1682    #[tokio::test]
1683    async fn test_update_flat_builds_vector_index() {
1684        use datatypes::arrow::array::BinaryBuilder;
1685        use datatypes::arrow::datatypes::{DataType, Field, Schema};
1686
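        // Minimal path provider that puts index and SST files under fixed prefixes.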
1687        struct TestPathProvider;
1688
1689        impl FilePathProvider for TestPathProvider {
1690            fn build_index_file_path(&self, file_id: RegionFileId) -> String {
1691                format!("index/{}.puffin", file_id)
1692            }
1693
1694            fn build_index_file_path_with_version(&self, index_id: RegionIndexId) -> String {
1695                format!("index/{}.puffin", index_id)
1696            }
1697
1698            fn build_sst_file_path(&self, file_id: RegionFileId) -> String {
1699                format!("sst/{}.parquet", file_id)
1700            }
1701        }
1702
1703        fn f32s_to_bytes(values: &[f32]) -> Vec<u8> {
1704            let mut bytes = Vec::with_capacity(values.len() * 4);
1705            for v in values {
1706                bytes.extend_from_slice(&v.to_le_bytes());
1707            }
1708            bytes
1709        }
1710
1711        let (dir, factory) =
1712            PuffinManagerFactory::new_for_test_async("test_update_flat_builds_vector_index_").await;
1713        let intm_manager = mock_intm_mgr(dir.path().to_string_lossy()).await;
1714
1715        let metadata = mock_region_metadata(MetaConfig {
1716            with_inverted: false,
1717            with_fulltext: false,
1718            with_skipping_bloom: false,
1719            with_vector: true,
1720        });
1721
1722        let mut indexer = IndexerBuilderImpl {
1723            build_type: IndexBuildType::Flush,
1724            metadata,
1725            row_group_size: 1024,
1726            puffin_manager: factory.build(mock_object_store(), TestPathProvider),
1727            write_cache_enabled: false,
1728            intermediate_manager: intm_manager,
1729            index_options: IndexOptions::default(),
1730            inverted_index_config: InvertedIndexConfig::default(),
1731            fulltext_index_config: FulltextIndexConfig::default(),
1732            bloom_filter_index_config: BloomFilterConfig::default(),
1733            vector_index_config: Default::default(),
1734        }
1735        .build(FileId::random(), 0)
1736        .await;
1737
1738        assert!(indexer.vector_indexer.is_some());
1739
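        // Two 4-dimensional unit vectors encoded as little-endian f32 bytes.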
1740        let vec1 = f32s_to_bytes(&[1.0, 0.0, 0.0, 0.0]);
1741        let vec2 = f32s_to_bytes(&[0.0, 1.0, 0.0, 0.0]);
1742
1743        let mut builder = BinaryBuilder::with_capacity(2, vec1.len() + vec2.len());
1744        builder.append_value(&vec1);
1745        builder.append_value(&vec2);
1746
1747        let schema = Arc::new(Schema::new(vec![Field::new("vec", DataType::Binary, true)]));
1748        let batch = RecordBatch::try_new(schema, vec![Arc::new(builder.finish())]).unwrap();
1749
1750        indexer.update_flat(&batch).await;
1751        let output = indexer.finish().await;
1752
1753        assert!(output.vector_index.is_available());
1754        assert!(output.vector_index.columns.contains(&6));
1755    }
1756
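    // Scheduling an index build for a file whose SST was never written should
    // abort instead of finishing.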
1757    #[tokio::test]
1758    async fn test_index_build_task_sst_not_exist() {
1759        let env = SchedulerEnv::new().await;
1760        let (tx, _rx) = mpsc::channel(4);
1761        let (result_tx, mut result_rx) = mpsc::channel::<Result<IndexBuildOutcome>>(4);
1762        let mut scheduler = env.mock_index_build_scheduler(4);
1763        let metadata = Arc::new(sst_region_metadata());
1764        let manifest_ctx = env.mock_manifest_context(metadata.clone()).await;
1765        let file_purger = Arc::new(NoopFilePurger {});
1766        let files = HashMap::new();
1767        let version_control =
1768            mock_version_control(metadata.clone(), file_purger.clone(), files).await;
1769        let region_id = metadata.region_id;
1770        let indexer_builder = mock_indexer_builder(metadata, &env).await;
1771
1772        let file_meta = FileMeta {
1773            region_id,
1774            file_id: FileId::random(),
1775            file_size: 100,
1776            ..Default::default()
1777        };
1778
1779        let file = FileHandle::new(file_meta.clone(), file_purger.clone());
1780
1781        // Create mock task.
1782        let task = IndexBuildTask {
1783            file,
1784            file_meta,
1785            reason: IndexBuildType::Flush,
1786            access_layer: env.access_layer.clone(),
1787            listener: WorkerListener::default(),
1788            manifest_ctx,
1789            write_cache: None,
1790            file_purger,
1791            indexer_builder,
1792            request_sender: tx,
1793            result_sender: result_tx,
1794        };
1795
1796        // Schedule the build task and check result.
1797        scheduler
1798            .schedule_build(&version_control, task)
1799            .await
1800            .unwrap();
1801        match result_rx.recv().await.unwrap() {
1802            Ok(outcome) => {
1803                // Any outcome other than `Finished` counts as the expected abort
1804                // caused by the missing SST file.
1805                assert_ne!(outcome, IndexBuildOutcome::Finished);
1806            }
1807            _ => panic!("Expect aborted result due to missing SST file"),
1808        }
1809    }
1810
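    // With an SST written in async build mode, the task should finish and notify
    // the worker with a `RegionEdit` that re-adds the file with index metadata.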
1811    #[tokio::test]
1812    async fn test_index_build_task_sst_exist() {
1813        let env = SchedulerEnv::new().await;
1814        let mut scheduler = env.mock_index_build_scheduler(4);
1815        let metadata = Arc::new(sst_region_metadata());
1816        let manifest_ctx = env.mock_manifest_context(metadata.clone()).await;
1817        let region_id = metadata.region_id;
1818        let file_purger = Arc::new(NoopFilePurger {});
1819        let sst_info = mock_sst_file(metadata.clone(), &env, IndexBuildMode::Async).await;
1820        let file_meta = FileMeta {
1821            region_id,
1822            file_id: sst_info.file_id,
1823            file_size: sst_info.file_size,
1824            max_row_group_uncompressed_size: sst_info.max_row_group_uncompressed_size,
1825            index_file_size: sst_info.index_metadata.file_size,
1826            num_rows: sst_info.num_rows as u64,
1827            num_row_groups: sst_info.num_row_groups,
1828            ..Default::default()
1829        };
1830        let files = HashMap::from([(file_meta.file_id, file_meta.clone())]);
1831        let version_control =
1832            mock_version_control(metadata.clone(), file_purger.clone(), files).await;
1833        let indexer_builder = mock_indexer_builder(metadata.clone(), &env).await;
1834
1835        let file = FileHandle::new(file_meta.clone(), file_purger.clone());
1836
1837        // Create mock task.
1838        let (tx, mut rx) = mpsc::channel(4);
1839        let (result_tx, mut result_rx) = mpsc::channel::<Result<IndexBuildOutcome>>(4);
1840        let task = IndexBuildTask {
1841            file,
1842            file_meta: file_meta.clone(),
1843            reason: IndexBuildType::Flush,
1844            access_layer: env.access_layer.clone(),
1845            listener: WorkerListener::default(),
1846            manifest_ctx,
1847            write_cache: None,
1848            file_purger,
1849            indexer_builder,
1850            request_sender: tx,
1851            result_sender: result_tx,
1852        };
1853
1854        scheduler
1855            .schedule_build(&version_control, task)
1856            .await
1857            .unwrap();
1858
1859        // The task should finish successfully.
1860        match result_rx.recv().await.unwrap() {
1861            Ok(outcome) => {
1862                assert_eq!(outcome, IndexBuildOutcome::Finished);
1863            }
1864            _ => panic!("Expect finished result"),
1865        }
1866
1867        // A notification should be sent to the worker to update the manifest.
1868        let worker_req = rx.recv().await.unwrap().request;
1869        match worker_req {
1870            WorkerRequest::Background {
1871                region_id: req_region_id,
1872                notify: BackgroundNotify::IndexBuildFinished(finished),
1873            } => {
1874                assert_eq!(req_region_id, region_id);
1875                assert_eq!(finished.edit.files_to_add.len(), 1);
1876                let updated_meta = &finished.edit.files_to_add[0];
1877
1878                // The mock indexer builder creates all index types.
1879                assert!(!updated_meta.available_indexes.is_empty());
1880                assert!(updated_meta.index_file_size > 0);
1881                assert_eq!(updated_meta.file_id, file_meta.file_id);
1882            }
1883            _ => panic!("Unexpected worker request: {:?}", worker_req),
1884        }
1885    }
1886
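    // Runs an index build task against an SST produced with the given build mode
    // and checks when the puffin index file becomes visible: only after the task
    // finishes for `Async`, already before it for `Sync`.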
1887    async fn schedule_index_build_task_with_mode(build_mode: IndexBuildMode) {
1888        let env = SchedulerEnv::new().await;
1889        let mut scheduler = env.mock_index_build_scheduler(4);
1890        let metadata = Arc::new(sst_region_metadata());
1891        let manifest_ctx = env.mock_manifest_context(metadata.clone()).await;
1892        let file_purger = Arc::new(NoopFilePurger {});
1893        let region_id = metadata.region_id;
1894        let sst_info = mock_sst_file(metadata.clone(), &env, build_mode.clone()).await;
1895        let file_meta = FileMeta {
1896            region_id,
1897            file_id: sst_info.file_id,
1898            file_size: sst_info.file_size,
1899            max_row_group_uncompressed_size: sst_info.max_row_group_uncompressed_size,
1900            index_file_size: sst_info.index_metadata.file_size,
1901            num_rows: sst_info.num_rows as u64,
1902            num_row_groups: sst_info.num_row_groups,
1903            ..Default::default()
1904        };
1905        let files = HashMap::from([(file_meta.file_id, file_meta.clone())]);
1906        let version_control =
1907            mock_version_control(metadata.clone(), file_purger.clone(), files).await;
1908        let indexer_builder = mock_indexer_builder(metadata.clone(), &env).await;
1909
1910        let file = FileHandle::new(file_meta.clone(), file_purger.clone());
1911
1912        // Create mock task.
1913        let (tx, _rx) = mpsc::channel(4);
1914        let (result_tx, mut result_rx) = mpsc::channel::<Result<IndexBuildOutcome>>(4);
1915        let task = IndexBuildTask {
1916            file,
1917            file_meta: file_meta.clone(),
1918            reason: IndexBuildType::Flush,
1919            access_layer: env.access_layer.clone(),
1920            listener: WorkerListener::default(),
1921            manifest_ctx,
1922            write_cache: None,
1923            file_purger,
1924            indexer_builder,
1925            request_sender: tx,
1926            result_sender: result_tx,
1927        };
1928
1929        scheduler
1930            .schedule_build(&version_control, task)
1931            .await
1932            .unwrap();
1933
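        // Resolve the path of the puffin index file (index version 0) for the SST.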
1934        let puffin_path = location::index_file_path(
1935            env.access_layer.table_dir(),
1936            RegionIndexId::new(RegionFileId::new(region_id, file_meta.file_id), 0),
1937            env.access_layer.path_type(),
1938        );
1939
1940        if build_mode == IndexBuildMode::Async {
1941            // In async mode the index is built by this task, so the puffin file must not exist yet.
1942            assert!(
1943                !env.access_layer
1944                    .object_store()
1945                    .exists(&puffin_path)
1946                    .await
1947                    .unwrap()
1948            );
1949        } else {
1950            // In sync mode the index was written together with the SST, so the puffin file already exists.
1951            assert!(
1952                env.access_layer
1953                    .object_store()
1954                    .exists(&puffin_path)
1955                    .await
1956                    .unwrap()
1957            );
1958        }
1959
1960        // The task should finish successfully.
1961        match result_rx.recv().await.unwrap() {
1962            Ok(outcome) => {
1963                assert_eq!(outcome, IndexBuildOutcome::Finished);
1964            }
1965            _ => panic!("Expect finished result"),
1966        }
1967
1968        // The index file should exist after the task finishes.
1969        assert!(
1970            env.access_layer
1971                .object_store()
1972                .exists(&puffin_path)
1973                .await
1974                .unwrap()
1975        );
1976    }
1977
1978    #[tokio::test]
1979    async fn test_index_build_task_build_mode() {
1980        schedule_index_build_task_with_mode(IndexBuildMode::Async).await;
1981        schedule_index_build_task_with_mode(IndexBuildMode::Sync).await;
1982    }
1983
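    // When the metadata declares no index at all, the task should finish without
    // writing an index file or notifying the worker.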
1984    #[tokio::test]
1985    async fn test_index_build_task_no_index() {
1986        let env = SchedulerEnv::new().await;
1987        let mut scheduler = env.mock_index_build_scheduler(4);
1988        let mut metadata = sst_region_metadata();
1989        // Unset indexes in the metadata to simulate a region without any index.
1990        metadata.column_metadatas.iter_mut().for_each(|col| {
1991            col.column_schema.set_inverted_index(false);
1992            let _ = col.column_schema.unset_skipping_options();
1993        });
1994        let region_id = metadata.region_id;
1995        let metadata = Arc::new(metadata);
1996        let manifest_ctx = env.mock_manifest_context(metadata.clone()).await;
1997        let file_purger = Arc::new(NoopFilePurger {});
1998        let sst_info = mock_sst_file(metadata.clone(), &env, IndexBuildMode::Async).await;
1999        let file_meta = FileMeta {
2000            region_id,
2001            file_id: sst_info.file_id,
2002            file_size: sst_info.file_size,
2003            max_row_group_uncompressed_size: sst_info.max_row_group_uncompressed_size,
2004            index_file_size: sst_info.index_metadata.file_size,
2005            num_rows: sst_info.num_rows as u64,
2006            num_row_groups: sst_info.num_row_groups,
2007            ..Default::default()
2008        };
2009        let files = HashMap::from([(file_meta.file_id, file_meta.clone())]);
2010        let version_control =
2011            mock_version_control(metadata.clone(), file_purger.clone(), files).await;
2012        let indexer_builder = mock_indexer_builder(metadata.clone(), &env).await;
2013
2014        let file = FileHandle::new(file_meta.clone(), file_purger.clone());
2015
2016        // Create mock task.
2017        let (tx, mut rx) = mpsc::channel(4);
2018        let (result_tx, mut result_rx) = mpsc::channel::<Result<IndexBuildOutcome>>(4);
2019        let task = IndexBuildTask {
2020            file,
2021            file_meta: file_meta.clone(),
2022            reason: IndexBuildType::Flush,
2023            access_layer: env.access_layer.clone(),
2024            listener: WorkerListener::default(),
2025            manifest_ctx,
2026            write_cache: None,
2027            file_purger,
2028            indexer_builder,
2029            request_sender: tx,
2030            result_sender: result_tx,
2031        };
2032
2033        scheduler
2034            .schedule_build(&version_control, task)
2035            .await
2036            .unwrap();
2037
2038        // The task should finish successfully.
2039        match result_rx.recv().await.unwrap() {
2040            Ok(outcome) => {
2041                assert_eq!(outcome, IndexBuildOutcome::Finished);
2042            }
2043            _ => panic!("Expect finished result"),
2044        }
2045
2046        // No index is built, so no notification should be sent to the worker.
2047        assert!(rx.recv().await.is_none());
2048    }
2049
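    // With a write cache attached, the built index should be uploaded through it
    // and tracked in the file cache under a puffin `IndexKey`.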
2050    #[tokio::test]
2051    async fn test_index_build_task_with_write_cache() {
2052        let env = SchedulerEnv::new().await;
2053        let mut scheduler = env.mock_index_build_scheduler(4);
2054        let metadata = Arc::new(sst_region_metadata());
2055        let manifest_ctx = env.mock_manifest_context(metadata.clone()).await;
2056        let file_purger = Arc::new(NoopFilePurger {});
2057        let region_id = metadata.region_id;
2058
2059        let (dir, factory) = PuffinManagerFactory::new_for_test_async("test_write_cache").await;
2060        let intm_manager = mock_intm_mgr(dir.path().to_string_lossy()).await;
2061
2062        // Create a mock write cache backed by the local file system.
2063        let write_cache = Arc::new(
2064            WriteCache::new_fs(
2065                dir.path().to_str().unwrap(),
2066                ReadableSize::mb(10),
2067                None,
2068                None,
2069                true, // enable_background_worker
2070                factory,
2071                intm_manager,
2072                ReadableSize::mb(10),
2073            )
2074            .await
2075            .unwrap(),
2076        );
2077        // Build the indexer builder from the write cache's puffin and intermediate managers.
2078        let indexer_builder = Arc::new(IndexerBuilderImpl {
2079            build_type: IndexBuildType::Flush,
2080            metadata: metadata.clone(),
2081            row_group_size: 1024,
2082            puffin_manager: write_cache.build_puffin_manager().clone(),
2083            write_cache_enabled: true,
2084            intermediate_manager: write_cache.intermediate_manager().clone(),
2085            index_options: IndexOptions::default(),
2086            inverted_index_config: InvertedIndexConfig::default(),
2087            fulltext_index_config: FulltextIndexConfig::default(),
2088            bloom_filter_index_config: BloomFilterConfig::default(),
2089            #[cfg(feature = "vector_index")]
2090            vector_index_config: Default::default(),
2091        });
2092
2093        let sst_info = mock_sst_file(metadata.clone(), &env, IndexBuildMode::Async).await;
2094        let file_meta = FileMeta {
2095            region_id,
2096            file_id: sst_info.file_id,
2097            file_size: sst_info.file_size,
2098            index_file_size: sst_info.index_metadata.file_size,
2099            num_rows: sst_info.num_rows as u64,
2100            num_row_groups: sst_info.num_row_groups,
2101            ..Default::default()
2102        };
2103        let files = HashMap::from([(file_meta.file_id, file_meta.clone())]);
2104        let version_control =
2105            mock_version_control(metadata.clone(), file_purger.clone(), files).await;
2106
2107        let file = FileHandle::new(file_meta.clone(), file_purger.clone());
2108
2109        // Create mock task.
2110        let (tx, _rx) = mpsc::channel(4);
2111        let (result_tx, mut result_rx) = mpsc::channel::<Result<IndexBuildOutcome>>(4);
2112        let task = IndexBuildTask {
2113            file,
2114            file_meta: file_meta.clone(),
2115            reason: IndexBuildType::Flush,
2116            access_layer: env.access_layer.clone(),
2117            listener: WorkerListener::default(),
2118            manifest_ctx,
2119            write_cache: Some(write_cache.clone()),
2120            file_purger,
2121            indexer_builder,
2122            request_sender: tx,
2123            result_sender: result_tx,
2124        };
2125
2126        scheduler
2127            .schedule_build(&version_control, task)
2128            .await
2129            .unwrap();
2130
2131        // The task should finish successfully.
2132        match result_rx.recv().await.unwrap() {
2133            Ok(outcome) => {
2134                assert_eq!(outcome, IndexBuildOutcome::Finished);
2135            }
2136            _ => panic!("Expect finished result"),
2137        }
2138
2139        // The write cache should contain the uploaded index file.
2140        let index_key = IndexKey::new(
2141            region_id,
2142            file_meta.file_id,
2143            FileType::Puffin(sst_info.index_metadata.version),
2144        );
2145        assert!(write_cache.file_cache().contains_key(&index_key));
2146    }
2147
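    // Builds a minimal `IndexBuildTask` for scheduler tests; the request and result
    // receivers are dropped, so any notifications the task sends are not observed.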
2148    async fn create_mock_task_for_schedule(
2149        env: &SchedulerEnv,
2150        file_id: FileId,
2151        region_id: RegionId,
2152        reason: IndexBuildType,
2153    ) -> IndexBuildTask {
2154        let metadata = Arc::new(sst_region_metadata());
2155        let manifest_ctx = env.mock_manifest_context(metadata.clone()).await;
2156        let file_purger = Arc::new(NoopFilePurger {});
2157        let indexer_builder = mock_indexer_builder(metadata, env).await;
2158        let (tx, _rx) = mpsc::channel(4);
2159        let (result_tx, _result_rx) = mpsc::channel::<Result<IndexBuildOutcome>>(4);
2160
2161        let file_meta = FileMeta {
2162            region_id,
2163            file_id,
2164            file_size: 100,
2165            ..Default::default()
2166        };
2167
2168        let file = FileHandle::new(file_meta.clone(), file_purger.clone());
2169
2170        IndexBuildTask {
2171            file,
2172            file_meta,
2173            reason,
2174            access_layer: env.access_layer.clone(),
2175            listener: WorkerListener::default(),
2176            manifest_ctx,
2177            write_cache: None,
2178            file_purger,
2179            indexer_builder,
2180            request_sender: tx,
2181            result_sender: result_tx,
2182        }
2183    }
2184
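    // Exercises the scheduler end to end with a per-region limit of 2 building
    // tasks: duplicate files are skipped, extra tasks queue as pending and are
    // picked by priority (Manual, then SchemaChange, then Compact), the region
    // entry is removed once all tasks complete, and dropping the region clears
    // its pending tasks.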
2185    #[tokio::test]
2186    async fn test_scheduler_comprehensive() {
2187        let env = SchedulerEnv::new().await;
2188        let mut scheduler = env.mock_index_build_scheduler(2);
2189        let metadata = Arc::new(sst_region_metadata());
2190        let region_id = metadata.region_id;
2191        let file_purger = Arc::new(NoopFilePurger {});
2192
2193        // Prepare multiple files for testing
2194        let file_id1 = FileId::random();
2195        let file_id2 = FileId::random();
2196        let file_id3 = FileId::random();
2197        let file_id4 = FileId::random();
2198        let file_id5 = FileId::random();
2199
2200        let mut files = HashMap::new();
2201        for file_id in [file_id1, file_id2, file_id3, file_id4, file_id5] {
2202            files.insert(
2203                file_id,
2204                FileMeta {
2205                    region_id,
2206                    file_id,
2207                    file_size: 100,
2208                    ..Default::default()
2209                },
2210            );
2211        }
2212
2213        let version_control = mock_version_control(metadata, file_purger, files).await;
2214
2215        // Test 1: Basic scheduling
2216        let task1 =
2217            create_mock_task_for_schedule(&env, file_id1, region_id, IndexBuildType::Flush).await;
2218        assert!(
2219            scheduler
2220                .schedule_build(&version_control, task1)
2221                .await
2222                .is_ok()
2223        );
2224        assert!(scheduler.region_status.contains_key(&region_id));
2225        let status = scheduler.region_status.get(&region_id).unwrap();
2226        assert_eq!(status.building_files.len(), 1);
2227        assert!(status.building_files.contains(&file_id1));
2228
2229        // Test 2: Duplicate file scheduling (should be skipped)
2230        let task1_dup =
2231            create_mock_task_for_schedule(&env, file_id1, region_id, IndexBuildType::Flush).await;
2232        scheduler
2233            .schedule_build(&version_control, task1_dup)
2234            .await
2235            .unwrap();
2236        let status = scheduler.region_status.get(&region_id).unwrap();
2237        assert_eq!(status.building_files.len(), 1); // Still only one
2238
2239        // Test 3: Fill up to limit (2 building tasks)
2240        let task2 =
2241            create_mock_task_for_schedule(&env, file_id2, region_id, IndexBuildType::Flush).await;
2242        scheduler
2243            .schedule_build(&version_control, task2)
2244            .await
2245            .unwrap();
2246        let status = scheduler.region_status.get(&region_id).unwrap();
2247        assert_eq!(status.building_files.len(), 2); // Reached limit
2248        assert_eq!(status.pending_tasks.len(), 0);
2249
2250        // Test 4: Add tasks with different priorities to pending queue
2251        // Now all new tasks will be pending since we reached the limit
2252        let task3 =
2253            create_mock_task_for_schedule(&env, file_id3, region_id, IndexBuildType::Compact).await;
2254        let task4 =
2255            create_mock_task_for_schedule(&env, file_id4, region_id, IndexBuildType::SchemaChange)
2256                .await;
2257        let task5 =
2258            create_mock_task_for_schedule(&env, file_id5, region_id, IndexBuildType::Manual).await;
2259
2260        scheduler
2261            .schedule_build(&version_control, task3)
2262            .await
2263            .unwrap();
2264        scheduler
2265            .schedule_build(&version_control, task4)
2266            .await
2267            .unwrap();
2268        scheduler
2269            .schedule_build(&version_control, task5)
2270            .await
2271            .unwrap();
2272
2273        let status = scheduler.region_status.get(&region_id).unwrap();
2274        assert_eq!(status.building_files.len(), 2); // Still at limit
2275        assert_eq!(status.pending_tasks.len(), 3); // Three pending
2276
2277        // Test 5: Task completion triggers scheduling of the highest-priority pending task (Manual)
2278        scheduler.on_task_stopped(region_id, file_id1, &version_control);
2279        let status = scheduler.region_status.get(&region_id).unwrap();
2280        assert!(!status.building_files.contains(&file_id1));
2281        assert_eq!(status.building_files.len(), 2); // Should schedule next task
2282        assert_eq!(status.pending_tasks.len(), 2); // One less pending
2283        // The highest priority task (Manual) should now be building
2284        assert!(status.building_files.contains(&file_id5));
2285
2286        // Test 6: Completing another task should schedule SchemaChange (the second-highest priority)
2287        scheduler.on_task_stopped(region_id, file_id2, &version_control);
2288        let status = scheduler.region_status.get(&region_id).unwrap();
2289        assert_eq!(status.building_files.len(), 2);
2290        assert_eq!(status.pending_tasks.len(), 1); // One less pending
2291        assert!(status.building_files.contains(&file_id4)); // SchemaChange should be building
2292
2293        // Test 7: Complete remaining tasks and cleanup
2294        scheduler.on_task_stopped(region_id, file_id5, &version_control);
2295        scheduler.on_task_stopped(region_id, file_id4, &version_control);
2296
2297        let status = scheduler.region_status.get(&region_id).unwrap();
2298        assert_eq!(status.building_files.len(), 1); // Last task (Compact) should be building
2299        assert_eq!(status.pending_tasks.len(), 0);
2300        assert!(status.building_files.contains(&file_id3));
2301
2302        scheduler.on_task_stopped(region_id, file_id3, &version_control);
2303
2304        // Region should be removed when all tasks complete
2305        assert!(!scheduler.region_status.contains_key(&region_id));
2306
2307        // Test 8: Region dropped with pending tasks
2308        let task6 =
2309            create_mock_task_for_schedule(&env, file_id1, region_id, IndexBuildType::Flush).await;
2310        let task7 =
2311            create_mock_task_for_schedule(&env, file_id2, region_id, IndexBuildType::Flush).await;
2312        let task8 =
2313            create_mock_task_for_schedule(&env, file_id3, region_id, IndexBuildType::Manual).await;
2314
2315        scheduler
2316            .schedule_build(&version_control, task6)
2317            .await
2318            .unwrap();
2319        scheduler
2320            .schedule_build(&version_control, task7)
2321            .await
2322            .unwrap();
2323        scheduler
2324            .schedule_build(&version_control, task8)
2325            .await
2326            .unwrap();
2327
2328        assert!(scheduler.region_status.contains_key(&region_id));
2329        let status = scheduler.region_status.get(&region_id).unwrap();
2330        assert_eq!(status.building_files.len(), 2);
2331        assert_eq!(status.pending_tasks.len(), 1);
2332
2333        scheduler.on_region_dropped(region_id).await;
2334        assert!(!scheduler.region_status.contains_key(&region_id));
2335    }
2336}