use std::any::TypeId;
use std::collections::HashMap;
use std::sync::atomic::{AtomicI64, AtomicU64};
use std::sync::{Arc, Mutex};
use std::time::Instant;

use common_telemetry::{debug, error, info, warn};
use common_wal::options::WalOptions;
use futures::StreamExt;
use futures::future::BoxFuture;
use log_store::kafka::log_store::KafkaLogStore;
use log_store::noop::log_store::NoopLogStore;
use log_store::raft_engine::log_store::RaftEngineLogStore;
use object_store::manager::ObjectStoreManagerRef;
use object_store::util::normalize_dir;
use snafu::{OptionExt, ResultExt, ensure};
use store_api::logstore::LogStore;
use store_api::logstore::provider::Provider;
use store_api::metadata::{
    ColumnMetadata, RegionMetadata, RegionMetadataBuilder, RegionMetadataRef,
};
use store_api::region_engine::RegionRole;
use store_api::region_request::PathType;
use store_api::storage::{ColumnId, RegionId};

use crate::access_layer::AccessLayer;
use crate::cache::CacheManagerRef;
use crate::cache::file_cache::{FileCache, FileType, IndexKey};
use crate::config::MitoConfig;
use crate::error;
use crate::error::{
    EmptyRegionDirSnafu, InvalidMetadataSnafu, ObjectStoreNotFoundSnafu, RegionCorruptedSnafu,
    Result, StaleLogEntrySnafu,
};
use crate::manifest::action::RegionManifest;
use crate::manifest::manager::{RegionManifestManager, RegionManifestOptions};
use crate::memtable::MemtableBuilderProvider;
use crate::memtable::bulk::part::BulkPart;
use crate::memtable::time_partition::{TimePartitions, TimePartitionsRef};
use crate::metrics::{CACHE_FILL_DOWNLOADED_FILES, CACHE_FILL_PENDING_FILES};
use crate::region::options::RegionOptions;
use crate::region::version::{VersionBuilder, VersionControl, VersionControlRef};
use crate::region::{
    ManifestContext, ManifestStats, MitoRegion, MitoRegionRef, RegionLeaderState, RegionRoleState,
};
use crate::region_write_ctx::RegionWriteCtx;
use crate::request::OptionOutputTx;
use crate::schedule::scheduler::SchedulerRef;
use crate::sst::FormatType;
use crate::sst::file::{RegionFileId, RegionIndexId};
use crate::sst::file_purger::{FilePurgerRef, create_file_purger};
use crate::sst::file_ref::FileReferenceManagerRef;
use crate::sst::index::intermediate::IntermediateManager;
use crate::sst::index::puffin_manager::PuffinManagerFactory;
use crate::sst::location::{self, region_dir_from_table_dir};
use crate::time_provider::TimeProviderRef;
use crate::wal::entry_reader::WalEntryReader;
use crate::wal::{EntryId, Wal};

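/// Fetches the partition expression of a region as a JSON string.
///
/// Used when opening a region whose manifest does not carry a partition
/// expression, so the expression can be backfilled into the region metadata.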
#[async_trait::async_trait]
pub trait PartitionExprFetcher {
    async fn fetch_expr(&self, region_id: RegionId) -> Option<String>;
}

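/// Reference-counted pointer to a [PartitionExprFetcher].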
pub type PartitionExprFetcherRef = Arc<dyn PartitionExprFetcher + Send + Sync>;

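/// Builder to create a new [MitoRegion] or open an existing one.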
pub(crate) struct RegionOpener {
    region_id: RegionId,
    metadata_builder: Option<RegionMetadataBuilder>,
    memtable_builder_provider: MemtableBuilderProvider,
    object_store_manager: ObjectStoreManagerRef,
    table_dir: String,
    path_type: PathType,
    purge_scheduler: SchedulerRef,
    options: Option<RegionOptions>,
    cache_manager: Option<CacheManagerRef>,
    skip_wal_replay: bool,
    puffin_manager_factory: PuffinManagerFactory,
    intermediate_manager: IntermediateManager,
    time_provider: TimeProviderRef,
    stats: ManifestStats,
    wal_entry_reader: Option<Box<dyn WalEntryReader>>,
    replay_checkpoint: Option<u64>,
    file_ref_manager: FileReferenceManagerRef,
    partition_expr_fetcher: PartitionExprFetcherRef,
}

impl RegionOpener {
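    /// Returns a new opener for the region `region_id` under `table_dir`.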
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        region_id: RegionId,
        table_dir: &str,
        path_type: PathType,
        memtable_builder_provider: MemtableBuilderProvider,
        object_store_manager: ObjectStoreManagerRef,
        purge_scheduler: SchedulerRef,
        puffin_manager_factory: PuffinManagerFactory,
        intermediate_manager: IntermediateManager,
        time_provider: TimeProviderRef,
        file_ref_manager: FileReferenceManagerRef,
        partition_expr_fetcher: PartitionExprFetcherRef,
    ) -> RegionOpener {
        RegionOpener {
            region_id,
            metadata_builder: None,
            memtable_builder_provider,
            object_store_manager,
            table_dir: normalize_dir(table_dir),
            path_type,
            purge_scheduler,
            options: None,
            cache_manager: None,
            skip_wal_replay: false,
            puffin_manager_factory,
            intermediate_manager,
            time_provider,
            stats: Default::default(),
            wal_entry_reader: None,
            replay_checkpoint: None,
            file_ref_manager,
            partition_expr_fetcher,
        }
    }

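    /// Sets the metadata builder of the region to create.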
    pub(crate) fn metadata_builder(mut self, builder: RegionMetadataBuilder) -> Self {
        self.metadata_builder = Some(builder);
        self
    }

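    /// Returns the directory of the region under the table directory.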
    fn region_dir(&self) -> String {
        region_dir_from_table_dir(&self.table_dir, self.region_id, self.path_type)
    }

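    /// Builds the metadata of the region to create.
    ///
    /// # Panics
    /// Panics if the options or the metadata builder is not set.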
    fn build_metadata(&mut self) -> Result<RegionMetadata> {
        let options = self.options.as_ref().unwrap();
        let mut metadata_builder = self.metadata_builder.take().unwrap();
        metadata_builder.primary_key_encoding(options.primary_key_encoding());
        metadata_builder.build().context(InvalidMetadataSnafu)
    }

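    /// Parses the options from the raw key-value map and sets them for the region.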
    pub(crate) fn parse_options(self, options: HashMap<String, String>) -> Result<Self> {
        self.options(RegionOptions::try_from(&options)?)
    }

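    /// Sets the replay checkpoint. WAL replay starts from the maximum of this
    /// checkpoint and the flushed entry id.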
    pub(crate) fn replay_checkpoint(mut self, replay_checkpoint: Option<u64>) -> Self {
        self.replay_checkpoint = replay_checkpoint;
        self
    }

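    /// Sets the [WalEntryReader] to use for WAL replay. If it is not set, a reader
    /// is created from the [Wal] when the region is opened.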
    pub(crate) fn wal_entry_reader(
        mut self,
        wal_entry_reader: Option<Box<dyn WalEntryReader>>,
    ) -> Self {
        self.wal_entry_reader = wal_entry_reader;
        self
    }

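    /// Sets and validates the options for the region.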
    pub(crate) fn options(mut self, options: RegionOptions) -> Result<Self> {
        options.validate()?;
        self.options = Some(options);
        Ok(self)
    }

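    /// Sets the cache manager for the region.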
    pub(crate) fn cache(mut self, cache_manager: Option<CacheManagerRef>) -> Self {
        self.cache_manager = cache_manager;
        self
    }

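    /// Sets whether to skip WAL replay when opening the region.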
    pub(crate) fn skip_wal_replay(mut self, skip: bool) -> Self {
        self.skip_wal_replay = skip;
        self
    }

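    /// Opens the region if it exists, otherwise creates it.
    ///
    /// If the region already exists, this checks that the recovered metadata
    /// matches the expected metadata and sets the region role to leader.
    ///
    /// # Panics
    /// Panics if the options or the metadata builder is not set.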
    pub(crate) async fn create_or_open<S: LogStore>(
        mut self,
        config: &MitoConfig,
        wal: &Wal<S>,
    ) -> Result<MitoRegionRef> {
        let region_id = self.region_id;
        let region_dir = self.region_dir();
        let metadata = self.build_metadata()?;
        match self.maybe_open(config, wal).await {
            Ok(Some(region)) => {
                let recovered = region.metadata();
                let expect = &metadata;
                check_recovered_region(
                    &recovered,
                    expect.region_id,
                    &expect.column_metadatas,
                    &expect.primary_key,
                )?;
                region.set_role(RegionRole::Leader);

                return Ok(region);
            }
            Ok(None) => {
                debug!(
                    "No data under directory {}, region_id: {}",
                    region_dir, self.region_id
                );
            }
            Err(e) => {
                warn!(e;
                    "Failed to open region {} before creating it, region_dir: {}",
                    self.region_id, region_dir
                );
            }
        }
        let mut options = self.options.take().unwrap();
        let object_store = get_object_store(&options.storage, &self.object_store_manager)?;
        let provider = self.provider::<S>(&options.wal_options)?;
        let metadata = Arc::new(metadata);
        let sst_format = if let Some(format) = options.sst_format {
            format
        } else if config.default_experimental_flat_format {
            options.sst_format = Some(FormatType::Flat);
            FormatType::Flat
        } else {
            options.sst_format = Some(FormatType::PrimaryKey);
            FormatType::PrimaryKey
        };
        let mut region_manifest_options =
            RegionManifestOptions::new(config, &region_dir, &object_store);
        region_manifest_options.manifest_cache = self
            .cache_manager
            .as_ref()
            .and_then(|cm| cm.write_cache())
            .and_then(|wc| wc.manifest_cache());
        let flushed_entry_id = provider.initial_flushed_entry_id::<S>(wal.store());
        let manifest_manager = RegionManifestManager::new(
            metadata.clone(),
            flushed_entry_id,
            region_manifest_options,
            sst_format,
            &self.stats,
        )
        .await?;

        let memtable_builder = self.memtable_builder_provider.builder_for_options(&options);
        let part_duration = options.compaction.time_window();
        let mutable = Arc::new(TimePartitions::new(
            metadata.clone(),
            memtable_builder.clone(),
            0,
            part_duration,
        ));

        debug!(
            "Create region {} with options: {:?}, default_flat_format: {}",
            region_id, options, config.default_experimental_flat_format
        );

        let version = VersionBuilder::new(metadata, mutable)
            .options(options)
            .build();
        let version_control = Arc::new(VersionControl::new(version));
        let access_layer = Arc::new(AccessLayer::new(
            self.table_dir.clone(),
            self.path_type,
            object_store,
            self.puffin_manager_factory,
            self.intermediate_manager,
        ));
        let now = self.time_provider.current_time_millis();

        Ok(Arc::new(MitoRegion {
            region_id,
            version_control,
            access_layer: access_layer.clone(),
            manifest_ctx: Arc::new(ManifestContext::new(
                manifest_manager,
                RegionRoleState::Leader(RegionLeaderState::Writable),
            )),
            file_purger: create_file_purger(
                config.gc.enable,
                self.path_type,
                self.purge_scheduler,
                access_layer,
                self.cache_manager,
                self.file_ref_manager.clone(),
            ),
            provider,
            last_flush_millis: AtomicI64::new(now),
            last_compaction_millis: AtomicI64::new(now),
            time_provider: self.time_provider.clone(),
            topic_latest_entry_id: AtomicU64::new(0),
            written_bytes: Arc::new(AtomicU64::new(0)),
            stats: self.stats,
            staging_partition_expr: Mutex::new(None),
        }))
    }

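    /// Opens an existing region under the table directory.
    ///
    /// Returns an error if the region directory is empty or the recovered region
    /// id differs from the expected one.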
    pub(crate) async fn open<S: LogStore>(
        mut self,
        config: &MitoConfig,
        wal: &Wal<S>,
    ) -> Result<MitoRegionRef> {
        let region_id = self.region_id;
        let region_dir = self.region_dir();
        let region = self
            .maybe_open(config, wal)
            .await?
            .with_context(|| EmptyRegionDirSnafu {
                region_id,
                region_dir: &region_dir,
            })?;

        ensure!(
            region.region_id == self.region_id,
            RegionCorruptedSnafu {
                region_id: self.region_id,
                reason: format!(
                    "recovered region has different region id {}",
                    region.region_id
                ),
            }
        );

        Ok(region)
    }

    fn provider<S: LogStore>(&self, wal_options: &WalOptions) -> Result<Provider> {
        match wal_options {
            WalOptions::RaftEngine => {
                ensure!(
                    TypeId::of::<RaftEngineLogStore>() == TypeId::of::<S>()
                        || TypeId::of::<NoopLogStore>() == TypeId::of::<S>(),
                    error::IncompatibleWalProviderChangeSnafu {
                        global: "`kafka`",
                        region: "`raft_engine`",
                    }
                );
                Ok(Provider::raft_engine_provider(self.region_id.as_u64()))
            }
            WalOptions::Kafka(options) => {
                ensure!(
                    TypeId::of::<KafkaLogStore>() == TypeId::of::<S>()
                        || TypeId::of::<NoopLogStore>() == TypeId::of::<S>(),
                    error::IncompatibleWalProviderChangeSnafu {
                        global: "`raft_engine`",
                        region: "`kafka`",
                    }
                );
                Ok(Provider::kafka_provider(options.topic.clone()))
            }
            WalOptions::Noop => Ok(Provider::noop_provider()),
        }
    }

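    /// Tries to open the region. Returns `Ok(None)` if no manifest is found for
    /// the region.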
    async fn maybe_open<S: LogStore>(
        &mut self,
        config: &MitoConfig,
        wal: &Wal<S>,
    ) -> Result<Option<MitoRegionRef>> {
        let now = Instant::now();
        let mut region_options = self.options.as_ref().unwrap().clone();
        let object_storage = get_object_store(&region_options.storage, &self.object_store_manager)?;
        let mut region_manifest_options =
            RegionManifestOptions::new(config, &self.region_dir(), &object_storage);
        region_manifest_options.manifest_cache = self
            .cache_manager
            .as_ref()
            .and_then(|cm| cm.write_cache())
            .and_then(|wc| wc.manifest_cache());
        let Some(manifest_manager) =
            RegionManifestManager::open(region_manifest_options, &self.stats).await?
        else {
            return Ok(None);
        };

        let manifest = manifest_manager.manifest();
        let metadata = if manifest.metadata.partition_expr.is_none()
            && let Some(expr_json) = self.partition_expr_fetcher.fetch_expr(self.region_id).await
        {
            let metadata = manifest.metadata.as_ref().clone();
            let mut builder = RegionMetadataBuilder::from_existing(metadata);
            builder.partition_expr_json(Some(expr_json));
            Arc::new(builder.build().context(InvalidMetadataSnafu)?)
        } else {
            manifest.metadata.clone()
        };
        sanitize_region_options(&manifest, &mut region_options);

        let region_id = self.region_id;
        let provider = self.provider::<S>(&region_options.wal_options)?;
        let wal_entry_reader = self
            .wal_entry_reader
            .take()
            .unwrap_or_else(|| wal.wal_entry_reader(&provider, region_id, None));
        let on_region_opened = wal.on_region_opened();
        let object_store = get_object_store(&region_options.storage, &self.object_store_manager)?;

        debug!(
            "Open region {} at {} with options: {:?}",
            region_id, self.table_dir, self.options
        );

        let access_layer = Arc::new(AccessLayer::new(
            self.table_dir.clone(),
            self.path_type,
            object_store,
            self.puffin_manager_factory.clone(),
            self.intermediate_manager.clone(),
        ));
        let file_purger = create_file_purger(
            config.gc.enable,
            self.path_type,
            self.purge_scheduler.clone(),
            access_layer.clone(),
            self.cache_manager.clone(),
            self.file_ref_manager.clone(),
        );
        let memtable_builder = self
            .memtable_builder_provider
            .builder_for_options(&region_options);
        let part_duration = region_options
            .compaction
            .time_window()
            .or(manifest.compaction_time_window);
        let mutable = Arc::new(TimePartitions::new(
            metadata.clone(),
            memtable_builder.clone(),
            0,
            part_duration,
        ));

        let version_builder = version_builder_from_manifest(
            &manifest,
            metadata,
            file_purger.clone(),
            mutable,
            region_options,
        );
        let version = version_builder.build();
        let flushed_entry_id = version.flushed_entry_id;
        let version_control = Arc::new(VersionControl::new(version));

        let topic_latest_entry_id = if !self.skip_wal_replay {
            let replay_from_entry_id = self
                .replay_checkpoint
                .unwrap_or_default()
                .max(flushed_entry_id);
            info!(
                "Start replaying memtable at replay_from_entry_id: {} for region {}, manifest version: {}, flushed entry id: {}, elapsed: {:?}",
                replay_from_entry_id,
                region_id,
                manifest.manifest_version,
                flushed_entry_id,
                now.elapsed()
            );
            replay_memtable(
                &provider,
                wal_entry_reader,
                region_id,
                replay_from_entry_id,
                &version_control,
                config.allow_stale_entries,
                on_region_opened,
            )
            .await?;
            if provider.is_remote_wal() && version_control.current().version.memtables.is_empty() {
                wal.store().latest_entry_id(&provider).unwrap_or(0)
            } else {
                0
            }
        } else {
            info!(
                "Skip the WAL replay for region: {}, manifest version: {}, flushed_entry_id: {}, elapsed: {:?}",
                region_id,
                manifest.manifest_version,
                flushed_entry_id,
                now.elapsed()
            );

            0
        };

        if let Some(committed_in_manifest) = manifest.committed_sequence {
            let committed_after_replay = version_control.committed_sequence();
            if committed_in_manifest > committed_after_replay {
                info!(
                    "Overriding committed sequence, region: {}, flushed_sequence: {}, committed_sequence: {} -> {}",
                    self.region_id,
                    version_control.current().version.flushed_sequence,
                    version_control.committed_sequence(),
                    committed_in_manifest
                );
                version_control.set_committed_sequence(committed_in_manifest);
            }
        }

        let now = self.time_provider.current_time_millis();

        let region = MitoRegion {
            region_id: self.region_id,
            version_control: version_control.clone(),
            access_layer: access_layer.clone(),
            manifest_ctx: Arc::new(ManifestContext::new(
                manifest_manager,
                RegionRoleState::Follower,
            )),
            file_purger,
            provider: provider.clone(),
            last_flush_millis: AtomicI64::new(now),
            last_compaction_millis: AtomicI64::new(now),
            time_provider: self.time_provider.clone(),
            topic_latest_entry_id: AtomicU64::new(topic_latest_entry_id),
            written_bytes: Arc::new(AtomicU64::new(0)),
            stats: self.stats.clone(),
            staging_partition_expr: Mutex::new(None),
        };

        let region = Arc::new(region);

        maybe_load_cache(&region, config, &self.cache_manager);

        Ok(Some(region))
    }
}

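/// Builds a [VersionBuilder] from the region manifest, restoring SST files,
/// flushed/truncated entry ids, the flushed sequence, and the compaction time
/// window.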
pub(crate) fn version_builder_from_manifest(
    manifest: &RegionManifest,
    metadata: RegionMetadataRef,
    file_purger: FilePurgerRef,
    mutable: TimePartitionsRef,
    region_options: RegionOptions,
) -> VersionBuilder {
    VersionBuilder::new(metadata, mutable)
        .add_files(file_purger, manifest.files.values().cloned())
        .flushed_entry_id(manifest.flushed_entry_id)
        .flushed_sequence(manifest.flushed_sequence)
        .truncated_entry_id(manifest.truncated_entry_id)
        .compaction_time_window(manifest.compaction_time_window)
        .options(region_options)
}

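/// Sanitizes the region options against the manifest. The SST format recorded in
/// the manifest takes precedence over the one in the options.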
pub(crate) fn sanitize_region_options(manifest: &RegionManifest, options: &mut RegionOptions) {
    let option_format = options.sst_format.unwrap_or_default();
    if option_format != manifest.sst_format {
        common_telemetry::warn!(
            "Overriding SST format from {:?} to {:?} for region {}",
            option_format,
            manifest.sst_format,
            manifest.metadata.region_id,
        );
        options.sst_format = Some(manifest.sst_format);
    }
}

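/// Returns the object store named `name` from the manager, or the default object
/// store if `name` is `None`.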
pub fn get_object_store(
    name: &Option<String>,
    object_store_manager: &ObjectStoreManagerRef,
) -> Result<object_store::ObjectStore> {
    if let Some(name) = name {
        Ok(object_store_manager
            .find(name)
            .with_context(|| ObjectStoreNotFoundSnafu {
                object_store: name.clone(),
            })?
            .clone())
    } else {
        Ok(object_store_manager.default_object_store().clone())
    }
}

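/// Checks that the recovered region metadata matches the expected region id,
/// column metadatas, and primary key.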
pub(crate) fn check_recovered_region(
    recovered: &RegionMetadata,
    region_id: RegionId,
    column_metadatas: &[ColumnMetadata],
    primary_key: &[ColumnId],
) -> Result<()> {
    if recovered.region_id != region_id {
        error!(
            "Recovered region {}, expect region {}",
            recovered.region_id, region_id
        );
        return RegionCorruptedSnafu {
            region_id,
            reason: format!(
                "recovered metadata has different region id {}",
                recovered.region_id
            ),
        }
        .fail();
    }
    if recovered.column_metadatas != column_metadatas {
        error!(
            "Unexpected schema in recovered region {}, recovered: {:?}, expect: {:?}",
            recovered.region_id, recovered.column_metadatas, column_metadatas
        );

        return RegionCorruptedSnafu {
            region_id,
            reason: "recovered metadata has different schema",
        }
        .fail();
    }
    if recovered.primary_key != primary_key {
        error!(
            "Unexpected primary key in recovered region {}, recovered: {:?}, expect: {:?}",
            recovered.region_id, recovered.primary_key, primary_key
        );

        return RegionCorruptedSnafu {
            region_id,
            reason: "recovered metadata has different primary key",
        }
        .fail();
    }

    Ok(())
}

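/// Replays the WAL into the mutable memtable of the region and returns the last
/// replayed entry id.
///
/// Stale entries (not newer than `flushed_entry_id`) trigger an error unless
/// `allow_stale_entries` is true. `on_region_opened` is invoked with the flushed
/// entry id once the replay finishes.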
pub(crate) async fn replay_memtable<F>(
    provider: &Provider,
    mut wal_entry_reader: Box<dyn WalEntryReader>,
    region_id: RegionId,
    flushed_entry_id: EntryId,
    version_control: &VersionControlRef,
    allow_stale_entries: bool,
    on_region_opened: F,
) -> Result<EntryId>
where
    F: FnOnce(RegionId, EntryId, &Provider) -> BoxFuture<Result<()>> + Send,
{
    let now = Instant::now();
    let mut rows_replayed = 0;
    let mut last_entry_id = flushed_entry_id;
    let replay_from_entry_id = flushed_entry_id + 1;

    let mut wal_stream = wal_entry_reader.read(provider, replay_from_entry_id)?;
    while let Some(res) = wal_stream.next().await {
        let (entry_id, entry) = res?;
        if entry_id <= flushed_entry_id {
            warn!(
                "Stale WAL entries read during replay, region id: {}, flushed entry id: {}, entry id read: {}",
                region_id, flushed_entry_id, entry_id
            );
            ensure!(
                allow_stale_entries,
                StaleLogEntrySnafu {
                    region_id,
                    flushed_entry_id,
                    unexpected_entry_id: entry_id,
                }
            );
        }
        last_entry_id = last_entry_id.max(entry_id);

        let mut region_write_ctx = RegionWriteCtx::new(
            region_id,
            version_control,
            provider.clone(),
            None,
        );
        for mutation in entry.mutations {
            rows_replayed += mutation
                .rows
                .as_ref()
                .map(|rows| rows.rows.len())
                .unwrap_or(0);
            region_write_ctx.push_mutation(
                mutation.op_type,
                mutation.rows,
                mutation.write_hint,
                OptionOutputTx::none(),
                Some(mutation.sequence),
            );
        }

        for bulk_entry in entry.bulk_entries {
            let part = BulkPart::try_from(bulk_entry)?;
            rows_replayed += part.num_rows();
            let bulk_sequence_from_wal = part.sequence;
            ensure!(
                region_write_ctx.push_bulk(
                    OptionOutputTx::none(),
                    part,
                    Some(bulk_sequence_from_wal)
                ),
                RegionCorruptedSnafu {
                    region_id,
                    reason: "unable to replay memtable with bulk entries",
                }
            );
        }

        region_write_ctx.set_next_entry_id(last_entry_id + 1);
        region_write_ctx.write_memtable().await;
        region_write_ctx.write_bulk().await;
    }

    (on_region_opened)(region_id, flushed_entry_id, provider).await?;

    let series_count = version_control.current().series_count();
    info!(
        "Replay WAL for region: {}, provider: {:?}, rows recovered: {}, replay from entry id: {}, last entry id: {}, total timeseries replayed: {}, elapsed: {:?}",
        region_id,
        provider,
        rows_replayed,
        replay_from_entry_id,
        last_entry_id,
        series_count,
        now.elapsed()
    );
    Ok(last_entry_id)
}

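/// Background task that preloads index (puffin) files of a region into the file cache.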
pub(crate) struct RegionLoadCacheTask {
    region: MitoRegionRef,
}

impl RegionLoadCacheTask {
    pub(crate) fn new(region: MitoRegionRef) -> Self {
        Self { region }
    }

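    /// Downloads the index files of the region into the file cache.
    ///
    /// Files are downloaded from newest to oldest by their max timestamp, and the
    /// task stops early when the cache capacity would be exceeded or the region
    /// state no longer allows loading.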
    pub(crate) async fn fill_cache(&self, file_cache: &FileCache) {
        let region_id = self.region.region_id;
        let table_dir = self.region.access_layer.table_dir();
        let path_type = self.region.access_layer.path_type();
        let object_store = self.region.access_layer.object_store();
        let version_control = &self.region.version_control;

        let mut files_to_download = Vec::new();
        let mut files_already_cached = 0;

        {
            let version = version_control.current().version;
            for level in version.ssts.levels() {
                for file_handle in level.files.values() {
                    let file_meta = file_handle.meta_ref();
                    if file_meta.exists_index() {
                        let puffin_key = IndexKey::new(
                            file_meta.region_id,
                            file_meta.file_id,
                            FileType::Puffin(file_meta.index_version),
                        );

                        if !file_cache.contains_key(&puffin_key) {
                            files_to_download.push((
                                puffin_key,
                                file_meta.index_file_size,
                                file_meta.time_range.1,
                            ));
                        } else {
                            files_already_cached += 1;
                        }
                    }
                }
            }
        }

        files_to_download.sort_by(|a, b| b.2.cmp(&a.2));

        let total_files = files_to_download.len() as i64;

        info!(
            "Starting background index cache preload for region {}, total_files_to_download: {}, files_already_cached: {}",
            region_id, total_files, files_already_cached
        );

        CACHE_FILL_PENDING_FILES.add(total_files);

        let mut files_downloaded = 0;
        let mut files_skipped = 0;

        for (puffin_key, file_size, max_timestamp) in files_to_download {
            let current_size = file_cache.puffin_cache_size();
            let capacity = file_cache.puffin_cache_capacity();
            let region_state = self.region.state();
            if !can_load_cache(region_state) {
                info!(
                    "Stopping index cache by state: {:?}, region: {}, current_size: {}, capacity: {}",
                    region_state, region_id, current_size, capacity
                );
                break;
            }

            if current_size + file_size > capacity {
                info!(
                    "Stopping index cache preload due to capacity limit, region: {}, file_id: {}, current_size: {}, file_size: {}, capacity: {}, file_timestamp: {:?}",
                    region_id, puffin_key.file_id, current_size, file_size, capacity, max_timestamp
                );
                files_skipped = (total_files - files_downloaded) as usize;
                CACHE_FILL_PENDING_FILES.sub(total_files - files_downloaded);
                break;
            }

            let index_version = if let FileType::Puffin(version) = puffin_key.file_type {
                version
            } else {
                unreachable!("`files_to_download` should only contain Puffin files");
            };
            let index_id = RegionIndexId::new(
                RegionFileId::new(puffin_key.region_id, puffin_key.file_id),
                index_version,
            );

            let index_remote_path = location::index_file_path(table_dir, index_id, path_type);

            match file_cache
                .download(puffin_key, &index_remote_path, object_store, file_size)
                .await
            {
                Ok(_) => {
                    debug!(
                        "Downloaded index file to write cache, region: {}, file_id: {}",
                        region_id, puffin_key.file_id
                    );
                    files_downloaded += 1;
                    CACHE_FILL_DOWNLOADED_FILES.inc_by(1);
                    CACHE_FILL_PENDING_FILES.dec();
                }
                Err(e) => {
                    warn!(
                        e; "Failed to download index file to write cache, region: {}, file_id: {}",
                        region_id, puffin_key.file_id
                    );
                    CACHE_FILL_PENDING_FILES.dec();
                }
            }
        }

        info!(
            "Completed background cache fill task for region {}, total_files: {}, files_downloaded: {}, files_already_cached: {}, files_skipped: {}",
            region_id, total_files, files_downloaded, files_already_cached, files_skipped
        );
    }
}

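/// Schedules a [RegionLoadCacheTask] for the region if a write cache is available
/// and index cache preloading is enabled.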
fn maybe_load_cache(
    region: &MitoRegionRef,
    config: &MitoConfig,
    cache_manager: &Option<CacheManagerRef>,
) {
    let Some(cache_manager) = cache_manager else {
        return;
    };
    let Some(write_cache) = cache_manager.write_cache() else {
        return;
    };

    let preload_enabled = config.preload_index_cache;
    if !preload_enabled {
        return;
    }

    let task = RegionLoadCacheTask::new(region.clone());
    write_cache.load_region_cache(task);
}

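/// Returns whether the region state allows the cache fill task to keep loading files.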
fn can_load_cache(state: RegionRoleState) -> bool {
    match state {
        RegionRoleState::Leader(RegionLeaderState::Writable)
        | RegionRoleState::Leader(RegionLeaderState::Staging)
        | RegionRoleState::Leader(RegionLeaderState::Altering)
        | RegionRoleState::Leader(RegionLeaderState::EnteringStaging)
        | RegionRoleState::Leader(RegionLeaderState::Editing)
        | RegionRoleState::Follower => true,
        RegionRoleState::Leader(RegionLeaderState::Downgrading)
        | RegionRoleState::Leader(RegionLeaderState::Dropping)
        | RegionRoleState::Leader(RegionLeaderState::Truncating) => false,
    }
}