// mito2/cache/index.rs

// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod bloom_filter_index;
pub mod inverted_index;
pub mod result_cache;

use std::future::Future;
use std::hash::Hash;
use std::ops::Range;
use std::sync::Arc;

use bytes::Bytes;
use object_store::Buffer;

use crate::metrics::{CACHE_BYTES, CACHE_HIT, CACHE_MISS};

/// Metrics label for index metadata.
const INDEX_METADATA_TYPE: &str = "index_metadata";
/// Metrics label for index content.
const INDEX_CONTENT_TYPE: &str = "index_content";

/// Key identifying a fixed-size page of index content in the cache.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct PageKey {
    page_id: u64,
}

impl PageKey {
    /// Converts an offset to a page ID based on the page size.
    fn calculate_page_id(offset: u64, page_size: u64) -> u64 {
        offset / page_size
    }

    /// Calculates the total number of pages that a given size spans, starting from a specific offset.
    fn calculate_page_count(offset: u64, size: u32, page_size: u64) -> u32 {
        let start_page = Self::calculate_page_id(offset, page_size);
        let end_page = Self::calculate_page_id(offset + (size as u64) - 1, page_size);
        (end_page + 1 - start_page) as u32
    }

    /// Calculates the byte range for data retrieval based on the specified offset and size.
    ///
    /// This function determines the starting and ending byte positions required for reading data.
    /// For example, with an offset of 5000 and a size of 5000, using a page size of 4096,
    /// the resulting byte range will be 904..5904. This indicates that:
    /// - The reader will first access fixed-size pages [4096, 8192) and [8192, 12288).
    /// - To read the range [5000, 10000), it only needs to fetch bytes within the range [904, 5904) across the two pages.
    fn calculate_range(offset: u64, size: u32, page_size: u64) -> Range<usize> {
        let start = (offset % page_size) as usize;
        let end = start + size as usize;
        start..end
    }

    /// Generates an iterator of `PageKey` for the pages that a given offset and size span.
    fn generate_page_keys(offset: u64, size: u32, page_size: u64) -> impl Iterator<Item = Self> {
        let start_page = Self::calculate_page_id(offset, page_size);
        let total_pages = Self::calculate_page_count(offset, size, page_size);
        (0..total_pages).map(move |i| Self {
            page_id: start_page + i as u64,
        })
    }
}

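// Worked example of the page math above, using the numbers from the
// `calculate_range` doc comment (offset 5000, size 5000, page size 4096):
//
//   calculate_page_id(5000, 4096)          == 1          (first page touched)
//   calculate_page_count(5000, 5000, 4096) == 2          (pages 1 and 2)
//   calculate_range(5000, 5000, 4096)      == 904..5904  (slice of the pages)
//
// A read of [5000, 10000) therefore fetches pages [4096, 8192) and
// [8192, 12288), concatenates them, and keeps bytes [904, 5904) of that
// buffer. See also `test_page_key_math` in the test module below.
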
/// Cache for index metadata and content.
pub struct IndexCache<K, M> {
    /// Cache for index metadata.
    index_metadata: moka::sync::Cache<K, Arc<M>>,
    /// Cache for index content.
    index: moka::sync::Cache<(K, PageKey), Bytes>,
    /// Page size for index content.
    page_size: u64,

    /// Weighter for metadata.
    weight_of_metadata: fn(&K, &Arc<M>) -> u32,
    /// Weighter for content.
    weight_of_content: fn(&(K, PageKey), &Bytes) -> u32,
}

impl<K, M> IndexCache<K, M>
where
    K: Hash + Eq + Send + Sync + 'static,
    M: Send + Sync + 'static,
{
    pub fn new_with_weighter(
        index_metadata_cap: u64,
        index_content_cap: u64,
        page_size: u64,
        index_type: &'static str,
        weight_of_metadata: fn(&K, &Arc<M>) -> u32,
        weight_of_content: fn(&(K, PageKey), &Bytes) -> u32,
    ) -> Self {
        common_telemetry::debug!("Building IndexCache with metadata size: {index_metadata_cap}, content size: {index_content_cap}, page size: {page_size}, index type: {index_type}");
        let index_metadata = moka::sync::CacheBuilder::new(index_metadata_cap)
            .name(&format!("index_metadata_{}", index_type))
            .weigher(weight_of_metadata)
            .eviction_listener(move |k, v, _cause| {
                let size = weight_of_metadata(&k, &v);
                CACHE_BYTES
                    .with_label_values(&[INDEX_METADATA_TYPE])
                    .sub(size.into());
            })
            .build();
        let index_cache = moka::sync::CacheBuilder::new(index_content_cap)
            .name(&format!("index_content_{}", index_type))
            .weigher(weight_of_content)
            .eviction_listener(move |k, v, _cause| {
                let size = weight_of_content(&k, &v);
                CACHE_BYTES
                    .with_label_values(&[INDEX_CONTENT_TYPE])
                    .sub(size.into());
            })
            .build();
        Self {
            index_metadata,
            index: index_cache,
            page_size,
            weight_of_content,
            weight_of_metadata,
        }
    }
}

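// A minimal construction sketch (the `FileId` and `IndexMeta` types below are
// hypothetical stand-ins; real callers supply their own key and metadata
// types along with weighters approximating each entry's in-memory size):
//
//     let cache: IndexCache<FileId, IndexMeta> = IndexCache::new_with_weighter(
//         64 * 1024 * 1024,  // metadata capacity in bytes
//         512 * 1024 * 1024, // content capacity in bytes
//         64 * 1024,         // page size for index content
//         "inverted_index",  // label appended to the moka cache names
//         |_key, meta| std::mem::size_of_val(meta.as_ref()) as u32,
//         |_key, page| page.len() as u32,
//     );
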
impl<K, M> IndexCache<K, M>
where
    K: Hash + Eq + Clone + Copy + Send + Sync + 'static,
    M: Send + Sync + 'static,
{
    pub fn get_metadata(&self, key: K) -> Option<Arc<M>> {
        self.index_metadata.get(&key)
    }

    pub fn put_metadata(&self, key: K, metadata: Arc<M>) {
        CACHE_BYTES
            .with_label_values(&[INDEX_METADATA_TYPE])
            .add((self.weight_of_metadata)(&key, &metadata).into());
        self.index_metadata.insert(key, metadata)
    }

    /// Gets the given range of index data from the cache, loading any pages
    /// that are not already cached from the source via `load`.
    async fn get_or_load<F, Fut, E>(
        &self,
        key: K,
        file_size: u64,
        offset: u64,
        size: u32,
        load: F,
    ) -> Result<Vec<u8>, E>
    where
        F: Fn(Vec<Range<u64>>) -> Fut,
        Fut: Future<Output = Result<Vec<Bytes>, E>>,
        E: std::error::Error,
    {
        let page_keys =
            PageKey::generate_page_keys(offset, size, self.page_size).collect::<Vec<_>>();
        // If the size is 0, return empty data.
        if page_keys.is_empty() {
            return Ok(Vec::new());
        }
        let mut data = vec![Bytes::new(); page_keys.len()];
        let mut cache_miss_range = vec![];
        let mut cache_miss_idx = vec![];
        let last_index = page_keys.len() - 1;
        // TODO: Avoid copy as much as possible.
        for (i, page_key) in page_keys.iter().enumerate() {
            match self.get_page(key, *page_key) {
                Some(page) => {
                    CACHE_HIT.with_label_values(&[INDEX_CONTENT_TYPE]).inc();
                    data[i] = page;
                }
                None => {
                    CACHE_MISS.with_label_values(&[INDEX_CONTENT_TYPE]).inc();
                    let base_offset = page_key.page_id * self.page_size;
                    let pruned_size = if i == last_index {
                        prune_size(page_keys.iter(), file_size, self.page_size)
                    } else {
                        self.page_size
                    };
                    cache_miss_range.push(base_offset..base_offset + pruned_size);
                    cache_miss_idx.push(i);
                }
            }
        }
        if !cache_miss_range.is_empty() {
            let pages = load(cache_miss_range).await?;
            for (i, page) in cache_miss_idx.into_iter().zip(pages.into_iter()) {
                let page_key = page_keys[i];
                data[i] = page.clone();
                self.put_page(key, page_key, page);
            }
        }
        let buffer = Buffer::from_iter(data);
        Ok(buffer
            .slice(PageKey::calculate_range(offset, size, self.page_size))
            .to_vec())
    }

    fn get_page(&self, key: K, page_key: PageKey) -> Option<Bytes> {
        self.index.get(&(key, page_key))
    }

    fn put_page(&self, key: K, page_key: PageKey, value: Bytes) {
        CACHE_BYTES
            .with_label_values(&[INDEX_CONTENT_TYPE])
            .add((self.weight_of_content)(&(key, page_key), &value).into());
        self.index.insert((key, page_key), value);
    }
}

/// Prunes the read size of the last page based on the page keys and the file size.
/// There are two cases:
/// 1. If the remaining file size is less than the page size, read to the end of the file.
/// 2. Otherwise, read a full page.
fn prune_size<'a>(
    indexes: impl Iterator<Item = &'a PageKey>,
    file_size: u64,
    page_size: u64,
) -> u64 {
    let last_page_start = indexes.last().map(|i| i.page_id * page_size).unwrap_or(0);
    page_size.min(file_size - last_page_start)
}
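
#[cfg(test)]
mod tests {
    use super::*;

    // Illustrative sanity checks for this module: the page-math test replays
    // the worked example from the doc comments, and the round-trip test
    // assumes tokio is available as a dev-dependency of this crate.

    /// Replays the example from the `calculate_range` doc comment.
    #[test]
    fn test_page_key_math() {
        let (offset, size, page_size) = (5000u64, 5000u32, 4096u64);
        assert_eq!(PageKey::calculate_page_id(offset, page_size), 1);
        assert_eq!(PageKey::calculate_page_count(offset, size, page_size), 2);
        assert_eq!(PageKey::calculate_range(offset, size, page_size), 904..5904);
        let pages: Vec<_> = PageKey::generate_page_keys(offset, size, page_size).collect();
        assert_eq!(pages, [PageKey { page_id: 1 }, PageKey { page_id: 2 }]);
    }

    /// The last page is truncated to the end of the file.
    #[test]
    fn test_prune_size() {
        let keys = [PageKey { page_id: 0 }, PageKey { page_id: 1 }];
        // The file ends 100 bytes into page 1: read only those 100 bytes.
        assert_eq!(prune_size(keys.iter(), 4096 + 100, 4096), 100);
        // The file extends past page 1: read the full page.
        assert_eq!(prune_size(keys.iter(), 3 * 4096, 4096), 4096);
    }

    /// Round-trip through `get_or_load`: every page misses on the first call,
    /// so the loader is handed one range per page; the returned bytes must
    /// match the requested slice of the "file".
    #[tokio::test]
    async fn test_get_or_load_roundtrip() {
        let cache: IndexCache<u64, ()> = IndexCache::new_with_weighter(
            1024,
            1024 * 1024,
            8, // tiny page size so a 20-byte read spans three pages
            "test",
            |_, _| 1,
            |_, page| page.len() as u32,
        );
        let file: Vec<u8> = (0u8..64).collect();
        let loaded = cache
            .get_or_load(0, file.len() as u64, 3, 20, |ranges| {
                let file = file.clone();
                async move {
                    Ok::<_, std::io::Error>(
                        ranges
                            .into_iter()
                            .map(|r| Bytes::copy_from_slice(&file[r.start as usize..r.end as usize]))
                            .collect(),
                    )
                }
            })
            .await
            .unwrap();
        assert_eq!(loaded, &file[3..23]);
    }
}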