use core::ops::Range;
use std::sync::Arc;

use api::v1::index::InvertedIndexMetas;
use async_trait::async_trait;
use bytes::Bytes;
use index::inverted_index::error::Result;
use index::inverted_index::format::reader::InvertedIndexReader;
use prost::Message;

use crate::cache::index::{IndexCache, PageKey, INDEX_METADATA_TYPE};
use crate::metrics::{CACHE_HIT, CACHE_MISS};
use crate::sst::file::FileId;

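/// Type tag of the inverted index, passed to the underlying [`IndexCache`].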
const INDEX_TYPE_INVERTED_INDEX: &str = "inverted_index";

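/// Cache for inverted index metadata and content pages, keyed by SST file id.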
pub type InvertedIndexCache = IndexCache<FileId, InvertedIndexMetas>;
pub type InvertedIndexCacheRef = Arc<InvertedIndexCache>;

impl InvertedIndexCache {
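    /// Creates a new inverted index cache with the given capacities for the
    /// metadata and content caches, and the page size used to split content.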
    pub fn new(index_metadata_cap: u64, index_content_cap: u64, page_size: u64) -> Self {
        Self::new_with_weighter(
            index_metadata_cap,
            index_content_cap,
            page_size,
            INDEX_TYPE_INVERTED_INDEX,
            inverted_index_metadata_weight,
            inverted_index_content_weight,
        )
    }
}

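/// Weighs a cached metadata entry by its key size plus the Protobuf-encoded
/// length of the metadata.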
fn inverted_index_metadata_weight(k: &FileId, v: &Arc<InvertedIndexMetas>) -> u32 {
    (k.as_bytes().len() + v.encoded_len()) as u32
}

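/// Weighs a cached content page by its key size plus the page length.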
fn inverted_index_content_weight((k, _): &(FileId, PageKey), v: &Bytes) -> u32 {
    (k.as_bytes().len() + v.len()) as u32
}

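/// Blob reader that serves inverted index reads from [`InvertedIndexCache`],
/// falling back to the inner reader on a cache miss.
///
/// A minimal usage sketch (`blob_size` and `reader` are assumed to be at hand;
/// any [`InvertedIndexReader`] implementation works as the inner reader):
///
/// ```ignore
/// let cache = Arc::new(InvertedIndexCache::new(8192, 8192, 4096));
/// let cached = CachedInvertedIndexBlobReader::new(FileId::random(), blob_size, reader, cache);
/// let metas = cached.metadata().await?;
/// ```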
pub struct CachedInvertedIndexBlobReader<R> {
    file_id: FileId,
    blob_size: u64,
    inner: R,
    cache: InvertedIndexCacheRef,
}

impl<R> CachedInvertedIndexBlobReader<R> {
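    /// Creates a new cached blob reader for the given file and blob size.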
    pub fn new(file_id: FileId, blob_size: u64, inner: R, cache: InvertedIndexCacheRef) -> Self {
        Self {
            file_id,
            blob_size,
            inner,
            cache,
        }
    }
}

#[async_trait]
impl<R: InvertedIndexReader> InvertedIndexReader for CachedInvertedIndexBlobReader<R> {
    async fn range_read(&self, offset: u64, size: u32) -> Result<Vec<u8>> {
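        // Serve the read from cached pages; missing pages are loaded from the
        // inner reader and inserted into the cache.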
        let inner = &self.inner;
        self.cache
            .get_or_load(
                self.file_id,
                self.blob_size,
                offset,
                size,
                move |ranges| async move { inner.read_vec(&ranges).await },
            )
            .await
    }

    async fn read_vec(&self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
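        // Fetch each requested range through the page cache, one range at a time.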
        let mut pages = Vec::with_capacity(ranges.len());
        for range in ranges {
            let inner = &self.inner;
            let page = self
                .cache
                .get_or_load(
                    self.file_id,
                    self.blob_size,
                    range.start,
                    (range.end - range.start) as u32,
                    move |ranges| async move { inner.read_vec(&ranges).await },
                )
                .await?;

            pages.push(Bytes::from(page));
        }

        Ok(pages)
    }

    async fn metadata(&self) -> Result<Arc<InvertedIndexMetas>> {
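        // Fast path: return the cached metadata; otherwise load it from the
        // inner reader and populate the cache.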
        if let Some(cached) = self.cache.get_metadata(self.file_id) {
            CACHE_HIT.with_label_values(&[INDEX_METADATA_TYPE]).inc();
            Ok(cached)
        } else {
            let meta = self.inner.metadata().await?;
            self.cache.put_metadata(self.file_id, meta.clone());
            CACHE_MISS.with_label_values(&[INDEX_METADATA_TYPE]).inc();
            Ok(meta)
        }
    }
}

#[cfg(test)]
mod test {
    use std::num::NonZeroUsize;

    use futures::stream;
    use index::bitmap::{Bitmap, BitmapType};
    use index::inverted_index::format::reader::{InvertedIndexBlobReader, InvertedIndexReader};
    use index::inverted_index::format::writer::{InvertedIndexBlobWriter, InvertedIndexWriter};
    use index::Bytes;
    use prometheus::register_int_counter_vec;
    use rand::{Rng, RngCore};

    use super::*;
    use crate::sst::index::store::InstrumentedStore;
    use crate::test_util::TestEnv;

    const FUZZ_REPEAT_TIMES: usize = 100;

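    /// Fuzzes the page-splitting math: a read assembled from `PageKey` pages
    /// must match the same byte range taken directly from the flat buffer.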
    #[test]
    fn fuzz_index_calculation() {
        let mut rng = rand::rng();
        let mut data = vec![0u8; 1024 * 1024];
        rng.fill_bytes(&mut data);

        for _ in 0..FUZZ_REPEAT_TIMES {
            let offset = rng.random_range(0..data.len() as u64);
            let size = rng.random_range(0..data.len() as u32 - offset as u32);
            let page_size: usize = rng.random_range(1..1024);

            let indexes =
                PageKey::generate_page_keys(offset, size, page_size as u64).collect::<Vec<_>>();
            let page_num = indexes.len();
            let mut read = Vec::with_capacity(size as usize);
            for key in indexes.into_iter() {
                let start = key.page_id as usize * page_size;
                let page = if start + page_size < data.len() {
                    &data[start..start + page_size]
                } else {
                    &data[start..]
                };
                read.extend_from_slice(page);
            }
            let expected_range = offset as usize..(offset + size as u64) as usize;
            let read = read[PageKey::calculate_range(offset, size, page_size as u64)].to_vec();
            if read != data.get(expected_range).unwrap() {
                panic!(
                    "fuzz_read_index failed, offset: {}, size: {}, page_size: {}\nread len: {}, expected len: {}\nrange: {:?}, page num: {}",
                    offset, size, page_size, read.len(), size as usize,
                    PageKey::calculate_range(offset, size, page_size as u64),
                    page_num
                );
            }
        }
    }

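    /// Unpacks an FST value into its `[offset, size]` halves.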
    fn unpack(fst_value: u64) -> [u32; 2] {
        bytemuck::cast::<u64, [u32; 2]>(fst_value)
    }

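    /// Builds a small inverted index blob with two tags ("tag0" and "tag1"),
    /// three terms each, over eight rows with one row per segment.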
    async fn create_inverted_index_blob() -> Vec<u8> {
        let mut blob = Vec::new();
        let mut writer = InvertedIndexBlobWriter::new(&mut blob);
        writer
            .add_index(
                "tag0".to_string(),
                Bitmap::from_lsb0_bytes(&[0b0000_0001, 0b0000_0000], BitmapType::Roaring),
                Box::new(stream::iter(vec![
                    Ok((
                        Bytes::from("a"),
                        Bitmap::from_lsb0_bytes(&[0b0000_0001], BitmapType::Roaring),
                    )),
                    Ok((
                        Bytes::from("b"),
                        Bitmap::from_lsb0_bytes(&[0b0010_0000], BitmapType::Roaring),
                    )),
                    Ok((
                        Bytes::from("c"),
                        Bitmap::from_lsb0_bytes(&[0b0000_0001], BitmapType::Roaring),
                    )),
                ])),
                index::bitmap::BitmapType::Roaring,
            )
            .await
            .unwrap();
        writer
            .add_index(
                "tag1".to_string(),
                Bitmap::from_lsb0_bytes(&[0b0000_0001, 0b0000_0000], BitmapType::Roaring),
                Box::new(stream::iter(vec![
                    Ok((
                        Bytes::from("x"),
                        Bitmap::from_lsb0_bytes(&[0b0000_0001], BitmapType::Roaring),
                    )),
                    Ok((
                        Bytes::from("y"),
                        Bitmap::from_lsb0_bytes(&[0b0010_0000], BitmapType::Roaring),
                    )),
                    Ok((
                        Bytes::from("z"),
                        Bitmap::from_lsb0_bytes(&[0b0000_0001], BitmapType::Roaring),
                    )),
                ])),
                index::bitmap::BitmapType::Roaring,
            )
            .await
            .unwrap();
        writer
            .finish(8, NonZeroUsize::new(1).unwrap())
            .await
            .unwrap();

        blob
    }

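    /// End-to-end check of the cached reader: verifies metadata, FST, and
    /// bitmap reads for both tags, then fuzzes `range_read` against the cache.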
    #[tokio::test]
    async fn test_inverted_index_cache() {
        let blob = create_inverted_index_blob().await;

        let mut env = TestEnv::new().await;
        let file_size = blob.len() as u64;
        let store = env.init_object_store_manager();
        let temp_path = "data";
        store.write(temp_path, blob).await.unwrap();
        let store = InstrumentedStore::new(store);
        let metric =
            register_int_counter_vec!("test_bytes", "a counter for test", &["test"]).unwrap();
        let counter = metric.with_label_values(&["test"]);
        let range_reader = store
            .range_reader("data", &counter, &counter)
            .await
            .unwrap();

        let reader = InvertedIndexBlobReader::new(range_reader);
        let cached_reader = CachedInvertedIndexBlobReader::new(
            FileId::random(),
            file_size,
            reader,
            Arc::new(InvertedIndexCache::new(8192, 8192, 50)),
        );
        let metadata = cached_reader.metadata().await.unwrap();
        assert_eq!(metadata.total_row_count, 8);
        assert_eq!(metadata.segment_row_count, 1);
        assert_eq!(metadata.metas.len(), 2);
        let tag0 = metadata.metas.get("tag0").unwrap();
        let stats0 = tag0.stats.as_ref().unwrap();
        assert_eq!(stats0.distinct_count, 3);
        assert_eq!(stats0.null_count, 1);
        assert_eq!(stats0.min_value, Bytes::from("a"));
        assert_eq!(stats0.max_value, Bytes::from("c"));
        let fst0 = cached_reader
            .fst(
                tag0.base_offset + tag0.relative_fst_offset as u64,
                tag0.fst_size,
            )
            .await
            .unwrap();
        assert_eq!(fst0.len(), 3);
        let [offset, size] = unpack(fst0.get(b"a").unwrap());
        let bitmap = cached_reader
            .bitmap(tag0.base_offset + offset as u64, size, BitmapType::Roaring)
            .await
            .unwrap();
        assert_eq!(
            bitmap,
            Bitmap::from_lsb0_bytes(&[0b0000_0001], BitmapType::Roaring)
        );
        let [offset, size] = unpack(fst0.get(b"b").unwrap());
        let bitmap = cached_reader
            .bitmap(tag0.base_offset + offset as u64, size, BitmapType::Roaring)
            .await
            .unwrap();
        assert_eq!(
            bitmap,
            Bitmap::from_lsb0_bytes(&[0b0010_0000], BitmapType::Roaring)
        );
        let [offset, size] = unpack(fst0.get(b"c").unwrap());
        let bitmap = cached_reader
            .bitmap(tag0.base_offset + offset as u64, size, BitmapType::Roaring)
            .await
            .unwrap();
        assert_eq!(
            bitmap,
            Bitmap::from_lsb0_bytes(&[0b0000_0001], BitmapType::Roaring)
        );

        let tag1 = metadata.metas.get("tag1").unwrap();
        let stats1 = tag1.stats.as_ref().unwrap();
        assert_eq!(stats1.distinct_count, 3);
        assert_eq!(stats1.null_count, 1);
        assert_eq!(stats1.min_value, Bytes::from("x"));
        assert_eq!(stats1.max_value, Bytes::from("z"));
        let fst1 = cached_reader
            .fst(
                tag1.base_offset + tag1.relative_fst_offset as u64,
                tag1.fst_size,
            )
            .await
            .unwrap();
        assert_eq!(fst1.len(), 3);
        let [offset, size] = unpack(fst1.get(b"x").unwrap());
        let bitmap = cached_reader
            .bitmap(tag1.base_offset + offset as u64, size, BitmapType::Roaring)
            .await
            .unwrap();
        assert_eq!(
            bitmap,
            Bitmap::from_lsb0_bytes(&[0b0000_0001], BitmapType::Roaring)
        );
        let [offset, size] = unpack(fst1.get(b"y").unwrap());
        let bitmap = cached_reader
            .bitmap(tag1.base_offset + offset as u64, size, BitmapType::Roaring)
            .await
            .unwrap();
        assert_eq!(
            bitmap,
            Bitmap::from_lsb0_bytes(&[0b0010_0000], BitmapType::Roaring)
        );
        let [offset, size] = unpack(fst1.get(b"z").unwrap());
        let bitmap = cached_reader
            .bitmap(tag1.base_offset + offset as u64, size, BitmapType::Roaring)
            .await
            .unwrap();
        assert_eq!(
            bitmap,
            Bitmap::from_lsb0_bytes(&[0b0000_0001], BitmapType::Roaring)
        );

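        // Fuzz: reads served through the page cache must match range_read results.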
        let mut rng = rand::rng();
        for _ in 0..FUZZ_REPEAT_TIMES {
            let offset = rng.random_range(0..file_size);
            let size = rng.random_range(0..file_size as u32 - offset as u32);
            let expected = cached_reader.range_read(offset, size).await.unwrap();
            let inner = &cached_reader.inner;
            let read = cached_reader
                .cache
                .get_or_load(
                    cached_reader.file_id,
                    file_size,
                    offset,
                    size,
                    move |ranges| async move { inner.read_vec(&ranges).await },
                )
                .await
                .unwrap();
            assert_eq!(read, expected);
        }
    }
}