// cmd/datanode/objbench.rs

// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Instant;

use clap::Parser;
use colored::Colorize;
use datanode::config::RegionEngineConfig;
use datanode::store;
use either::Either;
use mito2::access_layer::{
    AccessLayer, AccessLayerRef, Metrics, OperationType, SstWriteRequest, WriteType,
};
use mito2::cache::{CacheManager, CacheManagerRef};
use mito2::config::{FulltextIndexConfig, MitoConfig, Mode};
use mito2::read::Source;
use mito2::sst::file::{FileHandle, FileMeta};
use mito2::sst::file_purger::{FilePurger, FilePurgerRef};
use mito2::sst::index::intermediate::IntermediateManager;
use mito2::sst::index::puffin_manager::PuffinManagerFactory;
use mito2::sst::parquet::reader::ParquetReaderBuilder;
use mito2::sst::parquet::{PARQUET_METADATA_KEY, WriteOptions};
use mito2::worker::write_cache_from_config;
use object_store::ObjectStore;
use parquet::file::metadata::{FooterTail, KeyValue};
use regex::Regex;
use snafu::OptionExt;
use store_api::metadata::{RegionMetadata, RegionMetadataRef};
use store_api::path_utils::region_name;
use store_api::region_request::PathType;
use store_api::storage::FileId;

use crate::datanode::{StorageConfig, StorageConfigWrapper};
use crate::error;
// NOTE: the `///` doc comments below double as clap help text; do not reword
// them casually — changing them changes the CLI `--help` output.
/// Object storage benchmark command
#[derive(Debug, Parser)]
pub struct ObjbenchCommand {
    /// Path to the object-store config file (TOML). Must deserialize into object_store::config::ObjectStoreConfig.
    #[clap(long, value_name = "FILE")]
    pub config: PathBuf,

    /// Source SST file path in object-store (e.g. "region_dir/<uuid>.parquet").
    #[clap(long, value_name = "PATH")]
    pub source: String,

    /// Verbose output
    #[clap(short, long, default_value_t = false)]
    pub verbose: bool,

    /// Output file path for pprof flamegraph (enables profiling)
    #[clap(long, value_name = "FILE")]
    pub pprof_file: Option<PathBuf>,
}
68
69pub(super) fn parse_config(
70    config_path: &PathBuf,
71) -> error::Result<(
72    StorageConfig,
73    MitoConfig,
74    common_wal::config::DatanodeWalConfig,
75)> {
76    let cfg_str = std::fs::read_to_string(config_path).map_err(|e| {
77        error::IllegalConfigSnafu {
78            msg: format!("failed to read config {}: {e}", config_path.display()),
79        }
80        .build()
81    })?;
82
83    let store_cfg: StorageConfigWrapper = toml::from_str(&cfg_str).map_err(|e| {
84        error::IllegalConfigSnafu {
85            msg: format!("failed to parse config {}: {e}", config_path.display()),
86        }
87        .build()
88    })?;
89
90    let wal_config = store_cfg.wal;
91    let storage_config = store_cfg.storage;
92    let mito_engine_config = store_cfg
93        .region_engine
94        .into_iter()
95        .filter_map(|c| {
96            if let RegionEngineConfig::Mito(mito) = c {
97                Some(mito)
98            } else {
99                None
100            }
101        })
102        .next()
103        .with_context(|| error::IllegalConfigSnafu {
104            msg: format!("Engine config not found in {:?}", config_path),
105        })?;
106    Ok((storage_config, mito_engine_config, wal_config))
107}
108
impl ObjbenchCommand {
    /// Runs the benchmark: reads the source SST from object storage, rewrites
    /// it through the Mito access layer (optionally under a pprof profiler),
    /// prints timing/metrics, then deletes the files it created.
    pub async fn run(&self) -> error::Result<()> {
        if self.verbose {
            common_telemetry::init_default_ut_logging();
        }

        println!("{}", "Starting objbench with config:".cyan().bold());

        // Build object store from config
        let (store_cfg, mut mito_engine_config, _wal_config) = parse_config(&self.config)?;

        let object_store = build_object_store(&store_cfg).await?;
        println!("{} Object store initialized", "✓".green());

        // Prepare source identifiers
        let components = parse_file_dir_components(&self.source)?;
        println!(
            "{} Source path parsed: {}, components: {:?}",
            "✓".green(),
            self.source,
            components
        );

        // Load parquet metadata to extract RegionMetadata and file stats
        println!("{}", "Loading parquet metadata...".yellow());
        let file_size = object_store
            .stat(&self.source)
            .await
            .map_err(|e| {
                error::IllegalConfigSnafu {
                    msg: format!("stat failed: {e}"),
                }
                .build()
            })?
            .content_length();
        let parquet_meta = load_parquet_metadata(object_store.clone(), &self.source, file_size)
            .await
            .map_err(|e| {
                error::IllegalConfigSnafu {
                    msg: format!("read parquet metadata failed: {e}"),
                }
                .build()
            })?;

        let region_meta = extract_region_metadata(&self.source, &parquet_meta)?;
        let num_rows = parquet_meta.file_metadata().num_rows() as u64;
        let num_row_groups = parquet_meta.num_row_groups() as u64;
        // Largest uncompressed row-group size across the file; recorded in the
        // FileMeta built below.
        let max_row_group_uncompressed_size: u64 = parquet_meta
            .row_groups()
            .iter()
            .map(|rg| {
                rg.columns()
                    .iter()
                    .map(|c| c.uncompressed_size() as u64)
                    .sum::<u64>()
            })
            .max()
            .unwrap_or(0);

        println!(
            "{} Metadata loaded - rows: {}, size: {} bytes",
            "✓".green(),
            num_rows,
            file_size
        );

        // Build a FileHandle for the source file
        let file_meta = FileMeta {
            region_id: region_meta.region_id,
            file_id: components.file_id,
            time_range: Default::default(),
            level: 0,
            file_size,
            max_row_group_uncompressed_size,
            available_indexes: Default::default(),
            indexes: Default::default(),
            index_file_size: 0,
            index_version: 0,
            num_rows,
            num_row_groups,
            sequence: None,
            partition_expr: None,
            num_series: 0,
        };
        // The noop purger ensures the source SST is never deleted when this
        // temporary handle is dropped.
        let src_handle = FileHandle::new(file_meta, new_noop_file_purger());

        // Build the reader for a single file via ParquetReaderBuilder
        let table_dir = components.table_dir();
        let (src_access_layer, cache_manager) = build_access_layer_simple(
            &components,
            object_store.clone(),
            &mut mito_engine_config,
            &store_cfg.data_home,
        )
        .await?;
        let reader_build_start = Instant::now();

        let reader = ParquetReaderBuilder::new(
            table_dir,
            components.path_type,
            src_handle.clone(),
            object_store.clone(),
        )
        .expected_metadata(Some(region_meta.clone()))
        .build()
        .await
        .map_err(|e| {
            error::IllegalConfigSnafu {
                msg: format!("build reader failed: {e:?}"),
            }
            .build()
        })?;

        let reader_build_elapsed = reader_build_start.elapsed();
        let total_rows = reader.parquet_metadata().file_metadata().num_rows();
        println!("{} Reader built in {:?}", "✓".green(), reader_build_elapsed);

        // Build write request.
        // Fulltext index creation on compaction is disabled for the benchmark
        // write path.
        let fulltext_index_config = FulltextIndexConfig {
            create_on_compaction: Mode::Disable,
            ..Default::default()
        };

        let write_req = SstWriteRequest {
            op_type: OperationType::Flush,
            metadata: region_meta,
            source: Either::Left(Source::Reader(Box::new(reader))),
            cache_manager,
            storage: None,
            max_sequence: None,
            index_options: Default::default(),
            index_config: mito_engine_config.index.clone(),
            inverted_index_config: MitoConfig::default().inverted_index,
            fulltext_index_config,
            bloom_filter_index_config: MitoConfig::default().bloom_filter_index,
            #[cfg(feature = "vector_index")]
            vector_index_config: Default::default(),
        };

        // Write SST
        println!("{}", "Writing SST...".yellow());

        // Start profiling if pprof_file is specified (pprof is unix-only).
        #[cfg(unix)]
        let profiler_guard = if self.pprof_file.is_some() {
            println!("{} Starting profiling...", "⚡".yellow());
            Some(
                pprof::ProfilerGuardBuilder::default()
                    .frequency(99)
                    .blocklist(&["libc", "libgcc", "pthread", "vdso"])
                    .build()
                    .map_err(|e| {
                        error::IllegalConfigSnafu {
                            msg: format!("Failed to start profiler: {e}"),
                        }
                        .build()
                    })?,
            )
        } else {
            None
        };

        #[cfg(not(unix))]
        if self.pprof_file.is_some() {
            eprintln!(
                "{}: Profiling is not supported on this platform",
                "Warning".yellow()
            );
        }

        let write_start = Instant::now();
        let mut metrics = Metrics::new(WriteType::Flush);
        let infos = src_access_layer
            .write_sst(write_req, &WriteOptions::default(), &mut metrics)
            .await
            .map_err(|e| {
                error::IllegalConfigSnafu {
                    msg: format!("write_sst failed: {e:?}"),
                }
                .build()
            })?;

        let write_elapsed = write_start.elapsed();

        // Stop profiling and generate flamegraph if enabled.
        // Flamegraph failures are reported but do not fail the benchmark.
        #[cfg(unix)]
        if let (Some(guard), Some(pprof_file)) = (profiler_guard, &self.pprof_file) {
            println!("{} Generating flamegraph...", "🔥".yellow());
            match guard.report().build() {
                Ok(report) => {
                    let mut flamegraph_data = Vec::new();
                    if let Err(e) = report.flamegraph(&mut flamegraph_data) {
                        println!("{}: Failed to generate flamegraph: {}", "Error".red(), e);
                    } else if let Err(e) = std::fs::write(pprof_file, flamegraph_data) {
                        println!(
                            "{}: Failed to write flamegraph to {}: {}",
                            "Error".red(),
                            pprof_file.display(),
                            e
                        );
                    } else {
                        println!(
                            "{} Flamegraph saved to {}",
                            "✓".green(),
                            pprof_file.display().to_string().cyan()
                        );
                    }
                }
                Err(e) => {
                    println!("{}: Failed to generate pprof report: {}", "Error".red(), e);
                }
            }
        }
        // A single-source flush is expected to produce exactly one SST.
        assert_eq!(infos.len(), 1);
        let dst_file_id = infos[0].file_id;
        let dst_file_path = format!("{}/{}.parquet", components.region_dir(), dst_file_id);
        // A puffin index file is only produced when index data was written.
        let mut dst_index_path = None;
        if infos[0].index_metadata.file_size > 0 {
            dst_index_path = Some(format!(
                "{}/index/{}.puffin",
                components.region_dir(),
                dst_file_id
            ));
        }

        // Report results with ANSI colors
        println!("\n{} {}", "Write complete!".green().bold(), "✓".green());
        println!("  {}: {}", "Destination file".bold(), dst_file_path.cyan());
        println!("  {}: {}", "Rows".bold(), total_rows.to_string().cyan());
        println!(
            "  {}: {}",
            "File size".bold(),
            format!("{} bytes", file_size).cyan()
        );
        println!(
            "  {}: {:?}",
            "Reader build time".bold(),
            reader_build_elapsed
        );
        println!("  {}: {:?}", "Total time".bold(), write_elapsed);

        // Print metrics in a formatted way
        println!("  {}: {:?}", "Metrics".bold(), metrics,);

        // Print infos
        println!("  {}: {:?}", "Index".bold(), infos[0].index_metadata);

        // Cleanup: delete the SST (and index, if any) written by the benchmark.
        println!("\n{}", "Cleaning up...".yellow());
        object_store.delete(&dst_file_path).await.map_err(|e| {
            error::IllegalConfigSnafu {
                msg: format!("Failed to delete dest file {}: {}", dst_file_path, e),
            }
            .build()
        })?;
        println!("{} Temporary file {} deleted", "✓".green(), dst_file_path);

        if let Some(index_path) = dst_index_path {
            object_store.delete(&index_path).await.map_err(|e| {
                error::IllegalConfigSnafu {
                    msg: format!("Failed to delete dest index file {}: {}", index_path, e),
                }
                .build()
            })?;
            println!(
                "{} Temporary index file {} deleted",
                "✓".green(),
                index_path
            );
        }

        println!("\n{}", "Benchmark completed successfully!".green().bold());
        Ok(())
    }
}
384
/// Components parsed from an SST file path in object storage.
#[derive(Debug)]
struct FileDirComponents {
    /// Catalog name (first segment after the `data/` prefix).
    catalog: String,
    /// Schema (database) name.
    schema: String,
    /// Numeric table id.
    table_id: u32,
    /// Region sequence number within the table.
    region_sequence: u32,
    /// Region directory layout (bare, `/data` or `/metadata` suffix).
    path_type: PathType,
    /// File id (UUID) of the `.parquet` file.
    file_id: FileId,
}
394
395impl FileDirComponents {
396    fn table_dir(&self) -> String {
397        format!("data/{}/{}/{}", self.catalog, self.schema, self.table_id)
398    }
399
400    fn region_dir(&self) -> String {
401        let region_name = region_name(self.table_id, self.region_sequence);
402        match self.path_type {
403            PathType::Bare => {
404                format!(
405                    "data/{}/{}/{}/{}",
406                    self.catalog, self.schema, self.table_id, region_name
407                )
408            }
409            PathType::Data => {
410                format!(
411                    "data/{}/{}/{}/{}/data",
412                    self.catalog, self.schema, self.table_id, region_name
413                )
414            }
415            PathType::Metadata => {
416                format!(
417                    "data/{}/{}/{}/{}/metadata",
418                    self.catalog, self.schema, self.table_id, region_name
419                )
420            }
421        }
422    }
423}
424
425fn parse_file_dir_components(path: &str) -> error::Result<FileDirComponents> {
426    // Define the regex pattern to match all three path styles
427    let pattern =
428        r"^data/([^/]+)/([^/]+)/([^/]+)/([^/]+)_([^/]+)(?:/data|/metadata)?/(.+).parquet$";
429
430    // Compile the regex
431    let re = Regex::new(pattern).expect("Invalid regex pattern");
432
433    // Determine the path type
434    let path_type = if path.contains("/data/") {
435        PathType::Data
436    } else if path.contains("/metadata/") {
437        PathType::Metadata
438    } else {
439        PathType::Bare
440    };
441
442    // Try to match the path
443    let components = (|| {
444        let captures = re.captures(path)?;
445        if captures.len() != 7 {
446            return None;
447        }
448        let mut components = FileDirComponents {
449            catalog: "".to_string(),
450            schema: "".to_string(),
451            table_id: 0,
452            region_sequence: 0,
453            path_type,
454            file_id: FileId::default(),
455        };
456        // Extract the components
457        components.catalog = captures.get(1)?.as_str().to_string();
458        components.schema = captures.get(2)?.as_str().to_string();
459        components.table_id = captures[3].parse().ok()?;
460        components.region_sequence = captures[5].parse().ok()?;
461        let file_id_str = &captures[6];
462        components.file_id = FileId::parse_str(file_id_str).ok()?;
463        Some(components)
464    })();
465    components.context(error::IllegalConfigSnafu {
466        msg: format!("Expect valid source file path, got: {}", path),
467    })
468}
469
470fn extract_region_metadata(
471    file_path: &str,
472    meta: &parquet::file::metadata::ParquetMetaData,
473) -> error::Result<RegionMetadataRef> {
474    let kvs: Option<&Vec<KeyValue>> = meta.file_metadata().key_value_metadata();
475    let Some(kvs) = kvs else {
476        return Err(error::IllegalConfigSnafu {
477            msg: format!("{file_path}: missing parquet key_value metadata"),
478        }
479        .build());
480    };
481    let json = kvs
482        .iter()
483        .find(|kv| kv.key == PARQUET_METADATA_KEY)
484        .and_then(|kv| kv.value.as_ref())
485        .ok_or_else(|| {
486            error::IllegalConfigSnafu {
487                msg: format!("{file_path}: key {PARQUET_METADATA_KEY} not found or empty"),
488            }
489            .build()
490        })?;
491    let region: RegionMetadata = RegionMetadata::from_json(json).map_err(|e| {
492        error::IllegalConfigSnafu {
493            msg: format!("invalid region metadata json: {e}"),
494        }
495        .build()
496    })?;
497    Ok(Arc::new(region))
498}
499
/// Builds an [`ObjectStore`] from the datanode storage config, mapping any
/// store-construction failure into an `IllegalConfig` error.
pub(super) async fn build_object_store(sc: &StorageConfig) -> error::Result<ObjectStore> {
    store::new_object_store(sc.store.clone(), &sc.data_home)
        .await
        .map_err(|e| {
            error::IllegalConfigSnafu {
                msg: format!("Failed to build object store: {e:?}"),
            }
            .build()
        })
}
510
/// Builds an [`AccessLayer`] (plus its [`CacheManager`]) targeting the table
/// directory described by `components`, backed by `object_store`.
///
/// `config` is mutated: its index section is sanitized first so the puffin
/// and intermediate managers below see resolved paths/sizes.
async fn build_access_layer_simple(
    components: &FileDirComponents,
    object_store: ObjectStore,
    config: &mut MitoConfig,
    data_home: &str,
) -> error::Result<(AccessLayerRef, CacheManagerRef)> {
    // Sanitize result is deliberately discarded (best-effort);
    // NOTE(review): presumably resolves index aux paths under data_home — confirm.
    let _ = config.index.sanitize(data_home, &config.inverted_index);
    let puffin_manager = PuffinManagerFactory::new(
        &config.index.aux_path,
        config.index.staging_size.as_bytes(),
        Some(config.index.write_buffer_size.as_bytes() as _),
        config.index.staging_ttl,
    )
    .await
    .map_err(|e| {
        error::IllegalConfigSnafu {
            msg: format!("Failed to build access layer: {e:?}"),
        }
        .build()
    })?;

    // Intermediate index files are staged on the local filesystem.
    let intermediate_manager = IntermediateManager::init_fs(&config.index.aux_path)
        .await
        .map_err(|e| {
            error::IllegalConfigSnafu {
                msg: format!("Failed to build IntermediateManager: {e:?}"),
            }
            .build()
        })?
        .with_buffer_size(Some(config.index.write_buffer_size.as_bytes() as _));

    let cache_manager =
        build_cache_manager(config, puffin_manager.clone(), intermediate_manager.clone()).await?;
    let layer = AccessLayer::new(
        components.table_dir(),
        components.path_type,
        object_store,
        puffin_manager,
        intermediate_manager,
    );
    Ok((Arc::new(layer), cache_manager))
}
553
/// Builds a [`CacheManager`] with cache sizes taken from the Mito config and
/// a write cache created from the same config.
async fn build_cache_manager(
    config: &MitoConfig,
    puffin_manager: PuffinManagerFactory,
    intermediate_manager: IntermediateManager,
) -> error::Result<CacheManagerRef> {
    let write_cache = write_cache_from_config(config, puffin_manager, intermediate_manager)
        .await
        .map_err(|e| {
            error::IllegalConfigSnafu {
                msg: format!("Failed to build write cache: {e:?}"),
            }
            .build()
        })?;
    // Every cache size below mirrors the corresponding MitoConfig setting;
    // note puffin metadata reuses the index metadata cache size.
    let cache_manager = Arc::new(
        CacheManager::builder()
            .sst_meta_cache_size(config.sst_meta_cache_size.as_bytes())
            .vector_cache_size(config.vector_cache_size.as_bytes())
            .page_cache_size(config.page_cache_size.as_bytes())
            .selector_result_cache_size(config.selector_result_cache_size.as_bytes())
            .index_metadata_size(config.index.metadata_cache_size.as_bytes())
            .index_content_size(config.index.content_cache_size.as_bytes())
            .index_content_page_size(config.index.content_cache_page_size.as_bytes())
            .index_result_cache_size(config.index.result_cache_size.as_bytes())
            .puffin_metadata_size(config.index.metadata_cache_size.as_bytes())
            .write_cache(write_cache)
            .build(),
    );
    Ok(cache_manager)
}
583
/// Returns a [`FilePurger`] that ignores all purge requests.
///
/// The benchmark wraps the source SST in a temporary [`FileHandle`]; this
/// no-op purger guarantees the real file is never deleted when that handle
/// is dropped.
fn new_noop_file_purger() -> FilePurgerRef {
    #[derive(Debug)]
    struct Noop;
    impl FilePurger for Noop {
        // Intentionally empty: the source file must outlive the benchmark.
        fn remove_file(&self, _file_meta: FileMeta, _is_delete: bool, _index_outdated: bool) {}
    }
    Arc::new(Noop)
}
592
593async fn load_parquet_metadata(
594    object_store: ObjectStore,
595    path: &str,
596    file_size: u64,
597) -> Result<parquet::file::metadata::ParquetMetaData, Box<dyn std::error::Error + Send + Sync>> {
598    use parquet::file::FOOTER_SIZE;
599    use parquet::file::metadata::ParquetMetaDataReader;
600    let actual_size = if file_size == 0 {
601        object_store.stat(path).await?.content_length()
602    } else {
603        file_size
604    };
605    if actual_size < FOOTER_SIZE as u64 {
606        return Err("file too small".into());
607    }
608    let prefetch: u64 = 64 * 1024;
609    let start = actual_size.saturating_sub(prefetch);
610    let buffer = object_store
611        .read_with(path)
612        .range(start..actual_size)
613        .await?
614        .to_vec();
615    let buffer_len = buffer.len();
616    let mut footer = [0; 8];
617    footer.copy_from_slice(&buffer[buffer_len - FOOTER_SIZE..]);
618    let footer = FooterTail::try_new(&footer)?;
619    let metadata_len = footer.metadata_length() as u64;
620    if actual_size - (FOOTER_SIZE as u64) < metadata_len {
621        return Err("invalid footer/metadata length".into());
622    }
623    if (metadata_len as usize) <= buffer_len - FOOTER_SIZE {
624        let metadata_start = buffer_len - metadata_len as usize - FOOTER_SIZE;
625        let meta = ParquetMetaDataReader::decode_metadata(
626            &buffer[metadata_start..buffer_len - FOOTER_SIZE],
627        )?;
628        Ok(meta)
629    } else {
630        let metadata_start = actual_size - metadata_len - FOOTER_SIZE as u64;
631        let data = object_store
632            .read_with(path)
633            .range(metadata_start..(actual_size - FOOTER_SIZE as u64))
634            .await?
635            .to_vec();
636        let meta = ParquetMetaDataReader::decode_metadata(&data)?;
637        Ok(meta)
638    }
639}
640
#[cfg(test)]
mod tests {
    use std::path::PathBuf;
    use std::str::FromStr;

    use common_base::readable_size::ReadableSize;
    use store_api::region_request::PathType;

    use crate::datanode::objbench::{parse_config, parse_file_dir_components};

    /// Verifies all three SST path layouts (metadata, data, bare) parse into
    /// the expected components.
    #[test]
    fn test_parse_dir() {
        // `/metadata` layout.
        let meta_path = "data/greptime/public/1024/1024_0000000000/metadata/00020380-009c-426d-953e-b4e34c15af34.parquet";
        let c = parse_file_dir_components(meta_path).unwrap();
        assert_eq!(
            c.file_id.to_string(),
            "00020380-009c-426d-953e-b4e34c15af34"
        );
        assert_eq!(c.catalog, "greptime");
        assert_eq!(c.schema, "public");
        assert_eq!(c.table_id, 1024);
        assert_eq!(c.region_sequence, 0);
        assert_eq!(c.path_type, PathType::Metadata);

        // `/data` layout.
        let c = parse_file_dir_components(
            "data/greptime/public/1024/1024_0000000000/data/00020380-009c-426d-953e-b4e34c15af34.parquet",
        ).unwrap();
        assert_eq!(
            c.file_id.to_string(),
            "00020380-009c-426d-953e-b4e34c15af34"
        );
        assert_eq!(c.catalog, "greptime");
        assert_eq!(c.schema, "public");
        assert_eq!(c.table_id, 1024);
        assert_eq!(c.region_sequence, 0);
        assert_eq!(c.path_type, PathType::Data);

        // Bare layout (no suffix directory).
        let c = parse_file_dir_components(
            "data/greptime/public/1024/1024_0000000000/00020380-009c-426d-953e-b4e34c15af34.parquet",
        ).unwrap();
        assert_eq!(
            c.file_id.to_string(),
            "00020380-009c-426d-953e-b4e34c15af34"
        );
        assert_eq!(c.catalog, "greptime");
        assert_eq!(c.schema, "public");
        assert_eq!(c.table_id, 1024);
        assert_eq!(c.region_sequence, 0);
        assert_eq!(c.path_type, PathType::Bare);
    }

    /// Smoke-tests parse_config against the repo's example datanode config.
    #[test]
    fn test_parse_config() {
        let path = "../../config/datanode.example.toml";
        let (storage, engine, _wal) = parse_config(&PathBuf::from_str(path).unwrap()).unwrap();
        assert_eq!(storage.data_home, "./greptimedb_data");
        assert_eq!(engine.index.staging_size, ReadableSize::gb(2));
    }
}