
mito2/sst/parquet/flat_format.rs

// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Format to store in parquet.
//!
//! It can store both the encoded primary key and raw key columns.
//!
//! We store three additional internal columns at the end:
//! - `__primary_key`, the encoded primary key of the row (tags). Type: dictionary(uint32, binary)
//! - `__sequence`, the sequence number of a row. Type: uint64
//! - `__op_type`, the op type of the row. Type: uint8
//!
//! The format is
//! ```text
//! primary key columns, field columns, time index, encoded primary key, __sequence, __op_type.
//! ```
//!
//! It stores field columns in the same order as [RegionMetadata::field_columns()](store_api::metadata::RegionMetadata::field_columns())
//! and stores primary key columns in the same order as [RegionMetadata::primary_key].
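//!
//! For illustration (a hypothetical region, not taken from a real schema): with tags
//! `host` and `region`, field `cpu` and time index `ts`, a flat batch is laid out as:
//! ```text
//! host, region, cpu, ts, __primary_key, __sequence, __op_type
//! ```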

use std::borrow::Borrow;
use std::collections::HashMap;
use std::sync::Arc;

use api::v1::SemanticType;
use datatypes::arrow::array::{
    Array, ArrayRef, BinaryArray, DictionaryArray, UInt32Array, UInt64Array,
};
use datatypes::arrow::compute::kernels::take::take;
use datatypes::arrow::datatypes::{Schema, SchemaRef};
use datatypes::arrow::record_batch::RecordBatch;
use datatypes::prelude::{ConcreteDataType, DataType};
use mito_codec::row_converter::{CompositeValues, PrimaryKeyCodec, build_primary_key_codec};
use parquet::file::metadata::RowGroupMetaData;
use snafu::{OptionExt, ResultExt, ensure};
use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::{RegionMetadata, RegionMetadataRef};
use store_api::storage::{ColumnId, SequenceNumber};

use crate::error::{
    ComputeArrowSnafu, DecodeSnafu, InvalidParquetSnafu, InvalidRecordBatchSnafu,
    NewRecordBatchSnafu, Result,
};
use crate::read::read_columns::ReadColumns;
use crate::sst::parquet::format::{
    FIXED_POS_COLUMN_NUM, FormatProjection, INTERNAL_COLUMN_NUM, PrimaryKeyArray,
    PrimaryKeyReadFormat, StatValues, column_null_counts, column_values,
};
use crate::sst::parquet::read_columns::ParquetReadColumns;
use crate::sst::{
    FlatSchemaOptions, flat_sst_arrow_schema_column_num, tag_maybe_to_dictionary_field,
    to_flat_sst_arrow_schema, with_field_id,
};

/// Helper for writing the SST format.
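///
/// A usage sketch (hedged; the variable names are illustrative, not from the source):
/// ```ignore
/// let format = FlatWriteFormat::new(metadata, &FlatSchemaOptions::default())
///     .with_override_sequence(Some(100));
/// let arrow_batch = format.convert_batch(&record_batch)?;
/// ```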
pub(crate) struct FlatWriteFormat {
    /// SST file schema.
    arrow_schema: SchemaRef,
    override_sequence: Option<SequenceNumber>,
}

impl FlatWriteFormat {
    /// Creates a new helper.
    pub(crate) fn new(metadata: RegionMetadataRef, options: &FlatSchemaOptions) -> FlatWriteFormat {
        let arrow_schema = to_flat_sst_arrow_schema(&metadata, options);
        FlatWriteFormat {
            arrow_schema,
            override_sequence: None,
        }
    }

    /// Sets the sequence number to override.
    pub(crate) fn with_override_sequence(
        mut self,
        override_sequence: Option<SequenceNumber>,
    ) -> Self {
        self.override_sequence = override_sequence;
        self
    }

    /// Gets the arrow schema to store in parquet.
    #[cfg(test)]
    pub(crate) fn arrow_schema(&self) -> &SchemaRef {
        &self.arrow_schema
    }

    /// Converts `batch` to an arrow record batch to store in parquet.
    pub(crate) fn convert_batch(&self, batch: &RecordBatch) -> Result<RecordBatch> {
        debug_assert_eq!(batch.num_columns(), self.arrow_schema.fields().len());

        let Some(override_sequence) = self.override_sequence else {
            return Ok(batch.clone());
        };

        let mut columns = batch.columns().to_vec();
        let sequence_array = Arc::new(UInt64Array::from(vec![override_sequence; batch.num_rows()]));
        columns[sequence_column_index(batch.num_columns())] = sequence_array;

        RecordBatch::try_new(batch.schema(), columns).context(NewRecordBatchSnafu)
    }
}

/// Returns the position of the sequence column.
pub(crate) fn sequence_column_index(num_columns: usize) -> usize {
    num_columns - 2
}

/// Returns the position of the time index column.
pub(crate) fn time_index_column_index(num_columns: usize) -> usize {
    num_columns - 4
}

/// Returns the position of the primary key column.
pub(crate) fn primary_key_column_index(num_columns: usize) -> usize {
    num_columns - 3
}

/// Returns the position of the op type column.
pub(crate) fn op_type_column_index(num_columns: usize) -> usize {
    num_columns - 1
}

/// Returns the start index of field columns in a flat batch.
///
/// `num_columns` is the total number of columns in the flat batch schema,
/// including tag columns (if present), field columns, and fixed position columns
/// (time index, primary key, sequence, op type).
///
/// For Dense encoding (raw PK columns included): field_column_start = primary_key.len()
/// For Sparse encoding (no raw PK columns): field_column_start = 0
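///
/// A worked sketch: a dense-layout batch with one tag and two fields is
/// `tag_0, field_0, field_1, ts, __primary_key, __sequence, __op_type`
/// (7 columns), so this returns 7 - 4 - 2 = 1.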
pub(crate) fn field_column_start(metadata: &RegionMetadata, num_columns: usize) -> usize {
    // Calculates field column start: total columns - fixed columns - field columns
    // Field column count = total metadata columns - time index column - primary key columns
    let field_column_count = metadata.column_metadatas.len() - 1 - metadata.primary_key.len();
    num_columns - FIXED_POS_COLUMN_NUM - field_column_count
}

// TODO(yingwen): Add an option to skip reading internal columns if the region is
// append only and doesn't use sparse encoding (We need to check the table id under
// sparse encoding).
/// Helper for reading the flat SST format with projection.
///
/// It only supports the flat format that additionally stores the encoded primary key column.
pub struct FlatReadFormat {
    /// Sequence number to override the sequence read from the SST.
    override_sequence: Option<SequenceNumber>,
    /// Parquet format adapter.
    parquet_adapter: ParquetAdapter,
}

impl FlatReadFormat {
    /// Creates a helper with existing `metadata` and `read_cols` to read.
    ///
    /// If `skip_auto_convert` is true, skips auto conversion of the format when the encoding is sparse.
    pub fn new(
        metadata: RegionMetadataRef,
        read_cols: ReadColumns,
        num_columns: Option<usize>,
        file_path: &str,
        skip_auto_convert: bool,
    ) -> Result<FlatReadFormat> {
        let is_legacy = match num_columns {
            Some(num) => Self::is_legacy_format(&metadata, num, file_path)?,
            None => metadata.primary_key_encoding == PrimaryKeyEncoding::Sparse,
        };

        let parquet_adapter = if is_legacy {
            // Safety: is_legacy_format() ensures primary_key is not empty.
            if metadata.primary_key_encoding == PrimaryKeyEncoding::Sparse {
                // Only skip auto convert when the primary key encoding is sparse.
                ParquetAdapter::PrimaryKeyToFlat(ParquetPrimaryKeyToFlat::new(
                    metadata,
                    read_cols,
                    skip_auto_convert,
                ))
            } else {
                ParquetAdapter::PrimaryKeyToFlat(ParquetPrimaryKeyToFlat::new(
                    metadata, read_cols, false,
                ))
            }
        } else {
            ParquetAdapter::Flat(ParquetFlat::new(metadata, read_cols))
        };

        Ok(FlatReadFormat {
            override_sequence: None,
            parquet_adapter,
        })
    }

    /// Sets the sequence number to override.
    pub(crate) fn set_override_sequence(&mut self, sequence: Option<SequenceNumber>) {
        self.override_sequence = sequence;
    }

    /// Index of a column in the projected batch by its column id.
    pub fn projected_index_by_id(&self, column_id: ColumnId) -> Option<usize> {
        self.format_projection()
            .column_id_to_projected_index
            .get(&column_id)
            .copied()
    }

    /// Returns min values of a specific column in row groups.
    pub fn min_values(
        &self,
        row_groups: &[impl Borrow<RowGroupMetaData>],
        column_id: ColumnId,
    ) -> StatValues {
        match &self.parquet_adapter {
            ParquetAdapter::Flat(p) => p.min_values(row_groups, column_id),
            ParquetAdapter::PrimaryKeyToFlat(p) => p.format.min_values(row_groups, column_id),
        }
    }

    /// Returns max values of a specific column in row groups.
    pub fn max_values(
        &self,
        row_groups: &[impl Borrow<RowGroupMetaData>],
        column_id: ColumnId,
    ) -> StatValues {
        match &self.parquet_adapter {
            ParquetAdapter::Flat(p) => p.max_values(row_groups, column_id),
            ParquetAdapter::PrimaryKeyToFlat(p) => p.format.max_values(row_groups, column_id),
        }
    }

    /// Returns null counts of a specific column in row groups.
    pub fn null_counts(
        &self,
        row_groups: &[impl Borrow<RowGroupMetaData>],
        column_id: ColumnId,
    ) -> StatValues {
        match &self.parquet_adapter {
            ParquetAdapter::Flat(p) => p.null_counts(row_groups, column_id),
            ParquetAdapter::PrimaryKeyToFlat(p) => p.format.null_counts(row_groups, column_id),
        }
    }

    /// Gets the arrow schema of the SST file.
    ///
    /// This schema is computed from the region metadata but should be the same
    /// as the arrow schema decoded from the file metadata.
    pub(crate) fn arrow_schema(&self) -> &SchemaRef {
        match &self.parquet_adapter {
            ParquetAdapter::Flat(p) => &p.arrow_schema,
            ParquetAdapter::PrimaryKeyToFlat(p) => p.format.arrow_schema(),
        }
    }

    /// Gets the projected output schema produced by parquet reading.
    pub(crate) fn output_arrow_schema(&self) -> Result<SchemaRef> {
        let projection = self.parquet_read_columns().root_indices();
        let schema = self
            .arrow_schema()
            .project(projection)
            .context(ComputeArrowSnafu)?;
        Ok(Arc::new(schema))
    }

    /// Gets the metadata of the SST.
    pub(crate) fn metadata(&self) -> &RegionMetadataRef {
        match &self.parquet_adapter {
            ParquetAdapter::Flat(p) => &p.metadata,
            ParquetAdapter::PrimaryKeyToFlat(p) => p.format.metadata(),
        }
    }

    /// Gets the sorted read columns to read from the SST file.
    pub(crate) fn parquet_read_columns(&self) -> &ParquetReadColumns {
        match &self.parquet_adapter {
            ParquetAdapter::Flat(p) => &p.format_projection.parquet_read_cols,
            ParquetAdapter::PrimaryKeyToFlat(p) => p.format.parquet_read_columns(),
        }
    }

    /// Gets the projection in the flat format.
    ///
    /// When `skip_auto_convert` is enabled (primary-key format read), this returns the
    /// primary-key format projection so filter/prune can resolve projected indices.
    pub(crate) fn format_projection(&self) -> &FormatProjection {
        match &self.parquet_adapter {
            ParquetAdapter::Flat(p) => &p.format_projection,
            ParquetAdapter::PrimaryKeyToFlat(p) => &p.format_projection,
        }
    }

    /// Returns `true` if raw batches from parquet use the flat layout and
    /// store primary key columns as raw columns.
    /// Returns `false` for the legacy primary-key-to-flat conversion path.
    pub(crate) fn batch_has_raw_pk_columns(&self) -> bool {
        matches!(&self.parquet_adapter, ParquetAdapter::Flat(_))
    }

    /// Creates a sequence array to override.
    pub(crate) fn new_override_sequence_array(&self, length: usize) -> Option<ArrayRef> {
        self.override_sequence
            .map(|seq| Arc::new(UInt64Array::from_value(seq, length)) as ArrayRef)
    }

    /// Converts a record batch, applying flat format conversion and the override sequence array.
    ///
    /// Returns a new RecordBatch with flat format conversion applied first (if enabled),
    /// then the sequence column replaced by the override sequence array.
    pub(crate) fn convert_batch(
        &self,
        record_batch: RecordBatch,
        override_sequence_array: Option<&ArrayRef>,
    ) -> Result<RecordBatch> {
        // First, apply flat format conversion.
        let batch = match &self.parquet_adapter {
            ParquetAdapter::Flat(_) => record_batch,
            ParquetAdapter::PrimaryKeyToFlat(p) => p.convert_batch(record_batch)?,
        };

        // Then apply the sequence override if provided.
        let Some(override_array) = override_sequence_array else {
            return Ok(batch);
        };

        let mut columns = batch.columns().to_vec();
        let sequence_column_idx = sequence_column_index(batch.num_columns());

        // Use the provided override sequence array, slicing if necessary to match the batch length.
        let sequence_array = if override_array.len() > batch.num_rows() {
            override_array.slice(0, batch.num_rows())
        } else {
            override_array.clone()
        };

        columns[sequence_column_idx] = sequence_array;

        RecordBatch::try_new(batch.schema(), columns).context(NewRecordBatchSnafu)
    }

    /// Checks whether the batch from the parquet file needs to be converted to match the flat format.
    ///
    /// * `metadata` is the region metadata (always assumes the flat format).
    /// * `num_columns` is the number of columns in the parquet file.
    /// * `file_path` is the path to the parquet file, used in error messages.
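    ///
    /// A worked sketch: a region with 2 primary key columns out of 4 total columns
    /// expects 4 + 3 = 7 columns in a flat file. A file with 7 columns needs no
    /// conversion; a file with 5 columns is legacy because 7 - 5 == 2 matches the
    /// primary key count; any other count is an `InvalidParquet` error.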
    pub(crate) fn is_legacy_format(
        metadata: &RegionMetadata,
        num_columns: usize,
        file_path: &str,
    ) -> Result<bool> {
        if metadata.primary_key.is_empty() {
            return Ok(false);
        }

        // For the flat format, compute the expected column number:
        // all columns + internal columns (pk, sequence, op_type)
        let expected_columns = metadata.column_metadatas.len() + INTERNAL_COLUMN_NUM;

        if expected_columns == num_columns {
            // Same number of columns, no conversion needed.
            Ok(false)
        } else {
            ensure!(
                expected_columns >= num_columns,
                InvalidParquetSnafu {
                    file: file_path,
                    reason: format!(
                        "Expected columns {} should be >= actual columns {}",
                        expected_columns, num_columns
                    )
                }
            );

            // Different number of columns, check if the difference matches the primary key count.
            let column_diff = expected_columns - num_columns;

            ensure!(
                column_diff == metadata.primary_key.len(),
                InvalidParquetSnafu {
                    file: file_path,
                    reason: format!(
                        "Column number difference {} does not match primary key count {}",
                        column_diff,
                        metadata.primary_key.len()
                    )
                }
            );

            Ok(true)
        }
    }
}

/// Wraps the parquet helper for different formats.
enum ParquetAdapter {
    Flat(ParquetFlat),
    PrimaryKeyToFlat(ParquetPrimaryKeyToFlat),
}

/// Helper that reads the parquet from the primary key format into the flat format.
struct ParquetPrimaryKeyToFlat {
    /// The primary key format to read the parquet.
    format: PrimaryKeyReadFormat,
    /// Format converter for handling flat format conversion.
    convert_format: Option<FlatConvertFormat>,
    /// Projection computed for the flat format.
    format_projection: FormatProjection,
}

impl ParquetPrimaryKeyToFlat {
    /// Creates a helper with existing `metadata` and `read_cols` to read.
    fn new(
        metadata: RegionMetadataRef,
        read_cols: ReadColumns,
        skip_auto_convert: bool,
    ) -> ParquetPrimaryKeyToFlat {
        assert!(if skip_auto_convert {
            metadata.primary_key_encoding == PrimaryKeyEncoding::Sparse
        } else {
            true
        });

        // Creates a map to look up the index based on the new format.
        let id_to_index = sst_column_id_indices(&metadata);
        let sst_column_num =
            flat_sst_arrow_schema_column_num(&metadata, &FlatSchemaOptions::default());

        let codec = build_primary_key_codec(&metadata);
        let format = PrimaryKeyReadFormat::new(metadata.clone(), read_cols.clone());
        let (convert_format, format_projection) = if skip_auto_convert {
            (
                None,
                FormatProjection {
                    parquet_read_cols: format.parquet_read_columns().clone(),
                    column_id_to_projected_index: format.field_id_to_projected_index().clone(),
                },
            )
        } else {
            // Computes the format projection for the new format.
            let format_projection = FormatProjection::compute_format_projection(
                &id_to_index,
                sst_column_num,
                read_cols.clone(),
            );
            (
                FlatConvertFormat::new(Arc::clone(&metadata), &format_projection, codec),
                format_projection,
            )
        };

        Self {
            format,
            convert_format,
            format_projection,
        }
    }

    fn convert_batch(&self, record_batch: RecordBatch) -> Result<RecordBatch> {
        if let Some(convert_format) = &self.convert_format {
            convert_format.convert(record_batch)
        } else {
            Ok(record_batch)
        }
    }
}

/// Helper that reads the parquet in flat format directly.
struct ParquetFlat {
    /// The metadata stored in the SST.
    metadata: RegionMetadataRef,
    /// SST file schema.
    arrow_schema: SchemaRef,
    /// Projection computed for the flat format.
    format_projection: FormatProjection,
    /// Column id to index in the SST.
    column_id_to_sst_index: HashMap<ColumnId, usize>,
}

impl ParquetFlat {
    /// Creates a helper with existing `metadata` and `read_cols` to read.
    fn new(metadata: RegionMetadataRef, read_cols: ReadColumns) -> ParquetFlat {
        // Creates a map to look up the index.
        let id_to_index = sst_column_id_indices(&metadata);
        let arrow_schema = to_flat_sst_arrow_schema(&metadata, &FlatSchemaOptions::default());
        let sst_column_num =
            flat_sst_arrow_schema_column_num(&metadata, &FlatSchemaOptions::default());
        let format_projection =
            FormatProjection::compute_format_projection(&id_to_index, sst_column_num, read_cols);

        Self {
            metadata,
            arrow_schema,
            format_projection,
            column_id_to_sst_index: id_to_index,
        }
    }

    /// Returns min values of a specific column in row groups.
    fn min_values(
        &self,
        row_groups: &[impl Borrow<RowGroupMetaData>],
        column_id: ColumnId,
    ) -> StatValues {
        self.get_stat_values(row_groups, column_id, true)
    }

    /// Returns max values of a specific column in row groups.
    fn max_values(
        &self,
        row_groups: &[impl Borrow<RowGroupMetaData>],
        column_id: ColumnId,
    ) -> StatValues {
        self.get_stat_values(row_groups, column_id, false)
    }

    /// Returns null counts of a specific column in row groups.
    fn null_counts(
        &self,
        row_groups: &[impl Borrow<RowGroupMetaData>],
        column_id: ColumnId,
    ) -> StatValues {
        let Some(index) = self.column_id_to_sst_index.get(&column_id) else {
            // No such column in the SST.
            return StatValues::NoColumn;
        };

        let stats = column_null_counts(row_groups, *index);
        StatValues::from_stats_opt(stats)
    }

    fn get_stat_values(
        &self,
        row_groups: &[impl Borrow<RowGroupMetaData>],
        column_id: ColumnId,
        is_min: bool,
    ) -> StatValues {
        let Some(column) = self.metadata.column_by_id(column_id) else {
            // No such column in the SST.
            return StatValues::NoColumn;
        };
        // Safety: `column_id_to_sst_index` is built from `metadata`.
        let index = self.column_id_to_sst_index.get(&column_id).unwrap();

        let stats = column_values(row_groups, column, *index, is_min);
        StatValues::from_stats_opt(stats)
    }
}

/// Returns a map from column id to the column's position in the SST.
///
/// It only supports SSTs with raw primary key columns.
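///
/// For illustration (hypothetical column ids): with `primary_key = [3, 1]`,
/// fields `0` and `2` (in metadata order) and time index `4`, the returned map
/// is `{3: 0, 1: 1, 0: 2, 2: 3, 4: 4}`.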
pub(crate) fn sst_column_id_indices(metadata: &RegionMetadata) -> HashMap<ColumnId, usize> {
    let mut id_to_index = HashMap::with_capacity(metadata.column_metadatas.len());
    let mut column_index = 0;
    // keys
    for pk_id in &metadata.primary_key {
        id_to_index.insert(*pk_id, column_index);
        column_index += 1;
    }
    // fields
    for column in &metadata.column_metadatas {
        if column.semantic_type == SemanticType::Field {
            id_to_index.insert(column.column_id, column_index);
            column_index += 1;
        }
    }
    // time index
    id_to_index.insert(metadata.time_index_column().column_id, column_index);

    id_to_index
}

/// Decodes primary keys from a batch and returns decoded primary key information.
///
/// The batch must contain a primary key column at the expected index.
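///
/// A worked sketch of the run-length reuse below: dictionary keys
/// `[0, 0, 1, 1, 0]` trigger three decodes (only consecutive duplicates are
/// reused), so `decoded_pk_values` gets 3 entries and the rebuilt keys array is
/// `[0, 0, 1, 1, 2]`.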
pub(crate) fn decode_primary_keys(
    codec: &dyn PrimaryKeyCodec,
    batch: &RecordBatch,
) -> Result<DecodedPrimaryKeys> {
    let primary_key_index = primary_key_column_index(batch.num_columns());
    let pk_dict_array = batch
        .column(primary_key_index)
        .as_any()
        .downcast_ref::<PrimaryKeyArray>()
        .with_context(|| InvalidRecordBatchSnafu {
            reason: "Primary key column is not a dictionary array".to_string(),
        })?;
    let pk_values_array = pk_dict_array
        .values()
        .as_any()
        .downcast_ref::<BinaryArray>()
        .with_context(|| InvalidRecordBatchSnafu {
            reason: "Primary key values are not a binary array".to_string(),
        })?;

    let keys = pk_dict_array.keys();

    // Decodes primary key values by iterating through keys, reusing decoded values for duplicate keys.
    // Maps original key index -> new decoded value index.
    let mut key_to_decoded_index = Vec::with_capacity(keys.len());
    let mut decoded_pk_values = Vec::new();
    let mut prev_key: Option<u32> = None;

    // The parquet reader may read the whole dictionary page into the dictionary values, so
    // we may decode many primary keys not in this batch if we decode the values array directly.
    let pk_indices = keys.values();
    for &current_key in pk_indices.iter().take(keys.len()) {
        // Checks if the current key is the same as the previous key.
        if let Some(prev) = prev_key
            && prev == current_key
        {
            // Reuses the last decoded index.
            key_to_decoded_index.push((decoded_pk_values.len() - 1) as u32);
            continue;
        }

        // New key, decodes the value.
        let pk_bytes = pk_values_array.value(current_key as usize);
        let decoded_value = codec.decode(pk_bytes).context(DecodeSnafu)?;

        decoded_pk_values.push(decoded_value);
        key_to_decoded_index.push((decoded_pk_values.len() - 1) as u32);
        prev_key = Some(current_key);
    }

    // Creates the keys array from key_to_decoded_index.
    let keys_array = UInt32Array::from(key_to_decoded_index);

    Ok(DecodedPrimaryKeys {
        decoded_pk_values,
        keys_array,
    })
}

/// Holds decoded primary key values and their indices.
pub(crate) struct DecodedPrimaryKeys {
    /// Decoded primary key values for unique keys in the dictionary.
    decoded_pk_values: Vec<CompositeValues>,
    /// Prebuilt keys array for creating dictionary arrays.
    keys_array: UInt32Array,
}

impl DecodedPrimaryKeys {
    /// Gets a tag column array by column id and data type.
    ///
    /// For sparse encoding, uses `column_id` to look up values.
    /// For dense encoding, uses `pk_index` to get values.
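    ///
    /// Output shape (a sketch): string tags keep the dictionary form, built
    /// from the shared keys array; non-string tags are materialized into a
    /// plain array via `take`.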
    pub(crate) fn get_tag_column(
        &self,
        column_id: ColumnId,
        pk_index: Option<usize>,
        column_type: &ConcreteDataType,
    ) -> Result<ArrayRef> {
        // Gets values from the primary key.
        let mut builder = column_type.create_mutable_vector(self.decoded_pk_values.len());
        for decoded in &self.decoded_pk_values {
            match decoded {
                CompositeValues::Dense(dense) => {
                    let pk_idx = pk_index.expect("pk_index required for dense encoding");
                    if pk_idx < dense.len() {
                        builder.push_value_ref(&dense[pk_idx].1.as_value_ref());
                    } else {
                        builder.push_null();
                    }
                }
                CompositeValues::Sparse(sparse) => {
                    let value = sparse.get_or_null(column_id);
                    builder.push_value_ref(&value.as_value_ref());
                }
            };
        }

        let values_vector = builder.to_vector();
        let values_array = values_vector.to_arrow_array();

        // Only creates a dictionary array for string types, otherwise takes values by keys.
        if column_type.is_string() {
            // Creates a dictionary array using the same keys for string types.
            // Note that the dictionary values may have nulls.
            let dict_array = DictionaryArray::new(self.keys_array.clone(), values_array);
            Ok(Arc::new(dict_array))
        } else {
            // For non-string types, takes values by key indices to create a regular array.
            let taken_array =
                take(&values_array, &self.keys_array, None).context(ComputeArrowSnafu)?;
            Ok(taken_array)
        }
    }
}

/// Converts a batch that doesn't have decoded primary key columns into a batch that has decoded
/// primary key columns in the flat format.
pub(crate) struct FlatConvertFormat {
    /// Metadata of the region.
    metadata: RegionMetadataRef,
    /// Primary key codec to decode primary keys.
    codec: Arc<dyn PrimaryKeyCodec>,
    /// Projected primary key column information: (column_id, pk_index, column_index in metadata).
    projected_primary_keys: Vec<(ColumnId, usize, usize)>,
}

impl FlatConvertFormat {
    /// Creates a new `FlatConvertFormat`.
    ///
    /// The `format_projection` is the projection computed in the [FlatReadFormat] with the `metadata`.
    /// The `codec` is the primary key codec of the `metadata`.
    ///
    /// Returns `None` if there is no primary key.
    pub(crate) fn new(
        metadata: RegionMetadataRef,
        format_projection: &FormatProjection,
        codec: Arc<dyn PrimaryKeyCodec>,
    ) -> Option<Self> {
        if metadata.primary_key.is_empty() {
            return None;
        }

        // Builds the projected primary keys list, maintaining the order of RegionMetadata::primary_key.
        let mut projected_primary_keys = Vec::new();
        for (pk_index, &column_id) in metadata.primary_key.iter().enumerate() {
            if format_projection
                .column_id_to_projected_index
                .contains_key(&column_id)
            {
                // We expect the format_projection is built from the metadata.
                let column_index = metadata.column_index_by_id(column_id).unwrap();
                projected_primary_keys.push((column_id, pk_index, column_index));
            }
        }

        Some(Self {
            metadata,
            codec,
            projected_primary_keys,
        })
    }

    /// Converts a batch to have decoded primary key columns in the flat format.
    ///
    /// The primary key array in the batch is a dictionary array.
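    ///
    /// For example (a sketch): a legacy batch `field_0, ts, __primary_key,
    /// __sequence, __op_type` with projected tags `tag_0, tag_1` becomes
    /// `tag_0, tag_1, field_0, ts, __primary_key, __sequence, __op_type`.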
    pub(crate) fn convert(&self, batch: RecordBatch) -> Result<RecordBatch> {
        if self.projected_primary_keys.is_empty() {
            return Ok(batch);
        }

        let decoded_pks = decode_primary_keys(self.codec.as_ref(), &batch)?;

        // Builds decoded tag column arrays.
        let mut decoded_columns = Vec::new();
        for (column_id, pk_index, column_index) in &self.projected_primary_keys {
            let column_metadata = &self.metadata.column_metadatas[*column_index];
            let tag_column = decoded_pks.get_tag_column(
                *column_id,
                Some(*pk_index),
                &column_metadata.column_schema.data_type,
            )?;
            decoded_columns.push(tag_column);
        }

        // Builds new columns: decoded tag columns first, then original columns.
        let mut new_columns = Vec::with_capacity(batch.num_columns() + decoded_columns.len());
        new_columns.extend(decoded_columns);
        new_columns.extend_from_slice(batch.columns());

        // Builds the new schema.
        let mut new_fields =
            Vec::with_capacity(batch.schema().fields().len() + self.projected_primary_keys.len());
        for (column_id, _, column_index) in &self.projected_primary_keys {
            let column_metadata = &self.metadata.column_metadatas[*column_index];
            let old_field = &self.metadata.schema.arrow_schema().fields()[*column_index];
            let field =
                tag_maybe_to_dictionary_field(&column_metadata.column_schema.data_type, old_field);
            new_fields.push(Arc::new(with_field_id((*field).clone(), *column_id)));
        }
        new_fields.extend(batch.schema().fields().iter().cloned());

        let new_schema = Arc::new(Schema::new(new_fields));
        RecordBatch::try_new(new_schema, new_columns).context(NewRecordBatchSnafu)
    }
}

#[cfg(test)]
impl FlatReadFormat {
    /// Creates a helper with existing `metadata` and all columns.
    pub fn new_with_all_columns(metadata: RegionMetadataRef) -> FlatReadFormat {
        Self::new(
            Arc::clone(&metadata),
            ReadColumns::from_deduped_column_ids(
                metadata.column_metadatas.iter().map(|c| c.column_id),
            ),
            None,
            "test",
            false,
        )
        .unwrap()
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use api::v1::SemanticType;
    use datatypes::prelude::ConcreteDataType;
    use datatypes::schema::ColumnSchema;
    use store_api::codec::PrimaryKeyEncoding;
    use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataBuilder};
    use store_api::storage::RegionId;

    use super::{FlatReadFormat, field_column_start};
    use crate::read::read_columns::ReadColumns;
    use crate::sst::{
        FlatSchemaOptions, flat_sst_arrow_schema_column_num, to_flat_sst_arrow_schema,
    };

    /// Builds a `RegionMetadata` with the given number of tags and fields.
    fn build_metadata(
        num_tags: usize,
        num_fields: usize,
        encoding: PrimaryKeyEncoding,
    ) -> RegionMetadata {
        let mut builder = RegionMetadataBuilder::new(RegionId::new(0, 0));
        let mut col_id = 0u32;

        for i in 0..num_tags {
            builder.push_column_metadata(ColumnMetadata {
                column_schema: ColumnSchema::new(
                    format!("tag_{i}"),
                    ConcreteDataType::string_datatype(),
                    true,
                ),
                semantic_type: SemanticType::Tag,
                column_id: col_id,
            });
            col_id += 1;
        }

        for i in 0..num_fields {
            builder.push_column_metadata(ColumnMetadata {
                column_schema: ColumnSchema::new(
                    format!("field_{i}"),
                    ConcreteDataType::uint64_datatype(),
                    true,
                ),
                semantic_type: SemanticType::Field,
                column_id: col_id,
            });
            col_id += 1;
        }

        builder.push_column_metadata(ColumnMetadata {
            column_schema: ColumnSchema::new(
                "ts".to_string(),
                ConcreteDataType::timestamp_millisecond_datatype(),
                false,
            ),
            semantic_type: SemanticType::Timestamp,
            column_id: col_id,
        });

        let primary_key: Vec<u32> = (0..num_tags as u32).collect();
        builder.primary_key(primary_key);
        builder.primary_key_encoding(encoding);
        builder.build().unwrap()
    }

    #[test]
    fn test_field_column_start() {
        // (num_tags, num_fields, encoding, expected)
        let cases = [
            (1, 1, PrimaryKeyEncoding::Dense, 1),
            (2, 2, PrimaryKeyEncoding::Dense, 2),
            (0, 2, PrimaryKeyEncoding::Dense, 0),
            (2, 2, PrimaryKeyEncoding::Sparse, 0),
        ];

        for (num_tags, num_fields, encoding, expected) in cases {
            let metadata = build_metadata(num_tags, num_fields, encoding);
            let options = FlatSchemaOptions::from_encoding(encoding);
            let num_columns = flat_sst_arrow_schema_column_num(&metadata, &options);
            let result = field_column_start(&metadata, num_columns);
            assert_eq!(
                result, expected,
                "num_tags={num_tags}, num_fields={num_fields}, encoding={encoding:?}"
            );
        }
    }
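
    /// Sanity check for the fixed column positions (an illustrative sketch added
    /// here; the layout follows the module-level docs).
    #[test]
    fn test_fixed_position_column_indices() {
        // One tag, two fields and a time index in the dense flat layout:
        // tag_0, field_0, field_1, ts, __primary_key, __sequence, __op_type.
        let metadata = build_metadata(1, 2, PrimaryKeyEncoding::Dense);
        let options = FlatSchemaOptions::from_encoding(PrimaryKeyEncoding::Dense);
        let num_columns = flat_sst_arrow_schema_column_num(&metadata, &options);
        assert_eq!(7, num_columns);
        assert_eq!(3, super::time_index_column_index(num_columns));
        assert_eq!(4, super::primary_key_column_index(num_columns));
        assert_eq!(5, super::sequence_column_index(num_columns));
        assert_eq!(6, super::op_type_column_index(num_columns));
    }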

    #[test]
    fn test_output_arrow_schema_uses_projection() {
        let metadata = Arc::new(build_metadata(1, 2, PrimaryKeyEncoding::Dense));
        let read_format = FlatReadFormat::new(
            metadata.clone(),
            ReadColumns::from_deduped_column_ids([0_u32, 2_u32]),
            None,
            "test",
            false,
        )
        .unwrap();

        let output_schema = read_format.output_arrow_schema().unwrap();
        let projection = read_format.parquet_read_columns().root_indices();
        let expected = Arc::new(
            to_flat_sst_arrow_schema(&metadata, &FlatSchemaOptions::default())
                .project(projection)
                .unwrap(),
        );

        assert_eq!(expected, output_schema);
    }
}