1use std::borrow::Borrow;
32use std::collections::HashMap;
33use std::sync::Arc;
34
35use api::v1::SemanticType;
36use datatypes::arrow::array::{
37 Array, ArrayRef, BinaryArray, DictionaryArray, UInt32Array, UInt64Array,
38};
39use datatypes::arrow::compute::kernels::take::take;
40use datatypes::arrow::datatypes::{Schema, SchemaRef};
41use datatypes::arrow::record_batch::RecordBatch;
42use datatypes::prelude::{ConcreteDataType, DataType};
43use mito_codec::row_converter::{CompositeValues, PrimaryKeyCodec, build_primary_key_codec};
44use parquet::file::metadata::RowGroupMetaData;
45use snafu::{OptionExt, ResultExt, ensure};
46use store_api::codec::PrimaryKeyEncoding;
47use store_api::metadata::{RegionMetadata, RegionMetadataRef};
48use store_api::storage::{ColumnId, SequenceNumber};
49
50use crate::error::{
51 ComputeArrowSnafu, DecodeSnafu, InvalidParquetSnafu, InvalidRecordBatchSnafu,
52 NewRecordBatchSnafu, Result,
53};
54use crate::read::read_columns::ReadColumns;
55use crate::sst::parquet::format::{
56 FIXED_POS_COLUMN_NUM, FormatProjection, INTERNAL_COLUMN_NUM, PrimaryKeyArray,
57 PrimaryKeyReadFormat, StatValues, column_null_counts, column_values,
58};
59use crate::sst::parquet::read_columns::ParquetReadColumns;
60use crate::sst::{
61 FlatSchemaOptions, flat_sst_arrow_schema_column_num, tag_maybe_to_dictionary_field,
62 to_flat_sst_arrow_schema, with_field_id,
63};
64
/// Helper to write batches to SST files in the flat format.
pub(crate) struct FlatWriteFormat {
    /// Arrow schema of the SST file, built from the region metadata.
    arrow_schema: SchemaRef,
    /// If set, the sequence column of every written batch is replaced
    /// by this value.
    override_sequence: Option<SequenceNumber>,
}
71
72impl FlatWriteFormat {
73 pub(crate) fn new(metadata: RegionMetadataRef, options: &FlatSchemaOptions) -> FlatWriteFormat {
75 let arrow_schema = to_flat_sst_arrow_schema(&metadata, options);
76 FlatWriteFormat {
77 arrow_schema,
78 override_sequence: None,
79 }
80 }
81
82 pub(crate) fn with_override_sequence(
84 mut self,
85 override_sequence: Option<SequenceNumber>,
86 ) -> Self {
87 self.override_sequence = override_sequence;
88 self
89 }
90
91 #[cfg(test)]
93 pub(crate) fn arrow_schema(&self) -> &SchemaRef {
94 &self.arrow_schema
95 }
96
97 pub(crate) fn convert_batch(&self, batch: &RecordBatch) -> Result<RecordBatch> {
99 debug_assert_eq!(batch.num_columns(), self.arrow_schema.fields().len());
100
101 let Some(override_sequence) = self.override_sequence else {
102 return Ok(batch.clone());
103 };
104
105 let mut columns = batch.columns().to_vec();
106 let sequence_array = Arc::new(UInt64Array::from(vec![override_sequence; batch.num_rows()]));
107 columns[sequence_column_index(batch.num_columns())] = sequence_array;
108
109 RecordBatch::try_new(batch.schema(), columns).context(NewRecordBatchSnafu)
110 }
111}
112
/// Returns the position of the sequence column.
///
/// Fixed-position columns sit at the tail of a flat batch in the order:
/// time index, primary key, sequence, op type.
pub(crate) fn sequence_column_index(num_columns: usize) -> usize {
    // Sequence is the second column from the end.
    const OFFSET_FROM_END: usize = 2;
    num_columns - OFFSET_FROM_END
}
117
/// Returns the position of the time index column.
///
/// Fixed-position columns sit at the tail of a flat batch in the order:
/// time index, primary key, sequence, op type.
pub(crate) fn time_index_column_index(num_columns: usize) -> usize {
    // Time index is the fourth column from the end.
    const OFFSET_FROM_END: usize = 4;
    num_columns - OFFSET_FROM_END
}
122
/// Returns the position of the primary key column.
///
/// Fixed-position columns sit at the tail of a flat batch in the order:
/// time index, primary key, sequence, op type.
pub(crate) fn primary_key_column_index(num_columns: usize) -> usize {
    // Primary key is the third column from the end.
    const OFFSET_FROM_END: usize = 3;
    num_columns - OFFSET_FROM_END
}
127
/// Returns the position of the op type column.
///
/// Fixed-position columns sit at the tail of a flat batch in the order:
/// time index, primary key, sequence, op type.
pub(crate) fn op_type_column_index(num_columns: usize) -> usize {
    // Op type is the last column.
    const OFFSET_FROM_END: usize = 1;
    num_columns - OFFSET_FROM_END
}
132
133pub(crate) fn field_column_start(metadata: &RegionMetadata, num_columns: usize) -> usize {
142 let field_column_count = metadata.column_metadatas.len() - 1 - metadata.primary_key.len();
145 num_columns - FIXED_POS_COLUMN_NUM - field_column_count
146}
147
/// Helper to read SST files into batches in the flat format.
pub struct FlatReadFormat {
    /// If set, replaces the sequence column of every batch read from the file.
    override_sequence: Option<SequenceNumber>,
    /// Adapter that handles the actual on-disk format of the parquet file
    /// (flat, or legacy primary-key format converted on read).
    parquet_adapter: ParquetAdapter,
}
160
impl FlatReadFormat {
    /// Creates a read format for a flat SST file.
    ///
    /// `num_columns` is the actual number of columns in the parquet file when
    /// known; it is used to detect files written in the legacy primary-key
    /// format. When it is `None`, the file is assumed legacy iff the region
    /// uses the sparse primary key encoding.
    /// `skip_auto_convert` disables converting legacy batches to the flat
    /// format; it only takes effect for the sparse encoding.
    pub fn new(
        metadata: RegionMetadataRef,
        read_cols: ReadColumns,
        num_columns: Option<usize>,
        file_path: &str,
        skip_auto_convert: bool,
    ) -> Result<FlatReadFormat> {
        let is_legacy = match num_columns {
            Some(num) => Self::is_legacy_format(&metadata, num, file_path)?,
            // Without the column count we can't inspect the file; fall back to
            // the encoding: sparse regions are treated as legacy.
            None => metadata.primary_key_encoding == PrimaryKeyEncoding::Sparse,
        };

        let parquet_adapter = if is_legacy {
            if metadata.primary_key_encoding == PrimaryKeyEncoding::Sparse {
                // Only the sparse encoding may skip auto conversion.
                ParquetAdapter::PrimaryKeyToFlat(ParquetPrimaryKeyToFlat::new(
                    metadata,
                    read_cols,
                    skip_auto_convert,
                ))
            } else {
                ParquetAdapter::PrimaryKeyToFlat(ParquetPrimaryKeyToFlat::new(
                    metadata, read_cols, false,
                ))
            }
        } else {
            ParquetAdapter::Flat(ParquetFlat::new(metadata, read_cols))
        };

        Ok(FlatReadFormat {
            override_sequence: None,
            parquet_adapter,
        })
    }

    /// Sets the sequence number that overrides sequences read from the file.
    pub(crate) fn set_override_sequence(&mut self, sequence: Option<SequenceNumber>) {
        self.override_sequence = sequence;
    }

    /// Returns the index of `column_id` in the projected output, if projected.
    pub fn projected_index_by_id(&self, column_id: ColumnId) -> Option<usize> {
        self.format_projection()
            .column_id_to_projected_index
            .get(&column_id)
            .copied()
    }

    /// Returns min values of `column_id` for each row group.
    pub fn min_values(
        &self,
        row_groups: &[impl Borrow<RowGroupMetaData>],
        column_id: ColumnId,
    ) -> StatValues {
        match &self.parquet_adapter {
            ParquetAdapter::Flat(p) => p.min_values(row_groups, column_id),
            ParquetAdapter::PrimaryKeyToFlat(p) => p.format.min_values(row_groups, column_id),
        }
    }

    /// Returns max values of `column_id` for each row group.
    pub fn max_values(
        &self,
        row_groups: &[impl Borrow<RowGroupMetaData>],
        column_id: ColumnId,
    ) -> StatValues {
        match &self.parquet_adapter {
            ParquetAdapter::Flat(p) => p.max_values(row_groups, column_id),
            ParquetAdapter::PrimaryKeyToFlat(p) => p.format.max_values(row_groups, column_id),
        }
    }

    /// Returns null counts of `column_id` for each row group.
    pub fn null_counts(
        &self,
        row_groups: &[impl Borrow<RowGroupMetaData>],
        column_id: ColumnId,
    ) -> StatValues {
        match &self.parquet_adapter {
            ParquetAdapter::Flat(p) => p.null_counts(row_groups, column_id),
            ParquetAdapter::PrimaryKeyToFlat(p) => p.format.null_counts(row_groups, column_id),
        }
    }

    /// Returns the arrow schema of the SST file (before projection).
    pub(crate) fn arrow_schema(&self) -> &SchemaRef {
        match &self.parquet_adapter {
            ParquetAdapter::Flat(p) => &p.arrow_schema,
            ParquetAdapter::PrimaryKeyToFlat(p) => p.format.arrow_schema(),
        }
    }

    /// Returns the arrow schema of batches produced by this format,
    /// i.e. the SST schema restricted to the projected columns.
    pub(crate) fn output_arrow_schema(&self) -> Result<SchemaRef> {
        let projection = self.parquet_read_columns().root_indices();
        let schema = self
            .arrow_schema()
            .project(projection)
            .context(ComputeArrowSnafu)?;
        Ok(Arc::new(schema))
    }

    /// Returns the metadata of the region.
    pub(crate) fn metadata(&self) -> &RegionMetadataRef {
        match &self.parquet_adapter {
            ParquetAdapter::Flat(p) => &p.metadata,
            ParquetAdapter::PrimaryKeyToFlat(p) => p.format.parquet_read_columns(),
        }
    }

    /// Returns the columns to read from the parquet file.
    pub(crate) fn parquet_read_columns(&self) -> &ParquetReadColumns {
        match &self.parquet_adapter {
            ParquetAdapter::Flat(p) => &p.format_projection.parquet_read_cols,
            ParquetAdapter::PrimaryKeyToFlat(p) => p.format.parquet_read_columns(),
        }
    }

    /// Returns the projection applied to the output columns.
    pub(crate) fn format_projection(&self) -> &FormatProjection {
        match &self.parquet_adapter {
            ParquetAdapter::Flat(p) => &p.format_projection,
            ParquetAdapter::PrimaryKeyToFlat(p) => &p.format_projection,
        }
    }

    /// Returns true if batches from the file already contain individual
    /// (raw) primary key columns, i.e. the file is in the flat format.
    pub(crate) fn batch_has_raw_pk_columns(&self) -> bool {
        matches!(&self.parquet_adapter, ParquetAdapter::Flat(_))
    }

    /// Builds an array of `length` copies of the override sequence,
    /// or `None` when no override is configured.
    pub(crate) fn new_override_sequence_array(&self, length: usize) -> Option<ArrayRef> {
        self.override_sequence
            .map(|seq| Arc::new(UInt64Array::from_value(seq, length)) as ArrayRef)
    }

    /// Converts a record batch read from the file into the flat output format
    /// and optionally overrides its sequence column.
    ///
    /// `override_sequence_array` must be at least as long as the batch.
    pub(crate) fn convert_batch(
        &self,
        record_batch: RecordBatch,
        override_sequence_array: Option<&ArrayRef>,
    ) -> Result<RecordBatch> {
        let batch = match &self.parquet_adapter {
            ParquetAdapter::Flat(_) => record_batch,
            ParquetAdapter::PrimaryKeyToFlat(p) => p.convert_batch(record_batch)?,
        };

        let Some(override_array) = override_sequence_array else {
            return Ok(batch);
        };

        let mut columns = batch.columns().to_vec();
        let sequence_column_idx = sequence_column_index(batch.num_columns());

        // Trim the shared override array down to the batch length if needed.
        let sequence_array = if override_array.len() > batch.num_rows() {
            override_array.slice(0, batch.num_rows())
        } else {
            override_array.clone()
        };

        columns[sequence_column_idx] = sequence_array;

        RecordBatch::try_new(batch.schema(), columns).context(NewRecordBatchSnafu)
    }

    /// Detects whether a parquet file with `num_columns` columns was written
    /// in the legacy primary-key format for this region.
    ///
    /// A legacy file stores the primary key as a single encoded column, so it
    /// has `primary_key.len()` fewer columns than a flat file. Any other
    /// column count mismatch is reported as an invalid file.
    pub(crate) fn is_legacy_format(
        metadata: &RegionMetadata,
        num_columns: usize,
        file_path: &str,
    ) -> Result<bool> {
        // Without a primary key both formats have the same column count.
        if metadata.primary_key.is_empty() {
            return Ok(false);
        }

        // Expected column count of a flat format file.
        let expected_columns = metadata.column_metadatas.len() + INTERNAL_COLUMN_NUM;

        if expected_columns == num_columns {
            Ok(false)
        } else {
            ensure!(
                expected_columns >= num_columns,
                InvalidParquetSnafu {
                    file: file_path,
                    reason: format!(
                        "Expected columns {} should be >= actual columns {}",
                        expected_columns, num_columns
                    )
                }
            );

            let column_diff = expected_columns - num_columns;

            // Legacy files are missing exactly the individual tag columns.
            ensure!(
                column_diff == metadata.primary_key.len(),
                InvalidParquetSnafu {
                    file: file_path,
                    reason: format!(
                        "Column number difference {} does not match primary key count {}",
                        column_diff,
                        metadata.primary_key.len()
                    )
                }
            );

            Ok(true)
        }
    }
}
398
/// Adapter over the on-disk format of a parquet file.
enum ParquetAdapter {
    /// The file is already stored in the flat format.
    Flat(ParquetFlat),
    /// The file is stored in the legacy primary-key format and may be
    /// converted to the flat format on read.
    PrimaryKeyToFlat(ParquetPrimaryKeyToFlat),
}
404
/// Reads parquet files in the primary-key format and optionally converts
/// batches to the flat format.
struct ParquetPrimaryKeyToFlat {
    /// Underlying primary-key read format.
    format: PrimaryKeyReadFormat,
    /// Converter that decodes the primary key into individual tag columns.
    /// `None` when conversion is skipped or the region has no primary key.
    convert_format: Option<FlatConvertFormat>,
    /// Projection applied to the output columns.
    format_projection: FormatProjection,
}
414
415impl ParquetPrimaryKeyToFlat {
416 fn new(
418 metadata: RegionMetadataRef,
419 read_cols: ReadColumns,
420 skip_auto_convert: bool,
421 ) -> ParquetPrimaryKeyToFlat {
422 assert!(if skip_auto_convert {
423 metadata.primary_key_encoding == PrimaryKeyEncoding::Sparse
424 } else {
425 true
426 });
427
428 let id_to_index = sst_column_id_indices(&metadata);
430 let sst_column_num =
431 flat_sst_arrow_schema_column_num(&metadata, &FlatSchemaOptions::default());
432
433 let codec = build_primary_key_codec(&metadata);
434 let format = PrimaryKeyReadFormat::new(metadata.clone(), read_cols.clone());
435 let (convert_format, format_projection) = if skip_auto_convert {
436 (
437 None,
438 FormatProjection {
439 parquet_read_cols: format.parquet_read_columns().clone(),
440 column_id_to_projected_index: format.field_id_to_projected_index().clone(),
441 },
442 )
443 } else {
444 let format_projection = FormatProjection::compute_format_projection(
446 &id_to_index,
447 sst_column_num,
448 read_cols.clone(),
449 );
450 (
451 FlatConvertFormat::new(Arc::clone(&metadata), &format_projection, codec),
452 format_projection,
453 )
454 };
455
456 Self {
457 format,
458 convert_format,
459 format_projection,
460 }
461 }
462
463 fn convert_batch(&self, record_batch: RecordBatch) -> Result<RecordBatch> {
464 if let Some(convert_format) = &self.convert_format {
465 convert_format.convert(record_batch)
466 } else {
467 Ok(record_batch)
468 }
469 }
470}
471
/// Reads parquet files that are already stored in the flat format.
struct ParquetFlat {
    /// Metadata of the region.
    metadata: RegionMetadataRef,
    /// Arrow schema of the SST file.
    arrow_schema: SchemaRef,
    /// Projection applied to the output columns.
    format_projection: FormatProjection,
    /// Maps column id to its index in the SST file.
    column_id_to_sst_index: HashMap<ColumnId, usize>,
}
483
impl ParquetFlat {
    /// Creates a reader for a flat format file with the default schema options.
    fn new(metadata: RegionMetadataRef, read_cols: ReadColumns) -> ParquetFlat {
        let id_to_index = sst_column_id_indices(&metadata);
        let arrow_schema = to_flat_sst_arrow_schema(&metadata, &FlatSchemaOptions::default());
        let sst_column_num =
            flat_sst_arrow_schema_column_num(&metadata, &FlatSchemaOptions::default());
        let format_projection =
            FormatProjection::compute_format_projection(&id_to_index, sst_column_num, read_cols);

        Self {
            metadata,
            arrow_schema,
            format_projection,
            column_id_to_sst_index: id_to_index,
        }
    }

    /// Returns min values of `column_id` for each row group.
    fn min_values(
        &self,
        row_groups: &[impl Borrow<RowGroupMetaData>],
        column_id: ColumnId,
    ) -> StatValues {
        self.get_stat_values(row_groups, column_id, true)
    }

    /// Returns max values of `column_id` for each row group.
    fn max_values(
        &self,
        row_groups: &[impl Borrow<RowGroupMetaData>],
        column_id: ColumnId,
    ) -> StatValues {
        self.get_stat_values(row_groups, column_id, false)
    }

    /// Returns null counts of `column_id` for each row group.
    fn null_counts(
        &self,
        row_groups: &[impl Borrow<RowGroupMetaData>],
        column_id: ColumnId,
    ) -> StatValues {
        let Some(index) = self.column_id_to_sst_index.get(&column_id) else {
            // The column is not in the file.
            return StatValues::NoColumn;
        };

        let stats = column_null_counts(row_groups, *index);
        StatValues::from_stats_opt(stats)
    }

    /// Returns min (when `is_min`) or max values of `column_id` for each
    /// row group.
    fn get_stat_values(
        &self,
        row_groups: &[impl Borrow<RowGroupMetaData>],
        column_id: ColumnId,
        is_min: bool,
    ) -> StatValues {
        let Some(column) = self.metadata.column_by_id(column_id) else {
            // The column is not in the region metadata.
            return StatValues::NoColumn;
        };
        // Every column in the metadata has an entry in the index map.
        let index = self.column_id_to_sst_index.get(&column_id).unwrap();

        let stats = column_values(row_groups, column, *index, is_min);
        StatValues::from_stats_opt(stats)
    }
}
553
554pub(crate) fn sst_column_id_indices(metadata: &RegionMetadata) -> HashMap<ColumnId, usize> {
558 let mut id_to_index = HashMap::with_capacity(metadata.column_metadatas.len());
559 let mut column_index = 0;
560 for pk_id in &metadata.primary_key {
562 id_to_index.insert(*pk_id, column_index);
563 column_index += 1;
564 }
565 for column in &metadata.column_metadatas {
567 if column.semantic_type == SemanticType::Field {
568 id_to_index.insert(column.column_id, column_index);
569 column_index += 1;
570 }
571 }
572 id_to_index.insert(metadata.time_index_column().column_id, column_index);
574
575 id_to_index
576}
577
578pub(crate) fn decode_primary_keys(
582 codec: &dyn PrimaryKeyCodec,
583 batch: &RecordBatch,
584) -> Result<DecodedPrimaryKeys> {
585 let primary_key_index = primary_key_column_index(batch.num_columns());
586 let pk_dict_array = batch
587 .column(primary_key_index)
588 .as_any()
589 .downcast_ref::<PrimaryKeyArray>()
590 .with_context(|| InvalidRecordBatchSnafu {
591 reason: "Primary key column is not a dictionary array".to_string(),
592 })?;
593 let pk_values_array = pk_dict_array
594 .values()
595 .as_any()
596 .downcast_ref::<BinaryArray>()
597 .with_context(|| InvalidRecordBatchSnafu {
598 reason: "Primary key values are not binary array".to_string(),
599 })?;
600
601 let keys = pk_dict_array.keys();
602
603 let mut key_to_decoded_index = Vec::with_capacity(keys.len());
606 let mut decoded_pk_values = Vec::new();
607 let mut prev_key: Option<u32> = None;
608
609 let pk_indices = keys.values();
612 for ¤t_key in pk_indices.iter().take(keys.len()) {
613 if let Some(prev) = prev_key
615 && prev == current_key
616 {
617 key_to_decoded_index.push((decoded_pk_values.len() - 1) as u32);
619 continue;
620 }
621
622 let pk_bytes = pk_values_array.value(current_key as usize);
624 let decoded_value = codec.decode(pk_bytes).context(DecodeSnafu)?;
625
626 decoded_pk_values.push(decoded_value);
627 key_to_decoded_index.push((decoded_pk_values.len() - 1) as u32);
628 prev_key = Some(current_key);
629 }
630
631 let keys_array = UInt32Array::from(key_to_decoded_index);
633
634 Ok(DecodedPrimaryKeys {
635 decoded_pk_values,
636 keys_array,
637 })
638}
639
/// Primary key values decoded from a dictionary-encoded primary key column.
pub(crate) struct DecodedPrimaryKeys {
    /// Decoded composite values, one per run of equal keys.
    decoded_pk_values: Vec<CompositeValues>,
    /// Maps each row to an index into `decoded_pk_values`.
    keys_array: UInt32Array,
}
647
impl DecodedPrimaryKeys {
    /// Builds the array of a tag column from the decoded primary key values.
    ///
    /// `pk_index` is the column's position inside the primary key and is
    /// required for the dense encoding; the sparse encoding looks the value
    /// up by `column_id`. String tags are returned as a dictionary array that
    /// reuses `keys_array`; other types are expanded into a plain array via
    /// the `take` kernel.
    pub(crate) fn get_tag_column(
        &self,
        column_id: ColumnId,
        pk_index: Option<usize>,
        column_type: &ConcreteDataType,
    ) -> Result<ArrayRef> {
        // One value per decoded primary key.
        let mut builder = column_type.create_mutable_vector(self.decoded_pk_values.len());
        for decoded in &self.decoded_pk_values {
            match decoded {
                CompositeValues::Dense(dense) => {
                    let pk_idx = pk_index.expect("pk_index required for dense encoding");
                    if pk_idx < dense.len() {
                        builder.push_value_ref(&dense[pk_idx].1.as_value_ref());
                    } else {
                        // The decoded key has no value at this position;
                        // treat the tag as null.
                        builder.push_null();
                    }
                }
                CompositeValues::Sparse(sparse) => {
                    let value = sparse.get_or_null(column_id);
                    builder.push_value_ref(&value.as_value_ref());
                }
            };
        }

        let values_vector = builder.to_vector();
        let values_array = values_vector.to_arrow_array();

        if column_type.is_string() {
            // Keep strings dictionary-encoded: keys map rows to the decoded
            // values so no per-row copy is needed.
            let dict_array = DictionaryArray::new(self.keys_array.clone(), values_array);
            Ok(Arc::new(dict_array))
        } else {
            // Expand to a plain array by gathering one value per row.
            let taken_array =
                take(&values_array, &self.keys_array, None).context(ComputeArrowSnafu)?;
            Ok(taken_array)
        }
    }
}
695
/// Converts batches from the primary-key format to the flat format by
/// decoding the primary key into individual tag columns.
pub(crate) struct FlatConvertFormat {
    /// Metadata of the region.
    metadata: RegionMetadataRef,
    /// Codec used to decode primary keys.
    codec: Arc<dyn PrimaryKeyCodec>,
    /// Projected primary key columns as
    /// `(column id, index in the primary key, index in column_metadatas)`.
    projected_primary_keys: Vec<(ColumnId, usize, usize)>,
}
706
impl FlatConvertFormat {
    /// Creates a converter for the projected primary key columns.
    ///
    /// Returns `None` when the region has no primary key, in which case no
    /// conversion is needed.
    pub(crate) fn new(
        metadata: RegionMetadataRef,
        format_projection: &FormatProjection,
        codec: Arc<dyn PrimaryKeyCodec>,
    ) -> Option<Self> {
        if metadata.primary_key.is_empty() {
            return None;
        }

        // Collect primary key columns that are part of the projection,
        // keeping the primary key order.
        let mut projected_primary_keys = Vec::new();
        for (pk_index, &column_id) in metadata.primary_key.iter().enumerate() {
            if format_projection
                .column_id_to_projected_index
                .contains_key(&column_id)
            {
                // Primary key columns always exist in the metadata.
                let column_index = metadata.column_index_by_id(column_id).unwrap();
                projected_primary_keys.push((column_id, pk_index, column_index));
            }
        }

        Some(Self {
            metadata,
            codec,
            projected_primary_keys,
        })
    }

    /// Converts `batch` to the flat format by prepending the decoded tag
    /// columns (in primary key order) before the existing columns and
    /// extending the schema accordingly.
    pub(crate) fn convert(&self, batch: RecordBatch) -> Result<RecordBatch> {
        // No projected tags to decode; the batch is returned unchanged.
        if self.projected_primary_keys.is_empty() {
            return Ok(batch);
        }

        let decoded_pks = decode_primary_keys(self.codec.as_ref(), &batch)?;

        // Build one array per projected primary key column.
        let mut decoded_columns = Vec::new();
        for (column_id, pk_index, column_index) in &self.projected_primary_keys {
            let column_metadata = &self.metadata.column_metadatas[*column_index];
            let tag_column = decoded_pks.get_tag_column(
                *column_id,
                Some(*pk_index),
                &column_metadata.column_schema.data_type,
            )?;
            decoded_columns.push(tag_column);
        }

        // Tag columns go first, followed by the original batch columns.
        let mut new_columns = Vec::with_capacity(batch.num_columns() + decoded_columns.len());
        new_columns.extend(decoded_columns);
        new_columns.extend_from_slice(batch.columns());

        // Build matching fields: string tags become dictionary fields and
        // each field carries its column id.
        let mut new_fields =
            Vec::with_capacity(batch.schema().fields().len() + self.projected_primary_keys.len());
        for (column_id, _, column_index) in &self.projected_primary_keys {
            let column_metadata = &self.metadata.column_metadatas[*column_index];
            let old_field = &self.metadata.schema.arrow_schema().fields()[*column_index];
            let field =
                tag_maybe_to_dictionary_field(&column_metadata.column_schema.data_type, old_field);
            new_fields.push(Arc::new(with_field_id((*field).clone(), *column_id)));
        }
        new_fields.extend(batch.schema().fields().iter().cloned());

        let new_schema = Arc::new(Schema::new(new_fields));
        RecordBatch::try_new(new_schema, new_columns).context(NewRecordBatchSnafu)
    }
}
786
#[cfg(test)]
impl FlatReadFormat {
    /// Creates a [FlatReadFormat] that reads all columns, for tests.
    pub fn new_with_all_columns(metadata: RegionMetadataRef) -> FlatReadFormat {
        Self::new(
            Arc::clone(&metadata),
            ReadColumns::from_deduped_column_ids(
                metadata.column_metadatas.iter().map(|c| c.column_id),
            ),
            None,
            "test",
            false,
        )
        .unwrap()
    }
}
803
#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use api::v1::SemanticType;
    use datatypes::prelude::ConcreteDataType;
    use datatypes::schema::ColumnSchema;
    use store_api::codec::PrimaryKeyEncoding;
    use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataBuilder};
    use store_api::storage::RegionId;

    use super::{FlatReadFormat, field_column_start};
    use crate::read::read_columns::ReadColumns;
    use crate::sst::{
        FlatSchemaOptions, flat_sst_arrow_schema_column_num, to_flat_sst_arrow_schema,
    };

    /// Builds region metadata with `num_tags` string tags, `num_fields`
    /// u64 fields and a millisecond timestamp column `ts`.
    fn build_metadata(
        num_tags: usize,
        num_fields: usize,
        encoding: PrimaryKeyEncoding,
    ) -> RegionMetadata {
        let mut builder = RegionMetadataBuilder::new(RegionId::new(0, 0));
        let mut col_id = 0u32;

        for i in 0..num_tags {
            builder.push_column_metadata(ColumnMetadata {
                column_schema: ColumnSchema::new(
                    format!("tag_{i}"),
                    ConcreteDataType::string_datatype(),
                    true,
                ),
                semantic_type: SemanticType::Tag,
                column_id: col_id,
            });
            col_id += 1;
        }

        for i in 0..num_fields {
            builder.push_column_metadata(ColumnMetadata {
                column_schema: ColumnSchema::new(
                    format!("field_{i}"),
                    ConcreteDataType::uint64_datatype(),
                    true,
                ),
                semantic_type: SemanticType::Field,
                column_id: col_id,
            });
            col_id += 1;
        }

        builder.push_column_metadata(ColumnMetadata {
            column_schema: ColumnSchema::new(
                "ts".to_string(),
                ConcreteDataType::timestamp_millisecond_datatype(),
                false,
            ),
            semantic_type: SemanticType::Timestamp,
            column_id: col_id,
        });

        // Tags form the primary key, in declaration order.
        let primary_key: Vec<u32> = (0..num_tags as u32).collect();
        builder.primary_key(primary_key);
        builder.primary_key_encoding(encoding);
        builder.build().unwrap()
    }

    #[test]
    fn test_field_column_start() {
        // (num_tags, num_fields, encoding, expected field start index).
        let cases = [
            (1, 1, PrimaryKeyEncoding::Dense, 1),
            (2, 2, PrimaryKeyEncoding::Dense, 2),
            (0, 2, PrimaryKeyEncoding::Dense, 0),
            // Sparse encoding stores no individual tag columns, so fields
            // start at index 0.
            (2, 2, PrimaryKeyEncoding::Sparse, 0),
        ];

        for (num_tags, num_fields, encoding, expected) in cases {
            let metadata = build_metadata(num_tags, num_fields, encoding);
            let options = FlatSchemaOptions::from_encoding(encoding);
            let num_columns = flat_sst_arrow_schema_column_num(&metadata, &options);
            let result = field_column_start(&metadata, num_columns);
            assert_eq!(
                result, expected,
                "num_tags={num_tags}, num_fields={num_fields}, encoding={encoding:?}"
            );
        }
    }

    #[test]
    fn test_output_arrow_schema_uses_projection() {
        let metadata = Arc::new(build_metadata(1, 2, PrimaryKeyEncoding::Dense));
        let read_format = FlatReadFormat::new(
            metadata.clone(),
            ReadColumns::from_deduped_column_ids([0_u32, 2_u32]),
            None,
            "test",
            false,
        )
        .unwrap();

        // The output schema must equal the full SST schema restricted to the
        // projected root indices.
        let output_schema = read_format.output_arrow_schema().unwrap();
        let projection = read_format.parquet_read_columns().root_indices();
        let expected = Arc::new(
            to_flat_sst_arrow_schema(&metadata, &FlatSchemaOptions::default())
                .project(projection)
                .unwrap(),
        );

        assert_eq!(expected, output_schema);
    }
}