// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub(crate) mod file_stream;

use std::collections::HashSet;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

use common_datasource::object_store::build_backend;
use common_error::ext::BoxedError;
use common_recordbatch::adapter::RecordBatchMetrics;
use common_recordbatch::error::{CastVectorSnafu, ExternalSnafu, Result as RecordBatchResult};
use common_recordbatch::{OrderOption, RecordBatch, RecordBatchStream, SendableRecordBatchStream};
use datafusion::logical_expr::utils as df_logical_expr_utils;
use datafusion_expr::expr::Expr;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::VectorRef;
use futures::Stream;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::ScanRequest;

use self::file_stream::ScanPlanConfig;
use crate::error::{
    BuildBackendSnafu, CreateDefaultSnafu, ExtractColumnFromFilterSnafu,
    MissingColumnNoDefaultSnafu, ProjectSchemaSnafu, ProjectionOutOfBoundsSnafu, Result,
};
use crate::region::FileRegion;

impl FileRegion {
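    /// Scans the file region according to `request`, returning a stream of
    /// record batches in the (possibly projected) region schema. Projection,
    /// filters, and limit are pushed down to the underlying file scan where
    /// possible; the resulting batches are then adapted to the scan schema by
    /// `FileToScanRegionStream`.
    ///
    /// Illustrative usage (hypothetical; constructing a `FileRegion` is
    /// elided, and `ScanRequest::default()` is assumed to request a full
    /// scan):
    ///
    /// ```ignore
    /// let stream = region.query(ScanRequest::default())?;
    /// ```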
    pub fn query(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
        let store = build_backend(&self.url, &self.options).context(BuildBackendSnafu)?;

        let file_projection = self.projection_pushdown_to_file(&request.projection)?;
        let file_filters = self.filters_pushdown_to_file(&request.filters)?;
        let file_schema = Arc::new(Schema::new(self.file_options.file_column_schemas.clone()));

        let file_stream = file_stream::create_stream(
            &self.format,
            &ScanPlanConfig {
                file_schema,
                files: &self.file_options.files,
                projection: file_projection.as_ref(),
                filters: &file_filters,
                limit: request.limit,
                store,
            },
        )?;

        let scan_schema = self.scan_schema(&request.projection)?;

        Ok(Box::pin(FileToScanRegionStream::new(
            scan_schema,
            file_stream,
        )))
    }

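    /// Maps the requested projection from region schema indices to file schema
    /// indices. Requested columns that are absent from the file are dropped
    /// from the pushed-down projection; they are backfilled later by
    /// `FileToScanRegionStream`.
    ///
    /// Returns an error if any index is out of bounds of the region schema.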
    fn projection_pushdown_to_file(
        &self,
        req_projection: &Option<Vec<usize>>,
    ) -> Result<Option<Vec<usize>>> {
        let Some(scan_projection) = req_projection.as_ref() else {
            return Ok(None);
        };

        let file_column_schemas = &self.file_options.file_column_schemas;
        let mut file_projection = Vec::with_capacity(scan_projection.len());
        for column_index in scan_projection {
            ensure!(
                *column_index < self.metadata.schema.num_columns(),
                ProjectionOutOfBoundsSnafu {
                    column_index: *column_index,
                    bounds: self.metadata.schema.num_columns()
                }
            );

            let column_name = self.metadata.schema.column_name_by_index(*column_index);
            let file_column_index = file_column_schemas
                .iter()
                .position(|c| c.name == column_name);
            if let Some(file_column_index) = file_column_index {
                file_projection.push(file_column_index);
            }
        }
        Ok(Some(file_projection))
    }

    /// Collects the filters that can be pushed down to the file scan, namely
    /// the filters whose expressions reference only columns present in the
    /// file.
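    ///
    /// For example (hypothetical column names): with file columns `{a, b}`,
    /// the filter `a > 1` is pushed down, while `a > 1 AND c < 2` is not,
    /// since `c` is not a file column.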
    fn filters_pushdown_to_file(&self, scan_filters: &[Expr]) -> Result<Vec<Expr>> {
        let mut file_filters = Vec::with_capacity(scan_filters.len());

        let file_column_names = self
            .file_options
            .file_column_schemas
            .iter()
            .map(|c| &c.name)
            .collect::<HashSet<_>>();

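        // Scratch set reused across iterations and cleared after each filter.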
        let mut aux_column_set = HashSet::new();
        for scan_filter in scan_filters {
            df_logical_expr_utils::expr_to_columns(scan_filter, &mut aux_column_set)
                .context(ExtractColumnFromFilterSnafu)?;

            let all_file_columns = aux_column_set
                .iter()
                .all(|column_in_expr| file_column_names.contains(&column_in_expr.name));
            if all_file_columns {
                file_filters.push(scan_filter.clone());
            }
            aux_column_set.clear();
        }
        Ok(file_filters)
    }

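    /// Returns the schema of the scan output: the region schema with the
    /// requested projection applied, or the full region schema if there is no
    /// projection.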
    fn scan_schema(&self, req_projection: &Option<Vec<usize>>) -> Result<SchemaRef> {
        let schema = if let Some(indices) = req_projection {
            Arc::new(
                self.metadata
                    .schema
                    .try_project(indices)
                    .context(ProjectSchemaSnafu)?,
            )
        } else {
            self.metadata.schema.clone()
        };

        Ok(schema)
    }
}

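/// Adapts a stream of record batches in the file schema into record batches
/// in the scan schema, casting and backfilling columns as needed.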
struct FileToScanRegionStream {
    scan_schema: SchemaRef,
    file_stream: SendableRecordBatchStream,
}

impl RecordBatchStream for FileToScanRegionStream {
    fn schema(&self) -> SchemaRef {
        self.scan_schema.clone()
    }

    fn output_ordering(&self) -> Option<&[OrderOption]> {
        None
    }

    fn metrics(&self) -> Option<RecordBatchMetrics> {
        None
    }
}

impl Stream for FileToScanRegionStream {
    type Item = RecordBatchResult<RecordBatch>;

    fn poll_next(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
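        // Forward the inner file stream; convert each ready batch to the
        // scan schema unless it already matches.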
        match Pin::new(&mut self.file_stream).poll_next(ctx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Some(file_record_batch)) => {
                let file_record_batch = file_record_batch?;
                let scan_record_batch = if self.schema_eq(&file_record_batch) {
                    Ok(file_record_batch)
                } else {
                    self.convert_record_batch(&file_record_batch)
                };

                Poll::Ready(Some(scan_record_batch))
            }
            Poll::Ready(None) => Poll::Ready(None),
        }
    }
}

impl FileToScanRegionStream {
    fn new(scan_schema: SchemaRef, file_stream: SendableRecordBatchStream) -> Self {
        Self {
            scan_schema,
            file_stream,
        }
    }

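    /// Returns true if every column of the scan schema is present in the file
    /// record batch with an identical data type. This is the fast-path check
    /// that lets a batch pass through without conversion.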
    fn schema_eq(&self, file_record_batch: &RecordBatch) -> bool {
        self.scan_schema
            .column_schemas()
            .iter()
            .all(|scan_column_schema| {
                file_record_batch
                    .column_by_name(&scan_column_schema.name)
                    .map(|rb| rb.data_type() == scan_column_schema.data_type)
                    .unwrap_or_default()
            })
    }

    /// Converts a RecordBatch from file schema to scan schema.
    ///
    /// This function performs the following operations:
    /// - Projection: Only columns present in scan schema are retained.
    /// - Cast type: Columns present in both file schema and scan schema but
    ///   with different types are cast to the type in scan schema.
    /// - Backfill: Columns present in scan schema but not in file schema are
    ///   backfilled with default values.
    fn convert_record_batch(
        &self,
        file_record_batch: &RecordBatch,
    ) -> RecordBatchResult<RecordBatch> {
        let file_row_count = file_record_batch.num_rows();
        let columns = self
            .scan_schema
            .column_schemas()
            .iter()
            .map(|scan_column_schema| {
                let file_column = file_record_batch.column_by_name(&scan_column_schema.name);
                if let Some(file_column) = file_column {
                    Self::cast_column_type(file_column, &scan_column_schema.data_type)
                } else {
                    Self::backfill_column(scan_column_schema, file_row_count)
                }
            })
            .collect::<RecordBatchResult<Vec<_>>>()?;

        RecordBatch::new(self.scan_schema.clone(), columns)
    }

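    /// Casts `source_column` to `target_data_type`, returning the column
    /// unchanged when the types already match.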
    fn cast_column_type(
        source_column: &VectorRef,
        target_data_type: &ConcreteDataType,
    ) -> RecordBatchResult<VectorRef> {
        if &source_column.data_type() == target_data_type {
            Ok(source_column.clone())
        } else {
            source_column
                .cast(target_data_type)
                .context(CastVectorSnafu {
                    from_type: source_column.data_type(),
                    to_type: target_data_type.clone(),
                })
        }
    }

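    /// Builds a vector of `num_rows` default values for a column that is
    /// absent from the file.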
    fn backfill_column(
        column_schema: &ColumnSchema,
        num_rows: usize,
    ) -> RecordBatchResult<VectorRef> {
        Self::create_default_vector(column_schema, num_rows)
            .map_err(BoxedError::new)
            .context(ExternalSnafu)
    }

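    /// Creates a vector filled with the column's default value, failing if the
    /// default value cannot be created or the column has no default at all.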
    fn create_default_vector(column_schema: &ColumnSchema, num_rows: usize) -> Result<VectorRef> {
        column_schema
            .create_default_vector(num_rows)
            .with_context(|_| CreateDefaultSnafu {
                column: column_schema.name.clone(),
            })?
            .with_context(|| MissingColumnNoDefaultSnafu {
                column: column_schema.name.clone(),
            })
    }
}