query/optimizer/windowed_sort.rs

use std::collections::HashSet;
use std::sync::Arc;

use datafusion::physical_optimizer::PhysicalOptimizerRule;
use datafusion::physical_plan::coalesce_batches::CoalesceBatchesExec;
use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec;
use datafusion::physical_plan::filter::FilterExec;
use datafusion::physical_plan::projection::ProjectionExec;
use datafusion::physical_plan::repartition::RepartitionExec;
use datafusion::physical_plan::sorts::sort::SortExec;
use datafusion::physical_plan::sorts::sort_preserving_merge::SortPreservingMergeExec;
use datafusion::physical_plan::ExecutionPlan;
use datafusion_common::tree_node::{Transformed, TreeNode};
use datafusion_common::Result as DataFusionResult;
use datafusion_physical_expr::expressions::Column as PhysicalColumn;
use datafusion_physical_expr::LexOrdering;
use store_api::region_engine::PartitionRange;
use table::table::scan::RegionScanExec;

use crate::part_sort::PartSortExec;
use crate::window_sort::WindowedSortExec;

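/// Rewrites a `SortExec` on the time index into a windowed sort.
///
/// When the sort input is backed by a `RegionScanExec` with known partition
/// ranges, the global sort is replaced by an optional `PartSortExec` (sorting
/// within each partition range) followed by a `WindowedSortExec` that merges
/// the time-ordered ranges, plus a `SortPreservingMergeExec` when the original
/// sort did not preserve partitioning.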
#[derive(Debug)]
pub struct WindowedSortPhysicalRule;

impl PhysicalOptimizerRule for WindowedSortPhysicalRule {
    fn optimize(
        &self,
        plan: Arc<dyn ExecutionPlan>,
        config: &datafusion::config::ConfigOptions,
    ) -> DataFusionResult<Arc<dyn ExecutionPlan>> {
        Self::do_optimize(plan, config)
    }

    fn name(&self) -> &str {
        "WindowedSortRule"
    }

    fn schema_check(&self) -> bool {
        false
    }
}

impl WindowedSortPhysicalRule {
    fn do_optimize(
        plan: Arc<dyn ExecutionPlan>,
        _config: &datafusion::config::ConfigOptions,
    ) -> DataFusionResult<Arc<dyn ExecutionPlan>> {
        let result = plan
            .transform_down(|plan| {
                if let Some(sort_exec) = plan.as_any().downcast_ref::<SortExec>() {
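                    // Only a sort on a single expression is eligible for this rewrite.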
                    if sort_exec.expr().len() != 1 {
                        return Ok(Transformed::no(plan));
                    }

                    let preserve_partitioning = sort_exec.preserve_partitioning();

                    let sort_input = remove_repartition(sort_exec.input().clone())?.data;
                    let sort_input =
                        remove_coalesce_batches_exec(sort_input, sort_exec.fetch())?.data;

                    let Some(scanner_info) = fetch_partition_range(sort_input.clone())? else {
                        return Ok(Transformed::no(plan));
                    };
                    let input_schema = sort_input.schema();

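                    // The sort key must be a plain column reference to the scanner's time
                    // index; otherwise leave the plan untouched.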
                    if let Some(first_sort_expr) = sort_exec.expr().first()
                        && let Some(column_expr) = first_sort_expr
                            .expr
                            .as_any()
                            .downcast_ref::<PhysicalColumn>()
                        && scanner_info
                            .time_index
                            .contains(input_schema.field(column_expr.index()).name())
                    {
                    } else {
                        return Ok(Transformed::no(plan));
                    }
                    let first_sort_expr = sort_exec.expr().first().unwrap().clone();

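                    // With no tag columns and an ascending sort, feed the input to the windowed
                    // sort directly; otherwise sort each partition range locally first.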
                    let new_input = if scanner_info.tag_columns.is_empty()
                        && !first_sort_expr.options.descending
                    {
                        sort_input
                    } else {
                        Arc::new(PartSortExec::new(
                            first_sort_expr.clone(),
                            sort_exec.fetch(),
                            scanner_info.partition_ranges.clone(),
                            sort_input,
                        ))
                    };

                    let windowed_sort_exec = WindowedSortExec::try_new(
                        first_sort_expr,
                        sort_exec.fetch(),
                        scanner_info.partition_ranges,
                        new_input,
                    )?;

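                    // When the original sort did not preserve partitioning, merge the
                    // per-partition outputs back into a single sorted stream.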
                    if !preserve_partitioning {
                        let order_preserving_merge = SortPreservingMergeExec::new(
                            LexOrdering::new(sort_exec.expr().to_vec()),
                            Arc::new(windowed_sort_exec),
                        );
                        return Ok(Transformed {
                            data: Arc::new(order_preserving_merge),
                            transformed: true,
                            tnr: datafusion_common::tree_node::TreeNodeRecursion::Stop,
                        });
                    } else {
                        return Ok(Transformed {
                            data: Arc::new(windowed_sort_exec),
                            transformed: true,
                            tnr: datafusion_common::tree_node::TreeNodeRecursion::Stop,
                        });
                    }
                }

                Ok(Transformed::no(plan))
            })?
            .data;

        Ok(result)
    }
}

#[derive(Debug)]
struct ScannerInfo {
    partition_ranges: Vec<Vec<PartitionRange>>,
    time_index: HashSet<String>,
    tag_columns: Vec<String>,
}

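/// Walks the plan bottom-up to find a `RegionScanExec` and collect its
/// uncollapsed partition ranges, time index column, and tag columns, tracking
/// projections that may rename the time index along the way.
///
/// Returns `None` if no scanner is found or if a node above it (repartition,
/// coalesce partitions, sort, or windowed sort) invalidates the ranges.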
fn fetch_partition_range(input: Arc<dyn ExecutionPlan>) -> DataFusionResult<Option<ScannerInfo>> {
    let mut partition_ranges = None;
    let mut time_index = HashSet::new();
    let mut alias_map = Vec::new();
    let mut tag_columns = None;
    let mut is_batch_coalesced = false;

    input.transform_up(|plan| {
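        // These nodes break the per-range ordering of the scanner's output, so
        // discard any ranges collected below them.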
        if plan.as_any().is::<RepartitionExec>()
            || plan.as_any().is::<CoalescePartitionsExec>()
            || plan.as_any().is::<SortExec>()
            || plan.as_any().is::<WindowedSortExec>()
        {
            partition_ranges = None;
        }

        if plan.as_any().is::<CoalesceBatchesExec>() {
            is_batch_coalesced = true;
        }

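        // A projection may rename the time index column; record the aliases and
        // rewrite the tracked time index names accordingly.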
        if let Some(projection) = plan.as_any().downcast_ref::<ProjectionExec>() {
            for (expr, output_name) in projection.expr() {
                if let Some(column_expr) = expr.as_any().downcast_ref::<PhysicalColumn>() {
                    alias_map.push((column_expr.name().to_string(), output_name.clone()));
                }
            }
            time_index = resolve_alias(&alias_map, &time_index);
        }

        if let Some(region_scan_exec) = plan.as_any().downcast_ref::<RegionScanExec>() {
            partition_ranges = Some(region_scan_exec.get_uncollapsed_partition_ranges());
            time_index = HashSet::from([region_scan_exec.time_index()]);
            tag_columns = Some(region_scan_exec.tag_columns());

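            // Hint the scanner to keep partition ranges distinguishable in its output,
            // but only when no `CoalesceBatchesExec` above it would merge batches anyway.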
            if !is_batch_coalesced {
                region_scan_exec.with_distinguish_partition_range(true);
            }
        }

        Ok(Transformed::no(plan))
    })?;

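    // Build the result only if both the partition ranges and the tag columns were
    // found; the `?` operators inside the `try` block yield `None` otherwise.
    // Note: `try` blocks are gated behind the nightly `try_blocks` feature.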
    let result = try {
        ScannerInfo {
            partition_ranges: partition_ranges?,
            time_index,
            tag_columns: tag_columns?,
        }
    };

    Ok(result)
}

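/// Removes a `RepartitionExec` sandwiched between a `FilterExec` and a
/// `RegionScanExec`, reattaching the filter directly to the scan. Left in
/// place, the repartition would make `fetch_partition_range` discard the
/// scanner's partition ranges.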
fn remove_repartition(
    plan: Arc<dyn ExecutionPlan>,
) -> DataFusionResult<Transformed<Arc<dyn ExecutionPlan>>> {
    plan.transform_down(|plan| {
        if plan.as_any().is::<FilterExec>() {
            let maybe_repartition = plan.children()[0];
            if maybe_repartition.as_any().is::<RepartitionExec>() {
                let maybe_scan = maybe_repartition.children()[0];
                if maybe_scan.as_any().is::<RegionScanExec>() {
                    let new_filter = plan.clone().with_new_children(vec![maybe_scan.clone()])?;
                    return Ok(Transformed::yes(new_filter));
                }
            }
        }

        Ok(Transformed::no(plan))
    })
}

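/// Removes the first `CoalesceBatchesExec` whose target batch size exceeds the
/// sort's `fetch` limit; without a fetch limit the plan is left untouched.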
fn remove_coalesce_batches_exec(
    plan: Arc<dyn ExecutionPlan>,
    fetch: Option<usize>,
) -> DataFusionResult<Transformed<Arc<dyn ExecutionPlan>>> {
    let Some(fetch) = fetch else {
        return Ok(Transformed::no(plan));
    };

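    // Only the first (outermost) matching `CoalesceBatchesExec` is removed.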
    let mut is_done = false;

    plan.transform_down(|plan| {
        if let Some(coalesce_batches_exec) = plan.as_any().downcast_ref::<CoalesceBatchesExec>() {
            let target_batch_size = coalesce_batches_exec.target_batch_size();
            if fetch < target_batch_size && !is_done {
                is_done = true;
                return Ok(Transformed::yes(coalesce_batches_exec.input().clone()));
            }
        }

        Ok(Transformed::no(plan))
    })
}

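/// Rewrites a set of time index names through a projection's alias map.
///
/// For each alias `(old, new)`: if `old` is a time index name, `new` is added
/// to the result; if `new` was a time index name but is now produced from a
/// different column, it is dropped. Names untouched by either rule are kept.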
fn resolve_alias(alias_map: &[(String, String)], time_index: &HashSet<String>) -> HashSet<String> {
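    // Time index names that survive the projection, i.e. are not shadowed by an
    // alias of a different column.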
    let mut avail_old_name = time_index.clone();
    let mut new_time_index = HashSet::new();
    for (old, new) in alias_map {
        if time_index.contains(old) {
            new_time_index.insert(new.clone());
        } else if time_index.contains(new) && old != new {
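            // `new` used to name the time index, but the projection now produces it
            // from a different column, so it no longer refers to the time index.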
            avail_old_name.remove(new);
            continue;
        }
    }
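    // Keep the original names that were not shadowed by the projection.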
    new_time_index.extend(avail_old_name);
    new_time_index
}

#[cfg(test)]
mod test {
    use itertools::Itertools;

    use super::*;

    #[test]
    fn test_alias() {
        let testcases = [
            (
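                // "a" is renamed to "b": both the new name and the untouched
                // original are kept.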
303 vec![("a", "b"), ("b", "c")],
304 HashSet::from(["a"]),
305 HashSet::from(["a", "b"]),
306 ),
307 (
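                // "a" and "b" swap: the time index follows the rename to "b",
                // while "a" now names a different column.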
309 vec![("b", "a"), ("a", "b")],
310 HashSet::from(["a"]),
311 HashSet::from(["b"]),
312 ),
313 (
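                // "a" is overwritten by a rename of "b" and never re-created,
                // so nothing remains.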
314 vec![("b", "a"), ("b", "c")],
315 HashSet::from(["a"]),
316 HashSet::from([]),
317 ),
318 (
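                // Aliases that do not involve the time index leave it unchanged.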
320 vec![("c", "d"), ("d", "c")],
321 HashSet::from(["a"]),
322 HashSet::from(["a"]),
323 ),
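            // An empty alias map, or an empty time index, passes through unchanged.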
            (vec![], HashSet::from(["a"]), HashSet::from(["a"])),
            (vec![], HashSet::from([]), HashSet::from([])),
        ];
        for (alias_map, time_index, expected) in testcases {
            let alias_map = alias_map
                .into_iter()
                .map(|(k, v)| (k.to_string(), v.to_string()))
                .collect_vec();
            let time_index = time_index.into_iter().map(|i| i.to_string()).collect();
            let expected: HashSet<String> = expected.into_iter().map(|i| i.to_string()).collect();

            assert_eq!(
                expected,
                resolve_alias(&alias_map, &time_index),
                "alias_map={:?}, time_index={:?}",
                alias_map,
                time_index
            );
        }
    }
}