// query/optimizer/windowed_sort.rs

use std::collections::HashSet;
use std::sync::Arc;

use datafusion::physical_optimizer::PhysicalOptimizerRule;
use datafusion::physical_plan::ExecutionPlan;
use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec;
use datafusion::physical_plan::coop::CooperativeExec;
use datafusion::physical_plan::filter::FilterExec;
use datafusion::physical_plan::projection::ProjectionExec;
use datafusion::physical_plan::repartition::RepartitionExec;
use datafusion::physical_plan::sorts::sort::SortExec;
use datafusion::physical_plan::sorts::sort_preserving_merge::SortPreservingMergeExec;
use datafusion_common::Result as DataFusionResult;
use datafusion_common::tree_node::{Transformed, TreeNode};
use datafusion_physical_expr::expressions::Column as PhysicalColumn;
use store_api::region_engine::PartitionRange;
use table::table::scan::RegionScanExec;

use crate::part_sort::PartSortExec;
use crate::window_sort::WindowedSortExec;
35
/// Physical optimizer rule that rewrites a `SortExec` on the time-index
/// column into a windowed sort pipeline (`PartSortExec` + `WindowedSortExec`)
/// driven by the partition ranges reported by the region scanner.
#[derive(Debug)]
pub struct WindowedSortPhysicalRule;
47
48impl PhysicalOptimizerRule for WindowedSortPhysicalRule {
49 fn optimize(
50 &self,
51 plan: Arc<dyn ExecutionPlan>,
52 config: &datafusion::config::ConfigOptions,
53 ) -> DataFusionResult<Arc<dyn ExecutionPlan>> {
54 Self::do_optimize(plan, config)
55 }
56
57 fn name(&self) -> &str {
58 "WindowedSortRule"
59 }
60
61 fn schema_check(&self) -> bool {
62 false
63 }
64}
65
impl WindowedSortPhysicalRule {
    /// Walks the plan top-down and replaces the first eligible `SortExec`
    /// with a windowed sort pipeline, then stops recursing.
    fn do_optimize(
        plan: Arc<dyn ExecutionPlan>,
        _config: &datafusion::config::ConfigOptions,
    ) -> DataFusionResult<Arc<dyn ExecutionPlan>> {
        let result = plan
            .transform_down(|plan| {
                if let Some(sort_exec) = plan.as_any().downcast_ref::<SortExec>() {
                    // Only a single sort expression can map onto the time window.
                    if sort_exec.expr().len() != 1 {
                        return Ok(Transformed::no(plan));
                    }

                    let preserve_partitioning = sort_exec.preserve_partitioning();

                    // Drop a Filter -> Repartition -> RegionScan repartition
                    // below the sort before inspecting the subtree.
                    let sort_input = remove_repartition(sort_exec.input().clone())?.data;

                    // Bail out unless the subtree exposes usable partition ranges.
                    let Some(scanner_info) = fetch_partition_range(sort_input.clone())? else {
                        return Ok(Transformed::no(plan));
                    };
                    let input_schema = sort_input.schema();

                    // The sort key must be a plain column that resolves (possibly
                    // through projection aliases) to the scanner's time index.
                    let first_sort_expr = sort_exec.expr().first();
                    if let Some(column_expr) = first_sort_expr
                        .expr
                        .as_any()
                        .downcast_ref::<PhysicalColumn>()
                        && scanner_info
                            .time_index
                            .contains(input_schema.field(column_expr.index()).name())
                    {
                        // Eligible: fall through to build the windowed sort.
                    } else {
                        return Ok(Transformed::no(plan));
                    }

                    // With no tag columns and an ascending sort the input is used
                    // as-is; otherwise each partition range is sorted locally by
                    // `PartSortExec` first.
                    let new_input = if scanner_info.tag_columns.is_empty()
                        && !first_sort_expr.options.descending
                    {
                        sort_input
                    } else {
                        Arc::new(PartSortExec::try_new(
                            first_sort_expr.clone(),
                            sort_exec.fetch(),
                            scanner_info.partition_ranges.clone(),
                            sort_input,
                        )?)
                    };

                    let windowed_sort_exec = WindowedSortExec::try_new(
                        first_sort_expr.clone(),
                        sort_exec.fetch(),
                        scanner_info.partition_ranges,
                        new_input,
                    )?;

                    if !preserve_partitioning {
                        // Caller expects a single output partition: merge the
                        // per-partition streams while keeping the sort order.
                        let order_preserving_merge = SortPreservingMergeExec::new(
                            sort_exec.expr().clone(),
                            Arc::new(windowed_sort_exec),
                        );
                        return Ok(Transformed {
                            data: Arc::new(order_preserving_merge),
                            transformed: true,
                            // Stop descending: nested sorts below are not rewritten.
                            tnr: datafusion_common::tree_node::TreeNodeRecursion::Stop,
                        });
                    } else {
                        return Ok(Transformed {
                            data: Arc::new(windowed_sort_exec),
                            transformed: true,
                            tnr: datafusion_common::tree_node::TreeNodeRecursion::Stop,
                        });
                    }
                }

                Ok(Transformed::no(plan))
            })?
            .data;

        Ok(result)
    }
}
151
/// Facts collected from the scan subtree that the windowed sort rewrite needs.
#[derive(Debug)]
struct ScannerInfo {
    /// Uncollapsed partition ranges reported by the `RegionScanExec`,
    /// one `Vec<PartitionRange>` per output partition.
    partition_ranges: Vec<Vec<PartitionRange>>,
    /// All names that refer to the time-index column at the top of the
    /// subtree, including projection aliases (see `resolve_alias`).
    time_index: HashSet<String>,
    /// Tag columns reported by the `RegionScanExec`; when empty and the sort
    /// is ascending, the per-range pre-sort is skipped.
    tag_columns: Vec<String>,
}
158
/// Inspects the subtree bottom-up and returns `Some(ScannerInfo)` only when a
/// `RegionScanExec` with usable partition ranges is reachable through nodes
/// that preserve them (projections/filters, plus transparent `CooperativeExec`).
fn fetch_partition_range(input: Arc<dyn ExecutionPlan>) -> DataFusionResult<Option<ScannerInfo>> {
    let mut partition_ranges = None;
    let mut time_index = HashSet::new();
    // (source column name, alias) pairs accumulated across projections,
    // bottom-up, for resolving the time-index name at the top.
    let mut alias_map = Vec::new();
    let mut tag_columns = None;

    input.transform_up(|plan| {
        // CooperativeExec is transparent: skip it without invalidating state.
        if plan.as_any().is::<CooperativeExec>() {
            return Ok(Transformed::no(plan));
        }

        // Nodes that reshuffle or reorder rows invalidate any ranges found so far.
        if plan.as_any().is::<RepartitionExec>()
            || plan.as_any().is::<CoalescePartitionsExec>()
            || plan.as_any().is::<SortExec>()
            || plan.as_any().is::<WindowedSortExec>()
        {
            partition_ranges = None;
        }

        // Only projections and filters preserve ranges; anything else resets
        // them too. The RegionScanExec branch below re-establishes them, since
        // it runs after this reset within the same closure invocation.
        if !(plan.as_any().is::<ProjectionExec>() || plan.as_any().is::<FilterExec>()) {
            partition_ranges = None;
        }

        if let Some(projection) = plan.as_any().downcast_ref::<ProjectionExec>() {
            // Record column renames so the time-index name can be tracked
            // through this projection.
            for expr in projection.expr() {
                if let Some(column_expr) = expr.expr.as_any().downcast_ref::<PhysicalColumn>() {
                    alias_map.push((column_expr.name().to_string(), expr.alias.clone()));
                }
            }
            time_index = resolve_alias(&alias_map, &time_index);
        }

        if let Some(region_scan_exec) = plan.as_any().downcast_ref::<RegionScanExec>() {
            // Per-series distribution doesn't expose time-window ranges.
            if region_scan_exec.distribution()
                == Some(store_api::storage::TimeSeriesDistribution::PerSeries)
            {
                partition_ranges = None;
                return Ok(Transformed::no(plan));
            }

            partition_ranges = Some(region_scan_exec.get_uncollapsed_partition_ranges());
            time_index = HashSet::from([region_scan_exec.time_index()]);
            tag_columns = Some(region_scan_exec.tag_columns());

            // NOTE(review): side-effecting call on the scanner; presumably it
            // flags the scan to keep partition ranges distinguishable — confirm
            // against RegionScanExec's implementation.
            region_scan_exec.with_distinguish_partition_range(true);
        }

        Ok(Transformed::no(plan))
    })?;

    // `try` block: yields None if either ranges or tag columns were never
    // established (or were invalidated on the way up).
    let result = try {
        ScannerInfo {
            partition_ranges: partition_ranges?,
            time_index,
            tag_columns: tag_columns?,
        }
    };

    Ok(result)
}
227
228fn remove_repartition(
230 plan: Arc<dyn ExecutionPlan>,
231) -> DataFusionResult<Transformed<Arc<dyn ExecutionPlan>>> {
232 plan.transform_down(|plan| {
233 if plan.as_any().is::<FilterExec>() {
234 let maybe_repartition = plan.children()[0];
236 if maybe_repartition.as_any().is::<RepartitionExec>() {
237 let maybe_scan = maybe_repartition.children()[0];
238 if maybe_scan.as_any().is::<RegionScanExec>() {
239 let new_filter = plan.clone().with_new_children(vec![maybe_scan.clone()])?;
240 return Ok(Transformed::yes(new_filter));
241 }
242 }
243 }
244
245 Ok(Transformed::no(plan))
246 })
247}
248
/// Tracks the set of time-index column names through one projection's
/// rename list.
///
/// A name from `time_index` that a projection aliases contributes its alias
/// to the result; a name that a *different* column is renamed onto stops
/// referring to the time index and is dropped. Names never mentioned as an
/// alias target survive unchanged.
fn resolve_alias(alias_map: &[(String, String)], time_index: &HashSet<String>) -> HashSet<String> {
    // Original names not shadowed by some other column's alias.
    let mut surviving = time_index.clone();
    // Aliases whose source column is part of the time index.
    let mut aliased = HashSet::new();

    for (source, alias) in alias_map {
        if time_index.contains(source) {
            // The time index is re-exposed under `alias`.
            aliased.insert(alias.clone());
        } else if source != alias && time_index.contains(alias) {
            // Another column now occupies this name; it no longer denotes
            // the time index.
            surviving.remove(alias);
        }
    }

    aliased.extend(surviving);
    aliased
}
270
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_alias() {
        // Each case: (alias map applied in order, starting time-index names,
        // expected resolved time-index names).
        let cases = [
            // Chained rename: both the old and new name stay valid.
            (
                vec![("a", "b"), ("b", "c")],
                HashSet::from(["a"]),
                HashSet::from(["a", "b"]),
            ),
            // Swap: `a` is shadowed by `b`'s alias while being renamed to `b`.
            (
                vec![("b", "a"), ("a", "b")],
                HashSet::from(["a"]),
                HashSet::from(["b"]),
            ),
            // `a` is overwritten by an unrelated column and never re-exposed.
            (
                vec![("b", "a"), ("b", "c")],
                HashSet::from(["a"]),
                HashSet::from([]),
            ),
            // Renames not touching the time index leave it intact.
            (
                vec![("c", "d"), ("d", "c")],
                HashSet::from(["a"]),
                HashSet::from(["a"]),
            ),
            // Empty alias map is a no-op.
            (vec![], HashSet::from(["a"]), HashSet::from(["a"])),
            (vec![], HashSet::from([]), HashSet::from([])),
        ];

        for (alias_map, time_index, expected) in cases {
            let alias_map: Vec<(String, String)> = alias_map
                .into_iter()
                .map(|(from, to)| (from.to_string(), to.to_string()))
                .collect();
            let time_index: HashSet<String> =
                time_index.into_iter().map(|name| name.to_string()).collect();
            let expected: HashSet<String> =
                expected.into_iter().map(|name| name.to_string()).collect();

            assert_eq!(
                expected,
                resolve_alias(&alias_map, &time_index),
                "alias_map={alias_map:?}, time_index={time_index:?}",
            );
        }
    }
}