mod cluster_info;
pub mod columns;
pub mod flows;
mod information_memory_table;
pub mod key_column_usage;
mod partitions;
mod procedure_info;
pub mod process_list;
pub mod region_peers;
mod region_statistics;
pub mod schemata;
mod ssts;
mod table_constraints;
mod table_names;
pub mod tables;
mod views;

use std::collections::HashMap;
use std::sync::{Arc, Weak};

use common_catalog::consts::{self, DEFAULT_CATALOG_NAME, INFORMATION_SCHEMA_NAME};
use common_error::ext::ErrorExt;
use common_meta::cluster::NodeInfo;
use common_meta::datanode::RegionStat;
use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::flow::flow_state::FlowStat;
use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureInfo;
use common_recordbatch::SendableRecordBatchStream;
use datafusion::error::DataFusionError;
use datafusion::logical_expr::LogicalPlan;
use datatypes::schema::SchemaRef;
use lazy_static::lazy_static;
use paste::paste;
use process_list::InformationSchemaProcessList;
use store_api::sst_entry::{ManifestSstEntry, PuffinIndexMetaEntry, StorageSstEntry};
use store_api::storage::{ScanRequest, TableId};
use table::TableRef;
use table::metadata::TableType;
pub use table_names::*;
use views::InformationSchemaViews;

use self::columns::InformationSchemaColumns;
use crate::CatalogManager;
use crate::error::{Error, Result};
use crate::process_manager::ProcessManagerRef;
use crate::system_schema::information_schema::cluster_info::InformationSchemaClusterInfo;
use crate::system_schema::information_schema::flows::InformationSchemaFlows;
use crate::system_schema::information_schema::information_memory_table::get_schema_columns;
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
use crate::system_schema::information_schema::ssts::{
    InformationSchemaSstsIndexMeta, InformationSchemaSstsManifest, InformationSchemaSstsStorage,
};
use crate::system_schema::information_schema::table_constraints::InformationSchemaTableConstraints;
use crate::system_schema::information_schema::tables::InformationSchemaTables;
use crate::system_schema::memory_table::MemoryTable;
pub(crate) use crate::system_schema::predicate::Predicates;
use crate::system_schema::{
    SystemSchemaProvider, SystemSchemaProviderInner, SystemTable, SystemTableRef,
};

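// Tables in `information_schema` that are served from static, in-memory data.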
lazy_static! {
    static ref MEMORY_TABLES: &'static [&'static str] = &[
        ENGINES,
        COLUMN_PRIVILEGES,
        COLUMN_STATISTICS,
        CHARACTER_SETS,
        COLLATIONS,
        COLLATION_CHARACTER_SET_APPLICABILITY,
        CHECK_CONSTRAINTS,
        EVENTS,
        FILES,
        OPTIMIZER_TRACE,
        PARAMETERS,
        PROFILING,
        REFERENTIAL_CONSTRAINTS,
        ROUTINES,
        SCHEMA_PRIVILEGES,
        TABLE_PRIVILEGES,
        GLOBAL_STATUS,
        SESSION_STATUS,
        PARTITIONS,
    ];
}

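/// Builds an in-memory `information_schema` table whose schema and rows come from
/// `information_memory_table::get_schema_columns`.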
macro_rules! setup_memory_table {
    ($name: expr) => {
        paste! {
            {
                let (schema, columns) = get_schema_columns($name);
                Some(Arc::new(MemoryTable::new(
                    consts::[<INFORMATION_SCHEMA_ $name _TABLE_ID>],
                    $name,
                    schema,
                    columns
                )) as _)
            }
        }
    };
}

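/// The request passed to an `InformationSchemaTableFactory` when an extra
/// `information_schema` table is built for a catalog.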
pub struct MakeInformationTableRequest {
    pub catalog_name: String,
    pub catalog_manager: Weak<dyn CatalogManager>,
    pub kv_backend: KvBackendRef,
}

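/// Factory for extra `information_schema` tables that are registered from outside this
/// module (e.g. by plugins) through `InformationSchemaProvider::with_extra_table_factories`.
///
/// A minimal sketch of a custom factory; `MyInformationTable` is a hypothetical
/// `SystemTable` implementation, not something provided by this crate:
///
/// ```rust,ignore
/// struct MyInformationTableFactory;
///
/// impl InformationSchemaTableFactory for MyInformationTableFactory {
///     fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef {
///         Arc::new(MyInformationTable::new(req.catalog_name, req.catalog_manager))
///     }
/// }
/// ```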
pub trait InformationSchemaTableFactory {
    /// Builds the `information_schema` table described by the request.
    fn make_information_table(&self, req: MakeInformationTableRequest) -> SystemTableRef;
}

pub type InformationSchemaTableFactoryRef = Arc<dyn InformationSchemaTableFactory + Send + Sync>;

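/// Provides the `INFORMATION_SCHEMA` tables of a catalog.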
pub struct InformationSchemaProvider {
    catalog_name: String,
    catalog_manager: Weak<dyn CatalogManager>,
    process_manager: Option<ProcessManagerRef>,
    flow_metadata_manager: Arc<FlowMetadataManager>,
    tables: HashMap<String, TableRef>,
    kv_backend: KvBackendRef,
    extra_table_factories: HashMap<String, InformationSchemaTableFactoryRef>,
}

impl SystemSchemaProvider for InformationSchemaProvider {
    fn tables(&self) -> &HashMap<String, TableRef> {
        assert!(!self.tables.is_empty());

        &self.tables
    }
}

impl SystemSchemaProviderInner for InformationSchemaProvider {
    fn catalog_name(&self) -> &str {
        &self.catalog_name
    }

    fn schema_name() -> &'static str {
        INFORMATION_SCHEMA_NAME
    }

    fn system_table(&self, name: &str) -> Option<SystemTableRef> {
        if let Some(factory) = self.extra_table_factories.get(name) {
            let req = MakeInformationTableRequest {
                catalog_name: self.catalog_name.clone(),
                catalog_manager: self.catalog_manager.clone(),
                kv_backend: self.kv_backend.clone(),
            };
            return Some(factory.make_information_table(req));
        }

        match name.to_ascii_lowercase().as_str() {
            TABLES => Some(Arc::new(InformationSchemaTables::new(
                self.catalog_name.clone(),
                self.catalog_manager.clone(),
            )) as _),
            COLUMNS => Some(Arc::new(InformationSchemaColumns::new(
                self.catalog_name.clone(),
                self.catalog_manager.clone(),
            )) as _),
            ENGINES => setup_memory_table!(ENGINES),
            COLUMN_PRIVILEGES => setup_memory_table!(COLUMN_PRIVILEGES),
            COLUMN_STATISTICS => setup_memory_table!(COLUMN_STATISTICS),
            BUILD_INFO => setup_memory_table!(BUILD_INFO),
            CHARACTER_SETS => setup_memory_table!(CHARACTER_SETS),
            COLLATIONS => setup_memory_table!(COLLATIONS),
            COLLATION_CHARACTER_SET_APPLICABILITY => {
                setup_memory_table!(COLLATION_CHARACTER_SET_APPLICABILITY)
            }
            CHECK_CONSTRAINTS => setup_memory_table!(CHECK_CONSTRAINTS),
            EVENTS => setup_memory_table!(EVENTS),
            FILES => setup_memory_table!(FILES),
            OPTIMIZER_TRACE => setup_memory_table!(OPTIMIZER_TRACE),
            PARAMETERS => setup_memory_table!(PARAMETERS),
            PROFILING => setup_memory_table!(PROFILING),
            REFERENTIAL_CONSTRAINTS => setup_memory_table!(REFERENTIAL_CONSTRAINTS),
            ROUTINES => setup_memory_table!(ROUTINES),
            SCHEMA_PRIVILEGES => setup_memory_table!(SCHEMA_PRIVILEGES),
            TABLE_PRIVILEGES => setup_memory_table!(TABLE_PRIVILEGES),
            GLOBAL_STATUS => setup_memory_table!(GLOBAL_STATUS),
            SESSION_STATUS => setup_memory_table!(SESSION_STATUS),
            KEY_COLUMN_USAGE => Some(Arc::new(InformationSchemaKeyColumnUsage::new(
                self.catalog_name.clone(),
                self.catalog_manager.clone(),
            )) as _),
            SCHEMATA => Some(Arc::new(InformationSchemaSchemata::new(
                self.catalog_name.clone(),
                self.catalog_manager.clone(),
            )) as _),
            PARTITIONS => Some(Arc::new(InformationSchemaPartitions::new(
                self.catalog_name.clone(),
                self.catalog_manager.clone(),
            )) as _),
            REGION_PEERS => Some(Arc::new(InformationSchemaRegionPeers::new(
                self.catalog_name.clone(),
                self.catalog_manager.clone(),
            )) as _),
            TABLE_CONSTRAINTS => Some(Arc::new(InformationSchemaTableConstraints::new(
                self.catalog_name.clone(),
                self.catalog_manager.clone(),
            )) as _),
            CLUSTER_INFO => Some(Arc::new(InformationSchemaClusterInfo::new(
                self.catalog_manager.clone(),
            )) as _),
            VIEWS => Some(Arc::new(InformationSchemaViews::new(
                self.catalog_name.clone(),
                self.catalog_manager.clone(),
            )) as _),
            FLOWS => Some(Arc::new(InformationSchemaFlows::new(
                self.catalog_name.clone(),
                self.catalog_manager.clone(),
                self.flow_metadata_manager.clone(),
            )) as _),
            PROCEDURE_INFO => Some(
                Arc::new(procedure_info::InformationSchemaProcedureInfo::new(
                    self.catalog_manager.clone(),
                )) as _,
            ),
            REGION_STATISTICS => Some(Arc::new(
                region_statistics::InformationSchemaRegionStatistics::new(
                    self.catalog_manager.clone(),
                ),
            ) as _),
            PROCESS_LIST => self
                .process_manager
                .as_ref()
                .map(|p| Arc::new(InformationSchemaProcessList::new(p.clone())) as _),
            SSTS_MANIFEST => Some(Arc::new(InformationSchemaSstsManifest::new(
                self.catalog_manager.clone(),
            )) as _),
            SSTS_STORAGE => Some(Arc::new(InformationSchemaSstsStorage::new(
                self.catalog_manager.clone(),
            )) as _),
            SSTS_INDEX_META => Some(Arc::new(InformationSchemaSstsIndexMeta::new(
                self.catalog_manager.clone(),
            )) as _),
            _ => None,
        }
    }
}

impl InformationSchemaProvider {
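    /// Creates a new provider for the given catalog and eagerly builds all of its
    /// `information_schema` tables.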
    pub fn new(
        catalog_name: String,
        catalog_manager: Weak<dyn CatalogManager>,
        flow_metadata_manager: Arc<FlowMetadataManager>,
        process_manager: Option<ProcessManagerRef>,
        kv_backend: KvBackendRef,
    ) -> Self {
        let mut provider = Self {
            catalog_name,
            catalog_manager,
            flow_metadata_manager,
            process_manager,
            tables: HashMap::new(),
            kv_backend,
            extra_table_factories: HashMap::new(),
        };

        provider.build_tables();

        provider
    }

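    /// Registers extra table factories (e.g. supplied by plugins) and rebuilds the table
    /// map so that the newly registered tables become visible.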
    pub(crate) fn with_extra_table_factories(
        mut self,
        factories: HashMap<String, InformationSchemaTableFactoryRef>,
    ) -> Self {
        self.extra_table_factories = factories;
        self.build_tables();
        self
    }

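    /// (Re)builds the table name to table map served by this provider.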
    fn build_tables(&mut self) {
        let mut tables = HashMap::new();

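        // Tables that expose cluster-wide state are only registered under the default catalog.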
        if self.catalog_name == DEFAULT_CATALOG_NAME {
            tables.insert(
                BUILD_INFO.to_string(),
                self.build_table(BUILD_INFO).unwrap(),
            );
            tables.insert(
                REGION_PEERS.to_string(),
                self.build_table(REGION_PEERS).unwrap(),
            );
            tables.insert(
                CLUSTER_INFO.to_string(),
                self.build_table(CLUSTER_INFO).unwrap(),
            );
            tables.insert(
                PROCEDURE_INFO.to_string(),
                self.build_table(PROCEDURE_INFO).unwrap(),
            );
            tables.insert(
                REGION_STATISTICS.to_string(),
                self.build_table(REGION_STATISTICS).unwrap(),
            );
            tables.insert(
                SSTS_MANIFEST.to_string(),
                self.build_table(SSTS_MANIFEST).unwrap(),
            );
            tables.insert(
                SSTS_STORAGE.to_string(),
                self.build_table(SSTS_STORAGE).unwrap(),
            );
            tables.insert(
                SSTS_INDEX_META.to_string(),
                self.build_table(SSTS_INDEX_META).unwrap(),
            );
        }

        tables.insert(TABLES.to_string(), self.build_table(TABLES).unwrap());
        tables.insert(VIEWS.to_string(), self.build_table(VIEWS).unwrap());
        tables.insert(SCHEMATA.to_string(), self.build_table(SCHEMATA).unwrap());
        tables.insert(COLUMNS.to_string(), self.build_table(COLUMNS).unwrap());
        tables.insert(
            KEY_COLUMN_USAGE.to_string(),
            self.build_table(KEY_COLUMN_USAGE).unwrap(),
        );
        tables.insert(
            TABLE_CONSTRAINTS.to_string(),
            self.build_table(TABLE_CONSTRAINTS).unwrap(),
        );
        tables.insert(FLOWS.to_string(), self.build_table(FLOWS).unwrap());
        if let Some(process_list) = self.build_table(PROCESS_LIST) {
            tables.insert(PROCESS_LIST.to_string(), process_list);
        }
        for name in self.extra_table_factories.keys() {
            tables.insert(name.clone(), self.build_table(name).expect(name));
        }
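        // Memory tables are built from static definitions; `expect` flags a missing table as a bug.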
        for name in MEMORY_TABLES.iter() {
            tables.insert((*name).to_string(), self.build_table(name).expect(name));
        }
        self.tables = tables;
    }
}

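/// A table in `information_schema`, described by its id, name, schema, and a way to scan
/// it into a stream of record batches.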
pub trait InformationTable {
    /// Returns the table id.
    fn table_id(&self) -> TableId;

    /// Returns the table name.
    fn table_name(&self) -> &'static str;

    /// Returns the schema of the table.
    fn schema(&self) -> SchemaRef;

    /// Scans the table and returns the rows as a record batch stream.
    fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream>;

    /// Returns the table type; `information_schema` tables are temporary by default.
    fn table_type(&self) -> TableType {
        TableType::Temporary
    }
}

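// Blanket implementation: every `InformationTable` is also a `SystemTable`, delegating
// each method directly.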
impl<T> SystemTable for T
where
    T: InformationTable,
{
    fn table_id(&self) -> TableId {
        InformationTable::table_id(self)
    }

    fn table_name(&self) -> &'static str {
        InformationTable::table_name(self)
    }

    fn schema(&self) -> SchemaRef {
        InformationTable::schema(self)
    }

    fn table_type(&self) -> TableType {
        InformationTable::table_type(self)
    }

    fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
        InformationTable::to_stream(self, request)
    }
}

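/// A shared reference to an `InformationExtension` implementation.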
pub type InformationExtensionRef = Arc<dyn InformationExtension<Error = Error> + Send + Sync>;

#[async_trait::async_trait]
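/// Extension hooks used by the `information_schema` tables to fetch cluster-level
/// information such as nodes, procedures, region and flow statistics, and datanode
/// inspection results.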
pub trait InformationExtension {
    type Error: ErrorExt;

    /// Gets the information of the nodes in the cluster.
    async fn nodes(&self) -> std::result::Result<Vec<NodeInfo>, Self::Error>;

    /// Gets the procedures of the cluster.
    async fn procedures(&self) -> std::result::Result<Vec<(String, ProcedureInfo)>, Self::Error>;

    /// Gets the statistics of regions.
    async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error>;

    /// Gets the statistics of flows, if available.
    async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error>;

    /// Inspects datanode-side state and returns the result as a record batch stream.
    async fn inspect_datanode(
        &self,
        request: DatanodeInspectRequest,
    ) -> std::result::Result<SendableRecordBatchStream, Self::Error>;
}

/// A request for inspecting datanode-side state through the `information_schema` tables.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DatanodeInspectRequest {
    /// Which kind of datanode state to inspect.
    pub kind: DatanodeInspectKind,

    /// The scan request used to filter and project the returned entries.
    pub scan: ScanRequest,
}

/// The kinds of datanode state that can be inspected.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DatanodeInspectKind {
    /// SST entries recorded in region manifests.
    SstManifest,
    /// SST files observed in the storage layer.
    SstStorage,
    /// Puffin index metadata of SST files.
    SstIndexMeta,
}

impl DatanodeInspectRequest {
    /// Builds the logical scan plan for this inspection request.
    pub fn build_plan(self) -> std::result::Result<LogicalPlan, DataFusionError> {
        match self.kind {
            DatanodeInspectKind::SstManifest => ManifestSstEntry::build_plan(self.scan),
            DatanodeInspectKind::SstStorage => StorageSstEntry::build_plan(self.scan),
            DatanodeInspectKind::SstIndexMeta => PuffinIndexMetaEntry::build_plan(self.scan),
        }
    }
}
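
/// An `InformationExtension` implementation that reports an empty cluster: no nodes,
/// procedures, region or flow statistics, and empty inspection results.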
pub struct NoopInformationExtension;

#[async_trait::async_trait]
impl InformationExtension for NoopInformationExtension {
    type Error = Error;

    async fn nodes(&self) -> std::result::Result<Vec<NodeInfo>, Self::Error> {
        Ok(vec![])
    }

    async fn procedures(&self) -> std::result::Result<Vec<(String, ProcedureInfo)>, Self::Error> {
        Ok(vec![])
    }

    async fn region_stats(&self) -> std::result::Result<Vec<RegionStat>, Self::Error> {
        Ok(vec![])
    }

    async fn flow_stats(&self) -> std::result::Result<Option<FlowStat>, Self::Error> {
        Ok(None)
    }

    async fn inspect_datanode(
        &self,
        _request: DatanodeInspectRequest,
    ) -> std::result::Result<SendableRecordBatchStream, Self::Error> {
        Ok(common_recordbatch::RecordBatches::empty().as_stream())
    }
}