use std::sync::Arc;

use api::v1::ddl_request::{Expr as DdlExpr, Expr};
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
    DeleteRequests, DropFlowExpr, InsertIntoPlan, InsertRequests, RowDeleteRequests,
    RowInsertRequests,
};
use async_trait::async_trait;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
use common_base::AffectedRows;
use common_grpc::flight::FlightDecoder;
use common_grpc::FlightData;
use common_query::logical_plan::add_insert_to_logical_plan;
use common_query::Output;
use common_telemetry::tracing::{self};
use query::parser::PromQuery;
use servers::interceptor::{GrpcQueryInterceptor, GrpcQueryInterceptorRef};
use servers::query_handler::grpc::GrpcQueryHandler;
use servers::query_handler::sql::SqlQueryHandler;
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableId;
use table::table_name::TableName;

use crate::error::{
    CatalogSnafu, DataFusionSnafu, Error, InFlightWriteBytesExceededSnafu,
    IncompleteGrpcRequestSnafu, NotSupportedSnafu, PermissionSnafu, PlanStatementSnafu, Result,
    SubstraitDecodeLogicalPlanSnafu, TableNotFoundSnafu, TableOperationSnafu,
};
use crate::instance::{attach_timer, Instance};
use crate::metrics::{
    GRPC_HANDLE_PLAN_ELAPSED, GRPC_HANDLE_PROMQL_ELAPSED, GRPC_HANDLE_SQL_ELAPSED,
};

#[async_trait]
impl GrpcQueryHandler for Instance {
    type Error = Error;

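    /// Routes a gRPC [`Request`] to the matching handler: inserts, deletes,
    /// SQL / PromQL / logical-plan queries, or DDL expressions. The configured
    /// query interceptor and permission checker run first, and a limiter guard
    /// (when configured) rejects requests that would exceed the in-flight write
    /// bytes limit.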
    async fn do_query(&self, request: Request, ctx: QueryContextRef) -> Result<Output> {
        let interceptor_ref = self.plugins.get::<GrpcQueryInterceptorRef<Error>>();
        let interceptor = interceptor_ref.as_ref();
        interceptor.pre_execute(&request, ctx.clone())?;

        self.plugins
            .get::<PermissionCheckerRef>()
            .as_ref()
            .check_permission(ctx.current_user(), PermissionReq::GrpcRequest(&request))
            .context(PermissionSnafu)?;

        // Hold the limiter guard (if a limiter is configured) for the duration of the
        // request; fail fast when the in-flight write bytes limit would be exceeded.
        let _guard = if let Some(limiter) = &self.limiter {
            let result = limiter.limit_request(&request);
            if result.is_none() {
                return InFlightWriteBytesExceededSnafu.fail();
            }
            result
        } else {
            None
        };

        let output = match request {
            Request::Inserts(requests) => self.handle_inserts(requests, ctx.clone()).await?,
            Request::RowInserts(requests) => {
                self.handle_row_inserts(requests, ctx.clone(), false)
                    .await?
            }
            Request::Deletes(requests) => self.handle_deletes(requests, ctx.clone()).await?,
            Request::RowDeletes(requests) => self.handle_row_deletes(requests, ctx.clone()).await?,
            Request::Query(query_request) => {
                let query = query_request.query.context(IncompleteGrpcRequestSnafu {
                    err_msg: "Missing field 'QueryRequest.query'",
                })?;
                match query {
                    Query::Sql(sql) => {
                        let timer = GRPC_HANDLE_SQL_ELAPSED.start_timer();
                        let mut result = SqlQueryHandler::do_query(self, &sql, ctx.clone()).await;
                        ensure!(
                            result.len() == 1,
                            NotSupportedSnafu {
                                feat: "execute multiple statements in SQL query string through GRPC interface"
                            }
                        );
                        let output = result.remove(0)?;
                        attach_timer(output, timer)
                    }
                    Query::LogicalPlan(plan) => {
                        let timer = GRPC_HANDLE_PLAN_ELAPSED.start_timer();

                        let plan_decoder = self
                            .query_engine()
                            .engine_context(ctx.clone())
                            .new_plan_decoder()
                            .context(PlanStatementSnafu)?;

                        let dummy_catalog_list =
                            Arc::new(catalog::table_source::dummy_catalog::DummyCatalogList::new(
                                self.catalog_manager().clone(),
                            ));

                        let logical_plan = plan_decoder
                            .decode(bytes::Bytes::from(plan), dummy_catalog_list, true)
                            .await
                            .context(SubstraitDecodeLogicalPlanSnafu)?;
                        let output =
                            SqlQueryHandler::do_exec_plan(self, logical_plan, ctx.clone()).await?;

                        attach_timer(output, timer)
                    }
                    Query::InsertIntoPlan(insert) => {
                        self.handle_insert_plan(insert, ctx.clone()).await?
                    }
                    Query::PromRangeQuery(promql) => {
                        let timer = GRPC_HANDLE_PROMQL_ELAPSED.start_timer();
                        let prom_query = PromQuery {
                            query: promql.query,
                            start: promql.start,
                            end: promql.end,
                            step: promql.step,
                            lookback: promql.lookback,
                        };
                        let mut result =
                            SqlQueryHandler::do_promql_query(self, &prom_query, ctx.clone()).await;
                        ensure!(
                            result.len() == 1,
                            NotSupportedSnafu {
                                feat: "execute multiple statements in PromQL query string through GRPC interface"
                            }
                        );
                        let output = result.remove(0)?;
                        attach_timer(output, timer)
                    }
                }
            }
            Request::Ddl(request) => {
                let mut expr = request.expr.context(IncompleteGrpcRequestSnafu {
                    err_msg: "'expr' is absent in DDL request",
                })?;

                fill_catalog_and_schema_from_context(&mut expr, &ctx);

                match expr {
                    DdlExpr::CreateTable(mut expr) => {
                        let _ = self
                            .statement_executor
                            .create_table_inner(&mut expr, None, ctx.clone())
                            .await?;
                        Output::new_with_affected_rows(0)
                    }
                    DdlExpr::AlterDatabase(expr) => {
                        let _ = self
                            .statement_executor
                            .alter_database_inner(expr, ctx.clone())
                            .await?;
                        Output::new_with_affected_rows(0)
                    }
                    DdlExpr::AlterTable(expr) => {
                        self.statement_executor
                            .alter_table_inner(expr, ctx.clone())
                            .await?
                    }
                    DdlExpr::CreateDatabase(expr) => {
                        self.statement_executor
                            .create_database(
                                &expr.schema_name,
                                expr.create_if_not_exists,
                                expr.options,
                                ctx.clone(),
                            )
                            .await?
                    }
                    DdlExpr::DropTable(expr) => {
                        let table_name =
                            TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
                        self.statement_executor
                            .drop_table(table_name, expr.drop_if_exists, ctx.clone())
                            .await?
                    }
                    DdlExpr::TruncateTable(expr) => {
                        let table_name =
                            TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
                        self.statement_executor
                            .truncate_table(table_name, ctx.clone())
                            .await?
                    }
                    DdlExpr::CreateFlow(expr) => {
                        self.statement_executor
                            .create_flow_inner(expr, ctx.clone())
                            .await?
                    }
                    DdlExpr::DropFlow(DropFlowExpr {
                        catalog_name,
                        flow_name,
                        drop_if_exists,
                        ..
                    }) => {
                        self.statement_executor
                            .drop_flow(catalog_name, flow_name, drop_if_exists, ctx.clone())
                            .await?
                    }
                    DdlExpr::CreateView(expr) => {
                        let _ = self
                            .statement_executor
                            .create_view_by_expr(expr, ctx.clone())
                            .await?;

                        Output::new_with_affected_rows(0)
                    }
                    DdlExpr::DropView(_) => {
                        todo!("implemented in the following PR")
                    }
                }
            }
        };

        let output = interceptor.post_execute(output, ctx)?;
        Ok(output)
    }

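    /// Handles a bulk insert delivered as Arrow Flight data: resolves (and caches in
    /// `table_id`) the target table's id, then hands the Flight data to the inserter.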
    async fn put_record_batch(
        &self,
        table: &TableName,
        table_id: &mut Option<TableId>,
        decoder: &mut FlightDecoder,
        data: FlightData,
    ) -> Result<AffectedRows> {
        // Look up the table id only once; subsequent calls reuse the cached value.
        let table_id = if let Some(table_id) = table_id {
            *table_id
        } else {
            let table = self
                .catalog_manager()
                .table(
                    &table.catalog_name,
                    &table.schema_name,
                    &table.table_name,
                    None,
                )
                .await
                .context(CatalogSnafu)?
                .with_context(|| TableNotFoundSnafu {
                    table_name: table.to_string(),
                })?;
            let id = table.table_info().table_id();
            *table_id = Some(id);
            id
        };

        self.inserter
            .handle_bulk_insert(table_id, decoder, data)
            .await
            .context(TableOperationSnafu)
    }
}

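/// Fills the catalog and schema fields of a DDL expression from the query context
/// when the incoming request left them empty.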
fn fill_catalog_and_schema_from_context(ddl_expr: &mut DdlExpr, ctx: &QueryContextRef) {
    let catalog = ctx.current_catalog();
    let schema = ctx.current_schema();

    macro_rules! check_and_fill {
        ($expr:ident) => {
            if $expr.catalog_name.is_empty() {
                $expr.catalog_name = catalog.to_string();
            }
            if $expr.schema_name.is_empty() {
                $expr.schema_name = schema.to_string();
            }
        };
    }

    match ddl_expr {
        Expr::CreateDatabase(_) | Expr::AlterDatabase(_) => { /* nothing to fill */ }
        Expr::CreateTable(expr) => {
            check_and_fill!(expr);
        }
        Expr::AlterTable(expr) => {
            check_and_fill!(expr);
        }
        Expr::DropTable(expr) => {
            check_and_fill!(expr);
        }
        Expr::TruncateTable(expr) => {
            check_and_fill!(expr);
        }
        Expr::CreateFlow(expr) => {
            if expr.catalog_name.is_empty() {
                expr.catalog_name = catalog.to_string();
            }
        }
        Expr::DropFlow(expr) => {
            if expr.catalog_name.is_empty() {
                expr.catalog_name = catalog.to_string();
            }
        }
        Expr::CreateView(expr) => {
            check_and_fill!(expr);
        }
        Expr::DropView(expr) => {
            check_and_fill!(expr);
        }
    }
}

impl Instance {
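    /// Executes an `InsertIntoPlan` request: decodes the serialized source plan,
    /// looks up the target table, wraps the plan in an insert against that table's
    /// schema, then analyzes, optimizes, and executes the resulting logical plan.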
    async fn handle_insert_plan(
        &self,
        insert: InsertIntoPlan,
        ctx: QueryContextRef,
    ) -> Result<Output> {
        let timer = GRPC_HANDLE_PLAN_ELAPSED.start_timer();
        let table_name = insert.table_name.context(IncompleteGrpcRequestSnafu {
            err_msg: "'table_name' is absent in InsertIntoPlan",
        })?;

        // Decode the serialized source plan; table references are resolved through
        // the catalog manager via a dummy catalog list.
        let plan_decoder = self
            .query_engine()
            .engine_context(ctx.clone())
            .new_plan_decoder()
            .context(PlanStatementSnafu)?;

        let dummy_catalog_list =
            Arc::new(catalog::table_source::dummy_catalog::DummyCatalogList::new(
                self.catalog_manager().clone(),
            ));

        let logical_plan = plan_decoder
            .decode(
                bytes::Bytes::from(insert.logical_plan),
                dummy_catalog_list,
                false,
            )
            .await
            .context(SubstraitDecodeLogicalPlanSnafu)?;

        // Resolve the target table so the insert can be built against its schema.
        let table = self
            .catalog_manager()
            .table(
                &table_name.catalog_name,
                &table_name.schema_name,
                &table_name.table_name,
                None,
            )
            .await
            .context(CatalogSnafu)?
            .with_context(|| TableNotFoundSnafu {
                table_name: [
                    table_name.catalog_name.clone(),
                    table_name.schema_name.clone(),
                    table_name.table_name.clone(),
                ]
                .join("."),
            })?;

        let table_info = table.table_info();

        let df_schema = Arc::new(
            table_info
                .meta
                .schema
                .arrow_schema()
                .clone()
                .try_into()
                .context(DataFusionSnafu)?,
        );

        let insert_into = add_insert_to_logical_plan(table_name, df_schema, logical_plan)
            .context(SubstraitDecodeLogicalPlanSnafu)?;

        // Run the analyzer and optimizer before handing the plan to the executor.
        let engine_ctx = self.query_engine().engine_context(ctx.clone());
        let state = engine_ctx.state();
        let analyzed_plan = state
            .analyzer()
            .execute_and_check(insert_into, state.config_options(), |_, _| {})
            .context(common_query::error::GeneralDataFusionSnafu)
            .context(SubstraitDecodeLogicalPlanSnafu)?;

        let optimized_plan = state
            .optimize(&analyzed_plan)
            .context(common_query::error::GeneralDataFusionSnafu)
            .context(SubstraitDecodeLogicalPlanSnafu)?;

        let output = SqlQueryHandler::do_exec_plan(self, optimized_plan, ctx.clone()).await?;

        Ok(attach_timer(output, timer))
    }

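    /// Handles column-format gRPC insert requests.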
    #[tracing::instrument(skip_all)]
    pub async fn handle_inserts(
        &self,
        requests: InsertRequests,
        ctx: QueryContextRef,
    ) -> Result<Output> {
        self.inserter
            .handle_column_inserts(requests, ctx, self.statement_executor.as_ref())
            .await
            .context(TableOperationSnafu)
    }

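    /// Handles row-format gRPC insert requests. `accommodate_existing_schema` is
    /// forwarded to the inserter and controls how the requests are reconciled with
    /// an already existing table schema.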
    #[tracing::instrument(skip_all)]
    pub async fn handle_row_inserts(
        &self,
        requests: RowInsertRequests,
        ctx: QueryContextRef,
        accommodate_existing_schema: bool,
    ) -> Result<Output> {
        self.inserter
            .handle_row_inserts(
                requests,
                ctx,
                self.statement_executor.as_ref(),
                accommodate_existing_schema,
            )
            .await
            .context(TableOperationSnafu)
    }

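    /// Handles row inserts coming from the InfluxDB line protocol path, using
    /// "last non-null" insert semantics.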
    #[tracing::instrument(skip_all)]
    pub async fn handle_influx_row_inserts(
        &self,
        requests: RowInsertRequests,
        ctx: QueryContextRef,
    ) -> Result<Output> {
        self.inserter
            .handle_last_non_null_inserts(requests, ctx, self.statement_executor.as_ref(), true)
            .await
            .context(TableOperationSnafu)
    }

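    /// Handles row inserts for the metric engine, routing them into the given
    /// physical table.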
    #[tracing::instrument(skip_all)]
    pub async fn handle_metric_row_inserts(
        &self,
        requests: RowInsertRequests,
        ctx: QueryContextRef,
        physical_table: String,
    ) -> Result<Output> {
        self.inserter
            .handle_metric_row_inserts(requests, ctx, &self.statement_executor, physical_table)
            .await
            .context(TableOperationSnafu)
    }

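    /// Handles column-format gRPC delete requests.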
459
460 #[tracing::instrument(skip_all)]
461 pub async fn handle_deletes(
462 &self,
463 requests: DeleteRequests,
464 ctx: QueryContextRef,
465 ) -> Result<Output> {
466 self.deleter
467 .handle_column_deletes(requests, ctx)
468 .await
469 .context(TableOperationSnafu)
470 }
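    /// Handles row-format gRPC delete requests.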
471
472 #[tracing::instrument(skip_all)]
473 pub async fn handle_row_deletes(
474 &self,
475 requests: RowDeleteRequests,
476 ctx: QueryContextRef,
477 ) -> Result<Output> {
478 self.deleter
479 .handle_row_deletes(requests, ctx)
480 .await
481 .context(TableOperationSnafu)
482 }
483}