1use std::collections::HashMap;
16use std::fmt::Display;
17use std::net::SocketAddr;
18use std::sync::Mutex as StdMutex;
19use std::time::Duration;
20
21use async_trait::async_trait;
22use auth::UserProviderRef;
23use axum::extract::DefaultBodyLimit;
24use axum::http::StatusCode as HttpStatusCode;
25use axum::response::{IntoResponse, Response};
26use axum::serve::ListenerExt;
27use axum::{Router, middleware, routing};
28use common_base::Plugins;
29use common_base::readable_size::ReadableSize;
30use common_recordbatch::RecordBatch;
31use common_telemetry::{debug, error, info};
32use common_time::Timestamp;
33use common_time::timestamp::TimeUnit;
34use datatypes::data_type::DataType;
35use datatypes::schema::SchemaRef;
36use datatypes::value::transform_value_ref_to_json_value;
37use event::{LogState, LogValidatorRef};
38use futures::FutureExt;
39use http::{HeaderValue, Method};
40use prost::DecodeError;
41use serde::{Deserialize, Serialize};
42use serde_json::Value;
43use snafu::{ResultExt, ensure};
44use tokio::sync::Mutex;
45use tokio::sync::oneshot::{self, Sender};
46use tower::ServiceBuilder;
47use tower_http::compression::CompressionLayer;
48use tower_http::cors::{AllowOrigin, Any, CorsLayer};
49use tower_http::decompression::RequestDecompressionLayer;
50use tower_http::trace::TraceLayer;
51
52use self::authorize::AuthState;
53use self::result::table_result::TableResponse;
54use crate::configurator::ConfiguratorRef;
55use crate::elasticsearch;
56use crate::error::{
57 AddressBindSnafu, AlreadyStartedSnafu, Error, InternalIoSnafu, InvalidHeaderValueSnafu, Result,
58 ToJsonSnafu,
59};
60use crate::http::influxdb::{influxdb_health, influxdb_ping, influxdb_write_v1, influxdb_write_v2};
61use crate::http::otlp::OtlpState;
62use crate::http::prom_store::PromStoreState;
63use crate::http::prometheus::{
64 build_info_query, format_query, instant_query, label_values_query, labels_query, parse_query,
65 range_query, series_query,
66};
67use crate::http::result::arrow_result::ArrowResponse;
68use crate::http::result::csv_result::CsvResponse;
69use crate::http::result::error_result::ErrorResponse;
70use crate::http::result::greptime_result_v1::GreptimedbV1Response;
71use crate::http::result::influxdb_result_v1::InfluxdbV1Response;
72use crate::http::result::json_result::JsonResponse;
73use crate::http::result::null_result::NullResponse;
74use crate::interceptor::LogIngestInterceptorRef;
75use crate::metrics::http_metrics_layer;
76use crate::metrics_handler::MetricsHandler;
77use crate::prometheus_handler::PrometheusHandlerRef;
78use crate::query_handler::sql::ServerSqlQueryHandlerRef;
79use crate::query_handler::{
80 InfluxdbLineProtocolHandlerRef, JaegerQueryHandlerRef, LogQueryHandlerRef,
81 OpenTelemetryProtocolHandlerRef, OpentsdbProtocolHandlerRef, PipelineHandlerRef,
82 PromStoreProtocolHandlerRef,
83};
84use crate::server::Server;
85
// HTTP protocol-surface modules; their routers are wired together by
// `HttpServerBuilder` below.
pub mod authorize;
#[cfg(feature = "dashboard")]
mod dashboard;
pub mod dyn_log;
pub mod event;
pub mod extractor;
pub mod handler;
pub mod header;
pub mod influxdb;
pub mod jaeger;
pub mod logs;
pub mod loki;
pub mod mem_prof;
pub mod opentsdb;
pub mod otlp;
pub mod pprof;
pub mod prom_store;
pub mod prometheus;
pub mod result;
mod timeout;

pub(crate) use timeout::DynamicTimeoutLayer;

mod hints;
mod read_preference;
#[cfg(any(test, feature = "testing"))]
pub mod test_helpers;

/// Version segment used when building versioned API paths (`/v1/...`).
pub const HTTP_API_VERSION: &str = "v1";
/// Path prefix shared by all versioned HTTP API endpoints.
pub const HTTP_API_PREFIX: &str = "/v1/";
/// Default maximum request body size (a zero configured limit disables
/// enforcement; see `HttpServer::build`).
const DEFAULT_BODY_LIMIT: ReadableSize = ReadableSize::mb(64);

/// Header carrying GreptimeDB-specific authentication credentials.
pub const AUTHORIZATION_HEADER: &str = "x-greptime-auth";

/// Endpoints reachable without credentials — presumably consulted by the
/// `authorize` middleware; confirm against `authorize::check_http_auth`.
pub static PUBLIC_APIS: [&str; 3] = ["/v1/influxdb/ping", "/v1/influxdb/health", "/v1/health"];
124
/// The HTTP server hosting every HTTP protocol surface.
///
/// Constructed through [`HttpServerBuilder`]; started and stopped via the
/// [`Server`] trait implementation further down this file.
#[derive(Default)]
pub struct HttpServer {
    // Router assembled by the builder; cloned by `make_app` at start time.
    // A std (not tokio) mutex — it is locked only briefly and never held
    // across an `.await`.
    router: StdMutex<Router>,
    // `Some` while the server is running; used to signal graceful shutdown.
    shutdown_tx: Mutex<Option<Sender<()>>>,
    // Passed to the auth middleware; `None` presumably disables auth
    // checks — confirm in `authorize::check_http_auth`.
    user_provider: Option<UserProviderRef>,

    // Plugin registry consulted at startup (e.g. for a `ConfiguratorRef`).
    plugins: Plugins,

    options: HttpOptions,
    // Actual bound address, set once `start` succeeds (the OS may assign
    // an ephemeral port).
    bind_addr: Option<SocketAddr>,
}
138
/// HTTP server configuration, deserializable from the server config file.
/// Missing fields fall back to [`HttpOptions::default`] (`#[serde(default)]`).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default)]
pub struct HttpOptions {
    /// Address to bind, e.g. `127.0.0.1:4000`.
    pub addr: String,

    /// Request timeout; a zero duration disables the timeout layer
    /// (see `HttpServer::build`).
    #[serde(with = "humantime_serde")]
    pub timeout: Duration,

    /// Not read from the config file (`#[serde(skip)]`); set programmatically.
    #[serde(skip)]
    pub disable_dashboard: bool,

    /// Maximum request body size; zero disables the limit.
    pub body_limit: ReadableSize,

    /// How strictly Prometheus remote-write strings are UTF-8 validated.
    pub prom_validation_mode: PromValidationMode,

    /// Exact origins allowed for CORS; empty means any origin is allowed.
    pub cors_allowed_origins: Vec<String>,

    /// Whether the CORS layer is installed at all.
    pub enable_cors: bool,
}
159
/// UTF-8 validation strategy applied when decoding Prometheus string
/// payloads (see [`PromValidationMode::decode_string`]).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum PromValidationMode {
    /// Reject invalid UTF-8 with a decode error.
    Strict,
    /// Replace invalid sequences with the Unicode replacement character.
    Lossy,
    /// Skip validation entirely; the input must already be valid UTF-8.
    Unchecked,
}
170
171impl PromValidationMode {
172 pub fn decode_string(&self, bytes: &[u8]) -> std::result::Result<String, DecodeError> {
174 let result = match self {
175 PromValidationMode::Strict => match String::from_utf8(bytes.to_vec()) {
176 Ok(s) => s,
177 Err(e) => {
178 debug!("Invalid UTF-8 string value: {:?}, error: {:?}", bytes, e);
179 return Err(DecodeError::new("invalid utf-8"));
180 }
181 },
182 PromValidationMode::Lossy => String::from_utf8_lossy(bytes).to_string(),
183 PromValidationMode::Unchecked => unsafe { String::from_utf8_unchecked(bytes.to_vec()) },
184 };
185 Ok(result)
186 }
187}
188
189impl Default for HttpOptions {
190 fn default() -> Self {
191 Self {
192 addr: "127.0.0.1:4000".to_string(),
193 timeout: Duration::from_secs(0),
194 disable_dashboard: false,
195 body_limit: DEFAULT_BODY_LIMIT,
196 cors_allowed_origins: Vec::new(),
197 enable_cors: true,
198 prom_validation_mode: PromValidationMode::Strict,
199 }
200 }
201}
202
/// Column descriptor (name plus data-type name) in an HTTP result schema.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct ColumnSchema {
    name: String,
    data_type: String,
}

impl ColumnSchema {
    /// Creates a column descriptor from a column name and data-type name.
    pub fn new(name: String, data_type: String) -> ColumnSchema {
        ColumnSchema { name, data_type }
    }
}
214
/// Wire-level schema of an HTTP query result: the ordered column list.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct OutputSchema {
    column_schemas: Vec<ColumnSchema>,
}

impl OutputSchema {
    /// Wraps an ordered column list into an `OutputSchema`.
    pub fn new(columns: Vec<ColumnSchema>) -> OutputSchema {
        OutputSchema {
            column_schemas: columns,
        }
    }
}
227
228impl From<SchemaRef> for OutputSchema {
229 fn from(schema: SchemaRef) -> OutputSchema {
230 OutputSchema {
231 column_schemas: schema
232 .column_schemas()
233 .iter()
234 .map(|cs| ColumnSchema {
235 name: cs.name.clone(),
236 data_type: cs.data_type.name(),
237 })
238 .collect(),
239 }
240 }
241}
242
/// Row-major query result payload for HTTP responses.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct HttpRecordsOutput {
    schema: OutputSchema,
    // Rows of JSON values, aligned with `schema`'s column order.
    rows: Vec<Vec<Value>>,
    // Number of rows in `rows`; kept in sync when `process_with_limit`
    // truncates the result.
    #[serde(default)]
    total_rows: usize,

    // Optional per-query metrics; omitted from JSON output when empty.
    #[serde(skip_serializing_if = "HashMap::is_empty")]
    #[serde(default)]
    metrics: HashMap<String, Value>,
}
257
impl HttpRecordsOutput {
    /// Number of data rows currently held.
    pub fn num_rows(&self) -> usize {
        self.rows.len()
    }

    /// Number of columns described by the schema.
    pub fn num_cols(&self) -> usize {
        self.schema.column_schemas.len()
    }

    /// The column schema of this result set.
    pub fn schema(&self) -> &OutputSchema {
        &self.schema
    }

    /// The rows as JSON values, row-major.
    pub fn rows(&self) -> &Vec<Vec<Value>> {
        &self.rows
    }
}
275
276impl HttpRecordsOutput {
277 pub fn try_new(
278 schema: SchemaRef,
279 recordbatches: Vec<RecordBatch>,
280 ) -> std::result::Result<HttpRecordsOutput, Error> {
281 if recordbatches.is_empty() {
282 Ok(HttpRecordsOutput {
283 schema: OutputSchema::from(schema),
284 rows: vec![],
285 total_rows: 0,
286 metrics: Default::default(),
287 })
288 } else {
289 let num_rows = recordbatches.iter().map(|r| r.num_rows()).sum::<usize>();
290 let mut rows = Vec::with_capacity(num_rows);
291 let schemas = schema.column_schemas();
292 let num_cols = schema.column_schemas().len();
293 rows.resize_with(num_rows, || Vec::with_capacity(num_cols));
294
295 let mut finished_row_cursor = 0;
296 for recordbatch in recordbatches {
297 for (col_idx, col) in recordbatch.columns().iter().enumerate() {
298 let schema = &schemas[col_idx];
300 for row_idx in 0..recordbatch.num_rows() {
301 let value = transform_value_ref_to_json_value(col.get_ref(row_idx), schema)
302 .context(ToJsonSnafu)?;
303 rows[row_idx + finished_row_cursor].push(value);
304 }
305 }
306 finished_row_cursor += recordbatch.num_rows();
307 }
308
309 Ok(HttpRecordsOutput {
310 schema: OutputSchema::from(schema),
311 total_rows: rows.len(),
312 rows,
313 metrics: Default::default(),
314 })
315 }
316 }
317}
318
/// One unit of query output: either an affected-row count or a record set.
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum GreptimeQueryOutput {
    AffectedRows(usize),
    Records(HttpRecordsOutput),
}
325
/// Output serialization format requested via the HTTP API.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
pub enum ResponseFormat {
    Arrow,
    /// CSV output; flags select the `csvwithnames` /
    /// `csvwithnamesandtypes` header variants.
    Csv(bool, bool),
    Table,
    #[default]
    GreptimedbV1,
    InfluxdbV1,
    Json,
    Null,
}

impl ResponseFormat {
    /// Parses a format name as accepted by the HTTP API; returns `None`
    /// for unrecognized names.
    pub fn parse(s: &str) -> Option<Self> {
        let format = match s {
            "arrow" => Self::Arrow,
            "csv" => Self::Csv(false, false),
            "csvwithnames" => Self::Csv(true, false),
            "csvwithnamesandtypes" => Self::Csv(true, true),
            "table" => Self::Table,
            "greptimedb_v1" => Self::GreptimedbV1,
            "influxdb_v1" => Self::InfluxdbV1,
            "json" => Self::Json,
            "null" => Self::Null,
            _ => return None,
        };
        Some(format)
    }

    /// Canonical name of the format; every CSV variant reports `"csv"`.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Arrow => "arrow",
            Self::Csv(..) => "csv",
            Self::Table => "table",
            Self::GreptimedbV1 => "greptimedb_v1",
            Self::InfluxdbV1 => "influxdb_v1",
            Self::Json => "json",
            Self::Null => "null",
        }
    }
}
368
/// Timestamp precision selected via a short unit suffix
/// (see [`Epoch::parse`]).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Epoch {
    Nanosecond,
    Microsecond,
    Millisecond,
    Second,
}
376
377impl Epoch {
378 pub fn parse(s: &str) -> Option<Epoch> {
379 match s {
384 "ns" => Some(Epoch::Nanosecond),
385 "u" | "µ" => Some(Epoch::Microsecond),
386 "ms" => Some(Epoch::Millisecond),
387 "s" => Some(Epoch::Second),
388 _ => None, }
390 }
391
392 pub fn convert_timestamp(&self, ts: Timestamp) -> Option<Timestamp> {
393 match self {
394 Epoch::Nanosecond => ts.convert_to(TimeUnit::Nanosecond),
395 Epoch::Microsecond => ts.convert_to(TimeUnit::Microsecond),
396 Epoch::Millisecond => ts.convert_to(TimeUnit::Millisecond),
397 Epoch::Second => ts.convert_to(TimeUnit::Second),
398 }
399 }
400}
401
402impl Display for Epoch {
403 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
404 match self {
405 Epoch::Nanosecond => write!(f, "Epoch::Nanosecond"),
406 Epoch::Microsecond => write!(f, "Epoch::Microsecond"),
407 Epoch::Millisecond => write!(f, "Epoch::Millisecond"),
408 Epoch::Second => write!(f, "Epoch::Second"),
409 }
410 }
411}
412
/// Sum type over every concrete HTTP response format the server can emit.
#[derive(Serialize, Deserialize, Debug)]
pub enum HttpResponse {
    Arrow(ArrowResponse),
    Csv(CsvResponse),
    Table(TableResponse),
    Error(ErrorResponse),
    GreptimedbV1(GreptimedbV1Response),
    InfluxdbV1(InfluxdbV1Response),
    Json(JsonResponse),
    Null(NullResponse),
}
424
impl HttpResponse {
    /// Records the query execution time on the wrapped concrete response,
    /// whichever format it is.
    pub fn with_execution_time(self, execution_time: u64) -> Self {
        match self {
            HttpResponse::Arrow(resp) => resp.with_execution_time(execution_time).into(),
            HttpResponse::Csv(resp) => resp.with_execution_time(execution_time).into(),
            HttpResponse::Table(resp) => resp.with_execution_time(execution_time).into(),
            HttpResponse::GreptimedbV1(resp) => resp.with_execution_time(execution_time).into(),
            HttpResponse::InfluxdbV1(resp) => resp.with_execution_time(execution_time).into(),
            HttpResponse::Json(resp) => resp.with_execution_time(execution_time).into(),
            HttpResponse::Null(resp) => resp.with_execution_time(execution_time).into(),
            HttpResponse::Error(resp) => resp.with_execution_time(execution_time).into(),
        }
    }

    /// Applies a row limit to the formats that support limiting (CSV,
    /// table, GreptimeDB v1, JSON); every other format is returned
    /// unchanged.
    pub fn with_limit(self, limit: usize) -> Self {
        match self {
            HttpResponse::Csv(resp) => resp.with_limit(limit).into(),
            HttpResponse::Table(resp) => resp.with_limit(limit).into(),
            HttpResponse::GreptimedbV1(resp) => resp.with_limit(limit).into(),
            HttpResponse::Json(resp) => resp.with_limit(limit).into(),
            _ => self,
        }
    }
}
449
450pub fn process_with_limit(
451 mut outputs: Vec<GreptimeQueryOutput>,
452 limit: usize,
453) -> Vec<GreptimeQueryOutput> {
454 outputs
455 .drain(..)
456 .map(|data| match data {
457 GreptimeQueryOutput::Records(mut records) => {
458 if records.rows.len() > limit {
459 records.rows.truncate(limit);
460 records.total_rows = limit;
461 }
462 GreptimeQueryOutput::Records(records)
463 }
464 _ => data,
465 })
466 .collect()
467}
468
impl IntoResponse for HttpResponse {
    /// Delegates to the wrapped concrete response's `IntoResponse` impl.
    fn into_response(self) -> Response {
        match self {
            HttpResponse::Arrow(resp) => resp.into_response(),
            HttpResponse::Csv(resp) => resp.into_response(),
            HttpResponse::Table(resp) => resp.into_response(),
            HttpResponse::GreptimedbV1(resp) => resp.into_response(),
            HttpResponse::InfluxdbV1(resp) => resp.into_response(),
            HttpResponse::Json(resp) => resp.into_response(),
            HttpResponse::Null(resp) => resp.into_response(),
            HttpResponse::Error(resp) => resp.into_response(),
        }
    }
}
483
484impl From<ArrowResponse> for HttpResponse {
485 fn from(value: ArrowResponse) -> Self {
486 HttpResponse::Arrow(value)
487 }
488}
489
490impl From<CsvResponse> for HttpResponse {
491 fn from(value: CsvResponse) -> Self {
492 HttpResponse::Csv(value)
493 }
494}
495
496impl From<TableResponse> for HttpResponse {
497 fn from(value: TableResponse) -> Self {
498 HttpResponse::Table(value)
499 }
500}
501
502impl From<ErrorResponse> for HttpResponse {
503 fn from(value: ErrorResponse) -> Self {
504 HttpResponse::Error(value)
505 }
506}
507
508impl From<GreptimedbV1Response> for HttpResponse {
509 fn from(value: GreptimedbV1Response) -> Self {
510 HttpResponse::GreptimedbV1(value)
511 }
512}
513
514impl From<InfluxdbV1Response> for HttpResponse {
515 fn from(value: InfluxdbV1Response) -> Self {
516 HttpResponse::InfluxdbV1(value)
517 }
518}
519
520impl From<JsonResponse> for HttpResponse {
521 fn from(value: JsonResponse) -> Self {
522 HttpResponse::Json(value)
523 }
524}
525
526impl From<NullResponse> for HttpResponse {
527 fn from(value: NullResponse) -> Self {
528 HttpResponse::Null(value)
529 }
530}
531
/// Shared state for the SQL/PromQL query endpoints.
#[derive(Clone)]
pub struct ApiState {
    pub sql_handler: ServerSqlQueryHandlerRef,
}
536
/// State for the `/config` endpoint: the serialized server configuration.
#[derive(Clone)]
pub struct GreptimeOptionsConfigState {
    pub greptime_config_options: String,
}
541
/// Builder assembling an [`HttpServer`] from optional protocol handlers.
#[derive(Default)]
pub struct HttpServerBuilder {
    options: HttpOptions,
    plugins: Plugins,
    user_provider: Option<UserProviderRef>,
    // Router accumulated by the `with_*` methods.
    router: Router,
}
549
550impl HttpServerBuilder {
551 pub fn new(options: HttpOptions) -> Self {
552 Self {
553 options,
554 plugins: Plugins::default(),
555 user_provider: None,
556 router: Router::new(),
557 }
558 }
559
560 pub fn with_sql_handler(self, sql_handler: ServerSqlQueryHandlerRef) -> Self {
561 let sql_router = HttpServer::route_sql(ApiState { sql_handler });
562
563 Self {
564 router: self
565 .router
566 .nest(&format!("/{HTTP_API_VERSION}"), sql_router),
567 ..self
568 }
569 }
570
571 pub fn with_logs_handler(self, logs_handler: LogQueryHandlerRef) -> Self {
572 let logs_router = HttpServer::route_logs(logs_handler);
573
574 Self {
575 router: self
576 .router
577 .nest(&format!("/{HTTP_API_VERSION}"), logs_router),
578 ..self
579 }
580 }
581
582 pub fn with_opentsdb_handler(self, handler: OpentsdbProtocolHandlerRef) -> Self {
583 Self {
584 router: self.router.nest(
585 &format!("/{HTTP_API_VERSION}/opentsdb"),
586 HttpServer::route_opentsdb(handler),
587 ),
588 ..self
589 }
590 }
591
592 pub fn with_influxdb_handler(self, handler: InfluxdbLineProtocolHandlerRef) -> Self {
593 Self {
594 router: self.router.nest(
595 &format!("/{HTTP_API_VERSION}/influxdb"),
596 HttpServer::route_influxdb(handler),
597 ),
598 ..self
599 }
600 }
601
602 pub fn with_prom_handler(
603 self,
604 handler: PromStoreProtocolHandlerRef,
605 pipeline_handler: Option<PipelineHandlerRef>,
606 prom_store_with_metric_engine: bool,
607 prom_validation_mode: PromValidationMode,
608 ) -> Self {
609 let state = PromStoreState {
610 prom_store_handler: handler,
611 pipeline_handler,
612 prom_store_with_metric_engine,
613 prom_validation_mode,
614 };
615
616 Self {
617 router: self.router.nest(
618 &format!("/{HTTP_API_VERSION}/prometheus"),
619 HttpServer::route_prom(state),
620 ),
621 ..self
622 }
623 }
624
625 pub fn with_prometheus_handler(self, handler: PrometheusHandlerRef) -> Self {
626 Self {
627 router: self.router.nest(
628 &format!("/{HTTP_API_VERSION}/prometheus/api/v1"),
629 HttpServer::route_prometheus(handler),
630 ),
631 ..self
632 }
633 }
634
635 pub fn with_otlp_handler(
636 self,
637 handler: OpenTelemetryProtocolHandlerRef,
638 with_metric_engine: bool,
639 ) -> Self {
640 Self {
641 router: self.router.nest(
642 &format!("/{HTTP_API_VERSION}/otlp"),
643 HttpServer::route_otlp(handler, with_metric_engine),
644 ),
645 ..self
646 }
647 }
648
649 pub fn with_user_provider(self, user_provider: UserProviderRef) -> Self {
650 Self {
651 user_provider: Some(user_provider),
652 ..self
653 }
654 }
655
656 pub fn with_metrics_handler(self, handler: MetricsHandler) -> Self {
657 Self {
658 router: self.router.merge(HttpServer::route_metrics(handler)),
659 ..self
660 }
661 }
662
663 pub fn with_log_ingest_handler(
664 self,
665 handler: PipelineHandlerRef,
666 validator: Option<LogValidatorRef>,
667 ingest_interceptor: Option<LogIngestInterceptorRef<Error>>,
668 ) -> Self {
669 let log_state = LogState {
670 log_handler: handler,
671 log_validator: validator,
672 ingest_interceptor,
673 };
674
675 let router = self.router.nest(
676 &format!("/{HTTP_API_VERSION}"),
677 HttpServer::route_pipelines(log_state.clone()),
678 );
679 let router = router.nest(
681 &format!("/{HTTP_API_VERSION}/events"),
682 #[allow(deprecated)]
683 HttpServer::route_log_deprecated(log_state.clone()),
684 );
685
686 let router = router.nest(
687 &format!("/{HTTP_API_VERSION}/loki"),
688 HttpServer::route_loki(log_state.clone()),
689 );
690
691 let router = router.nest(
692 &format!("/{HTTP_API_VERSION}/elasticsearch"),
693 HttpServer::route_elasticsearch(log_state.clone()),
694 );
695
696 let router = router.nest(
697 &format!("/{HTTP_API_VERSION}/elasticsearch/"),
698 Router::new()
699 .route("/", routing::get(elasticsearch::handle_get_version))
700 .with_state(log_state),
701 );
702
703 Self { router, ..self }
704 }
705
706 pub fn with_plugins(self, plugins: Plugins) -> Self {
707 Self { plugins, ..self }
708 }
709
710 pub fn with_greptime_config_options(self, opts: String) -> Self {
711 let config_router = HttpServer::route_config(GreptimeOptionsConfigState {
712 greptime_config_options: opts,
713 });
714
715 Self {
716 router: self.router.merge(config_router),
717 ..self
718 }
719 }
720
721 pub fn with_jaeger_handler(self, handler: JaegerQueryHandlerRef) -> Self {
722 Self {
723 router: self.router.nest(
724 &format!("/{HTTP_API_VERSION}/jaeger"),
725 HttpServer::route_jaeger(handler),
726 ),
727 ..self
728 }
729 }
730
731 pub fn with_extra_router(self, router: Router) -> Self {
732 Self {
733 router: self.router.merge(router),
734 ..self
735 }
736 }
737
738 pub fn build(self) -> HttpServer {
739 HttpServer {
740 options: self.options,
741 user_provider: self.user_provider,
742 shutdown_tx: Mutex::new(None),
743 plugins: self.plugins,
744 router: StdMutex::new(self.router),
745 bind_addr: None,
746 }
747 }
748}
749
impl HttpServer {
    /// Builds the full application router: the builder-configured routes
    /// plus health/ready/status endpoints, the optional dashboard, and the
    /// per-route HTTP metrics layer.
    pub fn make_app(&self) -> Router {
        // Clone the router under the lock so the mutex is not held while
        // the rest of the app is assembled.
        let mut router = {
            let router = self.router.lock().unwrap();
            router.clone()
        };

        // Health endpoints are registered at both the root and the
        // versioned path, and accept GET as well as POST.
        router = router
            .route(
                "/health",
                routing::get(handler::health).post(handler::health),
            )
            .route(
                &format!("/{HTTP_API_VERSION}/health"),
                routing::get(handler::health).post(handler::health),
            )
            .route(
                "/ready",
                routing::get(handler::health).post(handler::health),
            );

        router = router.route("/status", routing::get(handler::status));

        #[cfg(feature = "dashboard")]
        {
            if !self.options.disable_dashboard {
                info!("Enable dashboard service at '/dashboard'");
                // Redirect `/dashboard` to `/dashboard/` (preserving any
                // query string) so relative asset paths resolve correctly.
                router = router.route(
                    "/dashboard",
                    routing::get(|uri: axum::http::uri::Uri| async move {
                        let path = uri.path();
                        let query = uri.query().map(|q| format!("?{}", q)).unwrap_or_default();

                        let new_uri = format!("{}/{}", path, query);
                        axum::response::Redirect::permanent(&new_uri)
                    }),
                );

                // Serve the dashboard's static assets for both the index
                // and any nested asset path.
                router = router
                    .route(
                        "/dashboard/",
                        routing::get(dashboard::static_handler).post(dashboard::static_handler),
                    )
                    .route(
                        "/dashboard/{*x}",
                        routing::get(dashboard::static_handler).post(dashboard::static_handler),
                    );
            }
        }

        // Record HTTP metrics for every route registered above.
        router = router.route_layer(middleware::from_fn(http_metrics_layer));

        router
    }

    /// Wraps `router` in the middleware stack (tracing, optional CORS /
    /// timeout / body limit, auth, hints, read-preference) and mounts the
    /// `/debug` log-level and profiling endpoints.
    ///
    /// # Errors
    /// Returns an error if a configured CORS origin is not a valid header
    /// value.
    pub fn build(&self, router: Router) -> Result<Router> {
        // A zero timeout disables the timeout layer entirely.
        let timeout_layer = if self.options.timeout != Duration::default() {
            Some(ServiceBuilder::new().layer(DynamicTimeoutLayer::new(self.options.timeout)))
        } else {
            info!("HTTP server timeout is disabled");
            None
        };
        // A zero body limit disables request-size enforcement.
        let body_limit_layer = if self.options.body_limit != ReadableSize(0) {
            Some(
                ServiceBuilder::new()
                    .layer(DefaultBodyLimit::max(self.options.body_limit.0 as usize)),
            )
        } else {
            info!("HTTP server body limit is disabled");
            None
        };
        // With no origins configured, any origin is allowed; otherwise each
        // configured origin must parse as a valid header value.
        let cors_layer = if self.options.enable_cors {
            Some(
                CorsLayer::new()
                    .allow_methods([
                        Method::GET,
                        Method::POST,
                        Method::PUT,
                        Method::DELETE,
                        Method::HEAD,
                    ])
                    .allow_origin(if self.options.cors_allowed_origins.is_empty() {
                        AllowOrigin::from(Any)
                    } else {
                        AllowOrigin::from(
                            self.options
                                .cors_allowed_origins
                                .iter()
                                .map(|s| {
                                    HeaderValue::from_str(s.as_str())
                                        .context(InvalidHeaderValueSnafu)
                                })
                                .collect::<Result<Vec<HeaderValue>>>()?,
                        )
                    })
                    .allow_headers(Any),
            )
        } else {
            info!("HTTP server cross-origin is disabled");
            None
        };

        Ok(router
            .layer(
                ServiceBuilder::new()
                    .layer(TraceLayer::new_for_http().on_failure(()))
                    .option_layer(cors_layer)
                    .option_layer(timeout_layer)
                    .option_layer(body_limit_layer)
                    .layer(middleware::from_fn_with_state(
                        AuthState::new(self.user_provider.clone()),
                        authorize::check_http_auth,
                    ))
                    .layer(middleware::from_fn(hints::extract_hints))
                    .layer(middleware::from_fn(
                        read_preference::extract_read_preference,
                    )),
            )
            .nest(
                "/debug",
                Router::new()
                    // handler to change log level dynamically
                    .route("/log_level", routing::post(dyn_log::dyn_log_handler))
                    .nest(
                        "/prof",
                        Router::new()
                            .route("/cpu", routing::post(pprof::pprof_handler))
                            .route("/mem", routing::post(mem_prof::mem_prof_handler))
                            .route(
                                "/mem/activate",
                                routing::post(mem_prof::activate_heap_prof_handler),
                            )
                            .route(
                                "/mem/deactivate",
                                routing::post(mem_prof::deactivate_heap_prof_handler),
                            )
                            .route(
                                "/mem/status",
                                routing::get(mem_prof::heap_prof_status_handler),
                            ),
                    ),
            ))
    }

    /// `/metrics` endpoint exposing server metrics.
    fn route_metrics<S>(metrics_handler: MetricsHandler) -> Router<S> {
        Router::new()
            .route("/metrics", routing::get(handler::metrics))
            .with_state(metrics_handler)
    }

    /// Loki-compatible push endpoint; accepts compressed request bodies.
    fn route_loki<S>(log_state: LogState) -> Router<S> {
        Router::new()
            .route("/api/v1/push", routing::post(loki::loki_ingest))
            .layer(
                ServiceBuilder::new()
                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
            )
            .with_state(log_state)
    }

    /// Elasticsearch-compatible surface: version/license probes, `_bulk`
    /// ingestion, and stubbed management endpoints that answer `200 OK`
    /// with an empty JSON object so clients proceed.
    fn route_elasticsearch<S>(log_state: LogState) -> Router<S> {
        Router::new()
            // HEAD probe used by clients to check availability.
            .route(
                "/",
                routing::head((HttpStatusCode::OK, elasticsearch::elasticsearch_headers())),
            )
            .route("/", routing::get(elasticsearch::handle_get_version))
            .route("/_license", routing::get(elasticsearch::handle_get_license))
            .route("/_bulk", routing::post(elasticsearch::handle_bulk_api))
            .route(
                "/{index}/_bulk",
                routing::post(elasticsearch::handle_bulk_api_with_index),
            )
            // The remaining management endpoints are no-op stubs.
            .route(
                "/_ilm/policy/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/_index_template/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/_ingest/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/_nodes/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/logstash/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/_logstash/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .layer(ServiceBuilder::new().layer(RequestDecompressionLayer::new()))
            .with_state(log_state)
    }

    /// Legacy log-ingestion routes kept for backward compatibility.
    #[deprecated(since = "0.11.0", note = "Use `route_pipelines()` instead.")]
    fn route_log_deprecated<S>(log_state: LogState) -> Router<S> {
        Router::new()
            .route("/logs", routing::post(event::log_ingester))
            .route(
                "/pipelines/{pipeline_name}",
                routing::get(event::query_pipeline),
            )
            .route(
                "/pipelines/{pipeline_name}",
                routing::post(event::add_pipeline),
            )
            .route(
                "/pipelines/{pipeline_name}",
                routing::delete(event::delete_pipeline),
            )
            .route("/pipelines/dryrun", routing::post(event::pipeline_dryrun))
            .layer(
                ServiceBuilder::new()
                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
            )
            .with_state(log_state)
    }

    /// Current log-ingestion and pipeline-management routes; bodies may be
    /// compressed.
    fn route_pipelines<S>(log_state: LogState) -> Router<S> {
        Router::new()
            .route("/ingest", routing::post(event::log_ingester))
            .route(
                "/pipelines/{pipeline_name}",
                routing::get(event::query_pipeline),
            )
            .route(
                "/pipelines/{pipeline_name}/ddl",
                routing::get(event::query_pipeline_ddl),
            )
            .route(
                "/pipelines/{pipeline_name}",
                routing::post(event::add_pipeline),
            )
            .route(
                "/pipelines/{pipeline_name}",
                routing::delete(event::delete_pipeline),
            )
            .route("/pipelines/_dryrun", routing::post(event::pipeline_dryrun))
            .layer(
                ServiceBuilder::new()
                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
            )
            .with_state(log_state)
    }

    /// SQL / PromQL query endpoints, each accepting GET and POST.
    fn route_sql<S>(api_state: ApiState) -> Router<S> {
        Router::new()
            .route("/sql", routing::get(handler::sql).post(handler::sql))
            .route(
                "/sql/parse",
                routing::get(handler::sql_parse).post(handler::sql_parse),
            )
            .route(
                "/sql/format",
                routing::get(handler::sql_format).post(handler::sql_format),
            )
            .route(
                "/promql",
                routing::get(handler::promql).post(handler::promql),
            )
            .with_state(api_state)
    }

    /// Log query endpoint.
    fn route_logs<S>(log_handler: LogQueryHandlerRef) -> Router<S> {
        Router::new()
            .route("/logs", routing::get(logs::logs).post(logs::logs))
            .with_state(log_handler)
    }

    /// Prometheus HTTP query API (instant/range/labels/series/...), with
    /// response compression enabled.
    fn route_prometheus<S>(prometheus_handler: PrometheusHandlerRef) -> Router<S> {
        Router::new()
            .route(
                "/format_query",
                routing::post(format_query).get(format_query),
            )
            .route("/status/buildinfo", routing::get(build_info_query))
            .route("/query", routing::post(instant_query).get(instant_query))
            .route("/query_range", routing::post(range_query).get(range_query))
            .route("/labels", routing::post(labels_query).get(labels_query))
            .route("/series", routing::post(series_query).get(series_query))
            .route("/parse_query", routing::post(parse_query).get(parse_query))
            .route(
                "/label/{label_name}/values",
                routing::get(label_values_query),
            )
            .layer(ServiceBuilder::new().layer(CompressionLayer::new()))
            .with_state(prometheus_handler)
    }

    /// Prometheus remote-storage read/write endpoints.
    fn route_prom<S>(state: PromStoreState) -> Router<S> {
        Router::new()
            .route("/read", routing::post(prom_store::remote_read))
            .route("/write", routing::post(prom_store::remote_write))
            .with_state(state)
    }

    /// InfluxDB line-protocol endpoints (v1 and v2 write, ping, health).
    fn route_influxdb<S>(influxdb_handler: InfluxdbLineProtocolHandlerRef) -> Router<S> {
        Router::new()
            .route("/write", routing::post(influxdb_write_v1))
            .route("/api/v2/write", routing::post(influxdb_write_v2))
            // NOTE: `layer` wraps only the routes registered above it, so
            // ping/health below skip request decompression.
            .layer(
                ServiceBuilder::new()
                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
            )
            .route("/ping", routing::get(influxdb_ping))
            .route("/health", routing::get(influxdb_health))
            .with_state(influxdb_handler)
    }

    /// OpenTSDB put endpoint.
    fn route_opentsdb<S>(opentsdb_handler: OpentsdbProtocolHandlerRef) -> Router<S> {
        Router::new()
            .route("/api/put", routing::post(opentsdb::put))
            .with_state(opentsdb_handler)
    }

    /// OTLP ingestion endpoints (metrics, traces, logs); bodies may be
    /// compressed.
    fn route_otlp<S>(
        otlp_handler: OpenTelemetryProtocolHandlerRef,
        with_metric_engine: bool,
    ) -> Router<S> {
        Router::new()
            .route("/v1/metrics", routing::post(otlp::metrics))
            .route("/v1/traces", routing::post(otlp::traces))
            .route("/v1/logs", routing::post(otlp::logs))
            .layer(
                ServiceBuilder::new()
                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
            )
            .with_state(OtlpState {
                with_metric_engine,
                handler: otlp_handler,
            })
    }

    /// `/config` endpoint serving the serialized server configuration.
    fn route_config<S>(state: GreptimeOptionsConfigState) -> Router<S> {
        Router::new()
            .route("/config", routing::get(handler::config))
            .with_state(state)
    }

    /// Jaeger query API (services, operations, trace lookup).
    fn route_jaeger<S>(handler: JaegerQueryHandlerRef) -> Router<S> {
        Router::new()
            .route("/api/services", routing::get(jaeger::handle_get_services))
            .route(
                "/api/services/{service_name}/operations",
                routing::get(jaeger::handle_get_operations_by_service),
            )
            .route(
                "/api/operations",
                routing::get(jaeger::handle_get_operations),
            )
            .route("/api/traces", routing::get(jaeger::handle_find_traces))
            .route(
                "/api/traces/{trace_id}",
                routing::get(jaeger::handle_get_trace),
            )
            .with_state(handler)
    }
}
1171
/// Server name reported by [`Server::name`].
pub const HTTP_SERVER: &str = "HTTP_SERVER";
1173
#[async_trait]
impl Server for HttpServer {
    /// Signals the running server (if any) to shut down gracefully.
    async fn shutdown(&self) -> Result<()> {
        let mut shutdown_tx = self.shutdown_tx.lock().await;
        // A failed send means the serve task already finished; that is
        // not an error.
        if let Some(tx) = shutdown_tx.take()
            && tx.send(()).is_err()
        {
            info!("Receiver dropped, the HTTP server has already exited");
        }
        info!("Shutdown HTTP server");

        Ok(())
    }

    /// Binds `listening`, builds the app (letting a plugin configurator
    /// adjust it first), and serves it on a background task.
    ///
    /// # Errors
    /// Fails if the server was already started or the address cannot be
    /// bound.
    async fn start(&mut self, listening: SocketAddr) -> Result<()> {
        let (tx, rx) = oneshot::channel();
        let serve = {
            // Holding the shutdown_tx lock doubles as the "already
            // started" guard.
            let mut shutdown_tx = self.shutdown_tx.lock().await;
            ensure!(
                shutdown_tx.is_none(),
                AlreadyStartedSnafu { server: "HTTP" }
            );

            let mut app = self.make_app();
            if let Some(configurator) = self.plugins.get::<ConfiguratorRef>() {
                app = configurator.config_http(app);
            }
            let app = self.build(app)?;
            let listener = tokio::net::TcpListener::bind(listening)
                .await
                .context(AddressBindSnafu { addr: listening })?
                .tap_io(|tcp_stream| {
                    // Best-effort TCP_NODELAY; failure is logged, not fatal.
                    if let Err(e) = tcp_stream.set_nodelay(true) {
                        error!(e; "Failed to set TCP_NODELAY on incoming connection");
                    }
                });
            let serve = axum::serve(listener, app.into_make_service());

            *shutdown_tx = Some(tx);

            serve
        };
        // The OS may have assigned an ephemeral port; report the actual one.
        let listening = serve.local_addr().context(InternalIoSnafu)?;
        info!("HTTP server is bound to {}", listening);

        common_runtime::spawn_global(async move {
            if let Err(e) = serve
                // `rx.map(drop)` completes when `shutdown` sends (or drops) `tx`.
                .with_graceful_shutdown(rx.map(drop))
                .await
                .context(InternalIoSnafu)
            {
                error!(e; "Failed to shutdown http server");
            }
        });

        self.bind_addr = Some(listening);
        Ok(())
    }

    fn name(&self) -> &str {
        HTTP_SERVER
    }

    fn bind_addr(&self) -> Option<SocketAddr> {
        self.bind_addr
    }
}
1257
1258#[cfg(test)]
1259mod test {
1260 use std::future::pending;
1261 use std::io::Cursor;
1262 use std::sync::Arc;
1263
1264 use arrow_ipc::reader::FileReader;
1265 use arrow_schema::DataType;
1266 use axum::handler::Handler;
1267 use axum::http::StatusCode;
1268 use axum::routing::get;
1269 use common_query::Output;
1270 use common_recordbatch::RecordBatches;
1271 use datafusion_expr::LogicalPlan;
1272 use datatypes::prelude::*;
1273 use datatypes::schema::{ColumnSchema, Schema};
1274 use datatypes::vectors::{StringVector, UInt32Vector};
1275 use header::constants::GREPTIME_DB_HEADER_TIMEOUT;
1276 use query::parser::PromQuery;
1277 use query::query_engine::DescribeResult;
1278 use session::context::QueryContextRef;
1279 use sql::statements::statement::Statement;
1280 use tokio::sync::mpsc;
1281 use tokio::time::Instant;
1282
1283 use super::*;
1284 use crate::error::Error;
1285 use crate::http::test_helpers::TestClient;
1286 use crate::query_handler::sql::{ServerSqlQueryHandlerAdapter, SqlQueryHandler};
1287
    /// Stub [`SqlQueryHandler`] used to stand up an `HttpServer` in tests.
    /// The HTTP-layer tests here never execute SQL, so every query entry
    /// point panics with `unimplemented!()`; only schema validation succeeds
    /// so that requests can reach the routing/middleware under test.
    struct DummyInstance {
        // Held only to keep the test's channel open; never written to.
        _tx: mpsc::Sender<(String, Vec<u8>)>,
    }

    #[async_trait]
    impl SqlQueryHandler for DummyInstance {
        type Error = Error;

        async fn do_query(&self, _: &str, _: QueryContextRef) -> Vec<Result<Output>> {
            unimplemented!()
        }

        async fn do_promql_query(
            &self,
            _: &PromQuery,
            _: QueryContextRef,
        ) -> Vec<std::result::Result<Output, Self::Error>> {
            unimplemented!()
        }

        async fn do_exec_plan(
            &self,
            _stmt: Option<Statement>,
            _plan: LogicalPlan,
            _query_ctx: QueryContextRef,
        ) -> std::result::Result<Output, Self::Error> {
            unimplemented!()
        }

        async fn do_describe(
            &self,
            _stmt: sql::statements::statement::Statement,
            _query_ctx: QueryContextRef,
        ) -> Result<Option<DescribeResult>> {
            unimplemented!()
        }

        // Always report the schema as valid so requests are not rejected
        // before they reach the handler chain being exercised.
        async fn is_valid_schema(&self, _catalog: &str, _schema: &str) -> Result<bool> {
            Ok(true)
        }
    }
1329
1330 fn timeout() -> DynamicTimeoutLayer {
1331 DynamicTimeoutLayer::new(Duration::from_millis(10))
1332 }
1333
1334 async fn forever() {
1335 pending().await
1336 }
1337
    /// Builds the test router with default [`HttpOptions`].
    fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router {
        make_test_app_custom(tx, HttpOptions::default())
    }
1341
1342 fn make_test_app_custom(tx: mpsc::Sender<(String, Vec<u8>)>, options: HttpOptions) -> Router {
1343 let instance = Arc::new(DummyInstance { _tx: tx });
1344 let sql_instance = ServerSqlQueryHandlerAdapter::arc(instance.clone());
1345 let server = HttpServerBuilder::new(options)
1346 .with_sql_handler(sql_instance)
1347 .build();
1348 server.build(server.make_app()).unwrap().route(
1349 "/test/timeout",
1350 get(forever.layer(ServiceBuilder::new().layer(timeout()))),
1351 )
1352 }
1353
1354 #[tokio::test]
1355 pub async fn test_cors() {
1356 let (tx, _rx) = mpsc::channel(100);
1358 let app = make_test_app(tx);
1359 let client = TestClient::new(app).await;
1360
1361 let res = client.get("/health").send().await;
1362
1363 assert_eq!(res.status(), StatusCode::OK);
1364 assert_eq!(
1365 res.headers()
1366 .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1367 .expect("expect cors header origin"),
1368 "*"
1369 );
1370
1371 let res = client.get("/v1/health").send().await;
1372
1373 assert_eq!(res.status(), StatusCode::OK);
1374 assert_eq!(
1375 res.headers()
1376 .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1377 .expect("expect cors header origin"),
1378 "*"
1379 );
1380
1381 let res = client
1382 .options("/health")
1383 .header("Access-Control-Request-Headers", "x-greptime-auth")
1384 .header("Access-Control-Request-Method", "DELETE")
1385 .header("Origin", "https://example.com")
1386 .send()
1387 .await;
1388 assert_eq!(res.status(), StatusCode::OK);
1389 assert_eq!(
1390 res.headers()
1391 .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1392 .expect("expect cors header origin"),
1393 "*"
1394 );
1395 assert_eq!(
1396 res.headers()
1397 .get(http::header::ACCESS_CONTROL_ALLOW_HEADERS)
1398 .expect("expect cors header headers"),
1399 "*"
1400 );
1401 assert_eq!(
1402 res.headers()
1403 .get(http::header::ACCESS_CONTROL_ALLOW_METHODS)
1404 .expect("expect cors header methods"),
1405 "GET,POST,PUT,DELETE,HEAD"
1406 );
1407 }
1408
1409 #[tokio::test]
1410 pub async fn test_cors_custom_origins() {
1411 let (tx, _rx) = mpsc::channel(100);
1413 let origin = "https://example.com";
1414
1415 let options = HttpOptions {
1416 cors_allowed_origins: vec![origin.to_string()],
1417 ..Default::default()
1418 };
1419
1420 let app = make_test_app_custom(tx, options);
1421 let client = TestClient::new(app).await;
1422
1423 let res = client.get("/health").header("Origin", origin).send().await;
1424
1425 assert_eq!(res.status(), StatusCode::OK);
1426 assert_eq!(
1427 res.headers()
1428 .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1429 .expect("expect cors header origin"),
1430 origin
1431 );
1432
1433 let res = client
1434 .get("/health")
1435 .header("Origin", "https://notallowed.com")
1436 .send()
1437 .await;
1438
1439 assert_eq!(res.status(), StatusCode::OK);
1440 assert!(
1441 !res.headers()
1442 .contains_key(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1443 );
1444 }
1445
1446 #[tokio::test]
1447 pub async fn test_cors_disabled() {
1448 let (tx, _rx) = mpsc::channel(100);
1450
1451 let options = HttpOptions {
1452 enable_cors: false,
1453 ..Default::default()
1454 };
1455
1456 let app = make_test_app_custom(tx, options);
1457 let client = TestClient::new(app).await;
1458
1459 let res = client.get("/health").send().await;
1460
1461 assert_eq!(res.status(), StatusCode::OK);
1462 assert!(
1463 !res.headers()
1464 .contains_key(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1465 );
1466 }
1467
1468 #[test]
1469 fn test_http_options_default() {
1470 let default = HttpOptions::default();
1471 assert_eq!("127.0.0.1:4000".to_string(), default.addr);
1472 assert_eq!(Duration::from_secs(0), default.timeout)
1473 }
1474
    /// Verifies the dynamic timeout layer on the never-completing
    /// `/test/timeout` route: the 10ms default fires, the per-request
    /// timeout header extends it, and a zero timeout disables it entirely.
    #[tokio::test]
    async fn test_http_server_request_timeout() {
        common_telemetry::init_default_ut_logging();

        let (tx, _rx) = mpsc::channel(100);
        let app = make_test_app(tx);
        let client = TestClient::new(app).await;
        // Default 10ms timeout aborts the request with 408.
        let res = client.get("/test/timeout").send().await;
        assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT);

        // Header raises the timeout to 20ms: still 408, but only after
        // noticeably more than the 10ms default has elapsed.
        let now = Instant::now();
        let res = client
            .get("/test/timeout")
            .header(GREPTIME_DB_HEADER_TIMEOUT, "20ms")
            .send()
            .await;
        assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT);
        let elapsed = now.elapsed();
        assert!(elapsed > Duration::from_millis(15));

        // "0s" disables the timeout: the request is still pending after
        // 15ms, so the surrounding tokio timeout must error out.
        tokio::time::timeout(
            Duration::from_millis(15),
            client
                .get("/test/timeout")
                .header(GREPTIME_DB_HEADER_TIMEOUT, "0s")
                .send(),
        )
        .await
        .unwrap_err();

        // Same check with humantime's rendering of a zero duration.
        tokio::time::timeout(
            Duration::from_millis(15),
            client
                .get("/test/timeout")
                .header(
                    GREPTIME_DB_HEADER_TIMEOUT,
                    humantime::format_duration(Duration::default()).to_string(),
                )
                .send(),
        )
        .await
        .unwrap_err();
    }
1518
1519 #[tokio::test]
1520 async fn test_schema_for_empty_response() {
1521 let column_schemas = vec![
1522 ColumnSchema::new("numbers", ConcreteDataType::uint32_datatype(), false),
1523 ColumnSchema::new("strings", ConcreteDataType::string_datatype(), true),
1524 ];
1525 let schema = Arc::new(Schema::new(column_schemas));
1526
1527 let recordbatches = RecordBatches::try_new(schema.clone(), vec![]).unwrap();
1528 let outputs = vec![Ok(Output::new_with_record_batches(recordbatches))];
1529
1530 let json_resp = GreptimedbV1Response::from_output(outputs).await;
1531 if let HttpResponse::GreptimedbV1(json_resp) = json_resp {
1532 let json_output = &json_resp.output[0];
1533 if let GreptimeQueryOutput::Records(r) = json_output {
1534 assert_eq!(r.num_rows(), 0);
1535 assert_eq!(r.num_cols(), 2);
1536 assert_eq!(r.schema.column_schemas[0].name, "numbers");
1537 assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1538 } else {
1539 panic!("invalid output type");
1540 }
1541 } else {
1542 panic!("invalid format")
1543 }
1544 }
1545
    /// Converts one record batch (4 rows, a non-null u32 column and a
    /// nullable string column) through every supported response format and
    /// checks each format preserves the shape, schema, and null values.
    #[tokio::test]
    async fn test_recordbatches_conversion() {
        let column_schemas = vec![
            ColumnSchema::new("numbers", ConcreteDataType::uint32_datatype(), false),
            ColumnSchema::new("strings", ConcreteDataType::string_datatype(), true),
        ];
        let schema = Arc::new(Schema::new(column_schemas));
        let columns: Vec<VectorRef> = vec![
            Arc::new(UInt32Vector::from_slice(vec![1, 2, 3, 4])),
            Arc::new(StringVector::from(vec![
                None,
                Some("hello"),
                Some("greptime"),
                None,
            ])),
        ];
        let recordbatch = RecordBatch::new(schema.clone(), columns).unwrap();

        for format in [
            ResponseFormat::GreptimedbV1,
            ResponseFormat::InfluxdbV1,
            ResponseFormat::Csv(true, true),
            ResponseFormat::Table,
            ResponseFormat::Arrow,
            ResponseFormat::Json,
            ResponseFormat::Null,
        ] {
            // Rebuild the batches each iteration: `from_output` consumes them.
            let recordbatches =
                RecordBatches::try_new(schema.clone(), vec![recordbatch.clone()]).unwrap();
            let outputs = vec![Ok(Output::new_with_record_batches(recordbatches))];
            let json_resp = match format {
                ResponseFormat::Arrow => ArrowResponse::from_output(outputs, None).await,
                ResponseFormat::Csv(with_names, with_types) => {
                    CsvResponse::from_output(outputs, with_names, with_types).await
                }
                ResponseFormat::Table => TableResponse::from_output(outputs).await,
                ResponseFormat::GreptimedbV1 => GreptimedbV1Response::from_output(outputs).await,
                ResponseFormat::InfluxdbV1 => InfluxdbV1Response::from_output(outputs, None).await,
                ResponseFormat::Json => JsonResponse::from_output(outputs).await,
                ResponseFormat::Null => NullResponse::from_output(outputs).await,
            };

            match json_resp {
                HttpResponse::GreptimedbV1(resp) => {
                    let json_output = &resp.output[0];
                    if let GreptimeQueryOutput::Records(r) = json_output {
                        assert_eq!(r.num_rows(), 4);
                        assert_eq!(r.num_cols(), 2);
                        assert_eq!(r.schema.column_schemas[0].name, "numbers");
                        assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
                        assert_eq!(r.rows[0][0], serde_json::Value::from(1));
                        // SQL NULL must map to JSON null.
                        assert_eq!(r.rows[0][1], serde_json::Value::Null);
                    } else {
                        panic!("invalid output type");
                    }
                }
                HttpResponse::InfluxdbV1(resp) => {
                    let json_output = &resp.results()[0];
                    assert_eq!(json_output.num_rows(), 4);
                    assert_eq!(json_output.num_cols(), 2);
                    assert_eq!(json_output.series[0].columns.clone()[0], "numbers");
                    assert_eq!(
                        json_output.series[0].values[0][0],
                        serde_json::Value::from(1)
                    );
                    assert_eq!(json_output.series[0].values[0][1], serde_json::Value::Null);
                }
                HttpResponse::Csv(resp) => {
                    let output = &resp.output()[0];
                    if let GreptimeQueryOutput::Records(r) = output {
                        assert_eq!(r.num_rows(), 4);
                        assert_eq!(r.num_cols(), 2);
                        assert_eq!(r.schema.column_schemas[0].name, "numbers");
                        assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
                        assert_eq!(r.rows[0][0], serde_json::Value::from(1));
                        assert_eq!(r.rows[0][1], serde_json::Value::Null);
                    } else {
                        panic!("invalid output type");
                    }
                }

                HttpResponse::Table(resp) => {
                    let output = &resp.output()[0];
                    if let GreptimeQueryOutput::Records(r) = output {
                        assert_eq!(r.num_rows(), 4);
                        assert_eq!(r.num_cols(), 2);
                        assert_eq!(r.schema.column_schemas[0].name, "numbers");
                        assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
                        assert_eq!(r.rows[0][0], serde_json::Value::from(1));
                        assert_eq!(r.rows[0][1], serde_json::Value::Null);
                    } else {
                        panic!("invalid output type");
                    }
                }

                HttpResponse::Arrow(resp) => {
                    // Arrow payload is IPC file format: decode and check the
                    // schema plus the single batch.
                    let output = resp.data;
                    let mut reader =
                        FileReader::try_new(Cursor::new(output), None).expect("Arrow reader error");
                    let schema = reader.schema();
                    assert_eq!(schema.fields[0].name(), "numbers");
                    assert_eq!(schema.fields[0].data_type(), &DataType::UInt32);
                    assert_eq!(schema.fields[1].name(), "strings");
                    assert_eq!(schema.fields[1].data_type(), &DataType::Utf8);

                    let rb = reader.next().unwrap().expect("read record batch failed");
                    assert_eq!(rb.num_columns(), 2);
                    assert_eq!(rb.num_rows(), 4);
                }

                HttpResponse::Json(resp) => {
                    let output = &resp.output()[0];
                    if let GreptimeQueryOutput::Records(r) = output {
                        assert_eq!(r.num_rows(), 4);
                        assert_eq!(r.num_cols(), 2);
                        assert_eq!(r.schema.column_schemas[0].name, "numbers");
                        assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
                        assert_eq!(r.rows[0][0], serde_json::Value::from(1));
                        assert_eq!(r.rows[0][1], serde_json::Value::Null);
                    } else {
                        panic!("invalid output type");
                    }
                }

                HttpResponse::Null(resp) => {
                    // Null format discards data but still reports the row count.
                    assert_eq!(resp.rows(), 4);
                }

                HttpResponse::Error(err) => unreachable!("{err:?}"),
            }
        }
    }
1678
1679 #[test]
1680 fn test_response_format_misc() {
1681 assert_eq!(ResponseFormat::default(), ResponseFormat::GreptimedbV1);
1682 assert_eq!(ResponseFormat::parse("arrow"), Some(ResponseFormat::Arrow));
1683 assert_eq!(
1684 ResponseFormat::parse("csv"),
1685 Some(ResponseFormat::Csv(false, false))
1686 );
1687 assert_eq!(
1688 ResponseFormat::parse("csvwithnames"),
1689 Some(ResponseFormat::Csv(true, false))
1690 );
1691 assert_eq!(
1692 ResponseFormat::parse("csvwithnamesandtypes"),
1693 Some(ResponseFormat::Csv(true, true))
1694 );
1695 assert_eq!(ResponseFormat::parse("table"), Some(ResponseFormat::Table));
1696 assert_eq!(
1697 ResponseFormat::parse("greptimedb_v1"),
1698 Some(ResponseFormat::GreptimedbV1)
1699 );
1700 assert_eq!(
1701 ResponseFormat::parse("influxdb_v1"),
1702 Some(ResponseFormat::InfluxdbV1)
1703 );
1704 assert_eq!(ResponseFormat::parse("json"), Some(ResponseFormat::Json));
1705 assert_eq!(ResponseFormat::parse("null"), Some(ResponseFormat::Null));
1706
1707 assert_eq!(ResponseFormat::parse("invalid"), None);
1709 assert_eq!(ResponseFormat::parse(""), None);
1710 assert_eq!(ResponseFormat::parse("CSV"), None); assert_eq!(ResponseFormat::Arrow.as_str(), "arrow");
1714 assert_eq!(ResponseFormat::Csv(false, false).as_str(), "csv");
1715 assert_eq!(ResponseFormat::Csv(true, true).as_str(), "csv");
1716 assert_eq!(ResponseFormat::Table.as_str(), "table");
1717 assert_eq!(ResponseFormat::GreptimedbV1.as_str(), "greptimedb_v1");
1718 assert_eq!(ResponseFormat::InfluxdbV1.as_str(), "influxdb_v1");
1719 assert_eq!(ResponseFormat::Json.as_str(), "json");
1720 assert_eq!(ResponseFormat::Null.as_str(), "null");
1721 assert_eq!(ResponseFormat::default().as_str(), "greptimedb_v1");
1722 }
1723}