1use std::collections::HashMap;
16use std::fmt::Display;
17use std::net::SocketAddr;
18use std::sync::Mutex as StdMutex;
19use std::time::Duration;
20
21use async_trait::async_trait;
22use auth::UserProviderRef;
23use axum::extract::DefaultBodyLimit;
24use axum::http::StatusCode as HttpStatusCode;
25use axum::response::{IntoResponse, Response};
26use axum::serve::ListenerExt;
27use axum::{Router, middleware, routing};
28use common_base::Plugins;
29use common_base::readable_size::ReadableSize;
30use common_recordbatch::RecordBatch;
31use common_telemetry::{debug, error, info};
32use common_time::Timestamp;
33use common_time::timestamp::TimeUnit;
34use datatypes::data_type::DataType;
35use datatypes::schema::SchemaRef;
36use event::{LogState, LogValidatorRef};
37use futures::FutureExt;
38use http::{HeaderValue, Method};
39use prost::DecodeError;
40use serde::{Deserialize, Serialize};
41use serde_json::Value;
42use snafu::{ResultExt, ensure};
43use tokio::sync::Mutex;
44use tokio::sync::oneshot::{self, Sender};
45use tower::ServiceBuilder;
46use tower_http::compression::CompressionLayer;
47use tower_http::cors::{AllowOrigin, Any, CorsLayer};
48use tower_http::decompression::RequestDecompressionLayer;
49use tower_http::trace::TraceLayer;
50
51use self::authorize::AuthState;
52use self::result::table_result::TableResponse;
53use crate::configurator::ConfiguratorRef;
54use crate::elasticsearch;
55use crate::error::{
56 AddressBindSnafu, AlreadyStartedSnafu, Error, InternalIoSnafu, InvalidHeaderValueSnafu, Result,
57};
58use crate::http::influxdb::{influxdb_health, influxdb_ping, influxdb_write_v1, influxdb_write_v2};
59use crate::http::otlp::OtlpState;
60use crate::http::prom_store::PromStoreState;
61use crate::http::prometheus::{
62 build_info_query, format_query, instant_query, label_values_query, labels_query, parse_query,
63 range_query, series_query,
64};
65use crate::http::result::arrow_result::ArrowResponse;
66use crate::http::result::csv_result::CsvResponse;
67use crate::http::result::error_result::ErrorResponse;
68use crate::http::result::greptime_result_v1::GreptimedbV1Response;
69use crate::http::result::influxdb_result_v1::InfluxdbV1Response;
70use crate::http::result::json_result::JsonResponse;
71use crate::http::result::null_result::NullResponse;
72use crate::interceptor::LogIngestInterceptorRef;
73use crate::metrics::http_metrics_layer;
74use crate::metrics_handler::MetricsHandler;
75use crate::prometheus_handler::PrometheusHandlerRef;
76use crate::query_handler::sql::ServerSqlQueryHandlerRef;
77use crate::query_handler::{
78 InfluxdbLineProtocolHandlerRef, JaegerQueryHandlerRef, LogQueryHandlerRef,
79 OpenTelemetryProtocolHandlerRef, OpentsdbProtocolHandlerRef, PipelineHandlerRef,
80 PromStoreProtocolHandlerRef,
81};
82use crate::request_limiter::RequestMemoryLimiter;
83use crate::server::Server;
84
85pub mod authorize;
86#[cfg(feature = "dashboard")]
87mod dashboard;
88pub mod dyn_log;
89pub mod dyn_trace;
90pub mod event;
91pub mod extractor;
92pub mod handler;
93pub mod header;
94pub mod influxdb;
95pub mod jaeger;
96pub mod logs;
97pub mod loki;
98pub mod mem_prof;
99mod memory_limit;
100pub mod opentsdb;
101pub mod otlp;
102pub mod pprof;
103pub mod prom_store;
104pub mod prometheus;
105pub mod result;
106mod timeout;
107pub mod utils;
108
109use result::HttpOutputWriter;
110pub(crate) use timeout::DynamicTimeoutLayer;
111
112mod hints;
113mod read_preference;
114#[cfg(any(test, feature = "testing"))]
115pub mod test_helpers;
116
/// Version segment used when nesting the versioned API routers.
pub const HTTP_API_VERSION: &str = "v1";
/// Path prefix shared by all versioned API endpoints.
pub const HTTP_API_PREFIX: &str = "/v1/";
/// Default request body size limit applied when none is configured.
const DEFAULT_BODY_LIMIT: ReadableSize = ReadableSize::mb(64);

/// Header carrying GreptimeDB-specific authentication credentials.
pub const AUTHORIZATION_HEADER: &str = "x-greptime-auth";

/// Endpoints listed as public — presumably exempted from auth checks in the
/// `authorize` middleware (confirm against `authorize::check_http_auth`).
pub static PUBLIC_APIS: [&str; 3] = ["/v1/influxdb/ping", "/v1/influxdb/health", "/v1/health"];
127
/// The HTTP frontend server. Assembled via [`HttpServerBuilder`]; serves the
/// routes registered there plus built-in health/status/debug endpoints.
#[derive(Default)]
pub struct HttpServer {
    // Routes registered through the builder. Guarded by a std mutex because
    // the synchronous `make_app` clones the router out of it.
    router: StdMutex<Router>,
    // `Some` while the server is running; taken and signalled on shutdown.
    shutdown_tx: Mutex<Option<Sender<()>>>,
    user_provider: Option<UserProviderRef>,
    memory_limiter: RequestMemoryLimiter,

    plugins: Plugins,

    options: HttpOptions,
    // Actual bound address, set after `start` succeeds (useful with port 0).
    bind_addr: Option<SocketAddr>,
}
142
/// User-facing configuration of the HTTP server. Every field falls back to
/// the `Default` impl when absent from the deserialized config.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default)]
pub struct HttpOptions {
    /// Listen address, e.g. "127.0.0.1:4000".
    pub addr: String,

    /// Per-request timeout; a zero duration disables the timeout layer.
    #[serde(with = "humantime_serde")]
    pub timeout: Duration,

    /// Not read from config files; set programmatically.
    #[serde(skip)]
    pub disable_dashboard: bool,

    /// Maximum size of a single request body; zero disables the limit.
    pub body_limit: ReadableSize,

    /// Budget handed to `RequestMemoryLimiter` for all in-flight bodies.
    pub max_total_body_memory: ReadableSize,

    /// UTF-8 validation strategy for decoded Prometheus string values.
    pub prom_validation_mode: PromValidationMode,

    /// Explicit CORS origin allow-list; empty means any origin is allowed.
    pub cors_allowed_origins: Vec<String>,

    /// Whether the CORS layer is installed at all.
    pub enable_cors: bool,
}
166
/// UTF-8 validation strategy applied when decoding string values from
/// Prometheus remote-write payloads (see [`PromValidationMode::decode_string`]).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum PromValidationMode {
    /// Reject invalid UTF-8 with a decode error.
    Strict,
    /// Replace invalid sequences using lossy conversion.
    Lossy,
    /// Skip validation entirely; the input must already be valid UTF-8.
    Unchecked,
}
177
178impl PromValidationMode {
179 pub fn decode_string(&self, bytes: &[u8]) -> std::result::Result<String, DecodeError> {
181 let result = match self {
182 PromValidationMode::Strict => match String::from_utf8(bytes.to_vec()) {
183 Ok(s) => s,
184 Err(e) => {
185 debug!("Invalid UTF-8 string value: {:?}, error: {:?}", bytes, e);
186 return Err(DecodeError::new("invalid utf-8"));
187 }
188 },
189 PromValidationMode::Lossy => String::from_utf8_lossy(bytes).to_string(),
190 PromValidationMode::Unchecked => unsafe { String::from_utf8_unchecked(bytes.to_vec()) },
191 };
192 Ok(result)
193 }
194}
195
196impl Default for HttpOptions {
197 fn default() -> Self {
198 Self {
199 addr: "127.0.0.1:4000".to_string(),
200 timeout: Duration::from_secs(0),
201 disable_dashboard: false,
202 body_limit: DEFAULT_BODY_LIMIT,
203 max_total_body_memory: ReadableSize(0),
204 cors_allowed_origins: Vec::new(),
205 enable_cors: true,
206 prom_validation_mode: PromValidationMode::Strict,
207 }
208 }
209}
210
/// Name/type pair describing one column of a query result.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct ColumnSchema {
    name: String,
    // Textual name of the column's data type.
    data_type: String,
}
216
217impl ColumnSchema {
218 pub fn new(name: String, data_type: String) -> ColumnSchema {
219 ColumnSchema { name, data_type }
220 }
221}
222
/// Flat list of column schemas describing a result set.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct OutputSchema {
    column_schemas: Vec<ColumnSchema>,
}
227
228impl OutputSchema {
229 pub fn new(columns: Vec<ColumnSchema>) -> OutputSchema {
230 OutputSchema {
231 column_schemas: columns,
232 }
233 }
234}
235
236impl From<SchemaRef> for OutputSchema {
237 fn from(schema: SchemaRef) -> OutputSchema {
238 OutputSchema {
239 column_schemas: schema
240 .column_schemas()
241 .iter()
242 .map(|cs| ColumnSchema {
243 name: cs.name.clone(),
244 data_type: cs.data_type.name(),
245 })
246 .collect(),
247 }
248 }
249}
250
/// JSON-serializable record payload: schema plus row-major row data.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct HttpRecordsOutput {
    schema: OutputSchema,
    rows: Vec<Vec<Value>>,
    // Total row count; constructors keep it in sync with `rows`, and
    // `process_with_limit` resets it when truncating.
    #[serde(default)]
    total_rows: usize,

    // Per-query metrics; omitted from the JSON output when empty.
    #[serde(skip_serializing_if = "HashMap::is_empty")]
    #[serde(default)]
    metrics: HashMap<String, Value>,
}
265
266impl HttpRecordsOutput {
267 pub fn num_rows(&self) -> usize {
268 self.rows.len()
269 }
270
271 pub fn num_cols(&self) -> usize {
272 self.schema.column_schemas.len()
273 }
274
275 pub fn schema(&self) -> &OutputSchema {
276 &self.schema
277 }
278
279 pub fn rows(&self) -> &Vec<Vec<Value>> {
280 &self.rows
281 }
282}
283
284impl HttpRecordsOutput {
285 pub fn try_new(
286 schema: SchemaRef,
287 recordbatches: Vec<RecordBatch>,
288 ) -> std::result::Result<HttpRecordsOutput, Error> {
289 if recordbatches.is_empty() {
290 Ok(HttpRecordsOutput {
291 schema: OutputSchema::from(schema),
292 rows: vec![],
293 total_rows: 0,
294 metrics: Default::default(),
295 })
296 } else {
297 let num_rows = recordbatches.iter().map(|r| r.num_rows()).sum::<usize>();
298 let mut rows = Vec::with_capacity(num_rows);
299
300 for recordbatch in recordbatches {
301 let mut writer = HttpOutputWriter::new(schema.num_columns(), None);
302 writer.write(recordbatch, &mut rows)?;
303 }
304
305 Ok(HttpRecordsOutput {
306 schema: OutputSchema::from(schema),
307 total_rows: rows.len(),
308 rows,
309 metrics: Default::default(),
310 })
311 }
312 }
313}
314
/// One result item of a query: either an affected-row count or a set of
/// records.
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum GreptimeQueryOutput {
    AffectedRows(usize),
    Records(HttpRecordsOutput),
}
321
/// Output format requested for query results over HTTP.
///
/// `Csv(with_names, with_types)` carries two flags: whether to emit a header
/// row of column names, and whether to additionally emit a row of column
/// types.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
pub enum ResponseFormat {
    Arrow,
    Csv(bool, bool),
    Table,
    #[default]
    GreptimedbV1,
    InfluxdbV1,
    Json,
    Null,
}

impl ResponseFormat {
    /// Parses a format name as given in a request; `None` for unknown names.
    pub fn parse(s: &str) -> Option<Self> {
        let format = match s {
            "arrow" => Self::Arrow,
            "csv" => Self::Csv(false, false),
            "csvwithnames" => Self::Csv(true, false),
            "csvwithnamesandtypes" => Self::Csv(true, true),
            "table" => Self::Table,
            "greptimedb_v1" => Self::GreptimedbV1,
            "influxdb_v1" => Self::InfluxdbV1,
            "json" => Self::Json,
            "null" => Self::Null,
            _ => return None,
        };
        Some(format)
    }

    /// Canonical name of the format. All CSV variants report `"csv"`.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Arrow => "arrow",
            Self::Csv(..) => "csv",
            Self::Table => "table",
            Self::GreptimedbV1 => "greptimedb_v1",
            Self::InfluxdbV1 => "influxdb_v1",
            Self::Json => "json",
            Self::Null => "null",
        }
    }
}
364
/// Timestamp precision selector, parsed from an InfluxDB-style `epoch`
/// shorthand (see [`Epoch::parse`]).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Epoch {
    Nanosecond,
    Microsecond,
    Millisecond,
    Second,
}
372
373impl Epoch {
374 pub fn parse(s: &str) -> Option<Epoch> {
375 match s {
380 "ns" => Some(Epoch::Nanosecond),
381 "u" | "µ" => Some(Epoch::Microsecond),
382 "ms" => Some(Epoch::Millisecond),
383 "s" => Some(Epoch::Second),
384 _ => None, }
386 }
387
388 pub fn convert_timestamp(&self, ts: Timestamp) -> Option<Timestamp> {
389 match self {
390 Epoch::Nanosecond => ts.convert_to(TimeUnit::Nanosecond),
391 Epoch::Microsecond => ts.convert_to(TimeUnit::Microsecond),
392 Epoch::Millisecond => ts.convert_to(TimeUnit::Millisecond),
393 Epoch::Second => ts.convert_to(TimeUnit::Second),
394 }
395 }
396}
397
398impl Display for Epoch {
399 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
400 match self {
401 Epoch::Nanosecond => write!(f, "Epoch::Nanosecond"),
402 Epoch::Microsecond => write!(f, "Epoch::Microsecond"),
403 Epoch::Millisecond => write!(f, "Epoch::Millisecond"),
404 Epoch::Second => write!(f, "Epoch::Second"),
405 }
406 }
407}
408
/// Tagged union over the concrete response types of every supported output
/// format, plus the error response.
#[derive(Serialize, Deserialize, Debug)]
pub enum HttpResponse {
    Arrow(ArrowResponse),
    Csv(CsvResponse),
    Table(TableResponse),
    Error(ErrorResponse),
    GreptimedbV1(GreptimedbV1Response),
    InfluxdbV1(InfluxdbV1Response),
    Json(JsonResponse),
    Null(NullResponse),
}
420
421impl HttpResponse {
422 pub fn with_execution_time(self, execution_time: u64) -> Self {
423 match self {
424 HttpResponse::Arrow(resp) => resp.with_execution_time(execution_time).into(),
425 HttpResponse::Csv(resp) => resp.with_execution_time(execution_time).into(),
426 HttpResponse::Table(resp) => resp.with_execution_time(execution_time).into(),
427 HttpResponse::GreptimedbV1(resp) => resp.with_execution_time(execution_time).into(),
428 HttpResponse::InfluxdbV1(resp) => resp.with_execution_time(execution_time).into(),
429 HttpResponse::Json(resp) => resp.with_execution_time(execution_time).into(),
430 HttpResponse::Null(resp) => resp.with_execution_time(execution_time).into(),
431 HttpResponse::Error(resp) => resp.with_execution_time(execution_time).into(),
432 }
433 }
434
435 pub fn with_limit(self, limit: usize) -> Self {
436 match self {
437 HttpResponse::Csv(resp) => resp.with_limit(limit).into(),
438 HttpResponse::Table(resp) => resp.with_limit(limit).into(),
439 HttpResponse::GreptimedbV1(resp) => resp.with_limit(limit).into(),
440 HttpResponse::Json(resp) => resp.with_limit(limit).into(),
441 _ => self,
442 }
443 }
444}
445
446pub fn process_with_limit(
447 mut outputs: Vec<GreptimeQueryOutput>,
448 limit: usize,
449) -> Vec<GreptimeQueryOutput> {
450 outputs
451 .drain(..)
452 .map(|data| match data {
453 GreptimeQueryOutput::Records(mut records) => {
454 if records.rows.len() > limit {
455 records.rows.truncate(limit);
456 records.total_rows = limit;
457 }
458 GreptimeQueryOutput::Records(records)
459 }
460 _ => data,
461 })
462 .collect()
463}
464
465impl IntoResponse for HttpResponse {
466 fn into_response(self) -> Response {
467 match self {
468 HttpResponse::Arrow(resp) => resp.into_response(),
469 HttpResponse::Csv(resp) => resp.into_response(),
470 HttpResponse::Table(resp) => resp.into_response(),
471 HttpResponse::GreptimedbV1(resp) => resp.into_response(),
472 HttpResponse::InfluxdbV1(resp) => resp.into_response(),
473 HttpResponse::Json(resp) => resp.into_response(),
474 HttpResponse::Null(resp) => resp.into_response(),
475 HttpResponse::Error(resp) => resp.into_response(),
476 }
477 }
478}
479
480impl From<ArrowResponse> for HttpResponse {
481 fn from(value: ArrowResponse) -> Self {
482 HttpResponse::Arrow(value)
483 }
484}
485
486impl From<CsvResponse> for HttpResponse {
487 fn from(value: CsvResponse) -> Self {
488 HttpResponse::Csv(value)
489 }
490}
491
492impl From<TableResponse> for HttpResponse {
493 fn from(value: TableResponse) -> Self {
494 HttpResponse::Table(value)
495 }
496}
497
498impl From<ErrorResponse> for HttpResponse {
499 fn from(value: ErrorResponse) -> Self {
500 HttpResponse::Error(value)
501 }
502}
503
504impl From<GreptimedbV1Response> for HttpResponse {
505 fn from(value: GreptimedbV1Response) -> Self {
506 HttpResponse::GreptimedbV1(value)
507 }
508}
509
510impl From<InfluxdbV1Response> for HttpResponse {
511 fn from(value: InfluxdbV1Response) -> Self {
512 HttpResponse::InfluxdbV1(value)
513 }
514}
515
516impl From<JsonResponse> for HttpResponse {
517 fn from(value: JsonResponse) -> Self {
518 HttpResponse::Json(value)
519 }
520}
521
522impl From<NullResponse> for HttpResponse {
523 fn from(value: NullResponse) -> Self {
524 HttpResponse::Null(value)
525 }
526}
527
/// Shared state for the SQL/PromQL query routes (`route_sql`).
#[derive(Clone)]
pub struct ApiState {
    pub sql_handler: ServerSqlQueryHandlerRef,
}
532
/// Shared state for the `/config` route: the server configuration rendered
/// as a string.
#[derive(Clone)]
pub struct GreptimeOptionsConfigState {
    pub greptime_config_options: String,
}
537
/// Builder that accumulates protocol handlers and produces an [`HttpServer`].
#[derive(Default)]
pub struct HttpServerBuilder {
    options: HttpOptions,
    plugins: Plugins,
    user_provider: Option<UserProviderRef>,
    // Routes registered so far through the `with_*` methods.
    router: Router,
}
545
impl HttpServerBuilder {
    /// Creates a builder for the given options with an empty router, default
    /// plugins and no user provider.
    pub fn new(options: HttpOptions) -> Self {
        Self {
            options,
            plugins: Plugins::default(),
            user_provider: None,
            router: Router::new(),
        }
    }

    /// Mounts the SQL/PromQL query API under `/v1`.
    pub fn with_sql_handler(self, sql_handler: ServerSqlQueryHandlerRef) -> Self {
        let sql_router = HttpServer::route_sql(ApiState { sql_handler });

        Self {
            router: self
                .router
                .nest(&format!("/{HTTP_API_VERSION}"), sql_router),
            ..self
        }
    }

    /// Mounts the log query API under `/v1`.
    pub fn with_logs_handler(self, logs_handler: LogQueryHandlerRef) -> Self {
        let logs_router = HttpServer::route_logs(logs_handler);

        Self {
            router: self
                .router
                .nest(&format!("/{HTTP_API_VERSION}"), logs_router),
            ..self
        }
    }

    /// Mounts the OpenTSDB write API under `/v1/opentsdb`.
    pub fn with_opentsdb_handler(self, handler: OpentsdbProtocolHandlerRef) -> Self {
        Self {
            router: self.router.nest(
                &format!("/{HTTP_API_VERSION}/opentsdb"),
                HttpServer::route_opentsdb(handler),
            ),
            ..self
        }
    }

    /// Mounts the InfluxDB line-protocol API under `/v1/influxdb`.
    pub fn with_influxdb_handler(self, handler: InfluxdbLineProtocolHandlerRef) -> Self {
        Self {
            router: self.router.nest(
                &format!("/{HTTP_API_VERSION}/influxdb"),
                HttpServer::route_influxdb(handler),
            ),
            ..self
        }
    }

    /// Mounts the Prometheus remote read/write API under `/v1/prometheus`.
    pub fn with_prom_handler(
        self,
        handler: PromStoreProtocolHandlerRef,
        pipeline_handler: Option<PipelineHandlerRef>,
        prom_store_with_metric_engine: bool,
        prom_validation_mode: PromValidationMode,
    ) -> Self {
        let state = PromStoreState {
            prom_store_handler: handler,
            pipeline_handler,
            prom_store_with_metric_engine,
            prom_validation_mode,
        };

        Self {
            router: self.router.nest(
                &format!("/{HTTP_API_VERSION}/prometheus"),
                HttpServer::route_prom(state),
            ),
            ..self
        }
    }

    /// Mounts the Prometheus HTTP query API under `/v1/prometheus/api/v1`.
    pub fn with_prometheus_handler(self, handler: PrometheusHandlerRef) -> Self {
        Self {
            router: self.router.nest(
                &format!("/{HTTP_API_VERSION}/prometheus/api/v1"),
                HttpServer::route_prometheus(handler),
            ),
            ..self
        }
    }

    /// Mounts the OTLP/HTTP ingestion API under `/v1/otlp`.
    pub fn with_otlp_handler(
        self,
        handler: OpenTelemetryProtocolHandlerRef,
        with_metric_engine: bool,
    ) -> Self {
        Self {
            router: self.router.nest(
                &format!("/{HTTP_API_VERSION}/otlp"),
                HttpServer::route_otlp(handler, with_metric_engine),
            ),
            ..self
        }
    }

    /// Enables authentication via the given user provider.
    pub fn with_user_provider(self, user_provider: UserProviderRef) -> Self {
        Self {
            user_provider: Some(user_provider),
            ..self
        }
    }

    /// Merges the `/metrics` scrape endpoint into the router.
    pub fn with_metrics_handler(self, handler: MetricsHandler) -> Self {
        Self {
            router: self.router.merge(HttpServer::route_metrics(handler)),
            ..self
        }
    }

    /// Mounts all log-ingestion endpoints: pipelines (`/v1`), the deprecated
    /// `/v1/events` tree, Loki push (`/v1/loki`) and the
    /// Elasticsearch-compatible API (`/v1/elasticsearch`).
    pub fn with_log_ingest_handler(
        self,
        handler: PipelineHandlerRef,
        validator: Option<LogValidatorRef>,
        ingest_interceptor: Option<LogIngestInterceptorRef<Error>>,
    ) -> Self {
        let log_state = LogState {
            log_handler: handler,
            log_validator: validator,
            ingest_interceptor,
        };

        let router = self.router.nest(
            &format!("/{HTTP_API_VERSION}"),
            HttpServer::route_pipelines(log_state.clone()),
        );
        // Keep the deprecated `/events` routes for backwards compatibility.
        let router = router.nest(
            &format!("/{HTTP_API_VERSION}/events"),
            #[allow(deprecated)]
            HttpServer::route_log_deprecated(log_state.clone()),
        );

        let router = router.nest(
            &format!("/{HTTP_API_VERSION}/elasticsearch"),
            HttpServer::route_elasticsearch(log_state.clone()),
        );

        let router = router.nest(
            &format!("/{HTTP_API_VERSION}/loki"),
            HttpServer::route_loki(log_state.clone()),
        );

        // Separate mount for the trailing-slash form of the version probe.
        let router = router.nest(
            &format!("/{HTTP_API_VERSION}/elasticsearch/"),
            Router::new()
                .route("/", routing::get(elasticsearch::handle_get_version))
                .with_state(log_state),
        );

        Self { router, ..self }
    }

    /// Installs the plugin registry consulted at startup (e.g. for a
    /// `ConfiguratorRef`; see `HttpServer::start`).
    pub fn with_plugins(self, plugins: Plugins) -> Self {
        Self { plugins, ..self }
    }

    /// Exposes the serialized server configuration under `/config`.
    pub fn with_greptime_config_options(self, opts: String) -> Self {
        let config_router = HttpServer::route_config(GreptimeOptionsConfigState {
            greptime_config_options: opts,
        });

        Self {
            router: self.router.merge(config_router),
            ..self
        }
    }

    /// Mounts the Jaeger query-compatible API under `/v1/jaeger`.
    pub fn with_jaeger_handler(self, handler: JaegerQueryHandlerRef) -> Self {
        Self {
            router: self.router.nest(
                &format!("/{HTTP_API_VERSION}/jaeger"),
                HttpServer::route_jaeger(handler),
            ),
            ..self
        }
    }

    /// Merges an arbitrary extra router supplied by the embedder.
    pub fn with_extra_router(self, router: Router) -> Self {
        Self {
            router: self.router.merge(router),
            ..self
        }
    }

    /// Consumes the builder and produces the [`HttpServer`].
    pub fn build(self) -> HttpServer {
        let memory_limiter =
            RequestMemoryLimiter::new(self.options.max_total_body_memory.as_bytes() as usize);
        HttpServer {
            options: self.options,
            user_provider: self.user_provider,
            shutdown_tx: Mutex::new(None),
            plugins: self.plugins,
            router: StdMutex::new(self.router),
            bind_addr: None,
            memory_limiter,
        }
    }
}
748
749impl HttpServer {
    /// Builds the user-facing router: everything registered through the
    /// builder plus the health/ready/status endpoints and, when the
    /// `dashboard` feature is enabled and not disabled by options, the
    /// dashboard routes. Finishes by attaching the HTTP metrics layer.
    pub fn make_app(&self) -> Router {
        // Clone the router out of the mutex so the guard is released before
        // further routes are added.
        let mut router = {
            let router = self.router.lock().unwrap();
            router.clone()
        };

        router = router
            .route(
                "/health",
                routing::get(handler::health).post(handler::health),
            )
            .route(
                &format!("/{HTTP_API_VERSION}/health"),
                routing::get(handler::health).post(handler::health),
            )
            .route(
                "/ready",
                routing::get(handler::health).post(handler::health),
            );

        router = router.route("/status", routing::get(handler::status));

        #[cfg(feature = "dashboard")]
        {
            if !self.options.disable_dashboard {
                info!("Enable dashboard service at '/dashboard'");
                // Redirect bare `/dashboard` to `/dashboard/`, preserving any
                // query string.
                router = router.route(
                    "/dashboard",
                    routing::get(|uri: axum::http::uri::Uri| async move {
                        let path = uri.path();
                        let query = uri.query().map(|q| format!("?{}", q)).unwrap_or_default();

                        let new_uri = format!("{}/{}", path, query);
                        axum::response::Redirect::permanent(&new_uri)
                    }),
                );

                // `{*x}` catches every dashboard asset path.
                router = router
                    .route(
                        "/dashboard/",
                        routing::get(dashboard::static_handler).post(dashboard::static_handler),
                    )
                    .route(
                        "/dashboard/{*x}",
                        routing::get(dashboard::static_handler).post(dashboard::static_handler),
                    );
            }
        }

        // Record per-route HTTP metrics for all routes registered above.
        router = router.route_layer(middleware::from_fn(http_metrics_layer));

        router
    }
809
    /// Wraps the app router with the server-wide middleware stack (tracing,
    /// CORS, timeout, body limit, memory limiting, auth, hints and read
    /// preference) and mounts the `/debug` diagnostics tree.
    ///
    /// # Errors
    /// Fails if a configured CORS origin is not a valid header value.
    pub fn build(&self, router: Router) -> Result<Router> {
        // A zero duration disables the timeout middleware entirely.
        let timeout_layer = if self.options.timeout != Duration::default() {
            Some(ServiceBuilder::new().layer(DynamicTimeoutLayer::new(self.options.timeout)))
        } else {
            info!("HTTP server timeout is disabled");
            None
        };
        // A zero size disables the request body size limit.
        let body_limit_layer = if self.options.body_limit != ReadableSize(0) {
            Some(
                ServiceBuilder::new()
                    .layer(DefaultBodyLimit::max(self.options.body_limit.0 as usize)),
            )
        } else {
            info!("HTTP server body limit is disabled");
            None
        };
        let cors_layer = if self.options.enable_cors {
            Some(
                CorsLayer::new()
                    .allow_methods([
                        Method::GET,
                        Method::POST,
                        Method::PUT,
                        Method::DELETE,
                        Method::HEAD,
                    ])
                    // An empty allow-list means "any origin".
                    .allow_origin(if self.options.cors_allowed_origins.is_empty() {
                        AllowOrigin::from(Any)
                    } else {
                        AllowOrigin::from(
                            self.options
                                .cors_allowed_origins
                                .iter()
                                .map(|s| {
                                    HeaderValue::from_str(s.as_str())
                                        .context(InvalidHeaderValueSnafu)
                                })
                                .collect::<Result<Vec<HeaderValue>>>()?,
                        )
                    })
                    .allow_headers(Any),
            )
        } else {
            info!("HTTP server cross-origin is disabled");
            None
        };

        Ok(router
            .layer(
                ServiceBuilder::new()
                    .layer(TraceLayer::new_for_http().on_failure(()))
                    .option_layer(cors_layer)
                    .option_layer(timeout_layer)
                    .option_layer(body_limit_layer)
                    .layer(middleware::from_fn_with_state(
                        self.memory_limiter.clone(),
                        memory_limit::memory_limit_middleware,
                    ))
                    .layer(middleware::from_fn_with_state(
                        AuthState::new(self.user_provider.clone()),
                        authorize::check_http_auth,
                    ))
                    .layer(middleware::from_fn(hints::extract_hints))
                    .layer(middleware::from_fn(
                        read_preference::extract_read_preference,
                    )),
            )
            // Diagnostics: dynamic log level, tracing toggle and profilers.
            .nest(
                "/debug",
                Router::new()
                    .route("/log_level", routing::post(dyn_log::dyn_log_handler))
                    .route("/enable_trace", routing::post(dyn_trace::dyn_trace_handler))
                    .nest(
                        "/prof",
                        Router::new()
                            .route("/cpu", routing::post(pprof::pprof_handler))
                            .route("/mem", routing::post(mem_prof::mem_prof_handler))
                            .route(
                                "/mem/activate",
                                routing::post(mem_prof::activate_heap_prof_handler),
                            )
                            .route(
                                "/mem/deactivate",
                                routing::post(mem_prof::deactivate_heap_prof_handler),
                            )
                            .route(
                                "/mem/status",
                                routing::get(mem_prof::heap_prof_status_handler),
                            )
                            .route(
                                "/mem/gdump",
                                routing::get(mem_prof::gdump_status_handler)
                                    .post(mem_prof::gdump_toggle_handler),
                            ),
                    ),
            ))
    }
916
917 fn route_metrics<S>(metrics_handler: MetricsHandler) -> Router<S> {
918 Router::new()
919 .route("/metrics", routing::get(handler::metrics))
920 .with_state(metrics_handler)
921 }
922
923 fn route_loki<S>(log_state: LogState) -> Router<S> {
924 Router::new()
925 .route("/api/v1/push", routing::post(loki::loki_ingest))
926 .layer(
927 ServiceBuilder::new()
928 .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
929 )
930 .with_state(log_state)
931 }
932
    /// Minimal Elasticsearch-compatible surface: enough metadata endpoints
    /// for shippers (e.g. the `/logstash` probes) to handshake, plus the
    /// `_bulk` ingestion APIs. The `{*path}` stubs answer `200 OK` with an
    /// empty JSON object.
    fn route_elasticsearch<S>(log_state: LogState) -> Router<S> {
        Router::new()
            // HEAD / probes availability.
            .route(
                "/",
                routing::head((HttpStatusCode::OK, elasticsearch::elasticsearch_headers())),
            )
            // Version and license probes.
            .route("/", routing::get(elasticsearch::handle_get_version))
            .route("/_license", routing::get(elasticsearch::handle_get_license))
            // Actual log ingestion.
            .route("/_bulk", routing::post(elasticsearch::handle_bulk_api))
            .route(
                "/{index}/_bulk",
                routing::post(elasticsearch::handle_bulk_api_with_index),
            )
            // The remaining management endpoints are accepted and ignored.
            .route(
                "/_ilm/policy/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/_index_template/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/_ingest/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/_nodes/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/logstash/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .route(
                "/_logstash/{*path}",
                routing::any((
                    HttpStatusCode::OK,
                    elasticsearch::elasticsearch_headers(),
                    axum::Json(serde_json::json!({})),
                )),
            )
            .layer(ServiceBuilder::new().layer(RequestDecompressionLayer::new()))
            .with_state(log_state)
    }
1008
1009 #[deprecated(since = "0.11.0", note = "Use `route_pipelines()` instead.")]
1010 fn route_log_deprecated<S>(log_state: LogState) -> Router<S> {
1011 Router::new()
1012 .route("/logs", routing::post(event::log_ingester))
1013 .route(
1014 "/pipelines/{pipeline_name}",
1015 routing::get(event::query_pipeline),
1016 )
1017 .route(
1018 "/pipelines/{pipeline_name}",
1019 routing::post(event::add_pipeline),
1020 )
1021 .route(
1022 "/pipelines/{pipeline_name}",
1023 routing::delete(event::delete_pipeline),
1024 )
1025 .route("/pipelines/dryrun", routing::post(event::pipeline_dryrun))
1026 .layer(
1027 ServiceBuilder::new()
1028 .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
1029 )
1030 .with_state(log_state)
1031 }
1032
1033 fn route_pipelines<S>(log_state: LogState) -> Router<S> {
1034 Router::new()
1035 .route("/ingest", routing::post(event::log_ingester))
1036 .route(
1037 "/pipelines/{pipeline_name}",
1038 routing::get(event::query_pipeline),
1039 )
1040 .route(
1041 "/pipelines/{pipeline_name}/ddl",
1042 routing::get(event::query_pipeline_ddl),
1043 )
1044 .route(
1045 "/pipelines/{pipeline_name}",
1046 routing::post(event::add_pipeline),
1047 )
1048 .route(
1049 "/pipelines/{pipeline_name}",
1050 routing::delete(event::delete_pipeline),
1051 )
1052 .route("/pipelines/_dryrun", routing::post(event::pipeline_dryrun))
1053 .layer(
1054 ServiceBuilder::new()
1055 .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
1056 )
1057 .with_state(log_state)
1058 }
1059
1060 fn route_sql<S>(api_state: ApiState) -> Router<S> {
1061 Router::new()
1062 .route("/sql", routing::get(handler::sql).post(handler::sql))
1063 .route(
1064 "/sql/parse",
1065 routing::get(handler::sql_parse).post(handler::sql_parse),
1066 )
1067 .route(
1068 "/sql/format",
1069 routing::get(handler::sql_format).post(handler::sql_format),
1070 )
1071 .route(
1072 "/promql",
1073 routing::get(handler::promql).post(handler::promql),
1074 )
1075 .with_state(api_state)
1076 }
1077
1078 fn route_logs<S>(log_handler: LogQueryHandlerRef) -> Router<S> {
1079 Router::new()
1080 .route("/logs", routing::get(logs::logs).post(logs::logs))
1081 .with_state(log_handler)
1082 }
1083
1084 pub fn route_prometheus<S>(prometheus_handler: PrometheusHandlerRef) -> Router<S> {
1088 Router::new()
1089 .route(
1090 "/format_query",
1091 routing::post(format_query).get(format_query),
1092 )
1093 .route("/status/buildinfo", routing::get(build_info_query))
1094 .route("/query", routing::post(instant_query).get(instant_query))
1095 .route("/query_range", routing::post(range_query).get(range_query))
1096 .route("/labels", routing::post(labels_query).get(labels_query))
1097 .route("/series", routing::post(series_query).get(series_query))
1098 .route("/parse_query", routing::post(parse_query).get(parse_query))
1099 .route(
1100 "/label/{label_name}/values",
1101 routing::get(label_values_query),
1102 )
1103 .layer(ServiceBuilder::new().layer(CompressionLayer::new()))
1104 .with_state(prometheus_handler)
1105 }
1106
1107 fn route_prom<S>(state: PromStoreState) -> Router<S> {
1113 Router::new()
1114 .route("/read", routing::post(prom_store::remote_read))
1115 .route("/write", routing::post(prom_store::remote_write))
1116 .with_state(state)
1117 }
1118
    /// InfluxDB-compatible endpoints. Note the ordering: axum applies a
    /// `layer` only to routes registered before it, so decompression covers
    /// the two write routes while `/ping` and `/health` bypass it.
    fn route_influxdb<S>(influxdb_handler: InfluxdbLineProtocolHandlerRef) -> Router<S> {
        Router::new()
            .route("/write", routing::post(influxdb_write_v1))
            .route("/api/v2/write", routing::post(influxdb_write_v2))
            .layer(
                ServiceBuilder::new()
                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
            )
            .route("/ping", routing::get(influxdb_ping))
            .route("/health", routing::get(influxdb_health))
            .with_state(influxdb_handler)
    }
1131
1132 fn route_opentsdb<S>(opentsdb_handler: OpentsdbProtocolHandlerRef) -> Router<S> {
1133 Router::new()
1134 .route("/api/put", routing::post(opentsdb::put))
1135 .with_state(opentsdb_handler)
1136 }
1137
1138 fn route_otlp<S>(
1139 otlp_handler: OpenTelemetryProtocolHandlerRef,
1140 with_metric_engine: bool,
1141 ) -> Router<S> {
1142 Router::new()
1143 .route("/v1/metrics", routing::post(otlp::metrics))
1144 .route("/v1/traces", routing::post(otlp::traces))
1145 .route("/v1/logs", routing::post(otlp::logs))
1146 .layer(
1147 ServiceBuilder::new()
1148 .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
1149 )
1150 .with_state(OtlpState {
1151 with_metric_engine,
1152 handler: otlp_handler,
1153 })
1154 }
1155
1156 fn route_config<S>(state: GreptimeOptionsConfigState) -> Router<S> {
1157 Router::new()
1158 .route("/config", routing::get(handler::config))
1159 .with_state(state)
1160 }
1161
1162 fn route_jaeger<S>(handler: JaegerQueryHandlerRef) -> Router<S> {
1163 Router::new()
1164 .route("/api/services", routing::get(jaeger::handle_get_services))
1165 .route(
1166 "/api/services/{service_name}/operations",
1167 routing::get(jaeger::handle_get_operations_by_service),
1168 )
1169 .route(
1170 "/api/operations",
1171 routing::get(jaeger::handle_get_operations),
1172 )
1173 .route("/api/traces", routing::get(jaeger::handle_find_traces))
1174 .route(
1175 "/api/traces/{trace_id}",
1176 routing::get(jaeger::handle_get_trace),
1177 )
1178 .with_state(handler)
1179 }
1180}
1181
/// Server name reported by `Server::name`.
pub const HTTP_SERVER: &str = "HTTP_SERVER";
1183
#[async_trait]
impl Server for HttpServer {
    /// Signals the running server to shut down gracefully. Idempotent: if
    /// the server has already exited (receiver dropped), this only logs.
    async fn shutdown(&self) -> Result<()> {
        let mut shutdown_tx = self.shutdown_tx.lock().await;
        if let Some(tx) = shutdown_tx.take()
            && tx.send(()).is_err()
        {
            info!("Receiver dropped, the HTTP server has already exited");
        }
        info!("Shutdown HTTP server");

        Ok(())
    }

    /// Binds `listening`, builds the middleware-wrapped app (letting a
    /// `ConfiguratorRef` plugin adjust it first, if registered) and serves
    /// it on a global runtime task until `shutdown` is called.
    ///
    /// # Errors
    /// Fails if the server was already started, the address cannot be bound,
    /// or the middleware stack cannot be built.
    async fn start(&mut self, listening: SocketAddr) -> Result<()> {
        let (tx, rx) = oneshot::channel();
        let serve = {
            // Hold the lock across setup so concurrent starts are rejected.
            let mut shutdown_tx = self.shutdown_tx.lock().await;
            ensure!(
                shutdown_tx.is_none(),
                AlreadyStartedSnafu { server: "HTTP" }
            );

            let mut app = self.make_app();
            if let Some(configurator) = self.plugins.get::<ConfiguratorRef>() {
                app = configurator.config_http(app);
            }
            let app = self.build(app)?;
            let listener = tokio::net::TcpListener::bind(listening)
                .await
                .context(AddressBindSnafu { addr: listening })?
                .tap_io(|tcp_stream| {
                    // Best-effort latency tweak; failure is only logged.
                    if let Err(e) = tcp_stream.set_nodelay(true) {
                        error!(e; "Failed to set TCP_NODELAY on incoming connection");
                    }
                });
            let serve = axum::serve(listener, app.into_make_service());

            *shutdown_tx = Some(tx);

            serve
        };
        // Report the actual bound address (relevant when port 0 was asked).
        let listening = serve.local_addr().context(InternalIoSnafu)?;
        info!("HTTP server is bound to {}", listening);

        common_runtime::spawn_global(async move {
            if let Err(e) = serve
                .with_graceful_shutdown(rx.map(drop))
                .await
                .context(InternalIoSnafu)
            {
                error!(e; "Failed to shutdown http server");
            }
        });

        self.bind_addr = Some(listening);
        Ok(())
    }

    fn name(&self) -> &str {
        HTTP_SERVER
    }

    fn bind_addr(&self) -> Option<SocketAddr> {
        self.bind_addr
    }
}
1267
1268#[cfg(test)]
1269mod test {
1270 use std::future::pending;
1271 use std::io::Cursor;
1272 use std::sync::Arc;
1273
1274 use arrow_ipc::reader::FileReader;
1275 use arrow_schema::DataType;
1276 use axum::handler::Handler;
1277 use axum::http::StatusCode;
1278 use axum::routing::get;
1279 use common_query::Output;
1280 use common_recordbatch::RecordBatches;
1281 use datafusion_expr::LogicalPlan;
1282 use datatypes::prelude::*;
1283 use datatypes::schema::{ColumnSchema, Schema};
1284 use datatypes::vectors::{StringVector, UInt32Vector};
1285 use header::constants::GREPTIME_DB_HEADER_TIMEOUT;
1286 use query::parser::PromQuery;
1287 use query::query_engine::DescribeResult;
1288 use session::context::QueryContextRef;
1289 use sql::statements::statement::Statement;
1290 use tokio::sync::mpsc;
1291 use tokio::time::Instant;
1292
1293 use super::*;
1294 use crate::error::Error;
1295 use crate::http::test_helpers::TestClient;
1296 use crate::query_handler::sql::{ServerSqlQueryHandlerAdapter, SqlQueryHandler};
1297
    /// Minimal [`SqlQueryHandler`] stub used to assemble the HTTP test app.
    struct DummyInstance {
        // Held only to keep the channel alive; the tests never send on it.
        _tx: mpsc::Sender<(String, Vec<u8>)>,
    }
1301
    // All query paths panic via `unimplemented!()`: these HTTP tests exercise
    // routing and middleware only, never actual query execution.
    #[async_trait]
    impl SqlQueryHandler for DummyInstance {
        type Error = Error;

        async fn do_query(&self, _: &str, _: QueryContextRef) -> Vec<Result<Output>> {
            unimplemented!()
        }

        async fn do_promql_query(
            &self,
            _: &PromQuery,
            _: QueryContextRef,
        ) -> Vec<std::result::Result<Output, Self::Error>> {
            unimplemented!()
        }

        async fn do_exec_plan(
            &self,
            _stmt: Option<Statement>,
            _plan: LogicalPlan,
            _query_ctx: QueryContextRef,
        ) -> std::result::Result<Output, Self::Error> {
            unimplemented!()
        }

        async fn do_describe(
            &self,
            _stmt: sql::statements::statement::Statement,
            _query_ctx: QueryContextRef,
        ) -> Result<Option<DescribeResult>> {
            unimplemented!()
        }

        // Pretend every schema exists so requests are not rejected before
        // reaching the code paths under test.
        async fn is_valid_schema(&self, _catalog: &str, _schema: &str) -> Result<bool> {
            Ok(true)
        }
    }
1339
    /// Timeout layer with a 10 ms default, applied to the `/test/timeout` route.
    fn timeout() -> DynamicTimeoutLayer {
        DynamicTimeoutLayer::new(Duration::from_millis(10))
    }
1343
    /// Handler that never completes; used to force the timeout paths to fire.
    async fn forever() {
        pending().await
    }
1347
    /// Builds the test app with default [`HttpOptions`].
    fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router {
        make_test_app_custom(tx, HttpOptions::default())
    }
1351
1352 fn make_test_app_custom(tx: mpsc::Sender<(String, Vec<u8>)>, options: HttpOptions) -> Router {
1353 let instance = Arc::new(DummyInstance { _tx: tx });
1354 let sql_instance = ServerSqlQueryHandlerAdapter::arc(instance.clone());
1355 let server = HttpServerBuilder::new(options)
1356 .with_sql_handler(sql_instance)
1357 .build();
1358 server.build(server.make_app()).unwrap().route(
1359 "/test/timeout",
1360 get(forever.layer(ServiceBuilder::new().layer(timeout()))),
1361 )
1362 }
1363
    /// Default CORS behavior: any origin is allowed, and preflight responses
    /// advertise wildcard headers plus the fixed method list.
    #[tokio::test]
    pub async fn test_cors() {
        let (tx, _rx) = mpsc::channel(100);
        let app = make_test_app(tx);
        let client = TestClient::new(app).await;

        let res = client.get("/health").send().await;

        assert_eq!(res.status(), StatusCode::OK);
        assert_eq!(
            res.headers()
                .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
                .expect("expect cors header origin"),
            "*"
        );

        let res = client.get("/v1/health").send().await;

        assert_eq!(res.status(), StatusCode::OK);
        assert_eq!(
            res.headers()
                .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
                .expect("expect cors header origin"),
            "*"
        );

        // CORS preflight (OPTIONS) request: wildcard origin and headers,
        // explicit method list.
        let res = client
            .options("/health")
            .header("Access-Control-Request-Headers", "x-greptime-auth")
            .header("Access-Control-Request-Method", "DELETE")
            .header("Origin", "https://example.com")
            .send()
            .await;
        assert_eq!(res.status(), StatusCode::OK);
        assert_eq!(
            res.headers()
                .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
                .expect("expect cors header origin"),
            "*"
        );
        assert_eq!(
            res.headers()
                .get(http::header::ACCESS_CONTROL_ALLOW_HEADERS)
                .expect("expect cors header headers"),
            "*"
        );
        assert_eq!(
            res.headers()
                .get(http::header::ACCESS_CONTROL_ALLOW_METHODS)
                .expect("expect cors header methods"),
            "GET,POST,PUT,DELETE,HEAD"
        );
    }
1418
1419 #[tokio::test]
1420 pub async fn test_cors_custom_origins() {
1421 let (tx, _rx) = mpsc::channel(100);
1423 let origin = "https://example.com";
1424
1425 let options = HttpOptions {
1426 cors_allowed_origins: vec![origin.to_string()],
1427 ..Default::default()
1428 };
1429
1430 let app = make_test_app_custom(tx, options);
1431 let client = TestClient::new(app).await;
1432
1433 let res = client.get("/health").header("Origin", origin).send().await;
1434
1435 assert_eq!(res.status(), StatusCode::OK);
1436 assert_eq!(
1437 res.headers()
1438 .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1439 .expect("expect cors header origin"),
1440 origin
1441 );
1442
1443 let res = client
1444 .get("/health")
1445 .header("Origin", "https://notallowed.com")
1446 .send()
1447 .await;
1448
1449 assert_eq!(res.status(), StatusCode::OK);
1450 assert!(
1451 !res.headers()
1452 .contains_key(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1453 );
1454 }
1455
1456 #[tokio::test]
1457 pub async fn test_cors_disabled() {
1458 let (tx, _rx) = mpsc::channel(100);
1460
1461 let options = HttpOptions {
1462 enable_cors: false,
1463 ..Default::default()
1464 };
1465
1466 let app = make_test_app_custom(tx, options);
1467 let client = TestClient::new(app).await;
1468
1469 let res = client.get("/health").send().await;
1470
1471 assert_eq!(res.status(), StatusCode::OK);
1472 assert!(
1473 !res.headers()
1474 .contains_key(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1475 );
1476 }
1477
    /// Pins the defaults of `HttpOptions`: listen address and a zero timeout.
    #[test]
    fn test_http_options_default() {
        let default = HttpOptions::default();
        assert_eq!("127.0.0.1:4000".to_string(), default.addr);
        assert_eq!(Duration::from_secs(0), default.timeout)
    }
1484
1485 #[tokio::test]
1486 async fn test_http_server_request_timeout() {
1487 common_telemetry::init_default_ut_logging();
1488
1489 let (tx, _rx) = mpsc::channel(100);
1490 let app = make_test_app(tx);
1491 let client = TestClient::new(app).await;
1492 let res = client.get("/test/timeout").send().await;
1493 assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT);
1494
1495 let now = Instant::now();
1496 let res = client
1497 .get("/test/timeout")
1498 .header(GREPTIME_DB_HEADER_TIMEOUT, "20ms")
1499 .send()
1500 .await;
1501 assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT);
1502 let elapsed = now.elapsed();
1503 assert!(elapsed > Duration::from_millis(15));
1504
1505 tokio::time::timeout(
1506 Duration::from_millis(15),
1507 client
1508 .get("/test/timeout")
1509 .header(GREPTIME_DB_HEADER_TIMEOUT, "0s")
1510 .send(),
1511 )
1512 .await
1513 .unwrap_err();
1514
1515 tokio::time::timeout(
1516 Duration::from_millis(15),
1517 client
1518 .get("/test/timeout")
1519 .header(
1520 GREPTIME_DB_HEADER_TIMEOUT,
1521 humantime::format_duration(Duration::default()).to_string(),
1522 )
1523 .send(),
1524 )
1525 .await
1526 .unwrap_err();
1527 }
1528
1529 #[tokio::test]
1530 async fn test_schema_for_empty_response() {
1531 let column_schemas = vec![
1532 ColumnSchema::new("numbers", ConcreteDataType::uint32_datatype(), false),
1533 ColumnSchema::new("strings", ConcreteDataType::string_datatype(), true),
1534 ];
1535 let schema = Arc::new(Schema::new(column_schemas));
1536
1537 let recordbatches = RecordBatches::try_new(schema.clone(), vec![]).unwrap();
1538 let outputs = vec![Ok(Output::new_with_record_batches(recordbatches))];
1539
1540 let json_resp = GreptimedbV1Response::from_output(outputs).await;
1541 if let HttpResponse::GreptimedbV1(json_resp) = json_resp {
1542 let json_output = &json_resp.output[0];
1543 if let GreptimeQueryOutput::Records(r) = json_output {
1544 assert_eq!(r.num_rows(), 0);
1545 assert_eq!(r.num_cols(), 2);
1546 assert_eq!(r.schema.column_schemas[0].name, "numbers");
1547 assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1548 } else {
1549 panic!("invalid output type");
1550 }
1551 } else {
1552 panic!("invalid format")
1553 }
1554 }
1555
1556 #[tokio::test]
1557 async fn test_recordbatches_conversion() {
1558 let column_schemas = vec![
1559 ColumnSchema::new("numbers", ConcreteDataType::uint32_datatype(), false),
1560 ColumnSchema::new("strings", ConcreteDataType::string_datatype(), true),
1561 ];
1562 let schema = Arc::new(Schema::new(column_schemas));
1563 let columns: Vec<VectorRef> = vec![
1564 Arc::new(UInt32Vector::from_slice(vec![1, 2, 3, 4])),
1565 Arc::new(StringVector::from(vec![
1566 None,
1567 Some("hello"),
1568 Some("greptime"),
1569 None,
1570 ])),
1571 ];
1572 let recordbatch = RecordBatch::new(schema.clone(), columns).unwrap();
1573
1574 for format in [
1575 ResponseFormat::GreptimedbV1,
1576 ResponseFormat::InfluxdbV1,
1577 ResponseFormat::Csv(true, true),
1578 ResponseFormat::Table,
1579 ResponseFormat::Arrow,
1580 ResponseFormat::Json,
1581 ResponseFormat::Null,
1582 ] {
1583 let recordbatches =
1584 RecordBatches::try_new(schema.clone(), vec![recordbatch.clone()]).unwrap();
1585 let outputs = vec![Ok(Output::new_with_record_batches(recordbatches))];
1586 let json_resp = match format {
1587 ResponseFormat::Arrow => ArrowResponse::from_output(outputs, None).await,
1588 ResponseFormat::Csv(with_names, with_types) => {
1589 CsvResponse::from_output(outputs, with_names, with_types).await
1590 }
1591 ResponseFormat::Table => TableResponse::from_output(outputs).await,
1592 ResponseFormat::GreptimedbV1 => GreptimedbV1Response::from_output(outputs).await,
1593 ResponseFormat::InfluxdbV1 => InfluxdbV1Response::from_output(outputs, None).await,
1594 ResponseFormat::Json => JsonResponse::from_output(outputs).await,
1595 ResponseFormat::Null => NullResponse::from_output(outputs).await,
1596 };
1597
1598 match json_resp {
1599 HttpResponse::GreptimedbV1(resp) => {
1600 let json_output = &resp.output[0];
1601 if let GreptimeQueryOutput::Records(r) = json_output {
1602 assert_eq!(r.num_rows(), 4);
1603 assert_eq!(r.num_cols(), 2);
1604 assert_eq!(r.schema.column_schemas[0].name, "numbers");
1605 assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1606 assert_eq!(r.rows[0][0], serde_json::Value::from(1));
1607 assert_eq!(r.rows[0][1], serde_json::Value::Null);
1608 } else {
1609 panic!("invalid output type");
1610 }
1611 }
1612 HttpResponse::InfluxdbV1(resp) => {
1613 let json_output = &resp.results()[0];
1614 assert_eq!(json_output.num_rows(), 4);
1615 assert_eq!(json_output.num_cols(), 2);
1616 assert_eq!(json_output.series[0].columns.clone()[0], "numbers");
1617 assert_eq!(
1618 json_output.series[0].values[0][0],
1619 serde_json::Value::from(1)
1620 );
1621 assert_eq!(json_output.series[0].values[0][1], serde_json::Value::Null);
1622 }
1623 HttpResponse::Csv(resp) => {
1624 let output = &resp.output()[0];
1625 if let GreptimeQueryOutput::Records(r) = output {
1626 assert_eq!(r.num_rows(), 4);
1627 assert_eq!(r.num_cols(), 2);
1628 assert_eq!(r.schema.column_schemas[0].name, "numbers");
1629 assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1630 assert_eq!(r.rows[0][0], serde_json::Value::from(1));
1631 assert_eq!(r.rows[0][1], serde_json::Value::Null);
1632 } else {
1633 panic!("invalid output type");
1634 }
1635 }
1636
1637 HttpResponse::Table(resp) => {
1638 let output = &resp.output()[0];
1639 if let GreptimeQueryOutput::Records(r) = output {
1640 assert_eq!(r.num_rows(), 4);
1641 assert_eq!(r.num_cols(), 2);
1642 assert_eq!(r.schema.column_schemas[0].name, "numbers");
1643 assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1644 assert_eq!(r.rows[0][0], serde_json::Value::from(1));
1645 assert_eq!(r.rows[0][1], serde_json::Value::Null);
1646 } else {
1647 panic!("invalid output type");
1648 }
1649 }
1650
1651 HttpResponse::Arrow(resp) => {
1652 let output = resp.data;
1653 let mut reader =
1654 FileReader::try_new(Cursor::new(output), None).expect("Arrow reader error");
1655 let schema = reader.schema();
1656 assert_eq!(schema.fields[0].name(), "numbers");
1657 assert_eq!(schema.fields[0].data_type(), &DataType::UInt32);
1658 assert_eq!(schema.fields[1].name(), "strings");
1659 assert_eq!(schema.fields[1].data_type(), &DataType::Utf8);
1660
1661 let rb = reader.next().unwrap().expect("read record batch failed");
1662 assert_eq!(rb.num_columns(), 2);
1663 assert_eq!(rb.num_rows(), 4);
1664 }
1665
1666 HttpResponse::Json(resp) => {
1667 let output = &resp.output()[0];
1668 if let GreptimeQueryOutput::Records(r) = output {
1669 assert_eq!(r.num_rows(), 4);
1670 assert_eq!(r.num_cols(), 2);
1671 assert_eq!(r.schema.column_schemas[0].name, "numbers");
1672 assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1673 assert_eq!(r.rows[0][0], serde_json::Value::from(1));
1674 assert_eq!(r.rows[0][1], serde_json::Value::Null);
1675 } else {
1676 panic!("invalid output type");
1677 }
1678 }
1679
1680 HttpResponse::Null(resp) => {
1681 assert_eq!(resp.rows(), 4);
1682 }
1683
1684 HttpResponse::Error(err) => unreachable!("{err:?}"),
1685 }
1686 }
1687 }
1688
1689 #[test]
1690 fn test_response_format_misc() {
1691 assert_eq!(ResponseFormat::default(), ResponseFormat::GreptimedbV1);
1692 assert_eq!(ResponseFormat::parse("arrow"), Some(ResponseFormat::Arrow));
1693 assert_eq!(
1694 ResponseFormat::parse("csv"),
1695 Some(ResponseFormat::Csv(false, false))
1696 );
1697 assert_eq!(
1698 ResponseFormat::parse("csvwithnames"),
1699 Some(ResponseFormat::Csv(true, false))
1700 );
1701 assert_eq!(
1702 ResponseFormat::parse("csvwithnamesandtypes"),
1703 Some(ResponseFormat::Csv(true, true))
1704 );
1705 assert_eq!(ResponseFormat::parse("table"), Some(ResponseFormat::Table));
1706 assert_eq!(
1707 ResponseFormat::parse("greptimedb_v1"),
1708 Some(ResponseFormat::GreptimedbV1)
1709 );
1710 assert_eq!(
1711 ResponseFormat::parse("influxdb_v1"),
1712 Some(ResponseFormat::InfluxdbV1)
1713 );
1714 assert_eq!(ResponseFormat::parse("json"), Some(ResponseFormat::Json));
1715 assert_eq!(ResponseFormat::parse("null"), Some(ResponseFormat::Null));
1716
1717 assert_eq!(ResponseFormat::parse("invalid"), None);
1719 assert_eq!(ResponseFormat::parse(""), None);
1720 assert_eq!(ResponseFormat::parse("CSV"), None); assert_eq!(ResponseFormat::Arrow.as_str(), "arrow");
1724 assert_eq!(ResponseFormat::Csv(false, false).as_str(), "csv");
1725 assert_eq!(ResponseFormat::Csv(true, true).as_str(), "csv");
1726 assert_eq!(ResponseFormat::Table.as_str(), "table");
1727 assert_eq!(ResponseFormat::GreptimedbV1.as_str(), "greptimedb_v1");
1728 assert_eq!(ResponseFormat::InfluxdbV1.as_str(), "influxdb_v1");
1729 assert_eq!(ResponseFormat::Json.as_str(), "json");
1730 assert_eq!(ResponseFormat::Null.as_str(), "null");
1731 assert_eq!(ResponseFormat::default().as_str(), "greptimedb_v1");
1732 }
1733}