servers/http.rs

1// Copyright 2023 Greptime Team
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use std::collections::HashMap;
16use std::fmt::Display;
17use std::net::SocketAddr;
18use std::sync::Mutex as StdMutex;
19use std::time::Duration;
20
21use async_trait::async_trait;
22use auth::UserProviderRef;
23use axum::extract::DefaultBodyLimit;
24use axum::http::StatusCode as HttpStatusCode;
25use axum::response::{IntoResponse, Response};
26use axum::serve::ListenerExt;
27use axum::{middleware, routing, Router};
28use common_base::readable_size::ReadableSize;
29use common_base::Plugins;
30use common_recordbatch::RecordBatch;
31use common_telemetry::{debug, error, info};
32use common_time::timestamp::TimeUnit;
33use common_time::Timestamp;
34use datatypes::data_type::DataType;
35use datatypes::schema::SchemaRef;
36use datatypes::value::transform_value_ref_to_json_value;
37use event::{LogState, LogValidatorRef};
38use futures::FutureExt;
39use http::{HeaderValue, Method};
40use prost::DecodeError;
41use serde::{Deserialize, Serialize};
42use serde_json::Value;
43use snafu::{ensure, ResultExt};
44use tokio::sync::oneshot::{self, Sender};
45use tokio::sync::Mutex;
46use tower::ServiceBuilder;
47use tower_http::compression::CompressionLayer;
48use tower_http::cors::{AllowOrigin, Any, CorsLayer};
49use tower_http::decompression::RequestDecompressionLayer;
50use tower_http::trace::TraceLayer;
51
52use self::authorize::AuthState;
53use self::result::table_result::TableResponse;
54use crate::configurator::ConfiguratorRef;
55use crate::elasticsearch;
56use crate::error::{
57    AddressBindSnafu, AlreadyStartedSnafu, Error, InternalIoSnafu, InvalidHeaderValueSnafu, Result,
58    ToJsonSnafu,
59};
60use crate::http::influxdb::{influxdb_health, influxdb_ping, influxdb_write_v1, influxdb_write_v2};
61use crate::http::otlp::OtlpState;
62use crate::http::prom_store::PromStoreState;
63use crate::http::prometheus::{
64    build_info_query, format_query, instant_query, label_values_query, labels_query, parse_query,
65    range_query, series_query,
66};
67use crate::http::result::arrow_result::ArrowResponse;
68use crate::http::result::csv_result::CsvResponse;
69use crate::http::result::error_result::ErrorResponse;
70use crate::http::result::greptime_result_v1::GreptimedbV1Response;
71use crate::http::result::influxdb_result_v1::InfluxdbV1Response;
72use crate::http::result::json_result::JsonResponse;
73use crate::http::result::null_result::NullResponse;
74use crate::interceptor::LogIngestInterceptorRef;
75use crate::metrics::http_metrics_layer;
76use crate::metrics_handler::MetricsHandler;
77use crate::prometheus_handler::PrometheusHandlerRef;
78use crate::query_handler::sql::ServerSqlQueryHandlerRef;
79use crate::query_handler::{
80    InfluxdbLineProtocolHandlerRef, JaegerQueryHandlerRef, LogQueryHandlerRef,
81    OpenTelemetryProtocolHandlerRef, OpentsdbProtocolHandlerRef, PipelineHandlerRef,
82    PromStoreProtocolHandlerRef,
83};
84use crate::server::Server;
85
86pub mod authorize;
87#[cfg(feature = "dashboard")]
88mod dashboard;
89pub mod dyn_log;
90pub mod event;
91pub mod extractor;
92pub mod handler;
93pub mod header;
94pub mod influxdb;
95pub mod jaeger;
96pub mod logs;
97pub mod loki;
98pub mod mem_prof;
99pub mod opentsdb;
100pub mod otlp;
101pub mod pprof;
102pub mod prom_store;
103pub mod prometheus;
104pub mod result;
105mod timeout;
106
107pub(crate) use timeout::DynamicTimeoutLayer;
108
109mod hints;
110mod read_preference;
111#[cfg(any(test, feature = "testing"))]
112pub mod test_helpers;
113
114pub const HTTP_API_VERSION: &str = "v1";
115pub const HTTP_API_PREFIX: &str = "/v1/";
116/// Default http body limit (64M).
117const DEFAULT_BODY_LIMIT: ReadableSize = ReadableSize::mb(64);
118
119/// Authorization header
120pub const AUTHORIZATION_HEADER: &str = "x-greptime-auth";
121
122// TODO(fys): This is a temporary workaround; it will be improved later.
123pub static PUBLIC_APIS: [&str; 3] = ["/v1/influxdb/ping", "/v1/influxdb/health", "/v1/health"];
124
125#[derive(Default)]
126pub struct HttpServer {
127    router: StdMutex<Router>,
128    shutdown_tx: Mutex<Option<Sender<()>>>,
129    user_provider: Option<UserProviderRef>,
130
131    // plugins
132    plugins: Plugins,
133
134    // server configs
135    options: HttpOptions,
136    bind_addr: Option<SocketAddr>,
137}
138
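/// HTTP server options.
///
/// A hedged sketch of overriding a few fields in code (values here are illustrative,
/// not recommendations):
///
/// ```ignore
/// let opts = HttpOptions {
///     addr: "0.0.0.0:4000".to_string(),
///     timeout: Duration::from_secs(30),
///     ..Default::default()
/// };
/// ```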
139#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
140#[serde(default)]
141pub struct HttpOptions {
142    pub addr: String,
143
144    #[serde(with = "humantime_serde")]
145    pub timeout: Duration,
146
147    #[serde(skip)]
148    pub disable_dashboard: bool,
149
150    pub body_limit: ReadableSize,
151
152    /// Validation mode while decoding Prometheus remote write requests.
153    pub prom_validation_mode: PromValidationMode,
154
155    pub cors_allowed_origins: Vec<String>,
156
157    pub enable_cors: bool,
158}
159
160#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
161#[serde(rename_all = "snake_case")]
162pub enum PromValidationMode {
163    /// Force UTF-8 validation
164    Strict,
165    /// Allow lossy UTF-8 strings
166    Lossy,
167    /// Do not validate UTF-8 strings.
168    Unchecked,
169}
170
171impl PromValidationMode {
172    /// Decodes provided bytes to [String] with optional UTF-8 validation.
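    ///
    /// A hedged sketch of how the modes differ on invalid input (values are illustrative):
    ///
    /// ```ignore
    /// let bytes = [0xff, 0xfe]; // not valid UTF-8
    /// assert!(PromValidationMode::Strict.decode_string(&bytes).is_err());
    /// // Lossy replaces invalid sequences with U+FFFD instead of failing:
    /// assert!(PromValidationMode::Lossy.decode_string(&bytes).is_ok());
    /// ```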
173    pub fn decode_string(&self, bytes: &[u8]) -> std::result::Result<String, DecodeError> {
174        let result = match self {
175            PromValidationMode::Strict => match String::from_utf8(bytes.to_vec()) {
176                Ok(s) => s,
177                Err(e) => {
178                    debug!("Invalid UTF-8 string value: {:?}, error: {:?}", bytes, e);
179                    return Err(DecodeError::new("invalid utf-8"));
180                }
181            },
182            PromValidationMode::Lossy => String::from_utf8_lossy(bytes).to_string(),
183            PromValidationMode::Unchecked => unsafe { String::from_utf8_unchecked(bytes.to_vec()) },
184        };
185        Ok(result)
186    }
187}
188
189impl Default for HttpOptions {
190    fn default() -> Self {
191        Self {
192            addr: "127.0.0.1:4000".to_string(),
193            timeout: Duration::from_secs(0),
194            disable_dashboard: false,
195            body_limit: DEFAULT_BODY_LIMIT,
196            cors_allowed_origins: Vec::new(),
197            enable_cors: true,
198            prom_validation_mode: PromValidationMode::Strict,
199        }
200    }
201}
202
203#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
204pub struct ColumnSchema {
205    name: String,
206    data_type: String,
207}
208
209impl ColumnSchema {
210    pub fn new(name: String, data_type: String) -> ColumnSchema {
211        ColumnSchema { name, data_type }
212    }
213}
214
215#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
216pub struct OutputSchema {
217    column_schemas: Vec<ColumnSchema>,
218}
219
220impl OutputSchema {
221    pub fn new(columns: Vec<ColumnSchema>) -> OutputSchema {
222        OutputSchema {
223            column_schemas: columns,
224        }
225    }
226}
227
228impl From<SchemaRef> for OutputSchema {
229    fn from(schema: SchemaRef) -> OutputSchema {
230        OutputSchema {
231            column_schemas: schema
232                .column_schemas()
233                .iter()
234                .map(|cs| ColumnSchema {
235                    name: cs.name.clone(),
236                    data_type: cs.data_type.name(),
237                })
238                .collect(),
239        }
240    }
241}
242
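/// Rows of a query result as returned in the HTTP response body.
///
/// A hedged sketch of the serialized JSON shape (field names follow the struct below;
/// `metrics` is omitted when empty):
///
/// ```json
/// {
///   "schema": { "column_schemas": [{ "name": "n", "data_type": "UInt32" }] },
///   "rows": [[1], [2]],
///   "total_rows": 2
/// }
/// ```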
243#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
244pub struct HttpRecordsOutput {
245    schema: OutputSchema,
246    rows: Vec<Vec<Value>>,
247    // total_rows is equal to rows.len() in most cases, but the Dashboard query result
248    // may be truncated, so we need to return total_rows explicitly.
249    #[serde(default)]
250    total_rows: usize,
251
252    // plan level execution metrics
253    #[serde(skip_serializing_if = "HashMap::is_empty")]
254    #[serde(default)]
255    metrics: HashMap<String, Value>,
256}
257
258impl HttpRecordsOutput {
259    pub fn num_rows(&self) -> usize {
260        self.rows.len()
261    }
262
263    pub fn num_cols(&self) -> usize {
264        self.schema.column_schemas.len()
265    }
266
267    pub fn schema(&self) -> &OutputSchema {
268        &self.schema
269    }
270
271    pub fn rows(&self) -> &Vec<Vec<Value>> {
272        &self.rows
273    }
274}
275
276impl HttpRecordsOutput {
277    pub fn try_new(
278        schema: SchemaRef,
279        recordbatches: Vec<RecordBatch>,
280    ) -> std::result::Result<HttpRecordsOutput, Error> {
281        if recordbatches.is_empty() {
282            Ok(HttpRecordsOutput {
283                schema: OutputSchema::from(schema),
284                rows: vec![],
285                total_rows: 0,
286                metrics: Default::default(),
287            })
288        } else {
289            let num_rows = recordbatches.iter().map(|r| r.num_rows()).sum::<usize>();
290            let mut rows = Vec::with_capacity(num_rows);
291            let schemas = schema.column_schemas();
292            let num_cols = schema.column_schemas().len();
293            rows.resize_with(num_rows, || Vec::with_capacity(num_cols));
294
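            // For example (values are illustrative): two record batches with columns
            // [a, b] and two rows each are transposed into row-major form:
            //   rows = [[a0, b0], [a1, b1], [a2, b2], [a3, b3]]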
295            let mut finished_row_cursor = 0;
296            for recordbatch in recordbatches {
297                for (col_idx, col) in recordbatch.columns().iter().enumerate() {
298                    // Safety: the schema's length is equal to the number of columns in the recordbatch.
299                    let schema = &schemas[col_idx];
300                    for row_idx in 0..recordbatch.num_rows() {
301                        let value = transform_value_ref_to_json_value(col.get_ref(row_idx), schema)
302                            .context(ToJsonSnafu)?;
303                        rows[row_idx + finished_row_cursor].push(value);
304                    }
305                }
306                finished_row_cursor += recordbatch.num_rows();
307            }
308
309            Ok(HttpRecordsOutput {
310                schema: OutputSchema::from(schema),
311                total_rows: rows.len(),
312                rows,
313                metrics: Default::default(),
314            })
315        }
316    }
317}
318
319#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
320#[serde(rename_all = "lowercase")]
321pub enum GreptimeQueryOutput {
322    AffectedRows(usize),
323    Records(HttpRecordsOutput),
324}
325
326/// It allows the results of SQL queries to be presented in different formats.
327#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
328pub enum ResponseFormat {
329    Arrow,
330    // (with_names, with_types)
331    Csv(bool, bool),
332    Table,
333    #[default]
334    GreptimedbV1,
335    InfluxdbV1,
336    Json,
337    Null,
338}
339
340impl ResponseFormat {
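    /// Parses a response format name.
    ///
    /// A hedged sketch (names mirror the match arms below):
    ///
    /// ```ignore
    /// assert_eq!(ResponseFormat::parse("csvwithnames"), Some(ResponseFormat::Csv(true, false)));
    /// assert_eq!(ResponseFormat::parse("xml"), None);
    /// ```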
341    pub fn parse(s: &str) -> Option<Self> {
342        match s {
343            "arrow" => Some(ResponseFormat::Arrow),
344            "csv" => Some(ResponseFormat::Csv(false, false)),
345            "csvwithnames" => Some(ResponseFormat::Csv(true, false)),
346            "csvwithnamesandtypes" => Some(ResponseFormat::Csv(true, true)),
347            "table" => Some(ResponseFormat::Table),
348            "greptimedb_v1" => Some(ResponseFormat::GreptimedbV1),
349            "influxdb_v1" => Some(ResponseFormat::InfluxdbV1),
350            "json" => Some(ResponseFormat::Json),
351            "null" => Some(ResponseFormat::Null),
352            _ => None,
353        }
354    }
355
356    pub fn as_str(&self) -> &'static str {
357        match self {
358            ResponseFormat::Arrow => "arrow",
359            ResponseFormat::Csv(_, _) => "csv",
360            ResponseFormat::Table => "table",
361            ResponseFormat::GreptimedbV1 => "greptimedb_v1",
362            ResponseFormat::InfluxdbV1 => "influxdb_v1",
363            ResponseFormat::Json => "json",
364            ResponseFormat::Null => "null",
365        }
366    }
367}
368
369#[derive(Debug, Clone, Copy, PartialEq, Eq)]
370pub enum Epoch {
371    Nanosecond,
372    Microsecond,
373    Millisecond,
374    Second,
375}
376
377impl Epoch {
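    /// Parses an InfluxDB-style `epoch` precision string.
    ///
    /// A hedged sketch (see the InfluxDB reference below):
    ///
    /// ```ignore
    /// assert_eq!(Epoch::parse("ms"), Some(Epoch::Millisecond));
    /// assert_eq!(Epoch::parse("u"), Some(Epoch::Microsecond));
    /// assert_eq!(Epoch::parse("15m"), None);
    /// ```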
378    pub fn parse(s: &str) -> Option<Epoch> {
379        // Both u and µ indicate microseconds.
380        // epoch = [ns,u,µ,ms,s].
381        // For details, see the InfluxDB documentation:
382        // https://docs.influxdata.com/influxdb/v1/tools/api/#query-string-parameters-1
383        match s {
384            "ns" => Some(Epoch::Nanosecond),
385            "u" | "µ" => Some(Epoch::Microsecond),
386            "ms" => Some(Epoch::Millisecond),
387            "s" => Some(Epoch::Second),
388            _ => None, // just returns None for other cases
389        }
390    }
391
392    pub fn convert_timestamp(&self, ts: Timestamp) -> Option<Timestamp> {
393        match self {
394            Epoch::Nanosecond => ts.convert_to(TimeUnit::Nanosecond),
395            Epoch::Microsecond => ts.convert_to(TimeUnit::Microsecond),
396            Epoch::Millisecond => ts.convert_to(TimeUnit::Millisecond),
397            Epoch::Second => ts.convert_to(TimeUnit::Second),
398        }
399    }
400}
401
402impl Display for Epoch {
403    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
404        match self {
405            Epoch::Nanosecond => write!(f, "Epoch::Nanosecond"),
406            Epoch::Microsecond => write!(f, "Epoch::Microsecond"),
407            Epoch::Millisecond => write!(f, "Epoch::Millisecond"),
408            Epoch::Second => write!(f, "Epoch::Second"),
409        }
410    }
411}
412
413#[derive(Serialize, Deserialize, Debug)]
414pub enum HttpResponse {
415    Arrow(ArrowResponse),
416    Csv(CsvResponse),
417    Table(TableResponse),
418    Error(ErrorResponse),
419    GreptimedbV1(GreptimedbV1Response),
420    InfluxdbV1(InfluxdbV1Response),
421    Json(JsonResponse),
422    Null(NullResponse),
423}
424
425impl HttpResponse {
426    pub fn with_execution_time(self, execution_time: u64) -> Self {
427        match self {
428            HttpResponse::Arrow(resp) => resp.with_execution_time(execution_time).into(),
429            HttpResponse::Csv(resp) => resp.with_execution_time(execution_time).into(),
430            HttpResponse::Table(resp) => resp.with_execution_time(execution_time).into(),
431            HttpResponse::GreptimedbV1(resp) => resp.with_execution_time(execution_time).into(),
432            HttpResponse::InfluxdbV1(resp) => resp.with_execution_time(execution_time).into(),
433            HttpResponse::Json(resp) => resp.with_execution_time(execution_time).into(),
434            HttpResponse::Null(resp) => resp.with_execution_time(execution_time).into(),
435            HttpResponse::Error(resp) => resp.with_execution_time(execution_time).into(),
436        }
437    }
438
439    pub fn with_limit(self, limit: usize) -> Self {
440        match self {
441            HttpResponse::Csv(resp) => resp.with_limit(limit).into(),
442            HttpResponse::Table(resp) => resp.with_limit(limit).into(),
443            HttpResponse::GreptimedbV1(resp) => resp.with_limit(limit).into(),
444            HttpResponse::Json(resp) => resp.with_limit(limit).into(),
445            _ => self,
446        }
447    }
448}
449
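/// Truncates each `Records` output to at most `limit` rows; other outputs pass through unchanged.
///
/// A hedged sketch: with `limit = 2`, a `Records` output holding 5 rows comes back with
/// 2 rows and its `total_rows` reset to 2, while `AffectedRows` outputs are untouched.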
450pub fn process_with_limit(
451    mut outputs: Vec<GreptimeQueryOutput>,
452    limit: usize,
453) -> Vec<GreptimeQueryOutput> {
454    outputs
455        .drain(..)
456        .map(|data| match data {
457            GreptimeQueryOutput::Records(mut records) => {
458                if records.rows.len() > limit {
459                    records.rows.truncate(limit);
460                    records.total_rows = limit;
461                }
462                GreptimeQueryOutput::Records(records)
463            }
464            _ => data,
465        })
466        .collect()
467}
468
469impl IntoResponse for HttpResponse {
470    fn into_response(self) -> Response {
471        match self {
472            HttpResponse::Arrow(resp) => resp.into_response(),
473            HttpResponse::Csv(resp) => resp.into_response(),
474            HttpResponse::Table(resp) => resp.into_response(),
475            HttpResponse::GreptimedbV1(resp) => resp.into_response(),
476            HttpResponse::InfluxdbV1(resp) => resp.into_response(),
477            HttpResponse::Json(resp) => resp.into_response(),
478            HttpResponse::Null(resp) => resp.into_response(),
479            HttpResponse::Error(resp) => resp.into_response(),
480        }
481    }
482}
483
484impl From<ArrowResponse> for HttpResponse {
485    fn from(value: ArrowResponse) -> Self {
486        HttpResponse::Arrow(value)
487    }
488}
489
490impl From<CsvResponse> for HttpResponse {
491    fn from(value: CsvResponse) -> Self {
492        HttpResponse::Csv(value)
493    }
494}
495
496impl From<TableResponse> for HttpResponse {
497    fn from(value: TableResponse) -> Self {
498        HttpResponse::Table(value)
499    }
500}
501
502impl From<ErrorResponse> for HttpResponse {
503    fn from(value: ErrorResponse) -> Self {
504        HttpResponse::Error(value)
505    }
506}
507
508impl From<GreptimedbV1Response> for HttpResponse {
509    fn from(value: GreptimedbV1Response) -> Self {
510        HttpResponse::GreptimedbV1(value)
511    }
512}
513
514impl From<InfluxdbV1Response> for HttpResponse {
515    fn from(value: InfluxdbV1Response) -> Self {
516        HttpResponse::InfluxdbV1(value)
517    }
518}
519
520impl From<JsonResponse> for HttpResponse {
521    fn from(value: JsonResponse) -> Self {
522        HttpResponse::Json(value)
523    }
524}
525
526impl From<NullResponse> for HttpResponse {
527    fn from(value: NullResponse) -> Self {
528        HttpResponse::Null(value)
529    }
530}
531
532#[derive(Clone)]
533pub struct ApiState {
534    pub sql_handler: ServerSqlQueryHandlerRef,
535}
536
537#[derive(Clone)]
538pub struct GreptimeOptionsConfigState {
539    pub greptime_config_options: String,
540}
541
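/// Builder for [HttpServer].
///
/// A minimal usage sketch, mirroring the tests at the bottom of this file
/// (`sql_handler` stands in for a real [ServerSqlQueryHandlerRef]):
///
/// ```ignore
/// let server = HttpServerBuilder::new(HttpOptions::default())
///     .with_sql_handler(sql_handler)
///     .build();
/// let app = server.build(server.make_app())?;
/// ```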
542#[derive(Default)]
543pub struct HttpServerBuilder {
544    options: HttpOptions,
545    plugins: Plugins,
546    user_provider: Option<UserProviderRef>,
547    router: Router,
548}
549
550impl HttpServerBuilder {
551    pub fn new(options: HttpOptions) -> Self {
552        Self {
553            options,
554            plugins: Plugins::default(),
555            user_provider: None,
556            router: Router::new(),
557        }
558    }
559
560    pub fn with_sql_handler(self, sql_handler: ServerSqlQueryHandlerRef) -> Self {
561        let sql_router = HttpServer::route_sql(ApiState { sql_handler });
562
563        Self {
564            router: self
565                .router
566                .nest(&format!("/{HTTP_API_VERSION}"), sql_router),
567            ..self
568        }
569    }
570
571    pub fn with_logs_handler(self, logs_handler: LogQueryHandlerRef) -> Self {
572        let logs_router = HttpServer::route_logs(logs_handler);
573
574        Self {
575            router: self
576                .router
577                .nest(&format!("/{HTTP_API_VERSION}"), logs_router),
578            ..self
579        }
580    }
581
582    pub fn with_opentsdb_handler(self, handler: OpentsdbProtocolHandlerRef) -> Self {
583        Self {
584            router: self.router.nest(
585                &format!("/{HTTP_API_VERSION}/opentsdb"),
586                HttpServer::route_opentsdb(handler),
587            ),
588            ..self
589        }
590    }
591
592    pub fn with_influxdb_handler(self, handler: InfluxdbLineProtocolHandlerRef) -> Self {
593        Self {
594            router: self.router.nest(
595                &format!("/{HTTP_API_VERSION}/influxdb"),
596                HttpServer::route_influxdb(handler),
597            ),
598            ..self
599        }
600    }
601
602    pub fn with_prom_handler(
603        self,
604        handler: PromStoreProtocolHandlerRef,
605        pipeline_handler: Option<PipelineHandlerRef>,
606        prom_store_with_metric_engine: bool,
607        prom_validation_mode: PromValidationMode,
608    ) -> Self {
609        let state = PromStoreState {
610            prom_store_handler: handler,
611            pipeline_handler,
612            prom_store_with_metric_engine,
613            prom_validation_mode,
614        };
615
616        Self {
617            router: self.router.nest(
618                &format!("/{HTTP_API_VERSION}/prometheus"),
619                HttpServer::route_prom(state),
620            ),
621            ..self
622        }
623    }
624
625    pub fn with_prometheus_handler(self, handler: PrometheusHandlerRef) -> Self {
626        Self {
627            router: self.router.nest(
628                &format!("/{HTTP_API_VERSION}/prometheus/api/v1"),
629                HttpServer::route_prometheus(handler),
630            ),
631            ..self
632        }
633    }
634
635    pub fn with_otlp_handler(
636        self,
637        handler: OpenTelemetryProtocolHandlerRef,
638        with_metric_engine: bool,
639    ) -> Self {
640        Self {
641            router: self.router.nest(
642                &format!("/{HTTP_API_VERSION}/otlp"),
643                HttpServer::route_otlp(handler, with_metric_engine),
644            ),
645            ..self
646        }
647    }
648
649    pub fn with_user_provider(self, user_provider: UserProviderRef) -> Self {
650        Self {
651            user_provider: Some(user_provider),
652            ..self
653        }
654    }
655
656    pub fn with_metrics_handler(self, handler: MetricsHandler) -> Self {
657        Self {
658            router: self.router.merge(HttpServer::route_metrics(handler)),
659            ..self
660        }
661    }
662
663    pub fn with_log_ingest_handler(
664        self,
665        handler: PipelineHandlerRef,
666        validator: Option<LogValidatorRef>,
667        ingest_interceptor: Option<LogIngestInterceptorRef<Error>>,
668    ) -> Self {
669        let log_state = LogState {
670            log_handler: handler,
671            log_validator: validator,
672            ingest_interceptor,
673        };
674
675        let router = self.router.nest(
676            &format!("/{HTTP_API_VERSION}"),
677            HttpServer::route_pipelines(log_state.clone()),
678        );
679        // deprecated since v0.11.0. Use `/logs` and `/pipelines` instead.
680        let router = router.nest(
681            &format!("/{HTTP_API_VERSION}/events"),
682            #[allow(deprecated)]
683            HttpServer::route_log_deprecated(log_state.clone()),
684        );
685
686        let router = router.nest(
687            &format!("/{HTTP_API_VERSION}/loki"),
688            HttpServer::route_loki(log_state.clone()),
689        );
690
691        let router = router.nest(
692            &format!("/{HTTP_API_VERSION}/elasticsearch"),
693            HttpServer::route_elasticsearch(log_state.clone()),
694        );
695
696        let router = router.nest(
697            &format!("/{HTTP_API_VERSION}/elasticsearch/"),
698            Router::new()
699                .route("/", routing::get(elasticsearch::handle_get_version))
700                .with_state(log_state),
701        );
702
703        Self { router, ..self }
704    }
705
706    pub fn with_plugins(self, plugins: Plugins) -> Self {
707        Self { plugins, ..self }
708    }
709
710    pub fn with_greptime_config_options(self, opts: String) -> Self {
711        let config_router = HttpServer::route_config(GreptimeOptionsConfigState {
712            greptime_config_options: opts,
713        });
714
715        Self {
716            router: self.router.merge(config_router),
717            ..self
718        }
719    }
720
721    pub fn with_jaeger_handler(self, handler: JaegerQueryHandlerRef) -> Self {
722        Self {
723            router: self.router.nest(
724                &format!("/{HTTP_API_VERSION}/jaeger"),
725                HttpServer::route_jaeger(handler),
726            ),
727            ..self
728        }
729    }
730
731    pub fn with_extra_router(self, router: Router) -> Self {
732        Self {
733            router: self.router.merge(router),
734            ..self
735        }
736    }
737
738    pub fn build(self) -> HttpServer {
739        HttpServer {
740            options: self.options,
741            user_provider: self.user_provider,
742            shutdown_tx: Mutex::new(None),
743            plugins: self.plugins,
744            router: StdMutex::new(self.router),
745            bind_addr: None,
746        }
747    }
748}
749
750impl HttpServer {
751    /// Gets the router and adds necessary root routes (health, status, dashboard).
752    pub fn make_app(&self) -> Router {
753        let mut router = {
754            let router = self.router.lock().unwrap();
755            router.clone()
756        };
757
758        router = router
759            .route(
760                "/health",
761                routing::get(handler::health).post(handler::health),
762            )
763            .route(
764                &format!("/{HTTP_API_VERSION}/health"),
765                routing::get(handler::health).post(handler::health),
766            )
767            .route(
768                "/ready",
769                routing::get(handler::health).post(handler::health),
770            );
771
772        router = router.route("/status", routing::get(handler::status));
773
774        #[cfg(feature = "dashboard")]
775        {
776            if !self.options.disable_dashboard {
777                info!("Enable dashboard service at '/dashboard'");
778                // redirect /dashboard to /dashboard/
779                router = router.route(
780                    "/dashboard",
781                    routing::get(|uri: axum::http::uri::Uri| async move {
782                        let path = uri.path();
783                        let query = uri.query().map(|q| format!("?{}", q)).unwrap_or_default();
784
785                        let new_uri = format!("{}/{}", path, query);
786                        axum::response::Redirect::permanent(&new_uri)
787                    }),
788                );
789
790                // "/dashboard" and "/dashboard/" are two different paths in Axum.
791                // We cannot nest "/dashboard/", because we already map "/dashboard/{*x}" while nesting "/dashboard".
792                // So we explicitly route "/dashboard/" here.
793                router = router
794                    .route(
795                        "/dashboard/",
796                        routing::get(dashboard::static_handler).post(dashboard::static_handler),
797                    )
798                    .route(
799                        "/dashboard/{*x}",
800                        routing::get(dashboard::static_handler).post(dashboard::static_handler),
801                    );
802            }
803        }
804
805        // Add a layer to collect HTTP metrics for axum.
806        router = router.route_layer(middleware::from_fn(http_metrics_layer));
807
808        router
809    }
810
811    /// Attaches middlewares and debug routes to the router.
812    /// Callers should call this method after [HttpServer::make_app()].
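    ///
    /// E.g. (sketch): `let app = server.build(server.make_app())?;`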
813    pub fn build(&self, router: Router) -> Result<Router> {
814        let timeout_layer = if self.options.timeout != Duration::default() {
815            Some(ServiceBuilder::new().layer(DynamicTimeoutLayer::new(self.options.timeout)))
816        } else {
817            info!("HTTP server timeout is disabled");
818            None
819        };
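        // Note: besides this server-wide default, DynamicTimeoutLayer honors a per-request
        // timeout passed via the GREPTIME_DB_HEADER_TIMEOUT header (e.g. "20ms"), as
        // exercised by `test_http_server_request_timeout` below.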
820        let body_limit_layer = if self.options.body_limit != ReadableSize(0) {
821            Some(
822                ServiceBuilder::new()
823                    .layer(DefaultBodyLimit::max(self.options.body_limit.0 as usize)),
824            )
825        } else {
826            info!("HTTP server body limit is disabled");
827            None
828        };
829        let cors_layer = if self.options.enable_cors {
830            Some(
831                CorsLayer::new()
832                    .allow_methods([
833                        Method::GET,
834                        Method::POST,
835                        Method::PUT,
836                        Method::DELETE,
837                        Method::HEAD,
838                    ])
839                    .allow_origin(if self.options.cors_allowed_origins.is_empty() {
840                        AllowOrigin::from(Any)
841                    } else {
842                        AllowOrigin::from(
843                            self.options
844                                .cors_allowed_origins
845                                .iter()
846                                .map(|s| {
847                                    HeaderValue::from_str(s.as_str())
848                                        .context(InvalidHeaderValueSnafu)
849                                })
850                                .collect::<Result<Vec<HeaderValue>>>()?,
851                        )
852                    })
853                    .allow_headers(Any),
854            )
855        } else {
856            info!("HTTP server cross-origin is disabled");
857            None
858        };
859
860        Ok(router
861            // middlewares
862            .layer(
863                ServiceBuilder::new()
864                    // Disable on-failure tracing, because printing it out isn't very helpful,
865                    // and we have `impl IntoResponse for Error`, which prints more detailed error messages.
866                    .layer(TraceLayer::new_for_http().on_failure(()))
867                    .option_layer(cors_layer)
868                    .option_layer(timeout_layer)
869                    .option_layer(body_limit_layer)
870                    // auth layer
871                    .layer(middleware::from_fn_with_state(
872                        AuthState::new(self.user_provider.clone()),
873                        authorize::check_http_auth,
874                    ))
875                    .layer(middleware::from_fn(hints::extract_hints))
876                    .layer(middleware::from_fn(
877                        read_preference::extract_read_preference,
878                    )),
879            )
880            // Handlers for debugging; we don't expect a timeout here.
881            .nest(
882                "/debug",
883                Router::new()
884                    // handler for changing log level dynamically
885                    .route("/log_level", routing::post(dyn_log::dyn_log_handler))
886                    .nest(
887                        "/prof",
888                        Router::new()
889                            .route("/cpu", routing::post(pprof::pprof_handler))
890                            .route("/mem", routing::post(mem_prof::mem_prof_handler))
891                            .route(
892                                "/mem/activate",
893                                routing::post(mem_prof::activate_heap_prof_handler),
894                            )
895                            .route(
896                                "/mem/deactivate",
897                                routing::post(mem_prof::deactivate_heap_prof_handler),
898                            )
899                            .route(
900                                "/mem/status",
901                                routing::get(mem_prof::heap_prof_status_handler),
902                            ),
903                    ),
904            ))
905    }
906
907    fn route_metrics<S>(metrics_handler: MetricsHandler) -> Router<S> {
908        Router::new()
909            .route("/metrics", routing::get(handler::metrics))
910            .with_state(metrics_handler)
911    }
912
913    fn route_loki<S>(log_state: LogState) -> Router<S> {
914        Router::new()
915            .route("/api/v1/push", routing::post(loki::loki_ingest))
916            .layer(
917                ServiceBuilder::new()
918                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
919            )
920            .with_state(log_state)
921    }
922
923    fn route_elasticsearch<S>(log_state: LogState) -> Router<S> {
924        Router::new()
925            // Return fake response for HEAD '/' request.
926            .route(
927                "/",
928                routing::head((HttpStatusCode::OK, elasticsearch::elasticsearch_headers())),
929            )
930            // Return fake response for Elasticsearch version request.
931            .route("/", routing::get(elasticsearch::handle_get_version))
932            // Return fake response for Elasticsearch license request.
933            .route("/_license", routing::get(elasticsearch::handle_get_license))
934            .route("/_bulk", routing::post(elasticsearch::handle_bulk_api))
935            .route(
936                "/{index}/_bulk",
937                routing::post(elasticsearch::handle_bulk_api_with_index),
938            )
939            // Return fake response for Elasticsearch ILM request.
940            .route(
941                "/_ilm/policy/{*path}",
942                routing::any((
943                    HttpStatusCode::OK,
944                    elasticsearch::elasticsearch_headers(),
945                    axum::Json(serde_json::json!({})),
946                )),
947            )
948            // Return fake response for Elasticsearch index template request.
949            .route(
950                "/_index_template/{*path}",
951                routing::any((
952                    HttpStatusCode::OK,
953                    elasticsearch::elasticsearch_headers(),
954                    axum::Json(serde_json::json!({})),
955                )),
956            )
957            // Return fake response for Elasticsearch ingest pipeline request.
958            // See: https://www.elastic.co/guide/en/elasticsearch/reference/8.8/put-pipeline-api.html.
959            .route(
960                "/_ingest/{*path}",
961                routing::any((
962                    HttpStatusCode::OK,
963                    elasticsearch::elasticsearch_headers(),
964                    axum::Json(serde_json::json!({})),
965                )),
966            )
967            // Return fake response for Elasticsearch nodes discovery request.
968            // See: https://www.elastic.co/guide/en/elasticsearch/reference/8.8/cluster.html.
969            .route(
970                "/_nodes/{*path}",
971                routing::any((
972                    HttpStatusCode::OK,
973                    elasticsearch::elasticsearch_headers(),
974                    axum::Json(serde_json::json!({})),
975                )),
976            )
977            // Return fake response for Logstash APIs requests.
978            // See: https://www.elastic.co/guide/en/elasticsearch/reference/8.8/logstash-apis.html
979            .route(
980                "/logstash/{*path}",
981                routing::any((
982                    HttpStatusCode::OK,
983                    elasticsearch::elasticsearch_headers(),
984                    axum::Json(serde_json::json!({})),
985                )),
986            )
987            .route(
988                "/_logstash/{*path}",
989                routing::any((
990                    HttpStatusCode::OK,
991                    elasticsearch::elasticsearch_headers(),
992                    axum::Json(serde_json::json!({})),
993                )),
994            )
995            .layer(ServiceBuilder::new().layer(RequestDecompressionLayer::new()))
996            .with_state(log_state)
997    }
998
999    #[deprecated(since = "0.11.0", note = "Use `route_pipelines()` instead.")]
1000    fn route_log_deprecated<S>(log_state: LogState) -> Router<S> {
1001        Router::new()
1002            .route("/logs", routing::post(event::log_ingester))
1003            .route(
1004                "/pipelines/{pipeline_name}",
1005                routing::get(event::query_pipeline),
1006            )
1007            .route(
1008                "/pipelines/{pipeline_name}",
1009                routing::post(event::add_pipeline),
1010            )
1011            .route(
1012                "/pipelines/{pipeline_name}",
1013                routing::delete(event::delete_pipeline),
1014            )
1015            .route("/pipelines/dryrun", routing::post(event::pipeline_dryrun))
1016            .layer(
1017                ServiceBuilder::new()
1018                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
1019            )
1020            .with_state(log_state)
1021    }
1022
1023    fn route_pipelines<S>(log_state: LogState) -> Router<S> {
1024        Router::new()
1025            .route("/ingest", routing::post(event::log_ingester))
1026            .route(
1027                "/pipelines/{pipeline_name}",
1028                routing::get(event::query_pipeline),
1029            )
1030            .route(
1031                "/pipelines/{pipeline_name}",
1032                routing::post(event::add_pipeline),
1033            )
1034            .route(
1035                "/pipelines/{pipeline_name}",
1036                routing::delete(event::delete_pipeline),
1037            )
1038            .route("/pipelines/_dryrun", routing::post(event::pipeline_dryrun))
1039            .layer(
1040                ServiceBuilder::new()
1041                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
1042            )
1043            .with_state(log_state)
1044    }
1045
1046    fn route_sql<S>(api_state: ApiState) -> Router<S> {
1047        Router::new()
1048            .route("/sql", routing::get(handler::sql).post(handler::sql))
1049            .route(
1050                "/sql/parse",
1051                routing::get(handler::sql_parse).post(handler::sql_parse),
1052            )
1053            .route(
1054                "/sql/format",
1055                routing::get(handler::sql_format).post(handler::sql_format),
1056            )
1057            .route(
1058                "/promql",
1059                routing::get(handler::promql).post(handler::promql),
1060            )
1061            .with_state(api_state)
1062    }
1063
1064    fn route_logs<S>(log_handler: LogQueryHandlerRef) -> Router<S> {
1065        Router::new()
1066            .route("/logs", routing::get(logs::logs).post(logs::logs))
1067            .with_state(log_handler)
1068    }
1069
1070    /// Route Prometheus [HTTP API].
1071    ///
1072    /// [HTTP API]: https://prometheus.io/docs/prometheus/latest/querying/api/
1073    fn route_prometheus<S>(prometheus_handler: PrometheusHandlerRef) -> Router<S> {
1074        Router::new()
1075            .route(
1076                "/format_query",
1077                routing::post(format_query).get(format_query),
1078            )
1079            .route("/status/buildinfo", routing::get(build_info_query))
1080            .route("/query", routing::post(instant_query).get(instant_query))
1081            .route("/query_range", routing::post(range_query).get(range_query))
1082            .route("/labels", routing::post(labels_query).get(labels_query))
1083            .route("/series", routing::post(series_query).get(series_query))
1084            .route("/parse_query", routing::post(parse_query).get(parse_query))
1085            .route(
1086                "/label/{label_name}/values",
1087                routing::get(label_values_query),
1088            )
1089            .layer(ServiceBuilder::new().layer(CompressionLayer::new()))
1090            .with_state(prometheus_handler)
1091    }
1092
1093    /// Route Prometheus remote [read] and [write] API. In other places the related modules are
1094    /// called `prom_store`.
1095    ///
1096    /// [read]: https://prometheus.io/docs/prometheus/latest/querying/remote_read_api/
1097    /// [write]: https://prometheus.io/docs/concepts/remote_write_spec/
1098    fn route_prom<S>(state: PromStoreState) -> Router<S> {
1099        Router::new()
1100            .route("/read", routing::post(prom_store::remote_read))
1101            .route("/write", routing::post(prom_store::remote_write))
1102            .with_state(state)
1103    }
1104
1105    fn route_influxdb<S>(influxdb_handler: InfluxdbLineProtocolHandlerRef) -> Router<S> {
1106        Router::new()
1107            .route("/write", routing::post(influxdb_write_v1))
1108            .route("/api/v2/write", routing::post(influxdb_write_v2))
1109            .layer(
1110                ServiceBuilder::new()
1111                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
1112            )
1113            .route("/ping", routing::get(influxdb_ping))
1114            .route("/health", routing::get(influxdb_health))
1115            .with_state(influxdb_handler)
1116    }
1117
1118    fn route_opentsdb<S>(opentsdb_handler: OpentsdbProtocolHandlerRef) -> Router<S> {
1119        Router::new()
1120            .route("/api/put", routing::post(opentsdb::put))
1121            .with_state(opentsdb_handler)
1122    }
1123
1124    fn route_otlp<S>(
1125        otlp_handler: OpenTelemetryProtocolHandlerRef,
1126        with_metric_engine: bool,
1127    ) -> Router<S> {
1128        Router::new()
1129            .route("/v1/metrics", routing::post(otlp::metrics))
1130            .route("/v1/traces", routing::post(otlp::traces))
1131            .route("/v1/logs", routing::post(otlp::logs))
1132            .layer(
1133                ServiceBuilder::new()
1134                    .layer(RequestDecompressionLayer::new().pass_through_unaccepted(true)),
1135            )
1136            .with_state(OtlpState {
1137                with_metric_engine,
1138                handler: otlp_handler,
1139            })
1140    }
1141
1142    fn route_config<S>(state: GreptimeOptionsConfigState) -> Router<S> {
1143        Router::new()
1144            .route("/config", routing::get(handler::config))
1145            .with_state(state)
1146    }
1147
1148    fn route_jaeger<S>(handler: JaegerQueryHandlerRef) -> Router<S> {
1149        Router::new()
1150            .route("/api/services", routing::get(jaeger::handle_get_services))
1151            .route(
1152                "/api/services/{service_name}/operations",
1153                routing::get(jaeger::handle_get_operations_by_service),
1154            )
1155            .route(
1156                "/api/operations",
1157                routing::get(jaeger::handle_get_operations),
1158            )
1159            .route("/api/traces", routing::get(jaeger::handle_find_traces))
1160            .route(
1161                "/api/traces/{trace_id}",
1162                routing::get(jaeger::handle_get_trace),
1163            )
1164            .with_state(handler)
1165    }
1166}
1167
1168pub const HTTP_SERVER: &str = "HTTP_SERVER";
1169
1170#[async_trait]
1171impl Server for HttpServer {
1172    async fn shutdown(&self) -> Result<()> {
1173        let mut shutdown_tx = self.shutdown_tx.lock().await;
1174        if let Some(tx) = shutdown_tx.take() {
1175            if tx.send(()).is_err() {
1176                info!("Receiver dropped, the HTTP server has already exited");
1177            }
1178        }
1179        info!("Shutdown HTTP server");
1180
1181        Ok(())
1182    }
1183
1184    async fn start(&mut self, listening: SocketAddr) -> Result<()> {
1185        let (tx, rx) = oneshot::channel();
1186        let serve = {
1187            let mut shutdown_tx = self.shutdown_tx.lock().await;
1188            ensure!(
1189                shutdown_tx.is_none(),
1190                AlreadyStartedSnafu { server: "HTTP" }
1191            );
1192
1193            let mut app = self.make_app();
1194            if let Some(configurator) = self.plugins.get::<ConfiguratorRef>() {
1195                app = configurator.config_http(app);
1196            }
1197            let app = self.build(app)?;
1198            let listener = tokio::net::TcpListener::bind(listening)
1199                .await
1200                .context(AddressBindSnafu { addr: listening })?
1201                .tap_io(|tcp_stream| {
1202                    if let Err(e) = tcp_stream.set_nodelay(true) {
1203                        error!(e; "Failed to set TCP_NODELAY on incoming connection");
1204                    }
1205                });
1206            let serve = axum::serve(listener, app.into_make_service());
1207
1208            // FIXME(yingwen): Support keepalive.
1209            // See:
1210            // - https://github.com/tokio-rs/axum/discussions/2939
1211            // - https://stackoverflow.com/questions/73069718/how-do-i-keep-alive-tokiotcpstream-in-rust
1212            // let server = axum::Server::try_bind(&listening)
1213            //     .with_context(|_| AddressBindSnafu { addr: listening })?
1214            //     .tcp_nodelay(true)
1215            //     // Enable TCP keepalive to close the dangling established connections.
1216            //     // It's configured to let the keepalive probes first send after the connection sits
1217            //     // idle for 59 minutes, and then send every 10 seconds for 6 times.
1218            //     // So the connection will be closed after roughly 1 hour.
1219            //     .tcp_keepalive(Some(Duration::from_secs(59 * 60)))
1220            //     .tcp_keepalive_interval(Some(Duration::from_secs(10)))
1221            //     .tcp_keepalive_retries(Some(6))
1222            //     .serve(app.into_make_service());
1223
1224            *shutdown_tx = Some(tx);
1225
1226            serve
1227        };
1228        let listening = serve.local_addr().context(InternalIoSnafu)?;
1229        info!("HTTP server is bound to {}", listening);
1230
1231        common_runtime::spawn_global(async move {
1232            if let Err(e) = serve
1233                .with_graceful_shutdown(rx.map(drop))
1234                .await
1235                .context(InternalIoSnafu)
1236            {
1237                error!(e; "Failed to shutdown http server");
1238            }
1239        });
1240
1241        self.bind_addr = Some(listening);
1242        Ok(())
1243    }
1244
1245    fn name(&self) -> &str {
1246        HTTP_SERVER
1247    }
1248
1249    fn bind_addr(&self) -> Option<SocketAddr> {
1250        self.bind_addr
1251    }
1252}
1253
1254#[cfg(test)]
1255mod test {
1256    use std::future::pending;
1257    use std::io::Cursor;
1258    use std::sync::Arc;
1259
1260    use arrow_ipc::reader::FileReader;
1261    use arrow_schema::DataType;
1262    use axum::handler::Handler;
1263    use axum::http::StatusCode;
1264    use axum::routing::get;
1265    use common_query::Output;
1266    use common_recordbatch::RecordBatches;
1267    use datafusion_expr::LogicalPlan;
1268    use datatypes::prelude::*;
1269    use datatypes::schema::{ColumnSchema, Schema};
1270    use datatypes::vectors::{StringVector, UInt32Vector};
1271    use header::constants::GREPTIME_DB_HEADER_TIMEOUT;
1272    use query::parser::PromQuery;
1273    use query::query_engine::DescribeResult;
1274    use session::context::QueryContextRef;
1275    use sql::statements::statement::Statement;
1276    use tokio::sync::mpsc;
1277    use tokio::time::Instant;
1278
1279    use super::*;
1280    use crate::error::Error;
1281    use crate::http::test_helpers::TestClient;
1282    use crate::query_handler::sql::{ServerSqlQueryHandlerAdapter, SqlQueryHandler};
1283
1284    struct DummyInstance {
1285        _tx: mpsc::Sender<(String, Vec<u8>)>,
1286    }
1287
1288    #[async_trait]
1289    impl SqlQueryHandler for DummyInstance {
1290        type Error = Error;
1291
1292        async fn do_query(&self, _: &str, _: QueryContextRef) -> Vec<Result<Output>> {
1293            unimplemented!()
1294        }
1295
1296        async fn do_promql_query(
1297            &self,
1298            _: &PromQuery,
1299            _: QueryContextRef,
1300        ) -> Vec<std::result::Result<Output, Self::Error>> {
1301            unimplemented!()
1302        }
1303
1304        async fn do_exec_plan(
1305            &self,
1306            _stmt: Option<Statement>,
1307            _plan: LogicalPlan,
1308            _query_ctx: QueryContextRef,
1309        ) -> std::result::Result<Output, Self::Error> {
1310            unimplemented!()
1311        }
1312
1313        async fn do_describe(
1314            &self,
1315            _stmt: sql::statements::statement::Statement,
1316            _query_ctx: QueryContextRef,
1317        ) -> Result<Option<DescribeResult>> {
1318            unimplemented!()
1319        }
1320
1321        async fn is_valid_schema(&self, _catalog: &str, _schema: &str) -> Result<bool> {
1322            Ok(true)
1323        }
1324    }
1325
1326    fn timeout() -> DynamicTimeoutLayer {
1327        DynamicTimeoutLayer::new(Duration::from_millis(10))
1328    }
1329
1330    async fn forever() {
1331        pending().await
1332    }
1333
1334    fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router {
1335        make_test_app_custom(tx, HttpOptions::default())
1336    }
1337
1338    fn make_test_app_custom(tx: mpsc::Sender<(String, Vec<u8>)>, options: HttpOptions) -> Router {
1339        let instance = Arc::new(DummyInstance { _tx: tx });
1340        let sql_instance = ServerSqlQueryHandlerAdapter::arc(instance.clone());
1341        let server = HttpServerBuilder::new(options)
1342            .with_sql_handler(sql_instance)
1343            .build();
1344        server.build(server.make_app()).unwrap().route(
1345            "/test/timeout",
1346            get(forever.layer(ServiceBuilder::new().layer(timeout()))),
1347        )
1348    }
1349
1350    #[tokio::test]
1351    pub async fn test_cors() {
1352        // cors is on by default
1353        let (tx, _rx) = mpsc::channel(100);
1354        let app = make_test_app(tx);
1355        let client = TestClient::new(app).await;
1356
1357        let res = client.get("/health").send().await;
1358
1359        assert_eq!(res.status(), StatusCode::OK);
1360        assert_eq!(
1361            res.headers()
1362                .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1363                .expect("expect cors header origin"),
1364            "*"
1365        );
1366
1367        let res = client.get("/v1/health").send().await;
1368
1369        assert_eq!(res.status(), StatusCode::OK);
1370        assert_eq!(
1371            res.headers()
1372                .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1373                .expect("expect cors header origin"),
1374            "*"
1375        );
1376
1377        let res = client
1378            .options("/health")
1379            .header("Access-Control-Request-Headers", "x-greptime-auth")
1380            .header("Access-Control-Request-Method", "DELETE")
1381            .header("Origin", "https://example.com")
1382            .send()
1383            .await;
1384        assert_eq!(res.status(), StatusCode::OK);
1385        assert_eq!(
1386            res.headers()
1387                .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1388                .expect("expect cors header origin"),
1389            "*"
1390        );
1391        assert_eq!(
1392            res.headers()
1393                .get(http::header::ACCESS_CONTROL_ALLOW_HEADERS)
1394                .expect("expect cors header headers"),
1395            "*"
1396        );
1397        assert_eq!(
1398            res.headers()
1399                .get(http::header::ACCESS_CONTROL_ALLOW_METHODS)
1400                .expect("expect cors header methods"),
1401            "GET,POST,PUT,DELETE,HEAD"
1402        );
1403    }
1404
1405    #[tokio::test]
1406    pub async fn test_cors_custom_origins() {
1407        // cors is on by default
1408        let (tx, _rx) = mpsc::channel(100);
1409        let origin = "https://example.com";
1410
1411        let options = HttpOptions {
1412            cors_allowed_origins: vec![origin.to_string()],
1413            ..Default::default()
1414        };
1415
1416        let app = make_test_app_custom(tx, options);
1417        let client = TestClient::new(app).await;
1418
1419        let res = client.get("/health").header("Origin", origin).send().await;
1420
1421        assert_eq!(res.status(), StatusCode::OK);
1422        assert_eq!(
1423            res.headers()
1424                .get(http::header::ACCESS_CONTROL_ALLOW_ORIGIN)
1425                .expect("expect cors header origin"),
1426            origin
1427        );
1428
1429        let res = client
1430            .get("/health")
1431            .header("Origin", "https://notallowed.com")
1432            .send()
1433            .await;
1434
1435        assert_eq!(res.status(), StatusCode::OK);
1436        assert!(!res
1437            .headers()
1438            .contains_key(http::header::ACCESS_CONTROL_ALLOW_ORIGIN));
1439    }
1440
1441    #[tokio::test]
1442    pub async fn test_cors_disabled() {
1443        // cors is on by default
1444        let (tx, _rx) = mpsc::channel(100);
1445
1446        let options = HttpOptions {
1447            enable_cors: false,
1448            ..Default::default()
1449        };
1450
1451        let app = make_test_app_custom(tx, options);
1452        let client = TestClient::new(app).await;
1453
1454        let res = client.get("/health").send().await;
1455
1456        assert_eq!(res.status(), StatusCode::OK);
1457        assert!(!res
1458            .headers()
1459            .contains_key(http::header::ACCESS_CONTROL_ALLOW_ORIGIN));
1460    }
1461
1462    #[test]
1463    fn test_http_options_default() {
1464        let default = HttpOptions::default();
1465        assert_eq!("127.0.0.1:4000".to_string(), default.addr);
1466        assert_eq!(Duration::from_secs(0), default.timeout)
1467    }
1468
1469    #[tokio::test]
1470    async fn test_http_server_request_timeout() {
1471        common_telemetry::init_default_ut_logging();
1472
1473        let (tx, _rx) = mpsc::channel(100);
1474        let app = make_test_app(tx);
1475        let client = TestClient::new(app).await;
1476        let res = client.get("/test/timeout").send().await;
1477        assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT);
1478
1479        let now = Instant::now();
1480        let res = client
1481            .get("/test/timeout")
1482            .header(GREPTIME_DB_HEADER_TIMEOUT, "20ms")
1483            .send()
1484            .await;
1485        assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT);
1486        let elapsed = now.elapsed();
1487        assert!(elapsed > Duration::from_millis(15));
1488
1489        tokio::time::timeout(
1490            Duration::from_millis(15),
1491            client
1492                .get("/test/timeout")
1493                .header(GREPTIME_DB_HEADER_TIMEOUT, "0s")
1494                .send(),
1495        )
1496        .await
1497        .unwrap_err();
1498
1499        tokio::time::timeout(
1500            Duration::from_millis(15),
1501            client
1502                .get("/test/timeout")
1503                .header(
1504                    GREPTIME_DB_HEADER_TIMEOUT,
1505                    humantime::format_duration(Duration::default()).to_string(),
1506                )
1507                .send(),
1508        )
1509        .await
1510        .unwrap_err();
1511    }
1512
1513    #[tokio::test]
1514    async fn test_schema_for_empty_response() {
1515        let column_schemas = vec![
1516            ColumnSchema::new("numbers", ConcreteDataType::uint32_datatype(), false),
1517            ColumnSchema::new("strings", ConcreteDataType::string_datatype(), true),
1518        ];
1519        let schema = Arc::new(Schema::new(column_schemas));
1520
1521        let recordbatches = RecordBatches::try_new(schema.clone(), vec![]).unwrap();
1522        let outputs = vec![Ok(Output::new_with_record_batches(recordbatches))];
1523
1524        let json_resp = GreptimedbV1Response::from_output(outputs).await;
1525        if let HttpResponse::GreptimedbV1(json_resp) = json_resp {
1526            let json_output = &json_resp.output[0];
1527            if let GreptimeQueryOutput::Records(r) = json_output {
1528                assert_eq!(r.num_rows(), 0);
1529                assert_eq!(r.num_cols(), 2);
1530                assert_eq!(r.schema.column_schemas[0].name, "numbers");
1531                assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1532            } else {
1533                panic!("invalid output type");
1534            }
1535        } else {
1536            panic!("invalid format")
1537        }
1538    }
1539
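    // Renders the same record batch through every supported response format
    // and checks the payload produced by each.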
1540    #[tokio::test]
1541    async fn test_recordbatches_conversion() {
1542        let column_schemas = vec![
1543            ColumnSchema::new("numbers", ConcreteDataType::uint32_datatype(), false),
1544            ColumnSchema::new("strings", ConcreteDataType::string_datatype(), true),
1545        ];
1546        let schema = Arc::new(Schema::new(column_schemas));
1547        let columns: Vec<VectorRef> = vec![
1548            Arc::new(UInt32Vector::from_slice(vec![1, 2, 3, 4])),
1549            Arc::new(StringVector::from(vec![
1550                None,
1551                Some("hello"),
1552                Some("greptime"),
1553                None,
1554            ])),
1555        ];
1556        let recordbatch = RecordBatch::new(schema.clone(), columns).unwrap();
1557
1558        for format in [
1559            ResponseFormat::GreptimedbV1,
1560            ResponseFormat::InfluxdbV1,
1561            ResponseFormat::Csv(true, true),
1562            ResponseFormat::Table,
1563            ResponseFormat::Arrow,
1564            ResponseFormat::Json,
1565            ResponseFormat::Null,
1566        ] {
1567            let recordbatches =
1568                RecordBatches::try_new(schema.clone(), vec![recordbatch.clone()]).unwrap();
1569            let outputs = vec![Ok(Output::new_with_record_batches(recordbatches))];
1570            let json_resp = match format {
1571                ResponseFormat::Arrow => ArrowResponse::from_output(outputs, None).await,
1572                ResponseFormat::Csv(with_names, with_types) => {
1573                    CsvResponse::from_output(outputs, with_names, with_types).await
1574                }
1575                ResponseFormat::Table => TableResponse::from_output(outputs).await,
1576                ResponseFormat::GreptimedbV1 => GreptimedbV1Response::from_output(outputs).await,
1577                ResponseFormat::InfluxdbV1 => InfluxdbV1Response::from_output(outputs, None).await,
1578                ResponseFormat::Json => JsonResponse::from_output(outputs).await,
1579                ResponseFormat::Null => NullResponse::from_output(outputs).await,
1580            };
1581
1582            match json_resp {
1583                HttpResponse::GreptimedbV1(resp) => {
1584                    let json_output = &resp.output[0];
1585                    if let GreptimeQueryOutput::Records(r) = json_output {
1586                        assert_eq!(r.num_rows(), 4);
1587                        assert_eq!(r.num_cols(), 2);
1588                        assert_eq!(r.schema.column_schemas[0].name, "numbers");
1589                        assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1590                        assert_eq!(r.rows[0][0], serde_json::Value::from(1));
1591                        assert_eq!(r.rows[0][1], serde_json::Value::Null);
1592                    } else {
1593                        panic!("invalid output type");
1594                    }
1595                }
1596                HttpResponse::InfluxdbV1(resp) => {
1597                    let json_output = &resp.results()[0];
1598                    assert_eq!(json_output.num_rows(), 4);
1599                    assert_eq!(json_output.num_cols(), 2);
1600                    assert_eq!(json_output.series[0].columns[0], "numbers");
1601                    assert_eq!(
1602                        json_output.series[0].values[0][0],
1603                        serde_json::Value::from(1)
1604                    );
1605                    assert_eq!(json_output.series[0].values[0][1], serde_json::Value::Null);
1606                }
1607                HttpResponse::Csv(resp) => {
1608                    let output = &resp.output()[0];
1609                    if let GreptimeQueryOutput::Records(r) = output {
1610                        assert_eq!(r.num_rows(), 4);
1611                        assert_eq!(r.num_cols(), 2);
1612                        assert_eq!(r.schema.column_schemas[0].name, "numbers");
1613                        assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1614                        assert_eq!(r.rows[0][0], serde_json::Value::from(1));
1615                        assert_eq!(r.rows[0][1], serde_json::Value::Null);
1616                    } else {
1617                        panic!("invalid output type");
1618                    }
1619                }
1620
1621                HttpResponse::Table(resp) => {
1622                    let output = &resp.output()[0];
1623                    if let GreptimeQueryOutput::Records(r) = output {
1624                        assert_eq!(r.num_rows(), 4);
1625                        assert_eq!(r.num_cols(), 2);
1626                        assert_eq!(r.schema.column_schemas[0].name, "numbers");
1627                        assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1628                        assert_eq!(r.rows[0][0], serde_json::Value::from(1));
1629                        assert_eq!(r.rows[0][1], serde_json::Value::Null);
1630                    } else {
1631                        panic!("invalid output type");
1632                    }
1633                }
1634
1635                HttpResponse::Arrow(resp) => {
1636                    let output = resp.data;
1637                    let mut reader =
1638                        FileReader::try_new(Cursor::new(output), None).expect("Arrow reader error");
1639                    let schema = reader.schema();
1640                    assert_eq!(schema.fields[0].name(), "numbers");
1641                    assert_eq!(schema.fields[0].data_type(), &DataType::UInt32);
1642                    assert_eq!(schema.fields[1].name(), "strings");
1643                    assert_eq!(schema.fields[1].data_type(), &DataType::Utf8);
1644
1645                    let rb = reader.next().unwrap().expect("read record batch failed");
1646                    assert_eq!(rb.num_columns(), 2);
1647                    assert_eq!(rb.num_rows(), 4);
1648                }
1649
1650                HttpResponse::Json(resp) => {
1651                    let output = &resp.output()[0];
1652                    if let GreptimeQueryOutput::Records(r) = output {
1653                        assert_eq!(r.num_rows(), 4);
1654                        assert_eq!(r.num_cols(), 2);
1655                        assert_eq!(r.schema.column_schemas[0].name, "numbers");
1656                        assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
1657                        assert_eq!(r.rows[0][0], serde_json::Value::from(1));
1658                        assert_eq!(r.rows[0][1], serde_json::Value::Null);
1659                    } else {
1660                        panic!("invalid output type");
1661                    }
1662                }
1663
1664                HttpResponse::Null(resp) => {
1665                    assert_eq!(resp.rows(), 4);
1666                }
1667
1668                HttpResponse::Error(err) => unreachable!("{err:?}"),
1669            }
1670        }
1671    }
1672
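    // Covers ResponseFormat parsing (valid, invalid and case-sensitive inputs),
    // the default format, and the as_str names.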
1673    #[test]
1674    fn test_response_format_misc() {
1675        assert_eq!(ResponseFormat::default(), ResponseFormat::GreptimedbV1);
1676        assert_eq!(ResponseFormat::parse("arrow"), Some(ResponseFormat::Arrow));
1677        assert_eq!(
1678            ResponseFormat::parse("csv"),
1679            Some(ResponseFormat::Csv(false, false))
1680        );
1681        assert_eq!(
1682            ResponseFormat::parse("csvwithnames"),
1683            Some(ResponseFormat::Csv(true, false))
1684        );
1685        assert_eq!(
1686            ResponseFormat::parse("csvwithnamesandtypes"),
1687            Some(ResponseFormat::Csv(true, true))
1688        );
1689        assert_eq!(ResponseFormat::parse("table"), Some(ResponseFormat::Table));
1690        assert_eq!(
1691            ResponseFormat::parse("greptimedb_v1"),
1692            Some(ResponseFormat::GreptimedbV1)
1693        );
1694        assert_eq!(
1695            ResponseFormat::parse("influxdb_v1"),
1696            Some(ResponseFormat::InfluxdbV1)
1697        );
1698        assert_eq!(ResponseFormat::parse("json"), Some(ResponseFormat::Json));
1699        assert_eq!(ResponseFormat::parse("null"), Some(ResponseFormat::Null));
1700
1701        // invalid formats
1702        assert_eq!(ResponseFormat::parse("invalid"), None);
1703        assert_eq!(ResponseFormat::parse(""), None);
1704        assert_eq!(ResponseFormat::parse("CSV"), None); // Case sensitive
1705
1706        // as str
1707        assert_eq!(ResponseFormat::Arrow.as_str(), "arrow");
1708        assert_eq!(ResponseFormat::Csv(false, false).as_str(), "csv");
1709        assert_eq!(ResponseFormat::Csv(true, true).as_str(), "csv");
1710        assert_eq!(ResponseFormat::Table.as_str(), "table");
1711        assert_eq!(ResponseFormat::GreptimedbV1.as_str(), "greptimedb_v1");
1712        assert_eq!(ResponseFormat::InfluxdbV1.as_str(), "influxdb_v1");
1713        assert_eq!(ResponseFormat::Json.as_str(), "json");
1714        assert_eq!(ResponseFormat::Null.as_str(), "null");
1715        assert_eq!(ResponseFormat::default().as_str(), "greptimedb_v1");
1716    }
1717}