// meta_srv/metasrv.rs
// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

15pub mod builder;
16
17use std::fmt::{self, Display};
18use std::sync::atomic::{AtomicBool, Ordering};
19use std::sync::{Arc, Mutex, RwLock};
20use std::time::Duration;
21
22use api::v1::meta::{HeartbeatConfig, Role};
23use clap::ValueEnum;
24use common_base::Plugins;
25use common_base::readable_size::ReadableSize;
26use common_config::{Configurable, DEFAULT_DATA_HOME};
27use common_event_recorder::EventRecorderOptions;
28use common_greptimedb_telemetry::GreptimeDBTelemetryTask;
29use common_meta::cache_invalidator::CacheInvalidatorRef;
30use common_meta::ddl::allocator::resource_id::ResourceIdAllocatorRef;
31use common_meta::ddl_manager::DdlManagerRef;
32use common_meta::distributed_time_constants::{
33    self, BASE_HEARTBEAT_INTERVAL, default_distributed_time_constants, frontend_heartbeat_interval,
34};
35use common_meta::election::LeaderChangeMessage;
36pub use common_meta::election::{ElectionRef, MetasrvNodeInfo};
37use common_meta::key::TableMetadataManagerRef;
38use common_meta::key::runtime_switch::RuntimeSwitchManagerRef;
39use common_meta::kv_backend::{KvBackendRef, ResettableKvBackend, ResettableKvBackendRef};
40use common_meta::leadership_notifier::{
41    LeadershipChangeNotifier, LeadershipChangeNotifierCustomizerRef,
42};
43use common_meta::node_expiry_listener::NodeExpiryListener;
44use common_meta::peer::{Peer, PeerDiscoveryRef};
45use common_meta::reconciliation::manager::ReconciliationManagerRef;
46use common_meta::region_keeper::MemoryRegionKeeperRef;
47use common_meta::region_registry::LeaderRegionRegistryRef;
48use common_meta::stats::topic::TopicStatsRegistryRef;
49use common_meta::wal_provider::WalProviderRef;
50use common_options::datanode::DatanodeClientOptions;
51use common_options::memory::MemoryOptions;
52use common_procedure::ProcedureManagerRef;
53use common_procedure::options::ProcedureConfig;
54use common_stat::ResourceStatRef;
55use common_telemetry::logging::{LoggingOptions, TracingOptions};
56use common_telemetry::{error, info, warn};
57use common_time::util::DefaultSystemTimer;
58use common_wal::config::MetasrvWalConfig;
59use serde::{Deserialize, Serialize};
60use servers::grpc::GrpcOptions;
61use servers::http::HttpOptions;
62use servers::tls::TlsOption;
63use snafu::{OptionExt, ResultExt};
64use store_api::storage::RegionId;
65use tokio::sync::broadcast::error::RecvError;
66
67use crate::cluster::MetaPeerClientRef;
68use crate::discovery;
69use crate::error::{
70    self, InitMetadataSnafu, KvBackendSnafu, Result, StartProcedureManagerSnafu,
71    StartTelemetryTaskSnafu, StopProcedureManagerSnafu,
72};
73use crate::failure_detector::PhiAccrualFailureDetectorOptions;
74use crate::gc::{GcSchedulerOptions, GcTickerRef};
75use crate::handler::{HeartbeatHandlerGroupBuilder, HeartbeatHandlerGroupRef};
76use crate::procedure::ProcedureManagerListenerAdapter;
77use crate::procedure::region_migration::manager::RegionMigrationManagerRef;
78use crate::procedure::wal_prune::manager::WalPruneTickerRef;
79use crate::pubsub::{PublisherRef, SubscriptionManagerRef};
80use crate::region::flush_trigger::RegionFlushTickerRef;
81use crate::region::supervisor::RegionSupervisorTickerRef;
82use crate::selector::{RegionStatAwareSelector, Selector, SelectorType};
83use crate::service::mailbox::MailboxRef;
84use crate::service::store::cached_kv::LeaderCachedKvBackend;
85use crate::state::{StateRef, become_follower, become_leader};
86use crate::utils::database::DatabaseOperatorRef;
87
/// Name of the sequence used to allocate table ids.
pub const TABLE_ID_SEQ: &str = "table_id";
/// Name of the sequence used to allocate flow ids.
pub const FLOW_ID_SEQ: &str = "flow_id";
/// Subdirectory (under the data home) used for metasrv data.
pub const METASRV_DATA_DIR: &str = "metasrv";
91
/// The datastores that implement the metadata kv-backend.
#[derive(Clone, Debug, PartialEq, Serialize, Default, Deserialize, ValueEnum)]
#[serde(rename_all = "snake_case")]
pub enum BackendImpl {
    /// Etcd as metadata storage (the default).
    #[default]
    EtcdStore,
    /// In-memory metadata storage - mostly used for testing.
    MemoryStore,
    #[cfg(feature = "pg_kvbackend")]
    /// Postgres as metadata storage.
    PostgresStore,
    #[cfg(feature = "mysql_kvbackend")]
    /// MySql as metadata storage.
    MysqlStore,
}
108
/// Configuration options for the stats persistence.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct StatsPersistenceOptions {
    /// TTL for the stats table that will be used to store the stats.
    /// Defaults to `Duration::ZERO` (see the `Default` impl); presumably zero
    /// means "no TTL" — confirm against the consumer of this option.
    #[serde(with = "humantime_serde")]
    pub ttl: Duration,
    /// The interval to persist the stats. Defaults to 10 minutes.
    #[serde(with = "humantime_serde")]
    pub interval: Duration,
}
119
120impl Default for StatsPersistenceOptions {
121    fn default() -> Self {
122        Self {
123            ttl: Duration::ZERO,
124            interval: Duration::from_mins(10),
125        }
126    }
127}
128
/// Heartbeat configuration for a single node type.
///
/// Converted into the wire-level `HeartbeatConfig` via the `From` impl in this
/// module; per-role values are derived by the constructors on this type.
#[derive(Clone, PartialEq, Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct HeartbeatOptions {
    /// Heartbeat interval.
    #[serde(with = "humantime_serde")]
    pub interval: Duration,
    /// Retry interval when heartbeat connection fails.
    #[serde(with = "humantime_serde")]
    pub retry_interval: Duration,
}
140
141impl Default for HeartbeatOptions {
142    fn default() -> Self {
143        Self {
144            interval: BASE_HEARTBEAT_INTERVAL,
145            retry_interval: BASE_HEARTBEAT_INTERVAL,
146        }
147    }
148}
149
150impl HeartbeatOptions {
151    pub fn datanode_from(base_interval: Duration) -> Self {
152        Self {
153            interval: base_interval,
154            retry_interval: base_interval,
155        }
156    }
157
158    pub fn frontend_from(base_interval: Duration) -> Self {
159        Self {
160            interval: frontend_heartbeat_interval(base_interval),
161            retry_interval: base_interval,
162        }
163    }
164
165    pub fn flownode_from(base_interval: Duration) -> Self {
166        Self {
167            interval: base_interval,
168            retry_interval: base_interval,
169        }
170    }
171}
172
173impl From<HeartbeatOptions> for HeartbeatConfig {
174    fn from(opts: HeartbeatOptions) -> Self {
175        Self {
176            heartbeat_interval_ms: opts.interval.as_millis() as u64,
177            retry_interval_ms: opts.retry_interval.as_millis() as u64,
178        }
179    }
180}
181
/// Client-side connection tuning for the metadata backend.
///
/// Currently only applied when etcd is used as the metadata store (see
/// `MetasrvOptions::backend_client`). Field semantics presumably map onto the
/// underlying etcd client's keep-alive/connect settings — confirm at the
/// construction site.
#[derive(Clone, PartialEq, Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct BackendClientOptions {
    /// Keep-alive timeout. Defaults to 3s.
    #[serde(with = "humantime_serde")]
    pub keep_alive_timeout: Duration,
    /// Keep-alive probe interval. Defaults to 10s.
    #[serde(with = "humantime_serde")]
    pub keep_alive_interval: Duration,
    /// Timeout for establishing the connection. Defaults to 3s.
    #[serde(with = "humantime_serde")]
    pub connect_timeout: Duration,
}
192
193impl Default for BackendClientOptions {
194    fn default() -> Self {
195        Self {
196            keep_alive_interval: Duration::from_secs(10),
197            keep_alive_timeout: Duration::from_secs(3),
198            connect_timeout: Duration::from_secs(3),
199        }
200    }
201}
202
/// Top-level configuration for the metasrv.
#[derive(Clone, PartialEq, Serialize, Deserialize)]
#[serde(default)]
pub struct MetasrvOptions {
    /// The address the server listens on.
    #[deprecated(note = "Use grpc.bind_addr instead")]
    pub bind_addr: String,
    /// The address the server advertises to the clients.
    #[deprecated(note = "Use grpc.server_addr instead")]
    pub server_addr: String,
    /// The address of the store, e.g., etcd.
    pub store_addrs: Vec<String>,
    /// TLS configuration for kv store backend (PostgreSQL/MySQL).
    /// Only applicable when using PostgreSQL or MySQL as the metadata store.
    #[serde(default)]
    pub backend_tls: Option<TlsOption>,
    /// The backend client options.
    /// Currently, only applicable when using etcd as the metadata store.
    #[serde(default)]
    pub backend_client: BackendClientOptions,
    /// The type of selector.
    pub selector: SelectorType,
    /// Whether to enable region failover.
    pub enable_region_failover: bool,
    /// The base heartbeat interval.
    ///
    /// This value is used to calculate the distributed time constants for components.
    /// e.g., the region lease time is `heartbeat_interval * 3 + Duration::from_secs(1)`.
    #[serde(with = "humantime_serde")]
    pub heartbeat_interval: Duration,
    /// The delay before starting region failure detection.
    /// This delay helps prevent Metasrv from triggering unnecessary region failovers before all Datanodes are fully started.
    /// Especially useful when the cluster is not deployed with GreptimeDB Operator and maintenance mode is not enabled.
    #[serde(with = "humantime_serde")]
    pub region_failure_detector_initialization_delay: Duration,
    /// Whether to allow region failover on local WAL.
    ///
    /// If it's true, the region failover will be allowed even if the local WAL is used.
    /// Note that this option is not recommended to be set to true, because it may lead to data loss during failover.
    pub allow_region_failover_on_local_wal: bool,
    /// The gRPC server options (bind/advertised addresses, etc.).
    pub grpc: GrpcOptions,
    /// The HTTP server options.
    pub http: HttpOptions,
    /// The logging options.
    pub logging: LoggingOptions,
    /// The procedure options.
    pub procedure: ProcedureConfig,
    /// The failure detector options.
    pub failure_detector: PhiAccrualFailureDetectorOptions,
    /// The datanode options.
    pub datanode: DatanodeClientOptions,
    /// Whether to enable telemetry.
    pub enable_telemetry: bool,
    /// The data home directory.
    pub data_home: String,
    /// The WAL options.
    pub wal: MetasrvWalConfig,
    /// The store key prefix. If it is not empty, all keys in the store will be prefixed with it.
    /// This is useful when multiple metasrv clusters share the same store.
    pub store_key_prefix: String,
    /// The max operations per txn
    ///
    /// This value is usually limited by which store is used for the `KvBackend`.
    /// For example, if using etcd, this value should ensure that it is less than
    /// or equal to the `--max-txn-ops` option value of etcd.
    ///
    /// TODO(jeremy): Currently, this option only affects the etcd store, but it may
    /// also affect other stores in the future. In other words, each store needs to
    /// limit the number of operations in a txn because an infinitely large txn could
    /// potentially block other operations.
    pub max_txn_ops: usize,
    /// The factor that determines how often statistics should be flushed,
    /// based on the number of received heartbeats. When the number of heartbeats
    /// reaches this factor, a flush operation is triggered.
    pub flush_stats_factor: usize,
    /// The tracing options.
    pub tracing: TracingOptions,
    /// The memory options.
    pub memory: MemoryOptions,
    /// The datastore for kv metadata.
    pub backend: BackendImpl,
    #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
    /// Table name of rds kv backend.
    pub meta_table_name: String,
    #[cfg(feature = "pg_kvbackend")]
    /// Lock id for meta kv election. Only effect when using pg_kvbackend.
    pub meta_election_lock_id: u64,
    #[cfg(feature = "pg_kvbackend")]
    /// Optional PostgreSQL schema for metadata table (defaults to current search_path if empty).
    pub meta_schema_name: Option<String>,
    #[cfg(feature = "pg_kvbackend")]
    /// Automatically create PostgreSQL schema if it doesn't exist (default: true).
    pub auto_create_schema: bool,
    /// The maximum idle time of a node. Used by the `NodeExpiryListener`
    /// registered in `Metasrv::try_start`. Defaults to 24h.
    #[serde(with = "humantime_serde")]
    pub node_max_idle_time: Duration,
    /// The event recorder options.
    pub event_recorder: EventRecorderOptions,
    /// The stats persistence options.
    pub stats_persistence: StatsPersistenceOptions,
    /// The GC scheduler options.
    pub gc: GcSchedulerOptions,
}
304
305impl fmt::Debug for MetasrvOptions {
306    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
307        let mut debug_struct = f.debug_struct("MetasrvOptions");
308        debug_struct
309            .field("store_addrs", &self.sanitize_store_addrs())
310            .field("backend_tls", &self.backend_tls)
311            .field("selector", &self.selector)
312            .field("enable_region_failover", &self.enable_region_failover)
313            .field(
314                "allow_region_failover_on_local_wal",
315                &self.allow_region_failover_on_local_wal,
316            )
317            .field("grpc", &self.grpc)
318            .field("http", &self.http)
319            .field("logging", &self.logging)
320            .field("procedure", &self.procedure)
321            .field("failure_detector", &self.failure_detector)
322            .field("datanode", &self.datanode)
323            .field("enable_telemetry", &self.enable_telemetry)
324            .field("data_home", &self.data_home)
325            .field("wal", &self.wal)
326            .field("store_key_prefix", &self.store_key_prefix)
327            .field("max_txn_ops", &self.max_txn_ops)
328            .field("flush_stats_factor", &self.flush_stats_factor)
329            .field("tracing", &self.tracing)
330            .field("backend", &self.backend)
331            .field("event_recorder", &self.event_recorder)
332            .field("stats_persistence", &self.stats_persistence)
333            .field("heartbeat_interval", &self.heartbeat_interval)
334            .field("backend_client", &self.backend_client);
335
336        #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
337        debug_struct.field("meta_table_name", &self.meta_table_name);
338
339        #[cfg(feature = "pg_kvbackend")]
340        debug_struct.field("meta_election_lock_id", &self.meta_election_lock_id);
341        #[cfg(feature = "pg_kvbackend")]
342        debug_struct.field("meta_schema_name", &self.meta_schema_name);
343
344        debug_struct
345            .field("node_max_idle_time", &self.node_max_idle_time)
346            .finish()
347    }
348}
349
/// Default port for the metasrv gRPC server.
const DEFAULT_METASRV_ADDR_PORT: &str = "3002";
351
impl Default for MetasrvOptions {
    fn default() -> Self {
        Self {
            #[allow(deprecated)]
            bind_addr: String::new(),
            #[allow(deprecated)]
            server_addr: String::new(),
            // Default etcd endpoint.
            store_addrs: vec!["127.0.0.1:2379".to_string()],
            // TLS "prefer" mode by default for the metadata backend.
            backend_tls: Some(TlsOption::prefer()),
            selector: SelectorType::default(),
            enable_region_failover: false,
            heartbeat_interval: distributed_time_constants::BASE_HEARTBEAT_INTERVAL,
            // 10 minutes grace period before region failure detection starts.
            region_failure_detector_initialization_delay: Duration::from_secs(10 * 60),
            allow_region_failover_on_local_wal: false,
            grpc: GrpcOptions {
                bind_addr: format!("127.0.0.1:{}", DEFAULT_METASRV_ADDR_PORT),
                ..Default::default()
            },
            http: HttpOptions::default(),
            logging: LoggingOptions::default(),
            procedure: ProcedureConfig {
                max_retry_times: 12,
                retry_delay: Duration::from_millis(500),
                // In etcd, the maximum size of any request is 1.5 MiB.
                // 1500KiB = 1536KiB (1.5MiB) - 36KiB (reserved size of key)
                max_metadata_value_size: Some(ReadableSize::kb(1500)),
                max_running_procedures: 128,
            },
            failure_detector: PhiAccrualFailureDetectorOptions::default(),
            datanode: DatanodeClientOptions::default(),
            enable_telemetry: true,
            data_home: DEFAULT_DATA_HOME.to_string(),
            wal: MetasrvWalConfig::default(),
            store_key_prefix: String::new(),
            max_txn_ops: 128,
            flush_stats_factor: 3,
            tracing: TracingOptions::default(),
            memory: MemoryOptions::default(),
            backend: BackendImpl::EtcdStore,
            #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
            meta_table_name: common_meta::kv_backend::DEFAULT_META_TABLE_NAME.to_string(),
            #[cfg(feature = "pg_kvbackend")]
            meta_election_lock_id: common_meta::kv_backend::DEFAULT_META_ELECTION_LOCK_ID,
            #[cfg(feature = "pg_kvbackend")]
            meta_schema_name: None,
            #[cfg(feature = "pg_kvbackend")]
            auto_create_schema: true,
            // 24 hours.
            node_max_idle_time: Duration::from_secs(24 * 60 * 60),
            event_recorder: EventRecorderOptions::default(),
            stats_persistence: StatsPersistenceOptions::default(),
            gc: GcSchedulerOptions::default(),
            backend_client: BackendClientOptions::default(),
        }
    }
}
407
impl Configurable for MetasrvOptions {
    /// Config keys whose values may be supplied as lists through environment
    /// variables (exact parsing is defined by the `Configurable` trait).
    fn env_list_keys() -> Option<&'static [&'static str]> {
        Some(&["wal.broker_endpoints", "store_addrs"])
    }
}
413
414impl MetasrvOptions {
415    fn sanitize_store_addrs(&self) -> Vec<String> {
416        self.store_addrs
417            .iter()
418            .map(|addr| common_meta::kv_backend::util::sanitize_connection_string(addr))
419            .collect()
420    }
421}
422
/// Lightweight holder for the metasrv's advertised server address.
pub struct MetasrvInfo {
    pub server_addr: String,
}
/// Shared handles passed to metasrv handlers.
#[derive(Clone)]
pub struct Context {
    /// The advertised server address.
    pub server_addr: String,
    /// Volatile leader-local storage; cleared via [`Context::reset_in_memory`].
    pub in_memory: ResettableKvBackendRef,
    /// Persistent metadata storage.
    pub kv_backend: KvBackendRef,
    /// Leader-local cache over the persistent storage.
    pub leader_cached_kv_backend: ResettableKvBackendRef,
    pub meta_peer_client: MetaPeerClientRef,
    pub mailbox: MailboxRef,
    /// Election handle; `None` when running without an election service.
    pub election: Option<ElectionRef>,
    // NOTE(review): semantics not evident from this file — confirm what
    // "infancy" marks (first heartbeat after a leader change?).
    pub is_infancy: bool,
    pub table_metadata_manager: TableMetadataManagerRef,
    pub cache_invalidator: CacheInvalidatorRef,
    pub leader_region_registry: LeaderRegionRegistryRef,
    pub topic_stats_registry: TopicStatsRegistryRef,
    /// Base heartbeat interval used to derive per-role heartbeat options.
    pub heartbeat_interval: Duration,
    /// Whether the current request is a handshake (see [`Context::with_handshake`]).
    pub is_handshake: bool,
}
443
444impl Context {
445    pub fn reset_in_memory(&self) {
446        self.in_memory.reset();
447        self.leader_region_registry.reset();
448    }
449
450    pub fn with_handshake(mut self, is_handshake: bool) -> Self {
451        self.is_handshake = is_handshake;
452        self
453    }
454
455    pub fn heartbeat_options_for(&self, role: Role) -> HeartbeatOptions {
456        match role {
457            Role::Datanode => HeartbeatOptions::datanode_from(self.heartbeat_interval),
458            Role::Frontend => HeartbeatOptions::frontend_from(self.heartbeat_interval),
459            Role::Flownode => HeartbeatOptions::flownode_from(self.heartbeat_interval),
460        }
461    }
462}
463
/// The kind of node a selector targets.
#[derive(Clone, Copy)]
pub enum SelectTarget {
    Datanode,
    Flownode,
}
469
470impl Display for SelectTarget {
471    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
472        match self {
473            SelectTarget::Datanode => write!(f, "datanode"),
474            SelectTarget::Flownode => write!(f, "flownode"),
475        }
476    }
477}
478
/// Context handed to selectors when choosing peers.
#[derive(Clone)]
pub struct SelectorContext {
    /// Peer discovery handle.
    pub peer_discovery: PeerDiscoveryRef,
}
483
/// Shared selector that yields a list of candidate peers.
pub type SelectorRef = Arc<dyn Selector<Context = SelectorContext, Output = Vec<Peer>>>;
/// Selector variant that is aware of region statistics and yields
/// `(region, peer)` assignments.
pub type RegionStatAwareSelectorRef =
    Arc<dyn RegionStatAwareSelector<Context = SelectorContext, Output = Vec<(RegionId, Peer)>>>;
487
/// Reacts to leader-election transitions by updating shared state and
/// notifying interested components (wired up in `Metasrv::try_start`).
pub struct MetaStateHandler {
    /// Pub/sub manager; all subscriptions are dropped when leadership is lost.
    subscribe_manager: Option<SubscriptionManagerRef>,
    /// Telemetry task; reporting is enabled only while leader.
    greptimedb_telemetry_task: Arc<GreptimeDBTelemetryTask>,
    /// Leader-local cache, reloaded on leader start.
    leader_cached_kv_backend: Arc<LeaderCachedKvBackend>,
    leadership_change_notifier: LeadershipChangeNotifier,
    mailbox: MailboxRef,
    /// Shared leader/follower state machine.
    state: StateRef,
}
496
impl MetaStateHandler {
    /// Runs when this node is elected leader: moves the state machine to
    /// leader, warms the leader cache, notifies leadership listeners, and
    /// resumes telemetry reporting.
    pub async fn on_leader_start(&self) {
        // The flag passed to `become_leader` presumably marks whether the
        // leader cache is usable — it flips to `true` only after a successful
        // load below. TODO confirm against `state` module.
        self.state.write().unwrap().next_state(become_leader(false));

        if let Err(e) = self.leader_cached_kv_backend.load().await {
            error!(e; "Failed to load kv into leader cache kv store");
        } else {
            self.state.write().unwrap().next_state(become_leader(true));
        }

        self.leadership_change_notifier
            .notify_on_leader_start()
            .await;

        self.greptimedb_telemetry_task.should_report(true);
    }

    /// Runs when this node loses (or steps down from) leadership: reverts to
    /// follower state, clears the mailbox, notifies listeners, suspends
    /// telemetry, and drops all pub/sub subscriptions.
    pub async fn on_leader_stop(&self) {
        self.state.write().unwrap().next_state(become_follower());

        // Enforces the mailbox to clear all pushers.
        // The remaining heartbeat connections will be closed by the remote peer or keep-alive detection.
        self.mailbox.reset().await;
        self.leadership_change_notifier
            .notify_on_leader_stop()
            .await;

        // Suspends reporting.
        self.greptimedb_telemetry_task.should_report(false);

        if let Some(sub_manager) = self.subscribe_manager.clone() {
            info!("Leader changed, un_subscribe all");
            if let Err(e) = sub_manager.unsubscribe_all() {
                error!(e; "Failed to un_subscribe all");
            }
        }
    }
}
535
/// The metasrv server: tracks cluster state, runs leader election, and drives
/// metadata/DDL/region-management procedures.
pub struct Metasrv {
    /// Shared leader/follower state machine.
    state: StateRef,
    /// True while the server is running; guards start/shutdown idempotency.
    started: Arc<AtomicBool>,
    /// Recorded start time in milliseconds, reported in node info.
    start_time_ms: u64,
    options: MetasrvOptions,
    // It is only valid at the leader node and is used to temporarily
    // store some data that will not be persisted.
    in_memory: ResettableKvBackendRef,
    /// Persistent metadata storage (see [`BackendImpl`]).
    kv_backend: KvBackendRef,
    /// Leader-local cache over `kv_backend`; reset and reloaded on leader change.
    leader_cached_kv_backend: Arc<LeaderCachedKvBackend>,
    meta_peer_client: MetaPeerClientRef,
    // The selector is used to select a target datanode.
    selector: SelectorRef,
    selector_ctx: SelectorContext,
    // The flow selector is used to select a target flownode.
    flow_selector: SelectorRef,
    /// Heartbeat handlers; populated from `handler_group_builder` in `try_start`.
    handler_group: RwLock<Option<HeartbeatHandlerGroupRef>>,
    /// One-shot builder consumed by `try_start`.
    handler_group_builder: Mutex<Option<HeartbeatHandlerGroupBuilder>>,
    /// `None` when running without an election service (single-instance mode).
    election: Option<ElectionRef>,
    procedure_manager: ProcedureManagerRef,
    mailbox: MailboxRef,
    ddl_manager: DdlManagerRef,
    wal_provider: WalProviderRef,
    table_metadata_manager: TableMetadataManagerRef,
    runtime_switch_manager: RuntimeSwitchManagerRef,
    memory_region_keeper: MemoryRegionKeeperRef,
    greptimedb_telemetry_task: Arc<GreptimeDBTelemetryTask>,
    region_migration_manager: RegionMigrationManagerRef,
    /// Registered as a leadership-change listener in `try_start`.
    region_supervisor_ticker: Option<RegionSupervisorTickerRef>,
    cache_invalidator: CacheInvalidatorRef,
    leader_region_registry: LeaderRegionRegistryRef,
    topic_stats_registry: TopicStatsRegistryRef,
    /// Registered as a leadership-change listener in `try_start`.
    wal_prune_ticker: Option<WalPruneTickerRef>,
    /// Registered as a leadership-change listener in `try_start`.
    region_flush_ticker: Option<RegionFlushTickerRef>,
    table_id_allocator: ResourceIdAllocatorRef,
    reconciliation_manager: ReconciliationManagerRef,
    resource_stat: ResourceStatRef,
    /// Registered as a leadership-change listener in `try_start`.
    gc_ticker: Option<GcTickerRef>,
    database_operator: DatabaseOperatorRef,

    plugins: Plugins,
}
578
579impl Metasrv {
580    pub async fn try_start(&self) -> Result<()> {
581        if self
582            .started
583            .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
584            .is_err()
585        {
586            warn!("Metasrv already started");
587            return Ok(());
588        }
589
590        let handler_group_builder =
591            self.handler_group_builder
592                .lock()
593                .unwrap()
594                .take()
595                .context(error::UnexpectedSnafu {
596                    violated: "expected heartbeat handler group builder",
597                })?;
598        *self.handler_group.write().unwrap() = Some(Arc::new(handler_group_builder.build()?));
599
600        // Creates default schema if not exists
601        self.table_metadata_manager
602            .init()
603            .await
604            .context(InitMetadataSnafu)?;
605
606        if let Some(election) = self.election() {
607            let procedure_manager = self.procedure_manager.clone();
608            let in_memory = self.in_memory.clone();
609            let leader_cached_kv_backend = self.leader_cached_kv_backend.clone();
610            let subscribe_manager = self.subscription_manager();
611            let mut rx = election.subscribe_leader_change();
612            let greptimedb_telemetry_task = self.greptimedb_telemetry_task.clone();
613            greptimedb_telemetry_task
614                .start()
615                .context(StartTelemetryTaskSnafu)?;
616
617            // Builds leadership change notifier.
618            let mut leadership_change_notifier = LeadershipChangeNotifier::default();
619            leadership_change_notifier.add_listener(self.wal_provider.clone());
620            leadership_change_notifier
621                .add_listener(Arc::new(ProcedureManagerListenerAdapter(procedure_manager)));
622            leadership_change_notifier.add_listener(Arc::new(NodeExpiryListener::new(
623                self.options.node_max_idle_time,
624                self.in_memory.clone(),
625            )));
626            if let Some(region_supervisor_ticker) = &self.region_supervisor_ticker {
627                leadership_change_notifier.add_listener(region_supervisor_ticker.clone() as _);
628            }
629            if let Some(wal_prune_ticker) = &self.wal_prune_ticker {
630                leadership_change_notifier.add_listener(wal_prune_ticker.clone() as _);
631            }
632            if let Some(region_flush_trigger) = &self.region_flush_ticker {
633                leadership_change_notifier.add_listener(region_flush_trigger.clone() as _);
634            }
635            if let Some(gc_ticker) = &self.gc_ticker {
636                leadership_change_notifier.add_listener(gc_ticker.clone() as _);
637            }
638            if let Some(customizer) = self.plugins.get::<LeadershipChangeNotifierCustomizerRef>() {
639                customizer.customize(&mut leadership_change_notifier);
640            }
641
642            let state_handler = MetaStateHandler {
643                greptimedb_telemetry_task,
644                subscribe_manager,
645                state: self.state.clone(),
646                leader_cached_kv_backend: leader_cached_kv_backend.clone(),
647                leadership_change_notifier,
648                mailbox: self.mailbox.clone(),
649            };
650            let _handle = common_runtime::spawn_global(async move {
651                loop {
652                    match rx.recv().await {
653                        Ok(msg) => {
654                            in_memory.reset();
655                            leader_cached_kv_backend.reset();
656                            info!("Leader's cache has bean cleared on leader change: {msg}");
657                            match msg {
658                                LeaderChangeMessage::Elected(_) => {
659                                    state_handler.on_leader_start().await;
660                                }
661                                LeaderChangeMessage::StepDown(leader) => {
662                                    error!("Leader :{:?} step down", leader);
663
664                                    state_handler.on_leader_stop().await;
665                                }
666                            }
667                        }
668                        Err(RecvError::Closed) => {
669                            error!("Not expected, is leader election loop still running?");
670                            break;
671                        }
672                        Err(RecvError::Lagged(_)) => {
673                            break;
674                        }
675                    }
676                }
677
678                state_handler.on_leader_stop().await;
679            });
680
681            // Register candidate and keep lease in background.
682            {
683                let election = election.clone();
684                let started = self.started.clone();
685                let node_info = self.node_info();
686                let _handle = common_runtime::spawn_global(async move {
687                    while started.load(Ordering::Acquire) {
688                        let res = election.register_candidate(&node_info).await;
689                        if let Err(e) = res {
690                            warn!(e; "Metasrv register candidate error");
691                        }
692                    }
693                });
694            }
695
696            // Campaign
697            {
698                let election = election.clone();
699                let started = self.started.clone();
700                let _handle = common_runtime::spawn_global(async move {
701                    while started.load(Ordering::Acquire) {
702                        let res = election.campaign().await;
703                        if let Err(e) = res {
704                            warn!(e; "Metasrv election error");
705                        }
706                        election.reset_campaign().await;
707                        info!("Metasrv re-initiate election");
708                    }
709                    info!("Metasrv stopped");
710                });
711            }
712        } else {
713            warn!(
714                "Ensure only one instance of Metasrv is running, as there is no election service."
715            );
716
717            if let Err(e) = self.wal_provider.start().await {
718                error!(e; "Failed to start wal provider");
719            }
720            // Always load kv into cached kv store.
721            self.leader_cached_kv_backend
722                .load()
723                .await
724                .context(KvBackendSnafu)?;
725            self.procedure_manager
726                .start()
727                .await
728                .context(StartProcedureManagerSnafu)?;
729        }
730
731        info!("Metasrv started");
732
733        Ok(())
734    }
735
736    pub async fn shutdown(&self) -> Result<()> {
737        if self
738            .started
739            .compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
740            .is_err()
741        {
742            warn!("Metasrv already stopped");
743            return Ok(());
744        }
745
746        self.procedure_manager
747            .stop()
748            .await
749            .context(StopProcedureManagerSnafu)?;
750
751        info!("Metasrv stopped");
752
753        Ok(())
754    }
755
    /// Returns the recorded start time in milliseconds.
    pub fn start_time_ms(&self) -> u64 {
        self.start_time_ms
    }

    /// Returns the resource statistics provider for this node.
    pub fn resource_stat(&self) -> &ResourceStatRef {
        &self.resource_stat
    }
763
    /// Builds a snapshot of this node's identity, build info, and resource
    /// usage — advertised during candidate registration (see `try_start`).
    pub fn node_info(&self) -> MetasrvNodeInfo {
        let build_info = common_version::build_info();
        MetasrvNodeInfo {
            // The advertised gRPC address.
            addr: self.options().grpc.server_addr.clone(),
            version: build_info.version.to_string(),
            git_commit: build_info.commit_short.to_string(),
            start_time_ms: self.start_time_ms(),
            total_cpu_millicores: self.resource_stat.get_total_cpu_millicores(),
            total_memory_bytes: self.resource_stat.get_total_memory_bytes(),
            cpu_usage_millicores: self.resource_stat.get_cpu_usage_millicores(),
            memory_usage_bytes: self.resource_stat.get_memory_usage_bytes(),
            // Falls back to an empty hostname if it cannot be resolved.
            hostname: hostname::get()
                .unwrap_or_default()
                .to_string_lossy()
                .to_string(),
        }
    }
781
782    /// Looks up a datanode peer by peer_id, returning it only when it's alive.
783    /// A datanode is considered alive when it's still within the lease period.
784    pub(crate) async fn lookup_datanode_peer(&self, peer_id: u64) -> Result<Option<Peer>> {
785        discovery::utils::alive_datanode(
786            &DefaultSystemTimer,
787            self.meta_peer_client.as_ref(),
788            peer_id,
789            default_distributed_time_constants().datanode_lease,
790        )
791        .await
792    }
793
    /// Returns the configuration options this metasrv was built with.
    pub fn options(&self) -> &MetasrvOptions {
        &self.options
    }
797
    /// Returns the in-memory (resettable) kv backend.
    pub fn in_memory(&self) -> &ResettableKvBackendRef {
        &self.in_memory
    }
801
    /// Returns the underlying kv backend.
    pub fn kv_backend(&self) -> &KvBackendRef {
        &self.kv_backend
    }
805
    /// Returns the meta peer client (also used for datanode liveness lookups,
    /// see `lookup_datanode_peer`).
    pub fn meta_peer_client(&self) -> &MetaPeerClientRef {
        &self.meta_peer_client
    }
809
    /// Returns the selector used by this metasrv.
    pub fn selector(&self) -> &SelectorRef {
        &self.selector
    }
813
    /// Returns the context shared by selectors.
    pub fn selector_ctx(&self) -> &SelectorContext {
        &self.selector_ctx
    }
817
    /// Returns the flow selector.
    pub fn flow_selector(&self) -> &SelectorRef {
        &self.flow_selector
    }
821
822    pub fn handler_group(&self) -> Option<HeartbeatHandlerGroupRef> {
823        self.handler_group.read().unwrap().clone()
824    }
825
    /// Returns the election service, or `None` when metasrv runs without one
    /// (in which case only a single instance must be running).
    pub fn election(&self) -> Option<&ElectionRef> {
        self.election.as_ref()
    }
829
    /// Returns the mailbox.
    pub fn mailbox(&self) -> &MailboxRef {
        &self.mailbox
    }
833
    /// Returns the DDL manager.
    pub fn ddl_manager(&self) -> &DdlManagerRef {
        &self.ddl_manager
    }
837
    /// Returns the procedure manager (started/stopped by `start`/`shutdown`).
    pub fn procedure_manager(&self) -> &ProcedureManagerRef {
        &self.procedure_manager
    }
841
    /// Returns the table metadata manager.
    pub fn table_metadata_manager(&self) -> &TableMetadataManagerRef {
        &self.table_metadata_manager
    }
845
    /// Returns the runtime switch manager.
    pub fn runtime_switch_manager(&self) -> &RuntimeSwitchManagerRef {
        &self.runtime_switch_manager
    }
849
    /// Returns the memory region keeper.
    pub fn memory_region_keeper(&self) -> &MemoryRegionKeeperRef {
        &self.memory_region_keeper
    }
853
    /// Returns the region migration manager.
    pub fn region_migration_manager(&self) -> &RegionMigrationManagerRef {
        &self.region_migration_manager
    }
857
    /// Returns the publisher registered in the plugins, if any.
    pub fn publish(&self) -> Option<PublisherRef> {
        self.plugins.get::<PublisherRef>()
    }
861
    /// Returns the subscription manager registered in the plugins, if any.
    pub fn subscription_manager(&self) -> Option<SubscriptionManagerRef> {
        self.plugins.get::<SubscriptionManagerRef>()
    }
865
    /// Returns the table id allocator.
    pub fn table_id_allocator(&self) -> &ResourceIdAllocatorRef {
        &self.table_id_allocator
    }
869
    /// Returns the reconciliation manager.
    pub fn reconciliation_manager(&self) -> &ReconciliationManagerRef {
        &self.reconciliation_manager
    }
873
    /// Returns the database operator.
    pub fn database_operator(&self) -> &DatabaseOperatorRef {
        &self.database_operator
    }
877
    /// Returns the plugins registry.
    pub fn plugins(&self) -> &Plugins {
        &self.plugins
    }
881
882    pub fn started(&self) -> Arc<AtomicBool> {
883        self.started.clone()
884    }
885
886    pub fn gc_ticker(&self) -> Option<GcTickerRef> {
887        self.gc_ticker.as_ref().cloned()
888    }
889
890    #[inline]
891    pub fn new_ctx(&self) -> Context {
892        let server_addr = self.options().grpc.server_addr.clone();
893        let in_memory = self.in_memory.clone();
894        let kv_backend = self.kv_backend.clone();
895        let leader_cached_kv_backend = self.leader_cached_kv_backend.clone();
896        let meta_peer_client = self.meta_peer_client.clone();
897        let mailbox = self.mailbox.clone();
898        let election = self.election.clone();
899        let table_metadata_manager = self.table_metadata_manager.clone();
900        let cache_invalidator = self.cache_invalidator.clone();
901        let leader_region_registry = self.leader_region_registry.clone();
902        let topic_stats_registry = self.topic_stats_registry.clone();
903
904        Context {
905            server_addr,
906            in_memory,
907            kv_backend,
908            leader_cached_kv_backend,
909            meta_peer_client,
910            mailbox,
911            election,
912            is_infancy: false,
913            table_metadata_manager,
914            cache_invalidator,
915            leader_region_registry,
916            topic_stats_registry,
917            heartbeat_interval: self.options().heartbeat_interval,
918            is_handshake: false,
919        }
920    }
921}
922
#[cfg(test)]
mod tests {
    use crate::metasrv::MetasrvNodeInfo;

    #[test]
    fn test_deserialize_metasrv_node_info() {
        // A minimal payload carrying only the core identity fields must still
        // deserialize successfully into `MetasrvNodeInfo`.
        let json = r#"{"addr":"127.0.0.1:4002","version":"0.1.0","git_commit":"1234567890","start_time_ms":1715145600}"#;
        let info: MetasrvNodeInfo = serde_json::from_str(json).unwrap();

        assert_eq!(info.addr, "127.0.0.1:4002");
        assert_eq!(info.version, "0.1.0");
        assert_eq!(info.git_commit, "1234567890");
        assert_eq!(info.start_time_ms, 1715145600);
    }
}