1pub mod builder;
16
17use std::fmt::{self, Display};
18use std::sync::atomic::{AtomicBool, Ordering};
19use std::sync::{Arc, Mutex, RwLock};
20use std::time::Duration;
21
22use api::v1::meta::{HeartbeatConfig, Role};
23use clap::ValueEnum;
24use common_base::Plugins;
25use common_base::readable_size::ReadableSize;
26use common_config::{Configurable, DEFAULT_DATA_HOME};
27use common_event_recorder::EventRecorderOptions;
28use common_greptimedb_telemetry::GreptimeDBTelemetryTask;
29use common_meta::cache_invalidator::CacheInvalidatorRef;
30use common_meta::ddl::allocator::resource_id::ResourceIdAllocatorRef;
31use common_meta::ddl_manager::DdlManagerRef;
32use common_meta::distributed_time_constants::{
33 self, BASE_HEARTBEAT_INTERVAL, default_distributed_time_constants, frontend_heartbeat_interval,
34};
35use common_meta::election::LeaderChangeMessage;
36pub use common_meta::election::{ElectionRef, MetasrvNodeInfo};
37use common_meta::key::TableMetadataManagerRef;
38use common_meta::key::runtime_switch::RuntimeSwitchManagerRef;
39use common_meta::kv_backend::{KvBackendRef, ResettableKvBackend, ResettableKvBackendRef};
40use common_meta::leadership_notifier::{
41 LeadershipChangeNotifier, LeadershipChangeNotifierCustomizerRef,
42};
43use common_meta::node_expiry_listener::NodeExpiryListener;
44use common_meta::peer::{Peer, PeerDiscoveryRef};
45use common_meta::reconciliation::manager::ReconciliationManagerRef;
46use common_meta::region_keeper::MemoryRegionKeeperRef;
47use common_meta::region_registry::LeaderRegionRegistryRef;
48use common_meta::stats::topic::TopicStatsRegistryRef;
49use common_meta::wal_provider::WalProviderRef;
50use common_options::datanode::DatanodeClientOptions;
51use common_options::memory::MemoryOptions;
52use common_procedure::ProcedureManagerRef;
53use common_procedure::options::ProcedureConfig;
54use common_stat::ResourceStatRef;
55use common_telemetry::logging::{LoggingOptions, TracingOptions};
56use common_telemetry::{error, info, warn};
57use common_time::util::DefaultSystemTimer;
58use common_wal::config::MetasrvWalConfig;
59use serde::{Deserialize, Serialize};
60use servers::grpc::GrpcOptions;
61use servers::http::HttpOptions;
62use servers::tls::TlsOption;
63use snafu::{OptionExt, ResultExt};
64use store_api::storage::RegionId;
65use tokio::sync::broadcast::error::RecvError;
66
67use crate::cluster::MetaPeerClientRef;
68use crate::discovery;
69use crate::error::{
70 self, InitMetadataSnafu, KvBackendSnafu, Result, StartProcedureManagerSnafu,
71 StartTelemetryTaskSnafu, StopProcedureManagerSnafu,
72};
73use crate::failure_detector::PhiAccrualFailureDetectorOptions;
74use crate::gc::{GcSchedulerOptions, GcTickerRef};
75use crate::handler::{HeartbeatHandlerGroupBuilder, HeartbeatHandlerGroupRef};
76use crate::procedure::ProcedureManagerListenerAdapter;
77use crate::procedure::region_migration::manager::RegionMigrationManagerRef;
78use crate::procedure::wal_prune::manager::WalPruneTickerRef;
79use crate::pubsub::{PublisherRef, SubscriptionManagerRef};
80use crate::region::flush_trigger::RegionFlushTickerRef;
81use crate::region::supervisor::RegionSupervisorTickerRef;
82use crate::selector::{RegionStatAwareSelector, Selector, SelectorType};
83use crate::service::mailbox::MailboxRef;
84use crate::service::store::cached_kv::LeaderCachedKvBackend;
85use crate::state::{StateRef, become_follower, become_leader};
86use crate::utils::database::DatabaseOperatorRef;
87
/// Name of the sequence used to allocate table ids in the kv backend.
pub const TABLE_ID_SEQ: &str = "table_id";
/// Name of the sequence used to allocate flow ids in the kv backend.
pub const FLOW_ID_SEQ: &str = "flow_id";
/// Subdirectory (under `data_home`) reserved for metasrv's local data.
pub const METASRV_DATA_DIR: &str = "metasrv";
91
/// The kv-backend implementation metasrv uses to persist metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Default, Deserialize, ValueEnum)]
#[serde(rename_all = "snake_case")]
pub enum BackendImpl {
    /// Etcd as the backend store (the default).
    #[default]
    EtcdStore,
    /// In-memory store; contents do not survive a restart.
    MemoryStore,
    /// PostgreSQL as the backend store.
    #[cfg(feature = "pg_kvbackend")]
    PostgresStore,
    /// MySQL as the backend store.
    #[cfg(feature = "mysql_kvbackend")]
    MysqlStore,
}
108
/// Options controlling persistence of collected statistics.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct StatsPersistenceOptions {
    /// TTL applied to persisted stats; defaults to `Duration::ZERO`
    /// (see the `Default` impl).
    #[serde(with = "humantime_serde")]
    pub ttl: Duration,
    /// Interval between persistence runs.
    #[serde(with = "humantime_serde")]
    pub interval: Duration,
}
119
120impl Default for StatsPersistenceOptions {
121 fn default() -> Self {
122 Self {
123 ttl: Duration::ZERO,
124 interval: Duration::from_mins(10),
125 }
126 }
127}
128
/// Heartbeat timing options handed out to cluster nodes.
#[derive(Clone, PartialEq, Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct HeartbeatOptions {
    /// Interval between heartbeats sent by the node.
    #[serde(with = "humantime_serde")]
    pub interval: Duration,
    /// Interval between retries after a failed heartbeat.
    #[serde(with = "humantime_serde")]
    pub retry_interval: Duration,
}
140
141impl Default for HeartbeatOptions {
142 fn default() -> Self {
143 Self {
144 interval: BASE_HEARTBEAT_INTERVAL,
145 retry_interval: BASE_HEARTBEAT_INTERVAL,
146 }
147 }
148}
149
150impl HeartbeatOptions {
151 pub fn datanode_from(base_interval: Duration) -> Self {
152 Self {
153 interval: base_interval,
154 retry_interval: base_interval,
155 }
156 }
157
158 pub fn frontend_from(base_interval: Duration) -> Self {
159 Self {
160 interval: frontend_heartbeat_interval(base_interval),
161 retry_interval: base_interval,
162 }
163 }
164
165 pub fn flownode_from(base_interval: Duration) -> Self {
166 Self {
167 interval: base_interval,
168 retry_interval: base_interval,
169 }
170 }
171}
172
173impl From<HeartbeatOptions> for HeartbeatConfig {
174 fn from(opts: HeartbeatOptions) -> Self {
175 Self {
176 heartbeat_interval_ms: opts.interval.as_millis() as u64,
177 retry_interval_ms: opts.retry_interval.as_millis() as u64,
178 }
179 }
180}
181
/// Client-side connection options for the backend metadata store.
#[derive(Clone, PartialEq, Serialize, Deserialize, Debug)]
#[serde(default)]
pub struct BackendClientOptions {
    /// How long to wait for a keep-alive probe acknowledgement before
    /// considering the connection dead.
    #[serde(with = "humantime_serde")]
    pub keep_alive_timeout: Duration,
    /// Interval between keep-alive probes.
    #[serde(with = "humantime_serde")]
    pub keep_alive_interval: Duration,
    /// Timeout for establishing the initial connection.
    #[serde(with = "humantime_serde")]
    pub connect_timeout: Duration,
}
192
193impl Default for BackendClientOptions {
194 fn default() -> Self {
195 Self {
196 keep_alive_interval: Duration::from_secs(10),
197 keep_alive_timeout: Duration::from_secs(3),
198 connect_timeout: Duration::from_secs(3),
199 }
200 }
201}
202
/// Top-level configuration for a metasrv instance.
///
/// Every field is deserializable with a default (`#[serde(default)]`); see
/// the `Default` impl for the concrete values.
#[derive(Clone, PartialEq, Serialize, Deserialize)]
#[serde(default)]
pub struct MetasrvOptions {
    /// Deprecated: use `grpc.bind_addr` instead.
    #[deprecated(note = "Use grpc.bind_addr instead")]
    pub bind_addr: String,
    /// Deprecated: use `grpc.server_addr` instead.
    #[deprecated(note = "Use grpc.server_addr instead")]
    pub server_addr: String,
    /// Addresses of the backend metadata store (e.g. etcd endpoints).
    pub store_addrs: Vec<String>,
    /// TLS settings for connecting to the backend store, if any.
    #[serde(default)]
    pub backend_tls: Option<TlsOption>,
    /// Client-side timeouts and keep-alive for the backend store.
    #[serde(default)]
    pub backend_client: BackendClientOptions,
    /// Node selection strategy used by metasrv.
    pub selector: SelectorType,
    /// Whether automatic region failover is enabled.
    pub enable_region_failover: bool,
    /// Base heartbeat interval; per-role intervals are derived from it
    /// (see `HeartbeatOptions::{datanode,frontend,flownode}_from`).
    #[serde(with = "humantime_serde")]
    pub heartbeat_interval: Duration,
    /// Delay before region failure detectors are initialized after start.
    #[serde(with = "humantime_serde")]
    pub region_failure_detector_initialization_delay: Duration,
    /// Whether region failover is permitted when regions use local WAL —
    /// NOTE(review): semantics inferred from the name; confirm against the
    /// failover implementation.
    pub allow_region_failover_on_local_wal: bool,
    /// gRPC server options.
    pub grpc: GrpcOptions,
    /// HTTP server options.
    pub http: HttpOptions,
    /// Logging options.
    pub logging: LoggingOptions,
    /// Procedure framework options.
    pub procedure: ProcedureConfig,
    /// Phi-accrual failure detector tuning.
    pub failure_detector: PhiAccrualFailureDetectorOptions,
    /// Options for clients connecting to datanodes.
    pub datanode: DatanodeClientOptions,
    /// Whether to report anonymous usage telemetry.
    pub enable_telemetry: bool,
    /// Local data directory.
    pub data_home: String,
    /// WAL configuration for the cluster.
    pub wal: MetasrvWalConfig,
    /// Optional prefix applied to keys in the backend store.
    pub store_key_prefix: String,
    /// Maximum number of operations in a single backend transaction.
    pub max_txn_ops: usize,
    /// Factor controlling stats flush frequency — NOTE(review): exact
    /// semantics not visible here; confirm at the usage site.
    pub flush_stats_factor: usize,
    /// Tracing options.
    pub tracing: TracingOptions,
    /// Memory-related options.
    pub memory: MemoryOptions,
    /// Which kv-backend implementation to use.
    pub backend: BackendImpl,
    /// Table name used by SQL-based backends to store metadata.
    #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
    pub meta_table_name: String,
    /// Postgres lock id used for leader election.
    #[cfg(feature = "pg_kvbackend")]
    pub meta_election_lock_id: u64,
    /// Optional Postgres schema holding the metadata table.
    #[cfg(feature = "pg_kvbackend")]
    pub meta_schema_name: Option<String>,
    /// Whether to create the Postgres schema automatically when missing.
    #[cfg(feature = "pg_kvbackend")]
    pub auto_create_schema: bool,
    /// Maximum idle time before an inactive node's entry expires
    /// (consumed by `NodeExpiryListener`).
    #[serde(with = "humantime_serde")]
    pub node_max_idle_time: Duration,
    /// Event recorder options.
    pub event_recorder: EventRecorderOptions,
    /// Stats persistence options.
    pub stats_persistence: StatsPersistenceOptions,
    /// GC scheduler options.
    pub gc: GcSchedulerOptions,
}
304
305impl fmt::Debug for MetasrvOptions {
306 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
307 let mut debug_struct = f.debug_struct("MetasrvOptions");
308 debug_struct
309 .field("store_addrs", &self.sanitize_store_addrs())
310 .field("backend_tls", &self.backend_tls)
311 .field("selector", &self.selector)
312 .field("enable_region_failover", &self.enable_region_failover)
313 .field(
314 "allow_region_failover_on_local_wal",
315 &self.allow_region_failover_on_local_wal,
316 )
317 .field("grpc", &self.grpc)
318 .field("http", &self.http)
319 .field("logging", &self.logging)
320 .field("procedure", &self.procedure)
321 .field("failure_detector", &self.failure_detector)
322 .field("datanode", &self.datanode)
323 .field("enable_telemetry", &self.enable_telemetry)
324 .field("data_home", &self.data_home)
325 .field("wal", &self.wal)
326 .field("store_key_prefix", &self.store_key_prefix)
327 .field("max_txn_ops", &self.max_txn_ops)
328 .field("flush_stats_factor", &self.flush_stats_factor)
329 .field("tracing", &self.tracing)
330 .field("backend", &self.backend)
331 .field("event_recorder", &self.event_recorder)
332 .field("stats_persistence", &self.stats_persistence)
333 .field("heartbeat_interval", &self.heartbeat_interval)
334 .field("backend_client", &self.backend_client);
335
336 #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
337 debug_struct.field("meta_table_name", &self.meta_table_name);
338
339 #[cfg(feature = "pg_kvbackend")]
340 debug_struct.field("meta_election_lock_id", &self.meta_election_lock_id);
341 #[cfg(feature = "pg_kvbackend")]
342 debug_struct.field("meta_schema_name", &self.meta_schema_name);
343
344 debug_struct
345 .field("node_max_idle_time", &self.node_max_idle_time)
346 .finish()
347 }
348}
349
/// Default gRPC port for metasrv; used to build the default `grpc.bind_addr`.
const DEFAULT_METASRV_ADDR_PORT: &str = "3002";
351
impl Default for MetasrvOptions {
    fn default() -> Self {
        Self {
            // Deprecated fields default to empty; `grpc.*` carries the real values.
            #[allow(deprecated)]
            bind_addr: String::new(),
            #[allow(deprecated)]
            server_addr: String::new(),
            // Default etcd endpoint.
            store_addrs: vec!["127.0.0.1:2379".to_string()],
            backend_tls: Some(TlsOption::prefer()),
            selector: SelectorType::default(),
            enable_region_failover: false,
            heartbeat_interval: distributed_time_constants::BASE_HEARTBEAT_INTERVAL,
            // Postpone failure-detector initialization for 10 minutes.
            region_failure_detector_initialization_delay: Duration::from_secs(10 * 60),
            allow_region_failover_on_local_wal: false,
            grpc: GrpcOptions {
                bind_addr: format!("127.0.0.1:{}", DEFAULT_METASRV_ADDR_PORT),
                ..Default::default()
            },
            http: HttpOptions::default(),
            logging: LoggingOptions::default(),
            procedure: ProcedureConfig {
                max_retry_times: 12,
                retry_delay: Duration::from_millis(500),
                // Cap metadata value size stored per procedure entry.
                max_metadata_value_size: Some(ReadableSize::kb(1500)),
                max_running_procedures: 128,
            },
            failure_detector: PhiAccrualFailureDetectorOptions::default(),
            datanode: DatanodeClientOptions::default(),
            enable_telemetry: true,
            data_home: DEFAULT_DATA_HOME.to_string(),
            wal: MetasrvWalConfig::default(),
            store_key_prefix: String::new(),
            max_txn_ops: 128,
            flush_stats_factor: 3,
            tracing: TracingOptions::default(),
            memory: MemoryOptions::default(),
            backend: BackendImpl::EtcdStore,
            #[cfg(any(feature = "pg_kvbackend", feature = "mysql_kvbackend"))]
            meta_table_name: common_meta::kv_backend::DEFAULT_META_TABLE_NAME.to_string(),
            #[cfg(feature = "pg_kvbackend")]
            meta_election_lock_id: common_meta::kv_backend::DEFAULT_META_ELECTION_LOCK_ID,
            #[cfg(feature = "pg_kvbackend")]
            meta_schema_name: None,
            #[cfg(feature = "pg_kvbackend")]
            auto_create_schema: true,
            // Expire nodes idle for more than a day.
            node_max_idle_time: Duration::from_secs(24 * 60 * 60),
            event_recorder: EventRecorderOptions::default(),
            stats_persistence: StatsPersistenceOptions::default(),
            gc: GcSchedulerOptions::default(),
            backend_client: BackendClientOptions::default(),
        }
    }
}
407
impl Configurable for MetasrvOptions {
    /// Keys whose values may be provided as lists via environment variables.
    fn env_list_keys() -> Option<&'static [&'static str]> {
        Some(&["wal.broker_endpoints", "store_addrs"])
    }
}
413
414impl MetasrvOptions {
415 fn sanitize_store_addrs(&self) -> Vec<String> {
416 self.store_addrs
417 .iter()
418 .map(|addr| common_meta::kv_backend::util::sanitize_connection_string(addr))
419 .collect()
420 }
421}
422
/// Minimal information about this metasrv instance.
pub struct MetasrvInfo {
    /// The advertised server address of this metasrv.
    pub server_addr: String,
}
/// Shared context handed to metasrv services and heartbeat handlers.
#[derive(Clone)]
pub struct Context {
    pub server_addr: String,
    /// In-memory kv backend; cleared via [`Context::reset_in_memory`].
    pub in_memory: ResettableKvBackendRef,
    pub kv_backend: KvBackendRef,
    /// Leader-local cached view over the kv backend.
    pub leader_cached_kv_backend: ResettableKvBackendRef,
    pub meta_peer_client: MetaPeerClientRef,
    pub mailbox: MailboxRef,
    /// Election service; `None` when metasrv runs without one.
    pub election: Option<ElectionRef>,
    // NOTE(review): semantics inferred from the name only — appears to mark a
    // freshly-started leader's early phase; confirm at the usage sites.
    pub is_infancy: bool,
    pub table_metadata_manager: TableMetadataManagerRef,
    pub cache_invalidator: CacheInvalidatorRef,
    pub leader_region_registry: LeaderRegionRegistryRef,
    pub topic_stats_registry: TopicStatsRegistryRef,
    /// Base heartbeat interval copied from `MetasrvOptions`.
    pub heartbeat_interval: Duration,
    /// Whether the current request is a handshake (see `with_handshake`).
    pub is_handshake: bool,
}
443
444impl Context {
445 pub fn reset_in_memory(&self) {
446 self.in_memory.reset();
447 self.leader_region_registry.reset();
448 }
449
450 pub fn with_handshake(mut self, is_handshake: bool) -> Self {
451 self.is_handshake = is_handshake;
452 self
453 }
454
455 pub fn heartbeat_options_for(&self, role: Role) -> HeartbeatOptions {
456 match role {
457 Role::Datanode => HeartbeatOptions::datanode_from(self.heartbeat_interval),
458 Role::Frontend => HeartbeatOptions::frontend_from(self.heartbeat_interval),
459 Role::Flownode => HeartbeatOptions::flownode_from(self.heartbeat_interval),
460 }
461 }
462}
463
/// The kind of node a selector is choosing among.
#[derive(Clone, Copy)]
pub enum SelectTarget {
    Datanode,
    Flownode,
}
469
470impl Display for SelectTarget {
471 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
472 match self {
473 SelectTarget::Datanode => write!(f, "datanode"),
474 SelectTarget::Flownode => write!(f, "flownode"),
475 }
476 }
477}
478
/// Context passed to selectors when choosing peers.
#[derive(Clone)]
pub struct SelectorContext {
    /// Discovery of currently known peers.
    pub peer_discovery: PeerDiscoveryRef,
}

/// A selector producing a list of candidate peers.
pub type SelectorRef = Arc<dyn Selector<Context = SelectorContext, Output = Vec<Peer>>>;
/// A selector aware of per-region statistics, producing a peer per region.
pub type RegionStatAwareSelectorRef =
    Arc<dyn RegionStatAwareSelector<Context = SelectorContext, Output = Vec<(RegionId, Peer)>>>;
487
/// Applies leader/follower state transitions when leadership changes.
pub struct MetaStateHandler {
    // Pub/sub subscription manager, if installed; all subscriptions are
    // dropped when leadership is lost.
    subscribe_manager: Option<SubscriptionManagerRef>,
    greptimedb_telemetry_task: Arc<GreptimeDBTelemetryTask>,
    // Leader-local cache, re-loaded on leader start.
    leader_cached_kv_backend: Arc<LeaderCachedKvBackend>,
    leadership_change_notifier: LeadershipChangeNotifier,
    mailbox: MailboxRef,
    state: StateRef,
}
496
impl MetaStateHandler {
    /// Transitions this node into the leader state.
    ///
    /// The state first becomes leader with the flag `false`, then switches to
    /// `become_leader(true)` only after the leader cache loads successfully —
    /// NOTE(review): the flag presumably marks whether the leader cache is
    /// usable; confirm in the `state` module. Listeners are then notified and
    /// telemetry reporting is enabled.
    pub async fn on_leader_start(&self) {
        self.state.write().unwrap().next_state(become_leader(false));

        if let Err(e) = self.leader_cached_kv_backend.load().await {
            // Stay in the `become_leader(false)` state on load failure.
            error!(e; "Failed to load kv into leader cache kv store");
        } else {
            self.state.write().unwrap().next_state(become_leader(true));
        }

        self.leadership_change_notifier
            .notify_on_leader_start()
            .await;

        self.greptimedb_telemetry_task.should_report(true);
    }

    /// Transitions this node into the follower state.
    ///
    /// Resets the mailbox, notifies leadership listeners, disables telemetry
    /// reporting, and drops all pub/sub subscriptions so clients re-subscribe
    /// to the new leader.
    pub async fn on_leader_stop(&self) {
        self.state.write().unwrap().next_state(become_follower());

        self.mailbox.reset().await;
        self.leadership_change_notifier
            .notify_on_leader_stop()
            .await;

        self.greptimedb_telemetry_task.should_report(false);

        if let Some(sub_manager) = self.subscribe_manager.clone() {
            info!("Leader changed, un_subscribe all");
            if let Err(e) = sub_manager.unsubscribe_all() {
                error!(e; "Failed to un_subscribe all");
            }
        }
    }
}
535
/// The metasrv server: coordinates cluster metadata, leader election,
/// heartbeats, DDL procedures, and region management.
pub struct Metasrv {
    // Leader/follower state machine shared with background tasks.
    state: StateRef,
    // Guards against double start/stop; also polled by the election and
    // candidate-registration loops spawned in `try_start`.
    started: Arc<AtomicBool>,
    start_time_ms: u64,
    options: MetasrvOptions,
    // In-memory kv backend; reset on leadership changes.
    in_memory: ResettableKvBackendRef,
    kv_backend: KvBackendRef,
    // Leader-local cache over `kv_backend`; loaded on becoming leader.
    leader_cached_kv_backend: Arc<LeaderCachedKvBackend>,
    meta_peer_client: MetaPeerClientRef,
    // Datanode selector; `flow_selector` is the flownode counterpart.
    selector: SelectorRef,
    selector_ctx: SelectorContext,
    flow_selector: SelectorRef,
    // Built once in `try_start` from the builder below.
    handler_group: RwLock<Option<HeartbeatHandlerGroupRef>>,
    handler_group_builder: Mutex<Option<HeartbeatHandlerGroupBuilder>>,
    // Election service; `None` in single-instance deployments.
    election: Option<ElectionRef>,
    procedure_manager: ProcedureManagerRef,
    mailbox: MailboxRef,
    ddl_manager: DdlManagerRef,
    wal_provider: WalProviderRef,
    table_metadata_manager: TableMetadataManagerRef,
    runtime_switch_manager: RuntimeSwitchManagerRef,
    memory_region_keeper: MemoryRegionKeeperRef,
    greptimedb_telemetry_task: Arc<GreptimeDBTelemetryTask>,
    region_migration_manager: RegionMigrationManagerRef,
    // Optional tickers below are registered as leadership-change listeners.
    region_supervisor_ticker: Option<RegionSupervisorTickerRef>,
    cache_invalidator: CacheInvalidatorRef,
    leader_region_registry: LeaderRegionRegistryRef,
    topic_stats_registry: TopicStatsRegistryRef,
    wal_prune_ticker: Option<WalPruneTickerRef>,
    region_flush_ticker: Option<RegionFlushTickerRef>,
    table_id_allocator: ResourceIdAllocatorRef,
    reconciliation_manager: ReconciliationManagerRef,
    resource_stat: ResourceStatRef,
    gc_ticker: Option<GcTickerRef>,
    database_operator: DatabaseOperatorRef,

    plugins: Plugins,
}
578
579impl Metasrv {
580 pub async fn try_start(&self) -> Result<()> {
581 if self
582 .started
583 .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
584 .is_err()
585 {
586 warn!("Metasrv already started");
587 return Ok(());
588 }
589
590 let handler_group_builder =
591 self.handler_group_builder
592 .lock()
593 .unwrap()
594 .take()
595 .context(error::UnexpectedSnafu {
596 violated: "expected heartbeat handler group builder",
597 })?;
598 *self.handler_group.write().unwrap() = Some(Arc::new(handler_group_builder.build()?));
599
600 self.table_metadata_manager
602 .init()
603 .await
604 .context(InitMetadataSnafu)?;
605
606 if let Some(election) = self.election() {
607 let procedure_manager = self.procedure_manager.clone();
608 let in_memory = self.in_memory.clone();
609 let leader_cached_kv_backend = self.leader_cached_kv_backend.clone();
610 let subscribe_manager = self.subscription_manager();
611 let mut rx = election.subscribe_leader_change();
612 let greptimedb_telemetry_task = self.greptimedb_telemetry_task.clone();
613 greptimedb_telemetry_task
614 .start()
615 .context(StartTelemetryTaskSnafu)?;
616
617 let mut leadership_change_notifier = LeadershipChangeNotifier::default();
619 leadership_change_notifier.add_listener(self.wal_provider.clone());
620 leadership_change_notifier
621 .add_listener(Arc::new(ProcedureManagerListenerAdapter(procedure_manager)));
622 leadership_change_notifier.add_listener(Arc::new(NodeExpiryListener::new(
623 self.options.node_max_idle_time,
624 self.in_memory.clone(),
625 )));
626 if let Some(region_supervisor_ticker) = &self.region_supervisor_ticker {
627 leadership_change_notifier.add_listener(region_supervisor_ticker.clone() as _);
628 }
629 if let Some(wal_prune_ticker) = &self.wal_prune_ticker {
630 leadership_change_notifier.add_listener(wal_prune_ticker.clone() as _);
631 }
632 if let Some(region_flush_trigger) = &self.region_flush_ticker {
633 leadership_change_notifier.add_listener(region_flush_trigger.clone() as _);
634 }
635 if let Some(gc_ticker) = &self.gc_ticker {
636 leadership_change_notifier.add_listener(gc_ticker.clone() as _);
637 }
638 if let Some(customizer) = self.plugins.get::<LeadershipChangeNotifierCustomizerRef>() {
639 customizer.customize(&mut leadership_change_notifier);
640 }
641
642 let state_handler = MetaStateHandler {
643 greptimedb_telemetry_task,
644 subscribe_manager,
645 state: self.state.clone(),
646 leader_cached_kv_backend: leader_cached_kv_backend.clone(),
647 leadership_change_notifier,
648 mailbox: self.mailbox.clone(),
649 };
650 let _handle = common_runtime::spawn_global(async move {
651 loop {
652 match rx.recv().await {
653 Ok(msg) => {
654 in_memory.reset();
655 leader_cached_kv_backend.reset();
656 info!("Leader's cache has bean cleared on leader change: {msg}");
657 match msg {
658 LeaderChangeMessage::Elected(_) => {
659 state_handler.on_leader_start().await;
660 }
661 LeaderChangeMessage::StepDown(leader) => {
662 error!("Leader :{:?} step down", leader);
663
664 state_handler.on_leader_stop().await;
665 }
666 }
667 }
668 Err(RecvError::Closed) => {
669 error!("Not expected, is leader election loop still running?");
670 break;
671 }
672 Err(RecvError::Lagged(_)) => {
673 break;
674 }
675 }
676 }
677
678 state_handler.on_leader_stop().await;
679 });
680
681 {
683 let election = election.clone();
684 let started = self.started.clone();
685 let node_info = self.node_info();
686 let _handle = common_runtime::spawn_global(async move {
687 while started.load(Ordering::Acquire) {
688 let res = election.register_candidate(&node_info).await;
689 if let Err(e) = res {
690 warn!(e; "Metasrv register candidate error");
691 }
692 }
693 });
694 }
695
696 {
698 let election = election.clone();
699 let started = self.started.clone();
700 let _handle = common_runtime::spawn_global(async move {
701 while started.load(Ordering::Acquire) {
702 let res = election.campaign().await;
703 if let Err(e) = res {
704 warn!(e; "Metasrv election error");
705 }
706 election.reset_campaign().await;
707 info!("Metasrv re-initiate election");
708 }
709 info!("Metasrv stopped");
710 });
711 }
712 } else {
713 warn!(
714 "Ensure only one instance of Metasrv is running, as there is no election service."
715 );
716
717 if let Err(e) = self.wal_provider.start().await {
718 error!(e; "Failed to start wal provider");
719 }
720 self.leader_cached_kv_backend
722 .load()
723 .await
724 .context(KvBackendSnafu)?;
725 self.procedure_manager
726 .start()
727 .await
728 .context(StartProcedureManagerSnafu)?;
729 }
730
731 info!("Metasrv started");
732
733 Ok(())
734 }
735
    /// Stops the metasrv. Idempotent: returns `Ok(())` immediately if already
    /// stopped.
    ///
    /// Clearing the `started` flag also terminates the election and
    /// candidate-registration loops spawned in `try_start` (they poll the
    /// flag), after which the procedure manager is stopped.
    pub async fn shutdown(&self) -> Result<()> {
        if self
            .started
            .compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
            .is_err()
        {
            warn!("Metasrv already stopped");
            return Ok(());
        }

        self.procedure_manager
            .stop()
            .await
            .context(StopProcedureManagerSnafu)?;

        info!("Metasrv stopped");

        Ok(())
    }
755
    /// Returns the timestamp (milliseconds) recorded when this metasrv started.
    pub fn start_time_ms(&self) -> u64 {
        self.start_time_ms
    }

    /// Returns the resource statistics provider for this node.
    pub fn resource_stat(&self) -> &ResourceStatRef {
        &self.resource_stat
    }

    /// Builds this node's info (address, build version/commit, start time,
    /// resource capacity/usage, hostname); used when registering this node as
    /// an election candidate (see `try_start`).
    pub fn node_info(&self) -> MetasrvNodeInfo {
        let build_info = common_version::build_info();
        MetasrvNodeInfo {
            addr: self.options().grpc.server_addr.clone(),
            version: build_info.version.to_string(),
            git_commit: build_info.commit_short.to_string(),
            start_time_ms: self.start_time_ms(),
            total_cpu_millicores: self.resource_stat.get_total_cpu_millicores(),
            total_memory_bytes: self.resource_stat.get_total_memory_bytes(),
            cpu_usage_millicores: self.resource_stat.get_cpu_usage_millicores(),
            memory_usage_bytes: self.resource_stat.get_memory_usage_bytes(),
            // Hostname lookup failure degrades to an empty string.
            hostname: hostname::get()
                .unwrap_or_default()
                .to_string_lossy()
                .to_string(),
        }
    }
781
    /// Looks up the datanode peer by id, returning it only if the node is
    /// still considered alive within the configured datanode lease.
    pub(crate) async fn lookup_datanode_peer(&self, peer_id: u64) -> Result<Option<Peer>> {
        discovery::utils::alive_datanode(
            &DefaultSystemTimer,
            self.meta_peer_client.as_ref(),
            peer_id,
            default_distributed_time_constants().datanode_lease,
        )
        .await
    }
793
    /// Returns the options this metasrv was built with.
    pub fn options(&self) -> &MetasrvOptions {
        &self.options
    }

    /// Returns the in-memory (resettable) kv backend.
    pub fn in_memory(&self) -> &ResettableKvBackendRef {
        &self.in_memory
    }

    /// Returns the persistent kv backend.
    pub fn kv_backend(&self) -> &KvBackendRef {
        &self.kv_backend
    }

    /// Returns the meta peer client.
    pub fn meta_peer_client(&self) -> &MetaPeerClientRef {
        &self.meta_peer_client
    }

    /// Returns the datanode selector.
    pub fn selector(&self) -> &SelectorRef {
        &self.selector
    }

    /// Returns the context shared by the selectors.
    pub fn selector_ctx(&self) -> &SelectorContext {
        &self.selector_ctx
    }

    /// Returns the flownode selector.
    pub fn flow_selector(&self) -> &SelectorRef {
        &self.flow_selector
    }

    /// Returns the heartbeat handler group, or `None` before `try_start`
    /// has built it.
    pub fn handler_group(&self) -> Option<HeartbeatHandlerGroupRef> {
        self.handler_group.read().unwrap().clone()
    }

    /// Returns the election service, or `None` in single-instance mode.
    pub fn election(&self) -> Option<&ElectionRef> {
        self.election.as_ref()
    }

    /// Returns the mailbox used to message other nodes.
    pub fn mailbox(&self) -> &MailboxRef {
        &self.mailbox
    }

    /// Returns the DDL manager.
    pub fn ddl_manager(&self) -> &DdlManagerRef {
        &self.ddl_manager
    }

    /// Returns the procedure manager.
    pub fn procedure_manager(&self) -> &ProcedureManagerRef {
        &self.procedure_manager
    }

    /// Returns the table metadata manager.
    pub fn table_metadata_manager(&self) -> &TableMetadataManagerRef {
        &self.table_metadata_manager
    }

    /// Returns the runtime switch manager.
    pub fn runtime_switch_manager(&self) -> &RuntimeSwitchManagerRef {
        &self.runtime_switch_manager
    }

    /// Returns the in-memory region keeper.
    pub fn memory_region_keeper(&self) -> &MemoryRegionKeeperRef {
        &self.memory_region_keeper
    }

    /// Returns the region migration manager.
    pub fn region_migration_manager(&self) -> &RegionMigrationManagerRef {
        &self.region_migration_manager
    }

    /// Returns the publisher plugin, if installed.
    pub fn publish(&self) -> Option<PublisherRef> {
        self.plugins.get::<PublisherRef>()
    }

    /// Returns the subscription manager plugin, if installed.
    pub fn subscription_manager(&self) -> Option<SubscriptionManagerRef> {
        self.plugins.get::<SubscriptionManagerRef>()
    }

    /// Returns the table id allocator.
    pub fn table_id_allocator(&self) -> &ResourceIdAllocatorRef {
        &self.table_id_allocator
    }

    /// Returns the reconciliation manager.
    pub fn reconciliation_manager(&self) -> &ReconciliationManagerRef {
        &self.reconciliation_manager
    }

    /// Returns the database operator.
    pub fn database_operator(&self) -> &DatabaseOperatorRef {
        &self.database_operator
    }

    /// Returns the plugin registry.
    pub fn plugins(&self) -> &Plugins {
        &self.plugins
    }

    /// Returns a handle to the `started` flag shared with background tasks.
    pub fn started(&self) -> Arc<AtomicBool> {
        self.started.clone()
    }

    /// Returns the GC ticker, if GC scheduling is enabled.
    pub fn gc_ticker(&self) -> Option<GcTickerRef> {
        self.gc_ticker.as_ref().cloned()
    }
889
890 #[inline]
891 pub fn new_ctx(&self) -> Context {
892 let server_addr = self.options().grpc.server_addr.clone();
893 let in_memory = self.in_memory.clone();
894 let kv_backend = self.kv_backend.clone();
895 let leader_cached_kv_backend = self.leader_cached_kv_backend.clone();
896 let meta_peer_client = self.meta_peer_client.clone();
897 let mailbox = self.mailbox.clone();
898 let election = self.election.clone();
899 let table_metadata_manager = self.table_metadata_manager.clone();
900 let cache_invalidator = self.cache_invalidator.clone();
901 let leader_region_registry = self.leader_region_registry.clone();
902 let topic_stats_registry = self.topic_stats_registry.clone();
903
904 Context {
905 server_addr,
906 in_memory,
907 kv_backend,
908 leader_cached_kv_backend,
909 meta_peer_client,
910 mailbox,
911 election,
912 is_infancy: false,
913 table_metadata_manager,
914 cache_invalidator,
915 leader_region_registry,
916 topic_stats_registry,
917 heartbeat_interval: self.options().heartbeat_interval,
918 is_handshake: false,
919 }
920 }
921}
922
#[cfg(test)]
mod tests {
    use crate::metasrv::MetasrvNodeInfo;

    /// A minimal JSON payload (without the resource-stat fields) must still
    /// deserialize into `MetasrvNodeInfo` with the basic fields populated —
    /// presumably for compatibility with older serialized node info.
    #[test]
    fn test_deserialize_metasrv_node_info() {
        let json = r#"{"addr":"127.0.0.1:4002","version":"0.1.0","git_commit":"1234567890","start_time_ms":1715145600}"#;
        let node_info: MetasrvNodeInfo = serde_json::from_str(json).unwrap();
        assert_eq!(node_info.addr, "127.0.0.1:4002");
        assert_eq!(node_info.version, "0.1.0");
        assert_eq!(node_info.git_commit, "1234567890");
        assert_eq!(node_info.start_time_ms, 1715145600);
    }
}