// meta_client/client.rs

1// Copyright 2023 Greptime Team
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15mod ask_leader;
16mod config;
17pub mod heartbeat;
18mod load_balance;
19mod procedure;
20
21mod cluster;
22mod store;
23mod util;
24
25use std::fmt::Debug;
26use std::sync::Arc;
27use std::time::Duration;
28
29use api::v1::meta::{
30    MetasrvNodeInfo, ProcedureDetailResponse, ReconcileRequest, ReconcileResponse, Role,
31};
32pub use ask_leader::{AskLeader, LeaderProvider, LeaderProviderRef};
33use cluster::Client as ClusterClient;
34pub use cluster::ClusterKvBackend;
35use common_error::ext::BoxedError;
36use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
37use common_meta::cluster::{
38    ClusterInfo, MetasrvStatus, NodeInfo, NodeInfoKey, NodeStatus, Role as ClusterRole,
39};
40use common_meta::datanode::{DatanodeStatKey, DatanodeStatValue, RegionStat};
41use common_meta::error::{
42    self as meta_error, ExternalSnafu, Result as MetaResult, UnsupportedSnafu,
43};
44use common_meta::key::flow::flow_state::{FlowStat, FlowStateManager};
45use common_meta::kv_backend::KvBackendRef;
46use common_meta::procedure_executor::{ExecutorContext, ProcedureExecutor};
47use common_meta::range_stream::PaginationStream;
48use common_meta::rpc::KeyValue;
49use common_meta::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
50use common_meta::rpc::procedure::{
51    AddRegionFollowerRequest, AddTableFollowerRequest, ManageRegionFollowerRequest,
52    MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse,
53    RemoveRegionFollowerRequest, RemoveTableFollowerRequest,
54};
55use common_meta::rpc::store::{
56    BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
57    BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
58    DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
59};
60use common_options::plugin_options::PluginOptionsDeserializer;
61use common_telemetry::info;
62use config::Client as ConfigClient;
63use futures::TryStreamExt;
64use heartbeat::{Client as HeartbeatClient, HeartbeatConfig};
65use procedure::Client as ProcedureClient;
66use serde::de::DeserializeOwned;
67use snafu::{OptionExt, ResultExt};
68use store::Client as StoreClient;
69
70pub use self::heartbeat::{HeartbeatSender, HeartbeatStream};
71use crate::client::ask_leader::{LeaderProviderFactoryImpl, LeaderProviderFactoryRef};
72use crate::error::{
73    ConvertMetaConfigSnafu, ConvertMetaRequestSnafu, ConvertMetaResponseSnafu, Error,
74    GetFlowStatSnafu, NotStartedSnafu, Result,
75};
76
/// The identifier of a Metasrv cluster member (e.g. a datanode id).
pub type Id = u64;

/// Max retries when asking the Metasrv cluster which peer is the leader.
const DEFAULT_ASK_LEADER_MAX_RETRY: usize = 3;
/// Max retries when submitting a DDL task.
const DEFAULT_SUBMIT_DDL_MAX_RETRY: usize = 3;
/// Max retries for cluster-info requests.
const DEFAULT_CLUSTER_CLIENT_MAX_RETRY: usize = 3;
/// Default timeout applied to each DDL request when none is configured.
const DEFAULT_DDL_TIMEOUT: Duration = Duration::from_secs(10);
83
/// Builder for [MetaClient]; each sub-client (heartbeat, store, procedure,
/// cluster info) is only instantiated when explicitly enabled.
#[derive(Clone, Debug, Default)]
pub struct MetaClientBuilder {
    /// Member id of this node; Frontend uses 0 (it needs no member id).
    id: Id,
    /// Role of this node (Frontend / Datanode / Flownode).
    role: Role,
    /// Enables the heartbeat client (and, with it, the config client).
    enable_heartbeat: bool,
    /// Enables the KV store client.
    enable_store: bool,
    /// Enables the procedure (DDL) client.
    enable_procedure: bool,
    /// Enables the cluster-info client.
    enable_access_cluster_info: bool,
    /// Optional region follower client started alongside the others.
    region_follower: Option<RegionFollowerClientRef>,
    /// Channel manager shared by all sub-clients unless overridden below.
    channel_manager: Option<ChannelManager>,
    /// Dedicated channel manager for DDL (procedure) requests.
    ddl_channel_manager: Option<ChannelManager>,
    /// The default ddl timeout for each request.
    ddl_timeout: Option<Duration>,
    /// Dedicated channel manager for heartbeat streams.
    heartbeat_channel_manager: Option<ChannelManager>,
}
99
100impl MetaClientBuilder {
101    pub fn new(member_id: u64, role: Role) -> Self {
102        Self {
103            id: member_id,
104            role,
105            ..Default::default()
106        }
107    }
108
109    /// Returns the role of Frontend's default options.
110    pub fn frontend_default_options() -> Self {
111        // Frontend does not need a member id.
112        Self::new(0, Role::Frontend)
113            .enable_store()
114            .enable_heartbeat()
115            .enable_procedure()
116            .enable_access_cluster_info()
117    }
118
119    /// Returns the role of Datanode's default options.
120    pub fn datanode_default_options(member_id: u64) -> Self {
121        Self::new(member_id, Role::Datanode)
122            .enable_store()
123            .enable_heartbeat()
124    }
125
126    /// Returns the role of Flownode's default options.
127    pub fn flownode_default_options(member_id: u64) -> Self {
128        Self::new(member_id, Role::Flownode)
129            .enable_store()
130            .enable_heartbeat()
131            .enable_procedure()
132            .enable_access_cluster_info()
133    }
134
135    pub fn enable_heartbeat(self) -> Self {
136        Self {
137            enable_heartbeat: true,
138            ..self
139        }
140    }
141
142    pub fn enable_store(self) -> Self {
143        Self {
144            enable_store: true,
145            ..self
146        }
147    }
148
149    pub fn enable_procedure(self) -> Self {
150        Self {
151            enable_procedure: true,
152            ..self
153        }
154    }
155
156    pub fn enable_access_cluster_info(self) -> Self {
157        Self {
158            enable_access_cluster_info: true,
159            ..self
160        }
161    }
162
163    pub fn channel_manager(self, channel_manager: ChannelManager) -> Self {
164        Self {
165            channel_manager: Some(channel_manager),
166            ..self
167        }
168    }
169
170    pub fn ddl_channel_manager(self, channel_manager: ChannelManager) -> Self {
171        Self {
172            ddl_channel_manager: Some(channel_manager),
173            ..self
174        }
175    }
176
177    pub fn ddl_timeout(self, timeout: Duration) -> Self {
178        Self {
179            ddl_timeout: Some(timeout),
180            ..self
181        }
182    }
183
184    pub fn heartbeat_channel_manager(self, channel_manager: ChannelManager) -> Self {
185        Self {
186            heartbeat_channel_manager: Some(channel_manager),
187            ..self
188        }
189    }
190
191    pub fn with_region_follower(self, region_follower: RegionFollowerClientRef) -> Self {
192        Self {
193            region_follower: Some(region_follower),
194            ..self
195        }
196    }
197
198    pub fn build(self) -> MetaClient {
199        let mgr = self.channel_manager.unwrap_or_default();
200        let heartbeat_channel_manager = self
201            .heartbeat_channel_manager
202            .clone()
203            .unwrap_or_else(|| mgr.clone());
204
205        let heartbeat = self.enable_heartbeat.then(|| {
206            if self.heartbeat_channel_manager.is_some() {
207                info!("Enable heartbeat channel using the heartbeat channel manager.");
208            }
209
210            HeartbeatClient::new(self.id, self.role, heartbeat_channel_manager.clone())
211        });
212        let config = self
213            .enable_heartbeat
214            .then(|| ConfigClient::new(self.id, self.role, mgr.clone()));
215        let store = self
216            .enable_store
217            .then(|| StoreClient::new(self.id, self.role, mgr.clone()));
218        let procedure = self.enable_procedure.then(|| {
219            let mgr = self.ddl_channel_manager.unwrap_or(mgr.clone());
220            ProcedureClient::new(
221                self.id,
222                self.role,
223                mgr,
224                DEFAULT_SUBMIT_DDL_MAX_RETRY,
225                self.ddl_timeout.unwrap_or(DEFAULT_DDL_TIMEOUT),
226            )
227        });
228        let cluster = self
229            .enable_access_cluster_info
230            .then(|| ClusterClient::new(mgr.clone(), DEFAULT_CLUSTER_CLIENT_MAX_RETRY));
231        let region_follower = self.region_follower.clone();
232
233        MetaClient {
234            id: self.id,
235            channel_manager: mgr.clone(),
236            leader_provider_factory: Arc::new(LeaderProviderFactoryImpl::new(
237                self.id,
238                self.role,
239                DEFAULT_ASK_LEADER_MAX_RETRY,
240                heartbeat_channel_manager,
241            )),
242            heartbeat,
243            config,
244            store,
245            procedure,
246            cluster,
247            region_follower,
248        }
249    }
250}
251
/// Client for a Metasrv cluster. Each capability (heartbeat, KV store,
/// procedure/DDL, cluster info, region follower management) is an optional
/// sub-client configured through [MetaClientBuilder].
#[derive(Debug)]
pub struct MetaClient {
    /// Member id of this node; 0 for roles without one (e.g. Frontend).
    id: Id,
    /// Channel manager shared by the sub-clients built from it.
    channel_manager: ChannelManager,
    /// Factory producing [LeaderProvider]s from a set of Metasrv addresses.
    leader_provider_factory: LeaderProviderFactoryRef,
    /// Present only when heartbeat was enabled at build time.
    heartbeat: Option<HeartbeatClient>,
    /// Present only when heartbeat was enabled at build time.
    config: Option<ConfigClient>,
    /// Present only when the store was enabled at build time.
    store: Option<StoreClient>,
    /// Present only when procedure (DDL) access was enabled at build time.
    procedure: Option<ProcedureClient>,
    /// Present only when cluster-info access was enabled at build time.
    cluster: Option<ClusterClient>,
    /// Optional client for region follower management.
    region_follower: Option<RegionFollowerClientRef>,
}
264
265impl MetaClient {
266    pub fn new(id: Id, role: Role) -> Self {
267        Self {
268            id,
269            channel_manager: ChannelManager::default(),
270            leader_provider_factory: Arc::new(LeaderProviderFactoryImpl::new(
271                id,
272                role,
273                DEFAULT_ASK_LEADER_MAX_RETRY,
274                ChannelManager::default(),
275            )),
276            heartbeat: None,
277            config: None,
278            store: None,
279            procedure: None,
280            cluster: None,
281            region_follower: None,
282        }
283    }
284}
285
/// Shared, thread-safe handle to a [RegionFollowerClient].
pub type RegionFollowerClientRef = Arc<dyn RegionFollowerClient>;

/// A trait for clients that can manage region followers.
#[async_trait::async_trait]
pub trait RegionFollowerClient: Sync + Send + Debug {
    /// Submits a request to add a follower for a single region.
    async fn add_region_follower(&self, request: AddRegionFollowerRequest) -> Result<()>;

    /// Submits a request to remove a follower from a single region.
    async fn remove_region_follower(&self, request: RemoveRegionFollowerRequest) -> Result<()>;

    /// Submits a request to add followers for a whole table.
    async fn add_table_follower(&self, request: AddTableFollowerRequest) -> Result<()>;

    /// Submits a request to remove followers from a whole table.
    async fn remove_table_follower(&self, request: RemoveTableFollowerRequest) -> Result<()>;

    /// Starts the client against the given Metasrv peer addresses.
    async fn start(&self, urls: &[&str]) -> Result<()>;

    /// Starts the client with an externally supplied [LeaderProvider].
    async fn start_with(&self, leader_provider: LeaderProviderRef) -> Result<()>;
}
303
304#[async_trait::async_trait]
305impl ProcedureExecutor for MetaClient {
306    async fn submit_ddl_task(
307        &self,
308        _ctx: &ExecutorContext,
309        request: SubmitDdlTaskRequest,
310    ) -> MetaResult<SubmitDdlTaskResponse> {
311        self.submit_ddl_task(request)
312            .await
313            .map_err(BoxedError::new)
314            .context(meta_error::ExternalSnafu)
315    }
316
317    async fn migrate_region(
318        &self,
319        _ctx: &ExecutorContext,
320        request: MigrateRegionRequest,
321    ) -> MetaResult<MigrateRegionResponse> {
322        self.migrate_region(request)
323            .await
324            .map_err(BoxedError::new)
325            .context(meta_error::ExternalSnafu)
326    }
327
328    async fn reconcile(
329        &self,
330        _ctx: &ExecutorContext,
331        request: ReconcileRequest,
332    ) -> MetaResult<ReconcileResponse> {
333        self.reconcile(request)
334            .await
335            .map_err(BoxedError::new)
336            .context(meta_error::ExternalSnafu)
337    }
338
339    async fn manage_region_follower(
340        &self,
341        _ctx: &ExecutorContext,
342        request: ManageRegionFollowerRequest,
343    ) -> MetaResult<()> {
344        if let Some(region_follower) = &self.region_follower {
345            match request {
346                ManageRegionFollowerRequest::AddRegionFollower(add_region_follower_request) => {
347                    region_follower
348                        .add_region_follower(add_region_follower_request)
349                        .await
350                }
351                ManageRegionFollowerRequest::RemoveRegionFollower(
352                    remove_region_follower_request,
353                ) => {
354                    region_follower
355                        .remove_region_follower(remove_region_follower_request)
356                        .await
357                }
358                ManageRegionFollowerRequest::AddTableFollower(add_table_follower_request) => {
359                    region_follower
360                        .add_table_follower(add_table_follower_request)
361                        .await
362                }
363                ManageRegionFollowerRequest::RemoveTableFollower(remove_table_follower_request) => {
364                    region_follower
365                        .remove_table_follower(remove_table_follower_request)
366                        .await
367                }
368            }
369            .map_err(BoxedError::new)
370            .context(meta_error::ExternalSnafu)
371        } else {
372            UnsupportedSnafu {
373                operation: "manage_region_follower",
374            }
375            .fail()
376        }
377    }
378
379    async fn query_procedure_state(
380        &self,
381        _ctx: &ExecutorContext,
382        pid: &str,
383    ) -> MetaResult<ProcedureStateResponse> {
384        self.query_procedure_state(pid)
385            .await
386            .map_err(BoxedError::new)
387            .context(meta_error::ExternalSnafu)
388    }
389
390    async fn list_procedures(&self, _ctx: &ExecutorContext) -> MetaResult<ProcedureDetailResponse> {
391        self.procedure_client()
392            .map_err(BoxedError::new)
393            .context(meta_error::ExternalSnafu)?
394            .list_procedures()
395            .await
396            .map_err(BoxedError::new)
397            .context(meta_error::ExternalSnafu)
398    }
399}
400
401// TODO(zyy17): Allow deprecated fields for backward compatibility. Remove this when the deprecated fields are removed from the proto.
402#[allow(deprecated)]
403#[async_trait::async_trait]
404impl ClusterInfo for MetaClient {
405    type Error = Error;
406
407    async fn list_nodes(&self, role: Option<ClusterRole>) -> Result<Vec<NodeInfo>> {
408        let cluster_client = self.cluster_client()?;
409
410        let (get_metasrv_nodes, nodes_key_prefix) = match role {
411            None => (true, Some(NodeInfoKey::key_prefix())),
412            Some(ClusterRole::Metasrv) => (true, None),
413            Some(role) => (false, Some(NodeInfoKey::key_prefix_with_role(role))),
414        };
415
416        let mut nodes = if get_metasrv_nodes {
417            let last_activity_ts = -1; // Metasrv does not provide this information.
418
419            let (leader, followers): (Option<MetasrvNodeInfo>, Vec<MetasrvNodeInfo>) =
420                cluster_client.get_metasrv_peers().await?;
421            followers
422                .into_iter()
423                .map(|node| {
424                    if let Some(node_info) = node.info {
425                        NodeInfo {
426                            peer: node.peer.unwrap_or_default(),
427                            last_activity_ts,
428                            status: NodeStatus::Metasrv(MetasrvStatus { is_leader: false }),
429                            version: node_info.version,
430                            git_commit: node_info.git_commit,
431                            start_time_ms: node_info.start_time_ms,
432                            total_cpu_millicores: node_info.total_cpu_millicores,
433                            total_memory_bytes: node_info.total_memory_bytes,
434                            cpu_usage_millicores: node_info.cpu_usage_millicores,
435                            memory_usage_bytes: node_info.memory_usage_bytes,
436                            hostname: node_info.hostname,
437                        }
438                    } else {
439                        // TODO(zyy17): It's for backward compatibility. Remove this when the deprecated fields are removed from the proto.
440                        NodeInfo {
441                            peer: node.peer.unwrap_or_default(),
442                            last_activity_ts,
443                            status: NodeStatus::Metasrv(MetasrvStatus { is_leader: false }),
444                            version: node.version,
445                            git_commit: node.git_commit,
446                            start_time_ms: node.start_time_ms,
447                            total_cpu_millicores: node.cpus as i64,
448                            total_memory_bytes: node.memory_bytes as i64,
449                            cpu_usage_millicores: 0,
450                            memory_usage_bytes: 0,
451                            hostname: "".to_string(),
452                        }
453                    }
454                })
455                .chain(leader.into_iter().map(|node| {
456                    if let Some(node_info) = node.info {
457                        NodeInfo {
458                            peer: node.peer.unwrap_or_default(),
459                            last_activity_ts,
460                            status: NodeStatus::Metasrv(MetasrvStatus { is_leader: true }),
461                            version: node_info.version,
462                            git_commit: node_info.git_commit,
463                            start_time_ms: node_info.start_time_ms,
464                            total_cpu_millicores: node_info.total_cpu_millicores,
465                            total_memory_bytes: node_info.total_memory_bytes,
466                            cpu_usage_millicores: node_info.cpu_usage_millicores,
467                            memory_usage_bytes: node_info.memory_usage_bytes,
468                            hostname: node_info.hostname,
469                        }
470                    } else {
471                        // TODO(zyy17): It's for backward compatibility. Remove this when the deprecated fields are removed from the proto.
472                        NodeInfo {
473                            peer: node.peer.unwrap_or_default(),
474                            last_activity_ts,
475                            status: NodeStatus::Metasrv(MetasrvStatus { is_leader: true }),
476                            version: node.version,
477                            git_commit: node.git_commit,
478                            start_time_ms: node.start_time_ms,
479                            total_cpu_millicores: node.cpus as i64,
480                            total_memory_bytes: node.memory_bytes as i64,
481                            cpu_usage_millicores: 0,
482                            memory_usage_bytes: 0,
483                            hostname: "".to_string(),
484                        }
485                    }
486                }))
487                .collect::<Vec<_>>()
488        } else {
489            Vec::new()
490        };
491
492        if let Some(prefix) = nodes_key_prefix {
493            let req = RangeRequest::new().with_prefix(prefix);
494            let res = cluster_client.range(req).await?;
495            for kv in res.kvs {
496                nodes.push(NodeInfo::try_from(kv.value).context(ConvertMetaResponseSnafu)?);
497            }
498        }
499
500        Ok(nodes)
501    }
502
503    async fn list_region_stats(&self) -> Result<Vec<RegionStat>> {
504        let cluster_kv_backend = Arc::new(self.cluster_client()?);
505        let range_prefix = DatanodeStatKey::prefix_key();
506        let req = RangeRequest::new().with_prefix(range_prefix);
507        let stream =
508            PaginationStream::new(cluster_kv_backend, req, 256, decode_stats).into_stream();
509        let mut datanode_stats = stream
510            .try_collect::<Vec<_>>()
511            .await
512            .context(ConvertMetaResponseSnafu)?;
513        let region_stats = datanode_stats
514            .iter_mut()
515            .flat_map(|datanode_stat| {
516                let last = datanode_stat.stats.pop();
517                last.map(|stat| stat.region_stats).unwrap_or_default()
518            })
519            .collect::<Vec<_>>();
520
521        Ok(region_stats)
522    }
523
524    async fn list_flow_stats(&self) -> Result<Option<FlowStat>> {
525        let cluster_backend = ClusterKvBackend::new(Arc::new(self.cluster_client()?));
526        let cluster_backend = Arc::new(cluster_backend) as KvBackendRef;
527        let flow_state_manager = FlowStateManager::new(cluster_backend);
528        let res = flow_state_manager.get().await.context(GetFlowStatSnafu)?;
529
530        Ok(res.map(|r| r.into()))
531    }
532}
533
534fn decode_stats(kv: KeyValue) -> MetaResult<DatanodeStatValue> {
535    DatanodeStatValue::try_from(kv.value)
536        .map_err(BoxedError::new)
537        .context(ExternalSnafu)
538}
539
impl MetaClient {
    /// Starts every enabled sub-client against the given Metasrv addresses,
    /// creating a leader provider from those addresses.
    pub async fn start<U, A>(&mut self, urls: A) -> Result<()>
    where
        U: AsRef<str>,
        A: AsRef<[U]> + Clone,
    {
        info!("MetaClient channel config: {:?}", self.channel_config());

        let urls = urls.as_ref().iter().map(|u| u.as_ref()).collect::<Vec<_>>();
        let leader_provider = self.leader_provider_factory.create(&urls);

        self.start_with(leader_provider, urls).await
    }

    /// Start the client with a [LeaderProvider] and other Metasrv peers' addresses.
    pub(crate) async fn start_with<U, A>(
        &mut self,
        leader_provider: LeaderProviderRef,
        peers: A,
    ) -> Result<()>
    where
        U: AsRef<str>,
        A: AsRef<[U]> + Clone,
    {
        if let Some(client) = &self.region_follower {
            info!("Starting region follower client ...");
            client.start_with(leader_provider.clone()).await?;
        }

        if let Some(client) = &self.heartbeat {
            info!("Starting heartbeat client ...");
            client.start_with(leader_provider.clone()).await?;
        }

        if let Some(client) = &self.config {
            info!("Starting config client ...");
            client.start_with(leader_provider.clone()).await?;
        }

        // Unlike the others, the store client is started with the full peer
        // list rather than the leader provider.
        if let Some(client) = &mut self.store {
            info!("Starting store client ...");
            client.start(peers.clone()).await?;
        }

        if let Some(client) = &self.procedure {
            info!("Starting procedure client ...");
            client.start_with(leader_provider.clone()).await?;
        }

        if let Some(client) = &mut self.cluster {
            info!("Starting cluster client ...");
            client.start_with(leader_provider).await?;
        }
        Ok(())
    }

    /// Ask the leader address of `metasrv`, and the heartbeat component
    /// needs to create a bidirectional streaming to the leader.
    pub async fn ask_leader(&self) -> Result<String> {
        self.heartbeat_client()?.ask_leader().await
    }

    /// Pulls a configuration payload from Metasrv via the config client and
    /// deserializes it with the given deserializer.
    pub async fn pull_config<T, U>(&self, deserializer: T) -> Result<U>
    where
        T: PluginOptionsDeserializer<U>,
        U: DeserializeOwned,
    {
        let res = self.config_client()?.pull_config().await?;
        let v = deserializer
            .deserialize(&res.payload)
            .context(ConvertMetaConfigSnafu)?;
        Ok(v)
    }

    /// Returns a heartbeat bidirectional streaming: (sender, receiver), the
    /// other end is the leader of `metasrv`.
    ///
    /// The `datanode` needs to use the sender to continuously send heartbeat
    /// packets (some self-state data), and the receiver can receive a response
    /// from "metasrv" (which may contain some scheduling instructions).
    ///
    /// Returns the heartbeat sender, stream, and configuration received from Metasrv.
    pub async fn heartbeat(&self) -> Result<(HeartbeatSender, HeartbeatStream, HeartbeatConfig)> {
        self.heartbeat_client()?.heartbeat().await
    }

    /// Range gets the keys in the range from the key-value store.
    pub async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
        self.store_client()?
            .range(req.into())
            .await?
            .try_into()
            .context(ConvertMetaResponseSnafu)
    }

    /// Put puts the given key into the key-value store.
    pub async fn put(&self, req: PutRequest) -> Result<PutResponse> {
        self.store_client()?
            .put(req.into())
            .await?
            .try_into()
            .context(ConvertMetaResponseSnafu)
    }

    /// BatchGet atomically get values by the given keys from the key-value store.
    pub async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
        self.store_client()?
            .batch_get(req.into())
            .await?
            .try_into()
            .context(ConvertMetaResponseSnafu)
    }

    /// BatchPut atomically puts the given keys into the key-value store.
    pub async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
        self.store_client()?
            .batch_put(req.into())
            .await?
            .try_into()
            .context(ConvertMetaResponseSnafu)
    }

    /// BatchDelete atomically deletes the given keys from the key-value store.
    pub async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
        self.store_client()?
            .batch_delete(req.into())
            .await?
            .try_into()
            .context(ConvertMetaResponseSnafu)
    }

    /// CompareAndPut atomically puts the value to the given updated
    /// value if the current value == the expected value.
    pub async fn compare_and_put(
        &self,
        req: CompareAndPutRequest,
    ) -> Result<CompareAndPutResponse> {
        self.store_client()?
            .compare_and_put(req.into())
            .await?
            .try_into()
            .context(ConvertMetaResponseSnafu)
    }

    /// DeleteRange deletes the given range from the key-value store.
    pub async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
        self.store_client()?
            .delete_range(req.into())
            .await?
            .try_into()
            .context(ConvertMetaResponseSnafu)
    }

    /// Query the procedure state by its id.
    pub async fn query_procedure_state(&self, pid: &str) -> Result<ProcedureStateResponse> {
        self.procedure_client()?.query_procedure_state(pid).await
    }

    /// Submit a region migration task.
    pub async fn migrate_region(
        &self,
        request: MigrateRegionRequest,
    ) -> Result<MigrateRegionResponse> {
        self.procedure_client()?
            .migrate_region(
                request.region_id,
                request.from_peer,
                request.to_peer,
                request.timeout,
            )
            .await
    }

    /// Reconcile the procedure state.
    pub async fn reconcile(&self, request: ReconcileRequest) -> Result<ReconcileResponse> {
        self.procedure_client()?.reconcile(request).await
    }

    /// Submit a DDL task
    pub async fn submit_ddl_task(
        &self,
        req: SubmitDdlTaskRequest,
    ) -> Result<SubmitDdlTaskResponse> {
        let res = self
            .procedure_client()?
            .submit_ddl_task(req.try_into().context(ConvertMetaRequestSnafu)?)
            .await?
            .try_into()
            .context(ConvertMetaResponseSnafu)?;

        Ok(res)
    }

    /// Returns a clone of the heartbeat sub-client, or a `NotStarted` error
    /// if it was not enabled at build time.
    pub fn heartbeat_client(&self) -> Result<HeartbeatClient> {
        self.heartbeat.clone().context(NotStartedSnafu {
            name: "heartbeat_client",
        })
    }

    /// Returns a clone of the config sub-client, or a `NotStarted` error
    /// if it was not enabled at build time.
    pub fn config_client(&self) -> Result<ConfigClient> {
        self.config.clone().context(NotStartedSnafu {
            name: "config_client",
        })
    }

    /// Returns a clone of the store sub-client, or a `NotStarted` error
    /// if it was not enabled at build time.
    pub fn store_client(&self) -> Result<StoreClient> {
        self.store.clone().context(NotStartedSnafu {
            name: "store_client",
        })
    }

    /// Returns a clone of the procedure sub-client, or a `NotStarted` error
    /// if it was not enabled at build time.
    pub fn procedure_client(&self) -> Result<ProcedureClient> {
        self.procedure.clone().context(NotStartedSnafu {
            name: "procedure_client",
        })
    }

    /// Returns a clone of the cluster sub-client, or a `NotStarted` error
    /// if it was not enabled at build time.
    pub fn cluster_client(&self) -> Result<ClusterClient> {
        self.cluster.clone().context(NotStartedSnafu {
            name: "cluster_client",
        })
    }

    /// Returns the configuration of the shared channel manager.
    pub fn channel_config(&self) -> &ChannelConfig {
        self.channel_manager.config()
    }

    /// Returns this client's member id.
    pub fn id(&self) -> Id {
        self.id
    }
}
771
772#[cfg(test)]
773mod tests {
774    use std::sync::atomic::{AtomicUsize, Ordering};
775
776    use api::v1::meta::{HeartbeatRequest, Peer};
777    use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
778    use rand::Rng;
779
780    use super::*;
781    use crate::error;
782    use crate::mocks::{self, MockMetaContext};
783
    /// Prefix shared by all keys written in these tests so they can be
    /// cleared between runs.
    const TEST_KEY_PREFIX: &str = "__unit_test__meta__";
785
    /// Test harness pairing a [MetaClient] with its mock backend, with all
    /// keys namespaced under `ns`.
    struct TestClient {
        // Namespace embedded in every key this harness writes.
        ns: String,
        // Client under test, backed by the mock Metasrv.
        client: MetaClient,
        // Handle to the mock backend for direct inspection.
        meta_ctx: MockMetaContext,
    }
791
792    impl TestClient {
793        async fn new(ns: impl Into<String>) -> Self {
794            // can also test with etcd: mocks::mock_client_with_etcdstore("127.0.0.1:2379").await;
795            let (client, meta_ctx) = mocks::mock_client_with_memstore().await;
796            Self {
797                ns: ns.into(),
798                client,
799                meta_ctx,
800            }
801        }
802
803        fn key(&self, name: &str) -> Vec<u8> {
804            format!("{}-{}-{}", TEST_KEY_PREFIX, self.ns, name).into_bytes()
805        }
806
807        async fn gen_data(&self) {
808            for i in 0..10 {
809                let req = PutRequest::new()
810                    .with_key(self.key(&format!("key-{i}")))
811                    .with_value(format!("{}-{}", "value", i).into_bytes())
812                    .with_prev_kv();
813                let res = self.client.put(req).await;
814                let _ = res.unwrap();
815            }
816        }
817
818        async fn clear_data(&self) {
819            let req =
820                DeleteRangeRequest::new().with_prefix(format!("{}-{}", TEST_KEY_PREFIX, self.ns));
821            let res = self.client.delete_range(req).await;
822            let _ = res.unwrap();
823        }
824
825        #[allow(dead_code)]
826        fn kv_backend(&self) -> KvBackendRef {
827            self.meta_ctx.kv_backend.clone()
828        }
829
830        fn in_memory(&self) -> Option<ResettableKvBackendRef> {
831            self.meta_ctx.in_memory.clone()
832        }
833    }
834
835    async fn new_client(ns: impl Into<String>) -> TestClient {
836        let client = TestClient::new(ns).await;
837        client.clear_data().await;
838        client
839    }
840
841    #[tokio::test]
842    async fn test_meta_client_builder() {
843        let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
844
845        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
846            .enable_heartbeat()
847            .build();
848        let _ = meta_client.heartbeat_client().unwrap();
849        assert!(meta_client.store_client().is_err());
850        meta_client.start(urls).await.unwrap();
851
852        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode).build();
853        assert!(meta_client.heartbeat_client().is_err());
854        assert!(meta_client.store_client().is_err());
855        meta_client.start(urls).await.unwrap();
856
857        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
858            .enable_store()
859            .build();
860        assert!(meta_client.heartbeat_client().is_err());
861        let _ = meta_client.store_client().unwrap();
862        meta_client.start(urls).await.unwrap();
863
864        let mut meta_client = MetaClientBuilder::new(2, Role::Datanode)
865            .enable_heartbeat()
866            .enable_store()
867            .build();
868        assert_eq!(2, meta_client.id());
869        assert_eq!(2, meta_client.id());
870        let _ = meta_client.heartbeat_client().unwrap();
871        let _ = meta_client.store_client().unwrap();
872        meta_client.start(urls).await.unwrap();
873    }
874
875    #[tokio::test]
876    async fn test_not_start_heartbeat_client() {
877        let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
878        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
879            .enable_store()
880            .build();
881        meta_client.start(urls).await.unwrap();
882        let res = meta_client.ask_leader().await;
883        assert!(matches!(res.err(), Some(error::Error::NotStarted { .. })));
884    }
885
886    #[tokio::test]
887    async fn test_not_start_store_client() {
888        let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
889        let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
890            .enable_heartbeat()
891            .build();
892
893        meta_client.start(urls).await.unwrap();
894        let res = meta_client.put(PutRequest::default()).await;
895        assert!(matches!(res.err(), Some(error::Error::NotStarted { .. })));
896    }
897
898    #[tokio::test]
899    async fn test_ask_leader() {
900        let tc = new_client("test_ask_leader").await;
901        tc.client.ask_leader().await.unwrap();
902    }
903
    /// Sends a burst of heartbeat requests and checks that every one of them
    /// (plus the initial response) is observed on the response stream.
    #[tokio::test]
    async fn test_heartbeat() {
        let tc = new_client("test_heartbeat").await;
        let (sender, mut receiver, _config) = tc.client.heartbeat().await.unwrap();
        // send heartbeats

        // Producer task: sends five heartbeat requests, counting each send.
        // `sender` is moved into the task and dropped when it finishes, which
        // ends the outbound request stream (presumably causing the server to
        // close the response stream so the consumer loop below terminates —
        // the mock's behavior here is assumed, not visible in this file).
        let request_sent = Arc::new(AtomicUsize::new(0));
        let request_sent_clone = request_sent.clone();
        let _handle = tokio::spawn(async move {
            for _ in 0..5 {
                let req = HeartbeatRequest {
                    peer: Some(Peer {
                        id: 1,
                        addr: "meta_client_peer".to_string(),
                    }),
                    ..Default::default()
                };
                sender.send(req).await.unwrap();
                request_sent_clone.fetch_add(1, Ordering::Relaxed);
            }
        });

        // Consumer task: counts every response until the stream ends.
        let heartbeat_count = Arc::new(AtomicUsize::new(0));
        let heartbeat_count_clone = heartbeat_count.clone();
        let handle = tokio::spawn(async move {
            while let Some(_resp) = receiver.message().await.unwrap() {
                heartbeat_count_clone.fetch_add(1, Ordering::Relaxed);
            }
        });

        // Awaiting the consumer guarantees all responses were counted before
        // the final comparison; by then the producer has sent all requests,
        // since the stream only ends after `sender` is dropped.
        handle.await.unwrap();
        //+1 for the initial response
        assert_eq!(
            request_sent.load(Ordering::Relaxed) + 1,
            heartbeat_count.load(Ordering::Relaxed)
        );
    }
941
942    #[tokio::test]
943    async fn test_range_get() {
944        let tc = new_client("test_range_get").await;
945        tc.gen_data().await;
946
947        let key = tc.key("key-0");
948        let req = RangeRequest::new().with_key(key.as_slice());
949        let res = tc.client.range(req).await;
950        let mut kvs = res.unwrap().take_kvs();
951        assert_eq!(1, kvs.len());
952        let mut kv = kvs.pop().unwrap();
953        assert_eq!(key, kv.take_key());
954        assert_eq!(b"value-0".to_vec(), kv.take_value());
955    }
956
957    #[tokio::test]
958    async fn test_range_get_prefix() {
959        let tc = new_client("test_range_get_prefix").await;
960        tc.gen_data().await;
961
962        let req = RangeRequest::new().with_prefix(tc.key("key-"));
963        let res = tc.client.range(req).await;
964        let kvs = res.unwrap().take_kvs();
965        assert_eq!(10, kvs.len());
966        for (i, mut kv) in kvs.into_iter().enumerate() {
967            assert_eq!(tc.key(&format!("key-{i}")), kv.take_key());
968            assert_eq!(format!("{}-{}", "value", i).into_bytes(), kv.take_value());
969        }
970    }
971
972    #[tokio::test]
973    async fn test_range() {
974        let tc = new_client("test_range").await;
975        tc.gen_data().await;
976
977        let req = RangeRequest::new().with_range(tc.key("key-5"), tc.key("key-8"));
978        let res = tc.client.range(req).await;
979        let kvs = res.unwrap().take_kvs();
980        assert_eq!(3, kvs.len());
981        for (i, mut kv) in kvs.into_iter().enumerate() {
982            assert_eq!(tc.key(&format!("key-{}", i + 5)), kv.take_key());
983            assert_eq!(
984                format!("{}-{}", "value", i + 5).into_bytes(),
985                kv.take_value()
986            );
987        }
988    }
989
990    #[tokio::test]
991    async fn test_range_keys_only() {
992        let tc = new_client("test_range_keys_only").await;
993        tc.gen_data().await;
994
995        let req = RangeRequest::new()
996            .with_range(tc.key("key-5"), tc.key("key-8"))
997            .with_keys_only();
998        let res = tc.client.range(req).await;
999        let kvs = res.unwrap().take_kvs();
1000        assert_eq!(3, kvs.len());
1001        for (i, mut kv) in kvs.into_iter().enumerate() {
1002            assert_eq!(tc.key(&format!("key-{}", i + 5)), kv.take_key());
1003            assert!(kv.take_value().is_empty());
1004        }
1005    }
1006
1007    #[tokio::test]
1008    async fn test_put() {
1009        let tc = new_client("test_put").await;
1010
1011        let req = PutRequest::new()
1012            .with_key(tc.key("key"))
1013            .with_value(b"value".to_vec());
1014        let res = tc.client.put(req).await;
1015        assert!(res.unwrap().prev_kv.is_none());
1016    }
1017
1018    #[tokio::test]
1019    async fn test_put_with_prev_kv() {
1020        let tc = new_client("test_put_with_prev_kv").await;
1021
1022        let key = tc.key("key");
1023        let req = PutRequest::new()
1024            .with_key(key.as_slice())
1025            .with_value(b"value".to_vec())
1026            .with_prev_kv();
1027        let res = tc.client.put(req).await;
1028        assert!(res.unwrap().prev_kv.is_none());
1029
1030        let req = PutRequest::new()
1031            .with_key(key.as_slice())
1032            .with_value(b"value1".to_vec())
1033            .with_prev_kv();
1034        let res = tc.client.put(req).await;
1035        let mut kv = res.unwrap().prev_kv.unwrap();
1036        assert_eq!(key, kv.take_key());
1037        assert_eq!(b"value".to_vec(), kv.take_value());
1038    }
1039
1040    #[tokio::test]
1041    async fn test_batch_put() {
1042        let tc = new_client("test_batch_put").await;
1043
1044        let mut req = BatchPutRequest::new();
1045        for i in 0..275 {
1046            req = req.add_kv(
1047                tc.key(&format!("key-{}", i)),
1048                format!("value-{}", i).into_bytes(),
1049            );
1050        }
1051
1052        let res = tc.client.batch_put(req).await;
1053        assert_eq!(0, res.unwrap().take_prev_kvs().len());
1054
1055        let req = RangeRequest::new().with_prefix(tc.key("key-"));
1056        let res = tc.client.range(req).await;
1057        let kvs = res.unwrap().take_kvs();
1058        assert_eq!(275, kvs.len());
1059    }
1060
1061    #[tokio::test]
1062    async fn test_batch_get() {
1063        let tc = new_client("test_batch_get").await;
1064        tc.gen_data().await;
1065
1066        let mut req = BatchGetRequest::default();
1067        for i in 0..256 {
1068            req = req.add_key(tc.key(&format!("key-{}", i)));
1069        }
1070        let res = tc.client.batch_get(req).await.unwrap();
1071        assert_eq!(10, res.kvs.len());
1072
1073        let req = BatchGetRequest::default()
1074            .add_key(tc.key("key-1"))
1075            .add_key(tc.key("key-999"));
1076        let res = tc.client.batch_get(req).await.unwrap();
1077        assert_eq!(1, res.kvs.len());
1078    }
1079
1080    #[tokio::test]
1081    async fn test_batch_put_with_prev_kv() {
1082        let tc = new_client("test_batch_put_with_prev_kv").await;
1083
1084        let key = tc.key("key");
1085        let key2 = tc.key("key2");
1086        let req = BatchPutRequest::new().add_kv(key.as_slice(), b"value".to_vec());
1087        let res = tc.client.batch_put(req).await;
1088        assert_eq!(0, res.unwrap().take_prev_kvs().len());
1089
1090        let req = BatchPutRequest::new()
1091            .add_kv(key.as_slice(), b"value-".to_vec())
1092            .add_kv(key2.as_slice(), b"value2-".to_vec())
1093            .with_prev_kv();
1094        let res = tc.client.batch_put(req).await;
1095        let mut kvs = res.unwrap().take_prev_kvs();
1096        assert_eq!(1, kvs.len());
1097        let mut kv = kvs.pop().unwrap();
1098        assert_eq!(key, kv.take_key());
1099        assert_eq!(b"value".to_vec(), kv.take_value());
1100    }
1101
1102    #[tokio::test]
1103    async fn test_compare_and_put() {
1104        let tc = new_client("test_compare_and_put").await;
1105
1106        let key = tc.key("key");
1107        let req = CompareAndPutRequest::new()
1108            .with_key(key.as_slice())
1109            .with_expect(b"expect".to_vec())
1110            .with_value(b"value".to_vec());
1111        let res = tc.client.compare_and_put(req).await;
1112        assert!(!res.unwrap().is_success());
1113
1114        // create if absent
1115        let req = CompareAndPutRequest::new()
1116            .with_key(key.as_slice())
1117            .with_value(b"value".to_vec());
1118        let res = tc.client.compare_and_put(req).await;
1119        let mut res = res.unwrap();
1120        assert!(res.is_success());
1121        assert!(res.take_prev_kv().is_none());
1122
1123        // compare and put fail
1124        let req = CompareAndPutRequest::new()
1125            .with_key(key.as_slice())
1126            .with_expect(b"not_eq".to_vec())
1127            .with_value(b"value2".to_vec());
1128        let res = tc.client.compare_and_put(req).await;
1129        let mut res = res.unwrap();
1130        assert!(!res.is_success());
1131        assert_eq!(b"value".to_vec(), res.take_prev_kv().unwrap().take_value());
1132
1133        // compare and put success
1134        let req = CompareAndPutRequest::new()
1135            .with_key(key.as_slice())
1136            .with_expect(b"value".to_vec())
1137            .with_value(b"value2".to_vec());
1138        let res = tc.client.compare_and_put(req).await;
1139        let mut res = res.unwrap();
1140        assert!(res.is_success());
1141
1142        // If compare-and-put is success, previous value doesn't need to be returned.
1143        assert!(res.take_prev_kv().is_none());
1144    }
1145
1146    #[tokio::test]
1147    async fn test_delete_with_key() {
1148        let tc = new_client("test_delete_with_key").await;
1149        tc.gen_data().await;
1150
1151        let req = DeleteRangeRequest::new()
1152            .with_key(tc.key("key-0"))
1153            .with_prev_kv();
1154        let res = tc.client.delete_range(req).await;
1155        let mut res = res.unwrap();
1156        assert_eq!(1, res.deleted());
1157        let mut kvs = res.take_prev_kvs();
1158        assert_eq!(1, kvs.len());
1159        let mut kv = kvs.pop().unwrap();
1160        assert_eq!(b"value-0".to_vec(), kv.take_value());
1161    }
1162
1163    #[tokio::test]
1164    async fn test_delete_with_prefix() {
1165        let tc = new_client("test_delete_with_prefix").await;
1166        tc.gen_data().await;
1167
1168        let req = DeleteRangeRequest::new()
1169            .with_prefix(tc.key("key-"))
1170            .with_prev_kv();
1171        let res = tc.client.delete_range(req).await;
1172        let mut res = res.unwrap();
1173        assert_eq!(10, res.deleted());
1174        let kvs = res.take_prev_kvs();
1175        assert_eq!(10, kvs.len());
1176        for (i, mut kv) in kvs.into_iter().enumerate() {
1177            assert_eq!(format!("{}-{}", "value", i).into_bytes(), kv.take_value());
1178        }
1179    }
1180
1181    #[tokio::test]
1182    async fn test_delete_with_range() {
1183        let tc = new_client("test_delete_with_range").await;
1184        tc.gen_data().await;
1185
1186        let req = DeleteRangeRequest::new()
1187            .with_range(tc.key("key-2"), tc.key("key-7"))
1188            .with_prev_kv();
1189        let res = tc.client.delete_range(req).await;
1190        let mut res = res.unwrap();
1191        assert_eq!(5, res.deleted());
1192        let kvs = res.take_prev_kvs();
1193        assert_eq!(5, kvs.len());
1194        for (i, mut kv) in kvs.into_iter().enumerate() {
1195            assert_eq!(
1196                format!("{}-{}", "value", i + 2).into_bytes(),
1197                kv.take_value()
1198            );
1199        }
1200    }
1201
    /// Decoder for `PaginationStream` that discards each kv and always succeeds;
    /// the adaptive-range test only cares about how many entries come back.
    fn mock_decoder(_kv: KeyValue) -> MetaResult<()> {
        Ok(())
    }
1205
1206    #[tokio::test]
1207    async fn test_cluster_client_adaptive_range() {
1208        let tx = new_client("test_cluster_client").await;
1209        let in_memory = tx.in_memory().unwrap();
1210        let cluster_client = tx.client.cluster_client().unwrap();
1211        let mut rng = rand::rng();
1212
1213        // Generates rough 10MB data, which is larger than the default grpc message size limit.
1214        for i in 0..10 {
1215            let data: Vec<u8> = (0..1024 * 1024).map(|_| rng.random()).collect();
1216            in_memory
1217                .put(
1218                    PutRequest::new()
1219                        .with_key(format!("__prefix/{i}").as_bytes())
1220                        .with_value(data.clone()),
1221                )
1222                .await
1223                .unwrap();
1224        }
1225
1226        let req = RangeRequest::new().with_prefix(b"__prefix/");
1227        let stream =
1228            PaginationStream::new(Arc::new(cluster_client), req, 10, mock_decoder).into_stream();
1229
1230        let res = stream.try_collect::<Vec<_>>().await.unwrap();
1231        assert_eq!(10, res.len());
1232    }
1233}