// bindy/reconcilers/bind9instance/status_helpers.rs

// Copyright (c) 2025 Erick Bourgeois, firestoned
// SPDX-License-Identifier: MIT

//! Status calculation and update helpers for `Bind9Instance` resources.
//!
//! This module handles computing instance status from deployment/pod health and
//! patching the instance status in Kubernetes.
9#[allow(clippy::wildcard_imports)]
10use super::types::*;
11use crate::reconcilers::pagination::list_all_paginated;
12
13/// Update instance status from deployment pod health.
14///
15/// Queries the Deployment and its Pods to determine readiness, then updates
16/// the instance status with detailed per-pod conditions.
17///
18/// # Arguments
19///
20/// * `client` - Kubernetes API client
21/// * `namespace` - Instance namespace
22/// * `name` - Instance name
23/// * `instance` - The `Bind9Instance` resource
24/// * `cluster_ref` - Optional cluster reference to include in status
25///
26/// # Errors
27///
28/// Returns an error if Kubernetes API operations fail or status patching fails.
29#[allow(clippy::too_many_lines)]
30pub(super) async fn update_status_from_deployment(
31    client: &Client,
32    namespace: &str,
33    name: &str,
34    instance: &Bind9Instance,
35    cluster_ref: Option<ClusterReference>,
36) -> Result<()> {
37    let deploy_api: Api<Deployment> = Api::namespaced(client.clone(), namespace);
38    let pod_api: Api<Pod> = Api::namespaced(client.clone(), namespace);
39
40    match deploy_api.get(name).await {
41        Ok(deployment) => {
42            let actual_replicas = deployment
43                .spec
44                .as_ref()
45                .and_then(|spec| spec.replicas)
46                .unwrap_or(0);
47
48            // List pods for this deployment using label selector
49            // Use the standard Kubernetes label for instance matching
50            let label_selector = format!("{}={}", crate::labels::K8S_INSTANCE, name);
51            let list_params = ListParams::default().labels(&label_selector);
52            let all_pods = list_all_paginated(&pod_api, list_params).await?;
53
54            // Filter to only non-terminating pods (exclude pods with deletionTimestamp)
55            // This prevents counting old pods during rollouts
56            let pods: Vec<_> = all_pods
57                .into_iter()
58                .filter(|pod| pod.metadata.deletion_timestamp.is_none())
59                .collect();
60
61            // Create pod-level conditions
62            let mut pod_conditions = Vec::new();
63            let mut ready_pod_count = 0;
64
65            for (index, pod) in pods.iter().enumerate() {
66                let pod_name = pod.metadata.name.as_deref().unwrap_or("unknown");
67                // Using map_or for explicit false default on None - more readable than is_some_and
68                #[allow(clippy::unnecessary_map_or)]
69                let is_pod_ready = pod
70                    .status
71                    .as_ref()
72                    .and_then(|status| status.conditions.as_ref())
73                    .map_or(false, |conditions| {
74                        conditions
75                            .iter()
76                            .any(|c| c.type_ == "Ready" && c.status == "True")
77                    });
78
79                if is_pod_ready {
80                    ready_pod_count += 1;
81                }
82
83                let (status, reason, message) = if is_pod_ready {
84                    ("True", REASON_READY, format!("Pod {pod_name} is ready"))
85                } else {
86                    (
87                        "False",
88                        REASON_NOT_READY,
89                        format!("Pod {pod_name} is not ready"),
90                    )
91                };
92
93                pod_conditions.push(Condition {
94                    r#type: pod_condition_type(index),
95                    status: status.to_string(),
96                    reason: Some(reason.to_string()),
97                    message: Some(message),
98                    last_transition_time: Some(Utc::now().to_rfc3339()),
99                });
100            }
101
102            // Create encompassing Ready condition
103            let (encompassing_status, encompassing_reason, encompassing_message) =
104                if ready_pod_count == 0 && actual_replicas > 0 {
105                    (
106                        "False",
107                        REASON_NOT_READY,
108                        "Waiting for pods to become ready".to_string(),
109                    )
110                } else if ready_pod_count == actual_replicas && actual_replicas > 0 {
111                    (
112                        "True",
113                        REASON_ALL_READY,
114                        format!("All {ready_pod_count} pods are ready"),
115                    )
116                } else if ready_pod_count > 0 {
117                    (
118                        "False",
119                        REASON_PARTIALLY_READY,
120                        format!("{ready_pod_count}/{actual_replicas} pods are ready"),
121                    )
122                } else {
123                    ("False", REASON_NOT_READY, "No pods are ready".to_string())
124                };
125
126            let encompassing_condition = Condition {
127                r#type: CONDITION_TYPE_READY.to_string(),
128                status: encompassing_status.to_string(),
129                reason: Some(encompassing_reason.to_string()),
130                message: Some(encompassing_message),
131                last_transition_time: Some(Utc::now().to_rfc3339()),
132            };
133
134            // Combine encompassing condition + pod-level conditions
135            let mut all_conditions = vec![encompassing_condition];
136            all_conditions.extend(pod_conditions);
137
138            // Update status with all conditions
139            update_status(client, instance, all_conditions, cluster_ref).await?;
140        }
141        Err(e) => {
142            warn!(
143                "Failed to get Deployment status for {}/{}: {}",
144                namespace, name, e
145            );
146            // Set status as unknown if we can't check deployment
147            let unknown_condition = Condition {
148                r#type: CONDITION_TYPE_READY.to_string(),
149                status: "Unknown".to_string(),
150                reason: Some(REASON_NOT_READY.to_string()),
151                message: Some("Unable to determine deployment status".to_string()),
152                last_transition_time: Some(Utc::now().to_rfc3339()),
153            };
154            update_status(client, instance, vec![unknown_condition], cluster_ref).await?;
155        }
156    }
157
158    Ok(())
159}
160
161/// Update the status of a `Bind9Instance` with multiple conditions.
162///
163/// NOTE: This function does NOT update `status.zones`. Zone reconciliation is handled
164/// separately by `reconcile_instance_zones()` which is called:
165/// 1. From the main reconcile loop after deployment changes
166/// 2. From the `DNSZone` watcher when zone selections change
167///
168/// # Arguments
169///
170/// * `client` - Kubernetes API client
171/// * `instance` - The instance to update
172/// * `conditions` - Vector of status conditions to set
173/// * `cluster_ref` - Optional cluster reference
174///
175/// # Errors
176///
177/// Returns an error if status patching fails.
178pub(super) async fn update_status(
179    client: &Client,
180    instance: &Bind9Instance,
181    conditions: Vec<Condition>,
182    cluster_ref: Option<ClusterReference>,
183) -> Result<()> {
184    let api: Api<Bind9Instance> =
185        Api::namespaced(client.clone(), &instance.namespace().unwrap_or_default());
186
187    // Preserve existing zones - zone reconciliation is handled separately
188    let zones = instance
189        .status
190        .as_ref()
191        .map(|s| s.zones.clone())
192        .unwrap_or_default();
193
194    // Compute zones_count from zones length
195    let zones_count = i32::try_from(zones.len()).ok();
196
197    // Check if status has actually changed (now including zones)
198    let current_status = &instance.status;
199    let status_changed =
200        if let Some(current) = current_status {
201            // Check if cluster_ref or zones changed
202            if current.cluster_ref != cluster_ref || current.zones != zones {
203                true
204            } else {
205                // Check if any condition changed
206                if current.conditions.len() == conditions.len() {
207                    // Compare each condition
208                    current.conditions.iter().zip(conditions.iter()).any(
209                        |(current_cond, new_cond)| {
210                            current_cond.r#type != new_cond.r#type
211                                || current_cond.status != new_cond.status
212                                || current_cond.message != new_cond.message
213                                || current_cond.reason != new_cond.reason
214                        },
215                    )
216                } else {
217                    true
218                }
219            }
220        } else {
221            // No status exists, need to update
222            true
223        };
224
225    // Only update if status has changed
226    if !status_changed {
227        debug!(
228            "Status unchanged for Bind9Instance {}/{}, skipping patch",
229            instance.namespace().unwrap_or_default(),
230            instance.name_any()
231        );
232        return Ok(());
233    }
234
235    let new_status = Bind9InstanceStatus {
236        conditions,
237        observed_generation: instance.metadata.generation,
238        service_address: None, // Will be populated when service is ready
239        cluster_ref,
240        zones,
241        zones_count,
242        rndc_key_rotation: None, // Will be populated by rotation reconciler
243    };
244
245    let patch = json!({ "status": new_status });
246    api.patch_status(
247        &instance.name_any(),
248        &PatchParams::default(),
249        &Patch::Merge(patch),
250    )
251    .await?;
252
253    Ok(())
254}
255
256#[cfg(test)]
257#[path = "status_helpers_tests.rs"]
258mod status_helpers_tests;