// bindy/reconcilers/bind9instance.rs

1// Copyright (c) 2025 Erick Bourgeois, firestoned
2// SPDX-License-Identifier: MIT
3
4//! BIND9 instance reconciliation logic.
5//!
6//! This module handles the lifecycle of BIND9 DNS server deployments in Kubernetes.
7//! It creates and manages Deployments, `ConfigMaps`, and Services for each `Bind9Instance`.
8
9use crate::bind9::Bind9Manager;
10use crate::bind9_resources::{
11    build_configmap, build_deployment, build_service, build_service_account,
12};
13use crate::constants::{API_GROUP_VERSION, DEFAULT_BIND9_VERSION, KIND_BIND9_INSTANCE};
14use crate::crd::{Bind9Cluster, Bind9Instance, Bind9InstanceStatus, Condition};
15use crate::labels::{BINDY_MANAGED_BY_LABEL, FINALIZER_BIND9_INSTANCE};
16use crate::reconcilers::finalizers::{ensure_finalizer, handle_deletion, FinalizerCleanup};
17use crate::reconcilers::resources::{create_or_apply, create_or_replace};
18use crate::status_reasons::{
19    pod_condition_type, CONDITION_TYPE_READY, REASON_ALL_READY, REASON_NOT_READY,
20    REASON_PARTIALLY_READY, REASON_READY,
21};
22use anyhow::Result;
23use chrono::Utc;
24use k8s_openapi::api::{
25    apps::v1::Deployment,
26    core::v1::{ConfigMap, Pod, Secret, Service, ServiceAccount},
27};
28use k8s_openapi::apimachinery::pkg::apis::meta::v1::OwnerReference;
29use kube::{
30    api::{ListParams, Patch, PatchParams, PostParams},
31    client::Client,
32    Api, ResourceExt,
33};
34use serde_json::json;
35use tracing::{debug, error, info, warn};
36
37/// Implement cleanup trait for `Bind9Instance` finalizer management
38#[async_trait::async_trait]
39impl FinalizerCleanup for Bind9Instance {
40    async fn cleanup(&self, client: &Client) -> Result<()> {
41        let namespace = self.namespace().unwrap_or_default();
42        let name = self.name_any();
43
44        // Check if this instance is managed by a Bind9Cluster
45        let is_managed: bool = self
46            .metadata
47            .labels
48            .as_ref()
49            .and_then(|labels| labels.get(BINDY_MANAGED_BY_LABEL))
50            .is_some();
51
52        if is_managed {
53            info!(
54                "Bind9Instance {}/{} is managed by a Bind9Cluster, skipping resource cleanup (cluster will handle it)",
55                namespace, name
56            );
57            Ok(())
58        } else {
59            info!(
60                "Running cleanup for standalone Bind9Instance {}/{}",
61                namespace, name
62            );
63            delete_resources(client, &namespace, &name).await
64        }
65    }
66}
67
/// Reconciles a `Bind9Instance` resource.
///
/// Creates or updates all Kubernetes resources needed to run a BIND9 DNS server:
/// - `ConfigMap` with BIND9 configuration files
/// - Deployment with BIND9 container pods
/// - Service for DNS traffic (TCP/UDP port 53)
///
/// Resource reconciliation is skipped when the spec generation is unchanged
/// AND the Deployment still exists; in that case only the status is refreshed.
///
/// # Arguments
///
/// * `client` - Kubernetes API client
/// * `instance` - The `Bind9Instance` resource to reconcile
///
/// # Returns
///
/// * `Ok(())` - If reconciliation succeeded
/// * `Err(_)` - If resource creation/update failed
///
/// # Example
///
/// ```rust,no_run
/// use bindy::reconcilers::reconcile_bind9instance;
/// use bindy::crd::Bind9Instance;
/// use kube::Client;
///
/// async fn handle_instance(instance: Bind9Instance) -> anyhow::Result<()> {
///     let client = Client::try_default().await?;
///     reconcile_bind9instance(client, instance).await?;
///     Ok(())
/// }
/// ```
///
/// # Errors
///
/// Returns an error if Kubernetes API operations fail or resource creation/update fails.
pub async fn reconcile_bind9instance(client: Client, instance: Bind9Instance) -> Result<()> {
    let namespace = instance.namespace().unwrap_or_default();
    let name = instance.name_any();

    info!("Reconciling Bind9Instance: {}/{}", namespace, name);
    debug!(
        namespace = %namespace,
        name = %name,
        generation = ?instance.metadata.generation,
        "Starting Bind9Instance reconciliation"
    );

    // Check if the instance is being deleted; deletion short-circuits the
    // rest of reconciliation and runs the finalizer path instead.
    if instance.metadata.deletion_timestamp.is_some() {
        return handle_deletion(&client, &instance, FINALIZER_BIND9_INSTANCE).await;
    }

    // Add finalizer if not present
    ensure_finalizer(&client, &instance, FINALIZER_BIND9_INSTANCE).await?;

    let spec = &instance.spec;
    // Defaults: 1 replica, operator-wide default BIND9 image version.
    let replicas = spec.replicas.unwrap_or(1);
    let version = spec.version.as_deref().unwrap_or(DEFAULT_BIND9_VERSION);

    debug!(
        cluster_ref = %spec.cluster_ref,
        replicas,
        version = %version,
        role = ?spec.role,
        "Instance configuration"
    );

    info!(
        "Bind9Instance {} configured with {} replicas, version {}",
        name, replicas, version
    );

    // Check if spec has changed using the standard generation check
    let current_generation = instance.metadata.generation;
    let observed_generation = instance.status.as_ref().and_then(|s| s.observed_generation);

    // Check if resources actually exist (drift detection)
    let deployment_exists = {
        let deployment_api: Api<Deployment> = Api::namespaced(client.clone(), &namespace);
        deployment_api.get(&name).await.is_ok()
    };

    // Only reconcile resources if:
    // 1. Spec changed (generation mismatch), OR
    // 2. We haven't processed this resource yet (no observed_generation), OR
    // 3. Resources are missing (drift detected)
    let should_reconcile =
        crate::reconcilers::should_reconcile(current_generation, observed_generation);

    if !should_reconcile && deployment_exists {
        debug!(
            "Spec unchanged (generation={:?}) and resources exist, skipping resource reconciliation",
            current_generation
        );
        // Update status from current deployment state (only patches if status changed)
        update_status_from_deployment(&client, &namespace, &name, &instance, replicas).await?;
        return Ok(());
    }

    if !should_reconcile && !deployment_exists {
        info!("Spec unchanged but Deployment missing - drift detected, reconciling resources");
    }

    debug!(
        "Reconciliation needed: current_generation={:?}, observed_generation={:?}",
        current_generation, observed_generation
    );

    // Create or update resources
    match create_or_update_resources(&client, &namespace, &name, &instance).await {
        Ok(()) => {
            info!(
                "Successfully created/updated resources for {}/{}",
                namespace, name
            );

            // Update status based on actual deployment state
            update_status_from_deployment(&client, &namespace, &name, &instance, replicas).await?;
        }
        Err(e) => {
            error!(
                "Failed to create/update resources for {}/{}: {}",
                namespace, name, e
            );

            // Update status to show error, then propagate it so the
            // controller runtime can requeue with backoff.
            let error_condition = Condition {
                r#type: CONDITION_TYPE_READY.to_string(),
                status: "False".to_string(),
                reason: Some(REASON_NOT_READY.to_string()),
                message: Some(format!("Failed to create resources: {e}")),
                last_transition_time: Some(Utc::now().to_rfc3339()),
            };
            update_status(&client, &instance, vec![error_condition], replicas, 0).await?;

            return Err(e);
        }
    }

    Ok(())
}
224
225/// Create or update all Kubernetes resources for a `Bind9Instance`
226async fn create_or_update_resources(
227    client: &Client,
228    namespace: &str,
229    name: &str,
230    instance: &Bind9Instance,
231) -> Result<()> {
232    debug!(
233        namespace = %namespace,
234        name = %name,
235        "Creating or updating Kubernetes resources"
236    );
237
238    // Fetch the Bind9Cluster (namespace-scoped) if referenced
239    let cluster = if instance.spec.cluster_ref.is_empty() {
240        debug!("No cluster reference, proceeding with standalone instance");
241        None
242    } else {
243        debug!(cluster_ref = %instance.spec.cluster_ref, "Fetching Bind9Cluster");
244        let cluster_api: Api<Bind9Cluster> = Api::namespaced(client.clone(), namespace);
245        match cluster_api.get(&instance.spec.cluster_ref).await {
246            Ok(cluster) => {
247                debug!(
248                    cluster_name = %instance.spec.cluster_ref,
249                    "Successfully fetched Bind9Cluster"
250                );
251                info!(
252                    "Found Bind9Cluster: {}/{}",
253                    namespace, instance.spec.cluster_ref
254                );
255                Some(cluster)
256            }
257            Err(e) => {
258                warn!(
259                    "Failed to fetch Bind9Cluster {}/{}: {}. Proceeding with instance-only config.",
260                    namespace, instance.spec.cluster_ref, e
261                );
262                None
263            }
264        }
265    };
266
267    // Fetch the ClusterBind9Provider (cluster-scoped) if no namespace-scoped cluster was found
268    let cluster_provider = if cluster.is_none() && !instance.spec.cluster_ref.is_empty() {
269        debug!(cluster_ref = %instance.spec.cluster_ref, "Fetching ClusterBind9Provider");
270        let cluster_provider_api: Api<crate::crd::ClusterBind9Provider> = Api::all(client.clone());
271        match cluster_provider_api.get(&instance.spec.cluster_ref).await {
272            Ok(gc) => {
273                debug!(
274                    cluster_name = %instance.spec.cluster_ref,
275                    "Successfully fetched ClusterBind9Provider"
276                );
277                info!("Found ClusterBind9Provider: {}", instance.spec.cluster_ref);
278                Some(gc)
279            }
280            Err(e) => {
281                warn!(
282                    "Failed to fetch ClusterBind9Provider {}: {}. Proceeding with instance-only config.",
283                    instance.spec.cluster_ref, e
284                );
285                None
286            }
287        }
288    } else {
289        None
290    };
291
292    // 1. Create/update ServiceAccount (must be first, as deployment will reference it)
293    debug!("Step 1: Creating/updating ServiceAccount");
294    create_or_update_service_account(client, namespace, instance).await?;
295
296    // 2. Create/update RNDC Secret (must be before deployment, as it will be mounted)
297    debug!("Step 2: Creating/updating RNDC Secret");
298    create_or_update_rndc_secret(client, namespace, name, instance).await?;
299
300    // 3. Create/update ConfigMap
301    debug!("Step 3: Creating/updating ConfigMap");
302    create_or_update_configmap(
303        client,
304        namespace,
305        name,
306        instance,
307        cluster.as_ref(),
308        cluster_provider.as_ref(),
309    )
310    .await?;
311
312    // 4. Create/update Deployment
313    debug!("Step 4: Creating/updating Deployment");
314    create_or_update_deployment(
315        client,
316        namespace,
317        name,
318        instance,
319        cluster.as_ref(),
320        cluster_provider.as_ref(),
321    )
322    .await?;
323
324    // 5. Create/update Service
325    debug!("Step 5: Creating/updating Service");
326    create_or_update_service(
327        client,
328        namespace,
329        name,
330        instance,
331        cluster.as_ref(),
332        cluster_provider.as_ref(),
333    )
334    .await?;
335
336    debug!("Successfully created/updated all resources");
337    Ok(())
338}
339
340/// Create or update the `ServiceAccount` for BIND9 pods
341async fn create_or_update_service_account(
342    client: &Client,
343    namespace: &str,
344    instance: &Bind9Instance,
345) -> Result<()> {
346    let service_account = build_service_account(namespace, instance);
347    create_or_apply(client, namespace, &service_account, "bindy-controller").await
348}
349
350/// Create or update the RNDC Secret for BIND9 remote control
351async fn create_or_update_rndc_secret(
352    client: &Client,
353    namespace: &str,
354    name: &str,
355    instance: &Bind9Instance,
356) -> Result<()> {
357    let secret_name = format!("{name}-rndc-key");
358    let secret_api: Api<Secret> = Api::namespaced(client.clone(), namespace);
359
360    // Check if secret already exists
361    match secret_api.get(&secret_name).await {
362        Ok(existing_secret) => {
363            // Secret exists, don't regenerate the key
364            info!(
365                "RNDC Secret {}/{} already exists, skipping",
366                namespace, secret_name
367            );
368            // Verify it has the required keys
369            if let Some(ref data) = existing_secret.data {
370                if !data.contains_key("key-name")
371                    || !data.contains_key("algorithm")
372                    || !data.contains_key("secret")
373                {
374                    warn!(
375                        "RNDC Secret {}/{} is missing required keys, will recreate",
376                        namespace, secret_name
377                    );
378                    // Delete and recreate
379                    secret_api
380                        .delete(&secret_name, &kube::api::DeleteParams::default())
381                        .await?;
382                } else {
383                    return Ok(());
384                }
385            } else {
386                warn!(
387                    "RNDC Secret {}/{} has no data, will recreate",
388                    namespace, secret_name
389                );
390                secret_api
391                    .delete(&secret_name, &kube::api::DeleteParams::default())
392                    .await?;
393            }
394        }
395        Err(_) => {
396            info!(
397                "RNDC Secret {}/{} does not exist, creating",
398                namespace, secret_name
399            );
400        }
401    }
402
403    // Generate new RNDC key
404    let mut key_data = Bind9Manager::generate_rndc_key();
405    key_data.name = "bindy-operator".to_string();
406
407    // Create Secret data
408    let secret_data = Bind9Manager::create_rndc_secret_data(&key_data);
409
410    // Create owner reference to the Bind9Instance
411    let owner_ref = OwnerReference {
412        api_version: API_GROUP_VERSION.to_string(),
413        kind: KIND_BIND9_INSTANCE.to_string(),
414        name: name.to_string(),
415        uid: instance.metadata.uid.clone().unwrap_or_default(),
416        controller: Some(true),
417        block_owner_deletion: Some(true),
418    };
419
420    // Build Secret object
421    let secret = Secret {
422        metadata: k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta {
423            name: Some(secret_name.clone()),
424            namespace: Some(namespace.to_string()),
425            owner_references: Some(vec![owner_ref]),
426            ..Default::default()
427        },
428        string_data: Some(secret_data),
429        ..Default::default()
430    };
431
432    // Create the secret
433    info!("Creating RNDC Secret {}/{}", namespace, secret_name);
434    secret_api.create(&PostParams::default(), &secret).await?;
435
436    Ok(())
437}
438
439/// Create or update the `ConfigMap` for BIND9 configuration
440///
441/// **Note:** If the instance belongs to a cluster (has `spec.clusterRef`), this function
442/// does NOT create an instance-specific `ConfigMap`. Instead, the instance will use the
443/// cluster-level shared `ConfigMap` created by the `Bind9Cluster` reconciler.
444async fn create_or_update_configmap(
445    client: &Client,
446    namespace: &str,
447    name: &str,
448    instance: &Bind9Instance,
449    cluster: Option<&Bind9Cluster>,
450    _cluster_provider: Option<&crate::crd::ClusterBind9Provider>,
451) -> Result<()> {
452    // If instance belongs to a cluster, skip ConfigMap creation
453    // The cluster creates a shared ConfigMap that all instances use
454    if !instance.spec.cluster_ref.is_empty() {
455        debug!(
456            "Instance {}/{} belongs to cluster '{}', using cluster ConfigMap",
457            namespace, name, instance.spec.cluster_ref
458        );
459        return Ok(());
460    }
461
462    // Instance is standalone (no clusterRef), create instance-specific ConfigMap
463    info!(
464        "Instance {}/{} is standalone, creating instance-specific ConfigMap",
465        namespace, name
466    );
467
468    // Get role-specific allow-transfer override from cluster config
469    // Note: We only reach this code for standalone instances (no clusterRef),
470    // so we should only have a namespace-scoped cluster here, not a global cluster
471    let role_allow_transfer = cluster.and_then(|c| match instance.spec.role {
472        crate::crd::ServerRole::Primary => c
473            .spec
474            .common
475            .primary
476            .as_ref()
477            .and_then(|p| p.allow_transfer.as_ref()),
478        crate::crd::ServerRole::Secondary => c
479            .spec
480            .common
481            .secondary
482            .as_ref()
483            .and_then(|s| s.allow_transfer.as_ref()),
484    });
485
486    // build_configmap returns None if custom ConfigMaps are referenced
487    if let Some(configmap) =
488        build_configmap(name, namespace, instance, cluster, role_allow_transfer)
489    {
490        let cm_api: Api<ConfigMap> = Api::namespaced(client.clone(), namespace);
491        let cm_name = format!("{name}-config");
492
493        if (cm_api.get(&cm_name).await).is_ok() {
494            // ConfigMap exists, update it
495            info!("Updating ConfigMap {}/{}", namespace, cm_name);
496            cm_api
497                .replace(&cm_name, &PostParams::default(), &configmap)
498                .await?;
499        } else {
500            // ConfigMap doesn't exist, create it
501            info!("Creating ConfigMap {}/{}", namespace, cm_name);
502            cm_api.create(&PostParams::default(), &configmap).await?;
503        }
504    } else {
505        info!(
506            "Using custom ConfigMaps for {}/{}, skipping ConfigMap creation",
507            namespace, name
508        );
509    }
510
511    Ok(())
512}
513
514/// Create or update the Deployment for BIND9
515async fn create_or_update_deployment(
516    client: &Client,
517    namespace: &str,
518    name: &str,
519    instance: &Bind9Instance,
520    cluster: Option<&Bind9Cluster>,
521    cluster_provider: Option<&crate::crd::ClusterBind9Provider>,
522) -> Result<()> {
523    let deployment = build_deployment(name, namespace, instance, cluster, cluster_provider);
524    create_or_replace(client, namespace, &deployment).await
525}
526
527/// Create or update the Service for BIND9
528async fn create_or_update_service(
529    client: &Client,
530    namespace: &str,
531    name: &str,
532    instance: &Bind9Instance,
533    cluster: Option<&Bind9Cluster>,
534    cluster_provider: Option<&crate::crd::ClusterBind9Provider>,
535) -> Result<()> {
536    // Get custom service spec based on instance role from cluster (namespace-scoped or global)
537    let custom_spec = cluster
538        .and_then(|c| match instance.spec.role {
539            crate::crd::ServerRole::Primary => c
540                .spec
541                .common
542                .primary
543                .as_ref()
544                .and_then(|p| p.service.as_ref()),
545            crate::crd::ServerRole::Secondary => c
546                .spec
547                .common
548                .secondary
549                .as_ref()
550                .and_then(|s| s.service.as_ref()),
551        })
552        .or_else(|| {
553            // Fall back to global cluster if no namespace-scoped cluster
554            cluster_provider.and_then(|gc| match instance.spec.role {
555                crate::crd::ServerRole::Primary => gc
556                    .spec
557                    .common
558                    .primary
559                    .as_ref()
560                    .and_then(|p| p.service.as_ref()),
561                crate::crd::ServerRole::Secondary => gc
562                    .spec
563                    .common
564                    .secondary
565                    .as_ref()
566                    .and_then(|s| s.service.as_ref()),
567            })
568        });
569
570    let service = build_service(name, namespace, instance, custom_spec);
571    let svc_api: Api<Service> = Api::namespaced(client.clone(), namespace);
572
573    if let Ok(existing) = svc_api.get(name).await {
574        // Service exists, update it (preserve clusterIP)
575        info!("Updating Service {}/{}", namespace, name);
576        let mut updated_service = service;
577        if let Some(ref mut spec) = updated_service.spec {
578            if let Some(ref existing_spec) = existing.spec {
579                spec.cluster_ip.clone_from(&existing_spec.cluster_ip);
580                spec.cluster_ips.clone_from(&existing_spec.cluster_ips);
581            }
582        }
583        svc_api
584            .replace(name, &PostParams::default(), &updated_service)
585            .await?;
586    } else {
587        // Service doesn't exist, create it
588        info!("Creating Service {}/{}", namespace, name);
589        svc_api.create(&PostParams::default(), &service).await?;
590    }
591
592    Ok(())
593}
594
595/// Deletes all resources associated with a `Bind9Instance`.
596///
597/// Cleans up Kubernetes resources in reverse order:
598/// 1. Service
599/// 2. Deployment
600/// 3. `ConfigMap`
601///
602/// # Arguments
603///
604/// * `client` - Kubernetes API client
605/// * `instance` - The `Bind9Instance` resource to delete
606///
607/// # Returns
608///
609/// * `Ok(())` - If deletion succeeded or resources didn't exist
610/// * `Err(_)` - If a critical error occurred during deletion
611///
612/// # Errors
613///
614/// Returns an error if Kubernetes API operations fail during resource deletion.
615pub async fn delete_bind9instance(client: Client, instance: Bind9Instance) -> Result<()> {
616    let namespace = instance.namespace().unwrap_or_default();
617    let name = instance.name_any();
618
619    info!("Deleting Bind9Instance: {}/{}", namespace, name);
620
621    // Delete resources in reverse order (Service, Deployment, ConfigMap)
622    delete_resources(&client, &namespace, &name).await?;
623
624    info!("Successfully deleted resources for {}/{}", namespace, name);
625
626    Ok(())
627}
628
629/// Delete all Kubernetes resources for a `Bind9Instance`
630async fn delete_resources(client: &Client, namespace: &str, name: &str) -> Result<()> {
631    let delete_params = kube::api::DeleteParams::default();
632
633    // 1. Delete Service (if it exists)
634    let svc_api: Api<Service> = Api::namespaced(client.clone(), namespace);
635    match svc_api.delete(name, &delete_params).await {
636        Ok(_) => info!("Deleted Service {}/{}", namespace, name),
637        Err(e) => warn!("Failed to delete Service {}/{}: {}", namespace, name, e),
638    }
639
640    // 2. Delete Deployment (if it exists)
641    let deploy_api: Api<Deployment> = Api::namespaced(client.clone(), namespace);
642    match deploy_api.delete(name, &delete_params).await {
643        Ok(_) => info!("Deleted Deployment {}/{}", namespace, name),
644        Err(e) => warn!("Failed to delete Deployment {}/{}: {}", namespace, name, e),
645    }
646
647    // 3. Delete ConfigMap (if it exists)
648    let cm_api: Api<ConfigMap> = Api::namespaced(client.clone(), namespace);
649    let cm_name = format!("{name}-config");
650    match cm_api.delete(&cm_name, &delete_params).await {
651        Ok(_) => info!("Deleted ConfigMap {}/{}", namespace, cm_name),
652        Err(e) => warn!(
653            "Failed to delete ConfigMap {}/{}: {}",
654            namespace, cm_name, e
655        ),
656    }
657
658    // 4. Delete RNDC Secret (if it exists)
659    let secret_api: Api<Secret> = Api::namespaced(client.clone(), namespace);
660    let secret_name = format!("{name}-rndc-key");
661    match secret_api.delete(&secret_name, &delete_params).await {
662        Ok(_) => info!("Deleted Secret {}/{}", namespace, secret_name),
663        Err(e) => warn!(
664            "Failed to delete Secret {}/{}: {}",
665            namespace, secret_name, e
666        ),
667    }
668
669    // 5. Delete ServiceAccount (if it exists and is owned by this instance)
670    let sa_api: Api<ServiceAccount> = Api::namespaced(client.clone(), namespace);
671    let sa_name = crate::constants::BIND9_SERVICE_ACCOUNT;
672    match sa_api.get(sa_name).await {
673        Ok(sa) => {
674            // Check if this instance owns the ServiceAccount
675            let is_owner = sa
676                .metadata
677                .owner_references
678                .as_ref()
679                .is_some_and(|owners| owners.iter().any(|owner| owner.name == name));
680
681            if is_owner {
682                match sa_api.delete(sa_name, &delete_params).await {
683                    Ok(_) => info!("Deleted ServiceAccount {}/{}", namespace, sa_name),
684                    Err(e) => warn!(
685                        "Failed to delete ServiceAccount {}/{}: {}",
686                        namespace, sa_name, e
687                    ),
688                }
689            } else {
690                debug!(
691                    "ServiceAccount {}/{} is not owned by this instance, skipping deletion",
692                    namespace, sa_name
693                );
694            }
695        }
696        Err(e) => {
697            debug!(
698                "ServiceAccount {}/{} does not exist or cannot be retrieved: {}",
699                namespace, sa_name, e
700            );
701        }
702    }
703
704    Ok(())
705}
706
/// Update status based on actual Deployment and Pod readiness.
///
/// Reads the Deployment's desired and ready replica counts, lists the pods
/// matching this instance via the `K8S_INSTANCE` label selector, and builds
/// one condition per pod plus a single encompassing `Ready` condition.
/// When the Deployment cannot be fetched, the status is set to `Unknown`
/// with `expected_replicas` recorded and 0 ready.
///
/// # Errors
///
/// Returns an error if listing pods or patching the instance status fails.
#[allow(clippy::too_many_lines)]
async fn update_status_from_deployment(
    client: &Client,
    namespace: &str,
    name: &str,
    instance: &Bind9Instance,
    expected_replicas: i32,
) -> Result<()> {
    let deploy_api: Api<Deployment> = Api::namespaced(client.clone(), namespace);
    let pod_api: Api<Pod> = Api::namespaced(client.clone(), namespace);

    match deploy_api.get(name).await {
        Ok(deployment) => {
            // Desired replica count from the Deployment spec (0 if unset).
            let actual_replicas = deployment
                .spec
                .as_ref()
                .and_then(|spec| spec.replicas)
                .unwrap_or(0);

            // Ready replica count as reported by the Deployment controller.
            let ready_replicas = deployment
                .status
                .as_ref()
                .and_then(|status| status.ready_replicas)
                .unwrap_or(0);

            // List pods for this deployment using label selector
            // Use the standard Kubernetes label for instance matching
            let label_selector = format!("{}={}", crate::labels::K8S_INSTANCE, name);
            let list_params = ListParams::default().labels(&label_selector);
            let pods = pod_api.list(&list_params).await?;

            // Create pod-level conditions
            let mut pod_conditions = Vec::new();
            let mut ready_pod_count = 0;

            for (index, pod) in pods.items.iter().enumerate() {
                let pod_name = pod.metadata.name.as_deref().unwrap_or("unknown");
                // A pod counts as ready when it carries a Ready=True condition.
                // Using map_or for explicit false default on None - more readable than is_some_and
                #[allow(clippy::unnecessary_map_or)]
                let is_pod_ready = pod
                    .status
                    .as_ref()
                    .and_then(|status| status.conditions.as_ref())
                    .map_or(false, |conditions| {
                        conditions
                            .iter()
                            .any(|c| c.type_ == "Ready" && c.status == "True")
                    });

                if is_pod_ready {
                    ready_pod_count += 1;
                }

                let (status, reason, message) = if is_pod_ready {
                    ("True", REASON_READY, format!("Pod {pod_name} is ready"))
                } else {
                    (
                        "False",
                        REASON_NOT_READY,
                        format!("Pod {pod_name} is not ready"),
                    )
                };

                // One condition per pod, indexed by position in the list.
                pod_conditions.push(Condition {
                    r#type: pod_condition_type(index),
                    status: status.to_string(),
                    reason: Some(reason.to_string()),
                    message: Some(message),
                    last_transition_time: Some(Utc::now().to_rfc3339()),
                });
            }

            // Create encompassing Ready condition: NotReady (none ready),
            // AllReady (all ready), or PartiallyReady (some ready).
            let (encompassing_status, encompassing_reason, encompassing_message) =
                if ready_pod_count == 0 && actual_replicas > 0 {
                    (
                        "False",
                        REASON_NOT_READY,
                        "Waiting for pods to become ready".to_string(),
                    )
                } else if ready_pod_count == actual_replicas && actual_replicas > 0 {
                    (
                        "True",
                        REASON_ALL_READY,
                        format!("All {ready_pod_count} pods are ready"),
                    )
                } else if ready_pod_count > 0 {
                    (
                        "False",
                        REASON_PARTIALLY_READY,
                        format!("{ready_pod_count}/{actual_replicas} pods are ready"),
                    )
                } else {
                    ("False", REASON_NOT_READY, "No pods are ready".to_string())
                };

            let encompassing_condition = Condition {
                r#type: CONDITION_TYPE_READY.to_string(),
                status: encompassing_status.to_string(),
                reason: Some(encompassing_reason.to_string()),
                message: Some(encompassing_message),
                last_transition_time: Some(Utc::now().to_rfc3339()),
            };

            // Combine encompassing condition + pod-level conditions
            // (encompassing condition first, then one per pod).
            let mut all_conditions = vec![encompassing_condition];
            all_conditions.extend(pod_conditions);

            // Update status with all conditions
            update_status(
                client,
                instance,
                all_conditions,
                actual_replicas,
                ready_replicas,
            )
            .await?;
        }
        Err(e) => {
            warn!(
                "Failed to get Deployment status for {}/{}: {}",
                namespace, name, e
            );
            // Set status as unknown if we can't check deployment
            let unknown_condition = Condition {
                r#type: CONDITION_TYPE_READY.to_string(),
                status: "Unknown".to_string(),
                reason: Some(REASON_NOT_READY.to_string()),
                message: Some("Unable to determine deployment status".to_string()),
                last_transition_time: Some(Utc::now().to_rfc3339()),
            };
            update_status(
                client,
                instance,
                vec![unknown_condition],
                expected_replicas,
                0,
            )
            .await?;
        }
    }

    Ok(())
}
852
853/// Update the status of a `Bind9Instance` with multiple conditions
854async fn update_status(
855    client: &Client,
856    instance: &Bind9Instance,
857    conditions: Vec<Condition>,
858    replicas: i32,
859    ready_replicas: i32,
860) -> Result<()> {
861    let api: Api<Bind9Instance> =
862        Api::namespaced(client.clone(), &instance.namespace().unwrap_or_default());
863
864    // Check if status has actually changed
865    let current_status = &instance.status;
866    let status_changed = if let Some(current) = current_status {
867        // Check if replicas changed
868        if current.replicas != Some(replicas) || current.ready_replicas != Some(ready_replicas) {
869            true
870        } else {
871            // Check if any condition changed
872            if current.conditions.len() == conditions.len() {
873                // Compare each condition
874                current
875                    .conditions
876                    .iter()
877                    .zip(conditions.iter())
878                    .any(|(current_cond, new_cond)| {
879                        current_cond.r#type != new_cond.r#type
880                            || current_cond.status != new_cond.status
881                            || current_cond.message != new_cond.message
882                            || current_cond.reason != new_cond.reason
883                    })
884            } else {
885                true
886            }
887        }
888    } else {
889        // No status exists, need to update
890        true
891    };
892
893    // Only update if status has changed
894    if !status_changed {
895        return Ok(());
896    }
897
898    let new_status = Bind9InstanceStatus {
899        conditions,
900        observed_generation: instance.metadata.generation,
901        replicas: Some(replicas),
902        ready_replicas: Some(ready_replicas),
903        service_address: None, // Will be populated when service is ready
904    };
905
906    let patch = json!({ "status": new_status });
907    api.patch_status(
908        &instance.name_any(),
909        &PatchParams::default(),
910        &Patch::Merge(patch),
911    )
912    .await?;
913
914    Ok(())
915}