// bindy/reconcilers/bind9cluster/instances.rs

1// Copyright (c) 2025 Erick Bourgeois, firestoned
2// SPDX-License-Identifier: MIT
3
4//! Instance lifecycle management for `Bind9Cluster` resources.
5//!
6//! This module handles creating, updating, and deleting `Bind9Instance`
7//! resources that are managed by a `Bind9Cluster`.
8
9#[allow(clippy::wildcard_imports)]
10use super::types::*;
11use crate::constants::{API_GROUP_VERSION, KIND_BIND9_CLUSTER, KIND_BIND9_INSTANCE};
12use crate::reconcilers::pagination::list_all_paginated;
13
14/// Reconcile managed `Bind9Instance` resources for a cluster
15///
16/// This function ensures the correct number of primary and secondary instances exist
17/// based on the cluster spec. It creates missing instances and adds management labels.
18///
19/// # Arguments
20///
21/// * `client` - Kubernetes API client
22/// * `cluster` - The `Bind9Cluster` resource
23///
24/// # Errors
25///
26/// Returns an error if:
27/// - Failed to list existing instances
28/// - Failed to create new instances
29#[allow(clippy::too_many_lines)]
30pub(super) async fn reconcile_managed_instances(
31    ctx: &Context,
32    cluster: &Bind9Cluster,
33) -> Result<()> {
34    let client = ctx.client.clone();
35    let namespace = cluster.namespace().unwrap_or_default();
36    let cluster_name = cluster.name_any();
37
38    info!(
39        "Reconciling managed instances for cluster {}/{}",
40        namespace, cluster_name
41    );
42
43    // Get desired replica counts from spec
44    let primary_replicas = cluster
45        .spec
46        .common
47        .primary
48        .as_ref()
49        .and_then(|p| p.replicas)
50        .unwrap_or(0);
51
52    let secondary_replicas = cluster
53        .spec
54        .common
55        .secondary
56        .as_ref()
57        .and_then(|s| s.replicas)
58        .unwrap_or(0);
59
60    debug!(
61        "Desired replicas: {} primary, {} secondary",
62        primary_replicas, secondary_replicas
63    );
64
65    if primary_replicas == 0 && secondary_replicas == 0 {
66        debug!(
67            "No instances requested for cluster {}/{}",
68            namespace, cluster_name
69        );
70        return Ok(());
71    }
72
73    // List existing managed instances
74    let api: Api<Bind9Instance> = Api::namespaced(client.clone(), &namespace);
75    let instances = list_all_paginated(&api, ListParams::default()).await?;
76
77    // Filter for managed instances of this cluster
78    let managed_instances: Vec<_> = instances
79        .into_iter()
80        .filter(|instance| {
81            // Check if instance has management labels
82            instance.metadata.labels.as_ref().is_some_and(|labels| {
83                labels.get(BINDY_MANAGED_BY_LABEL) == Some(&MANAGED_BY_BIND9_CLUSTER.to_string())
84                    && labels.get(BINDY_CLUSTER_LABEL) == Some(&cluster_name)
85            })
86        })
87        .collect();
88
89    debug!(
90        "Found {} managed instances for cluster {}/{}",
91        managed_instances.len(),
92        namespace,
93        cluster_name
94    );
95
96    // Separate by role
97    let existing_primary: Vec<_> = managed_instances
98        .iter()
99        .filter(|i| i.spec.role == ServerRole::Primary)
100        .collect();
101
102    let existing_secondary: Vec<_> = managed_instances
103        .iter()
104        .filter(|i| i.spec.role == ServerRole::Secondary)
105        .collect();
106
107    debug!(
108        "Existing instances: {} primary, {} secondary",
109        existing_primary.len(),
110        existing_secondary.len()
111    );
112
113    // Create ownerReference to the Bind9Cluster
114    let owner_ref = k8s_openapi::apimachinery::pkg::apis::meta::v1::OwnerReference {
115        api_version: API_GROUP_VERSION.to_string(),
116        kind: KIND_BIND9_CLUSTER.to_string(),
117        name: cluster_name.clone(),
118        uid: cluster.metadata.uid.clone().unwrap_or_default(),
119        controller: Some(true),
120        block_owner_deletion: Some(true),
121    };
122
123    // Handle scale-up: Create missing primary instances
124    // CRITICAL: Compare desired vs current state to find missing instances
125    // Build set of desired instance names, compare with existing, create the difference
126    #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
127    let mut primaries_to_create = 0;
128    {
129        // Build set of desired primary instance names based on replica count
130        #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
131        let desired_primary_names: std::collections::HashSet<String> = (0..(primary_replicas
132            as usize))
133            .map(|i| format!("{cluster_name}-primary-{i}"))
134            .collect();
135
136        // Build set of existing primary instance names
137        let existing_primary_names: std::collections::HashSet<String> = existing_primary
138            .iter()
139            .map(|instance| instance.name_any())
140            .collect();
141
142        // Find missing instances (desired - existing)
143        let missing_primaries: Vec<_> = desired_primary_names
144            .difference(&existing_primary_names)
145            .collect();
146
147        // Create each missing instance
148        for instance_name in missing_primaries {
149            // Extract index from name (e.g., "production-dns-primary-0" -> 0)
150            let index = instance_name
151                .rsplit('-')
152                .next()
153                .and_then(|s| s.parse::<usize>().ok())
154                .unwrap_or(0);
155
156            create_managed_instance_with_owner(
157                &client,
158                &namespace,
159                &cluster_name,
160                ServerRole::Primary,
161                index,
162                &cluster.spec.common,
163                Some(owner_ref.clone()),
164            )
165            .await?;
166            primaries_to_create += 1;
167        }
168    }
169
170    // Handle scale-down: Delete excess primary instances
171    #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
172    let primaries_to_delete = existing_primary
173        .len()
174        .saturating_sub(primary_replicas as usize);
175    if primaries_to_delete > 0 {
176        // Sort by index descending to delete highest-indexed instances first
177        let mut sorted_primary: Vec<_> = existing_primary.iter().collect();
178        sorted_primary.sort_by_key(|instance| {
179            instance
180                .metadata
181                .annotations
182                .as_ref()
183                .and_then(|a| a.get(BINDY_INSTANCE_INDEX_ANNOTATION))
184                .and_then(|idx| idx.parse::<usize>().ok())
185                .unwrap_or(0)
186        });
187        sorted_primary.reverse();
188
189        for instance in sorted_primary.iter().take(primaries_to_delete) {
190            let instance_name = instance.name_any();
191            delete_managed_instance(&client, &namespace, &instance_name).await?;
192        }
193    }
194
195    // Handle scale-up: Create missing secondary instances
196    // CRITICAL: Compare desired vs current state to find missing instances
197    // Build set of desired instance names, compare with existing, create the difference
198    #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
199    let mut secondaries_to_create = 0;
200    {
201        // Build set of desired secondary instance names based on replica count
202        #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
203        let desired_secondary_names: std::collections::HashSet<String> = (0..(secondary_replicas
204            as usize))
205            .map(|i| format!("{cluster_name}-secondary-{i}"))
206            .collect();
207
208        // Build set of existing secondary instance names
209        let existing_secondary_names: std::collections::HashSet<String> = existing_secondary
210            .iter()
211            .map(|instance| instance.name_any())
212            .collect();
213
214        // Find missing instances (desired - existing)
215        let missing_secondaries: Vec<_> = desired_secondary_names
216            .difference(&existing_secondary_names)
217            .collect();
218
219        // Create each missing instance
220        for instance_name in missing_secondaries {
221            // Extract index from name (e.g., "production-dns-secondary-0" -> 0)
222            let index = instance_name
223                .rsplit('-')
224                .next()
225                .and_then(|s| s.parse::<usize>().ok())
226                .unwrap_or(0);
227
228            create_managed_instance_with_owner(
229                &client,
230                &namespace,
231                &cluster_name,
232                ServerRole::Secondary,
233                index,
234                &cluster.spec.common,
235                Some(owner_ref.clone()),
236            )
237            .await?;
238            secondaries_to_create += 1;
239        }
240    }
241
242    // Handle scale-down: Delete excess secondary instances
243    #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
244    let secondaries_to_delete = existing_secondary
245        .len()
246        .saturating_sub(secondary_replicas as usize);
247    if secondaries_to_delete > 0 {
248        // Sort by index descending to delete highest-indexed instances first
249        let mut sorted_secondary: Vec<_> = existing_secondary.iter().collect();
250        sorted_secondary.sort_by_key(|instance| {
251            instance
252                .metadata
253                .annotations
254                .as_ref()
255                .and_then(|a| a.get(BINDY_INSTANCE_INDEX_ANNOTATION))
256                .and_then(|idx| idx.parse::<usize>().ok())
257                .unwrap_or(0)
258        });
259        sorted_secondary.reverse();
260
261        for instance in sorted_secondary.iter().take(secondaries_to_delete) {
262            let instance_name = instance.name_any();
263            delete_managed_instance(&client, &namespace, &instance_name).await?;
264        }
265    }
266
267    if primaries_to_create > 0
268        || secondaries_to_create > 0
269        || primaries_to_delete > 0
270        || secondaries_to_delete > 0
271    {
272        info!(
273            "Scaled cluster {}/{}: created {} primary, {} secondary; deleted {} primary, {} secondary",
274            namespace,
275            cluster_name,
276            primaries_to_create,
277            secondaries_to_create,
278            primaries_to_delete,
279            secondaries_to_delete
280        );
281    } else {
282        debug!(
283            "Cluster {}/{} already at desired scale",
284            namespace, cluster_name
285        );
286    }
287
288    // Update existing managed instances to match cluster spec (declarative reconciliation)
289    update_existing_managed_instances(
290        &client,
291        &namespace,
292        &cluster_name,
293        &cluster.spec.common,
294        &managed_instances,
295    )
296    .await?;
297
298    // Ensure child resources (ConfigMaps, Secrets, Services, Deployments) exist for all managed instances
299    ensure_managed_instance_resources(&client, cluster, &managed_instances).await?;
300
301    Ok(())
302}
303
304/// Update existing managed instances to match the cluster's current spec.
305///
306/// This implements true declarative reconciliation - comparing the desired state (from cluster spec)
307/// with the actual state (existing instance specs) and updating any instances that have drifted.
308///
309/// This ensures that when the cluster's `spec.common` changes (e.g., bindcar version, volumes,
310/// config references), all managed instances are updated to reflect the new configuration.
311///
312/// # Arguments
313///
314/// * `client` - Kubernetes API client
315/// * `namespace` - Namespace containing the instances
316/// * `cluster_name` - Name of the parent cluster
317/// * `common_spec` - The cluster's common spec (source of truth)
318/// * `managed_instances` - List of existing managed instances to check
319///
320/// # Errors
321///
322/// Returns an error if patching instances fails
323pub(super) async fn update_existing_managed_instances(
324    client: &Client,
325    namespace: &str,
326    cluster_name: &str,
327    common_spec: &crate::crd::Bind9ClusterCommonSpec,
328    managed_instances: &[Bind9Instance],
329) -> Result<()> {
330    if managed_instances.is_empty() {
331        return Ok(());
332    }
333
334    let instance_api: Api<Bind9Instance> = Api::namespaced(client.clone(), namespace);
335    let mut updated_count = 0;
336
337    for instance in managed_instances {
338        let instance_name = instance.name_any();
339
340        // Build the desired spec based on current cluster configuration
341        let desired_bindcar_config = common_spec
342            .global
343            .as_ref()
344            .and_then(|g| g.bindcar_config.clone());
345
346        // Check if instance spec needs updating by comparing key fields
347        let needs_update = instance.spec.version != common_spec.version
348            || instance.spec.image != common_spec.image
349            || instance.spec.config_map_refs != common_spec.config_map_refs
350            || instance.spec.volumes != common_spec.volumes
351            || instance.spec.volume_mounts != common_spec.volume_mounts
352            || instance.spec.bindcar_config != desired_bindcar_config;
353
354        if needs_update {
355            debug!(
356                "Instance {}/{} spec differs from cluster spec, updating",
357                namespace, instance_name
358            );
359
360            // Build updated instance spec - preserve instance-specific fields, update cluster-inherited fields
361            #[allow(deprecated)]
362            // Backward compatibility: preserve deprecated rndc_secret_ref if set
363            let updated_spec = Bind9InstanceSpec {
364                cluster_ref: instance.spec.cluster_ref.clone(),
365                role: instance.spec.role.clone(),
366                replicas: instance.spec.replicas, // Preserve instance replicas (always 1 for managed)
367                version: common_spec.version.clone(),
368                image: common_spec.image.clone(),
369                config_map_refs: common_spec.config_map_refs.clone(),
370                config: None, // Managed instances inherit from cluster
371                primary_servers: instance.spec.primary_servers.clone(), // Preserve if set
372                volumes: common_spec.volumes.clone(),
373                volume_mounts: common_spec.volume_mounts.clone(),
374                rndc_secret_ref: instance.spec.rndc_secret_ref.clone(), // Preserve if set (deprecated)
375                rndc_key: instance.spec.rndc_key.clone(),               // Preserve if set
376                storage: instance.spec.storage.clone(),                 // Preserve if set
377                bindcar_config: desired_bindcar_config,
378            };
379
380            // Use server-side apply to update the instance spec
381            let patch = serde_json::json!({
382                "apiVersion": API_GROUP_VERSION,
383                "kind": KIND_BIND9_INSTANCE,
384                "metadata": {
385                    "name": instance_name,
386                    "namespace": namespace,
387                },
388                "spec": updated_spec,
389            });
390
391            match instance_api
392                .patch(
393                    &instance_name,
394                    &PatchParams::apply("bindy-controller").force(),
395                    &Patch::Apply(&patch),
396                )
397                .await
398            {
399                Ok(_) => {
400                    info!(
401                        "Updated managed instance {}/{} to match cluster spec",
402                        namespace, instance_name
403                    );
404                    updated_count += 1;
405                }
406                Err(e) => {
407                    error!(
408                        "Failed to update managed instance {}/{}: {}",
409                        namespace, instance_name, e
410                    );
411                    return Err(e.into());
412                }
413            }
414        } else {
415            debug!(
416                "Instance {}/{} spec matches cluster spec, no update needed",
417                namespace, instance_name
418            );
419        }
420    }
421
422    if updated_count > 0 {
423        info!(
424            "Updated {} managed instances in cluster {}/{} to match current spec",
425            updated_count, namespace, cluster_name
426        );
427    }
428
429    Ok(())
430}
431
432/// Ensure child resources exist for all managed instances
433///
434/// This function verifies that all Kubernetes resources (`ConfigMap`, `Secret`, `Service`, `Deployment`)
435/// exist for each managed instance. If any resource is missing, it triggers reconciliation
436/// by updating the instance's annotations to force the `Bind9Instance` controller to recreate them.
437///
438/// # Arguments
439///
440/// * `client` - Kubernetes API client
441/// * `cluster` - The parent `Bind9Cluster`
442/// * `managed_instances` - List of managed `Bind9Instance` resources
443///
444/// # Errors
445///
446/// Returns an error if resource checking or instance update fails
447pub(super) async fn ensure_managed_instance_resources(
448    client: &Client,
449    cluster: &Bind9Cluster,
450    managed_instances: &[Bind9Instance],
451) -> Result<()> {
452    let namespace = cluster.namespace().unwrap_or_default();
453    let cluster_name = cluster.name_any();
454
455    if managed_instances.is_empty() {
456        return Ok(());
457    }
458
459    debug!(
460        "Ensuring child resources exist for {} managed instances in cluster {}/{}",
461        managed_instances.len(),
462        namespace,
463        cluster_name
464    );
465
466    let configmap_api: Api<ConfigMap> = Api::namespaced(client.clone(), &namespace);
467    let secret_api: Api<Secret> = Api::namespaced(client.clone(), &namespace);
468    let service_api: Api<Service> = Api::namespaced(client.clone(), &namespace);
469    let deployment_api: Api<Deployment> = Api::namespaced(client.clone(), &namespace);
470    let instance_api: Api<Bind9Instance> = Api::namespaced(client.clone(), &namespace);
471
472    // Managed instances share the cluster ConfigMap, not instance-specific ones
473    let cluster_configmap_name = format!("{cluster_name}-config");
474
475    for instance in managed_instances {
476        let instance_name = instance.name_any();
477        let mut missing_resources = Vec::new();
478
479        // Check ConfigMap - managed instances use the shared cluster ConfigMap
480        if configmap_api.get(&cluster_configmap_name).await.is_err() {
481            missing_resources.push("ConfigMap");
482        }
483
484        // Check RNDC Secret
485        let secret_name = format!("{instance_name}-rndc-key");
486        if secret_api.get(&secret_name).await.is_err() {
487            missing_resources.push("Secret");
488        }
489
490        // Check Service
491        if service_api.get(&instance_name).await.is_err() {
492            missing_resources.push("Service");
493        }
494
495        // Check Deployment
496        if deployment_api.get(&instance_name).await.is_err() {
497            missing_resources.push("Deployment");
498        }
499
500        // If any resources are missing, trigger instance reconciliation
501        if missing_resources.is_empty() {
502            debug!(
503                "All child resources exist for managed instance {}/{}",
504                namespace, instance_name
505            );
506        } else {
507            warn!(
508                "Missing resources for managed instance {}/{}: {}. Triggering reconciliation.",
509                namespace,
510                instance_name,
511                missing_resources.join(", ")
512            );
513
514            // Force reconciliation by updating an annotation
515            let patch = json!({
516                "metadata": {
517                    "annotations": {
518                        BINDY_RECONCILE_TRIGGER_ANNOTATION: Utc::now().to_rfc3339()
519                    }
520                }
521            });
522
523            instance_api
524                .patch(
525                    &instance_name,
526                    &PatchParams::apply("bindy-cluster-controller"),
527                    &Patch::Merge(&patch),
528                )
529                .await?;
530
531            info!(
532                "Triggered reconciliation for instance {}/{} to recreate: {}",
533                namespace,
534                instance_name,
535                missing_resources.join(", ")
536            );
537        }
538    }
539
540    Ok(())
541}
542
543/// Create a managed `Bind9Instance` resource
544///
545/// This function is public to allow reuse by `ClusterBind9Provider` reconciler.
546///
547/// # Arguments
548///
549/// * `client` - Kubernetes API client
550/// * `namespace` - Namespace for the instance
551/// * `cluster_name` - Name of the cluster (namespace-scoped or global)
552/// * `role` - Role of the instance (Primary or Secondary)
553/// * `index` - Index of this instance within its role
554/// * `common_spec` - The cluster's common specification
555/// * `is_global` - Whether this is for a global cluster
556///
557/// # Errors
558///
559/// Returns an error if instance creation fails
560#[allow(clippy::too_many_lines, clippy::too_many_arguments)]
561pub async fn create_managed_instance(
562    client: &Client,
563    namespace: &str,
564    cluster_name: &str,
565    role: ServerRole,
566    index: usize,
567    common_spec: &crate::crd::Bind9ClusterCommonSpec,
568    _is_global: bool,
569) -> Result<()> {
570    create_managed_instance_with_owner(
571        client,
572        namespace,
573        cluster_name,
574        role,
575        index,
576        common_spec,
577        None, // No owner reference - for backward compatibility
578    )
579    .await
580}
581
582/// Create a managed `Bind9Instance` with optional ownerReference.
583///
584/// This is the internal implementation that supports setting ownerReferences.
585/// Use `create_managed_instance()` for backward compatibility without ownerReferences.
586///
587/// # Arguments
588///
589/// * `owner_ref` - Optional ownerReference to the parent `Bind9Cluster`
590#[allow(clippy::too_many_arguments, clippy::too_many_lines)]
591async fn create_managed_instance_with_owner(
592    client: &Client,
593    namespace: &str,
594    cluster_name: &str,
595    role: ServerRole,
596    index: usize,
597    common_spec: &crate::crd::Bind9ClusterCommonSpec,
598    owner_ref: Option<k8s_openapi::apimachinery::pkg::apis::meta::v1::OwnerReference>,
599) -> Result<()> {
600    let role_str = match role {
601        ServerRole::Primary => ROLE_PRIMARY,
602        ServerRole::Secondary => ROLE_SECONDARY,
603    };
604
605    let instance_name = format!("{cluster_name}-{role_str}-{index}");
606
607    info!(
608        "Creating managed instance {}/{} for cluster {} (role: {:?}, index: {})",
609        namespace, instance_name, cluster_name, role, index
610    );
611
612    // Create labels
613    let mut labels = BTreeMap::new();
614    labels.insert(
615        BINDY_MANAGED_BY_LABEL.to_string(),
616        MANAGED_BY_BIND9_CLUSTER.to_string(),
617    );
618    labels.insert(BINDY_CLUSTER_LABEL.to_string(), cluster_name.to_string());
619    labels.insert(BINDY_ROLE_LABEL.to_string(), role_str.to_string());
620    labels.insert(K8S_PART_OF.to_string(), PART_OF_BINDY.to_string());
621
622    // Propagate custom labels from cluster spec based on role
623    match role {
624        ServerRole::Primary => {
625            if let Some(primary_config) = &common_spec.primary {
626                if let Some(custom_labels) = &primary_config.labels {
627                    for (key, value) in custom_labels {
628                        labels.insert(key.clone(), value.clone());
629                    }
630                }
631            }
632        }
633        ServerRole::Secondary => {
634            if let Some(secondary_config) = &common_spec.secondary {
635                if let Some(custom_labels) = &secondary_config.labels {
636                    for (key, value) in custom_labels {
637                        labels.insert(key.clone(), value.clone());
638                    }
639                }
640            }
641        }
642    }
643
644    // Create annotations
645    let mut annotations = BTreeMap::new();
646    annotations.insert(
647        BINDY_INSTANCE_INDEX_ANNOTATION.to_string(),
648        index.to_string(),
649    );
650
651    // Build instance spec - copy configuration from cluster
652    #[allow(deprecated)] // Backward compatibility: include deprecated rndc_secret_ref field
653    let instance_spec = Bind9InstanceSpec {
654        cluster_ref: cluster_name.to_string(),
655        role,
656        replicas: Some(1), // Each managed instance has 1 replica
657        version: common_spec.version.clone(),
658        image: common_spec.image.clone(),
659        config_map_refs: common_spec.config_map_refs.clone(),
660        config: None,          // Inherit from cluster
661        primary_servers: None, // TODO: Could populate for secondaries
662        volumes: common_spec.volumes.clone(),
663        volume_mounts: common_spec.volume_mounts.clone(),
664        rndc_secret_ref: None, // Inherit from cluster/role config (deprecated)
665        rndc_key: None,        // Inherit from cluster/role config
666        storage: None,         // Use default (emptyDir)
667        bindcar_config: common_spec
668            .global
669            .as_ref()
670            .and_then(|g| g.bindcar_config.clone()),
671    };
672
673    let instance = Bind9Instance {
674        metadata: ObjectMeta {
675            name: Some(instance_name.clone()),
676            namespace: Some(namespace.to_string()),
677            labels: Some(labels.clone()),
678            annotations: Some(annotations),
679            owner_references: owner_ref.map(|r| vec![r]),
680            ..Default::default()
681        },
682        spec: instance_spec,
683        status: None,
684    };
685
686    let api: Api<Bind9Instance> = Api::namespaced(client.clone(), namespace);
687
688    match api.create(&PostParams::default(), &instance).await {
689        Ok(_) => {
690            info!(
691                "Successfully created managed instance {}/{}",
692                namespace, instance_name
693            );
694            Ok(())
695        }
696        Err(e) => {
697            // If already exists, patch it to ensure spec is up to date
698            if e.to_string().contains("AlreadyExists") {
699                debug!(
700                    "Managed instance {}/{} already exists, patching with updated spec",
701                    namespace, instance_name
702                );
703
704                // Build a complete patch object for server-side apply
705                // Convert BTreeMap labels to serde_json::Value for patch
706                let labels_json: serde_json::Map<String, serde_json::Value> = labels
707                    .iter()
708                    .map(|(k, v)| (k.clone(), serde_json::Value::String(v.clone())))
709                    .collect();
710
711                let patch = serde_json::json!({
712                    "apiVersion": API_GROUP_VERSION,
713                    "kind": KIND_BIND9_INSTANCE,
714                    "metadata": {
715                        "name": instance_name,
716                        "namespace": namespace,
717                        "labels": labels_json,
718                        "annotations": {
719                            BINDY_INSTANCE_INDEX_ANNOTATION: index.to_string(),
720                        },
721                        "ownerReferences": instance.metadata.owner_references,
722                    },
723                    "spec": instance.spec,
724                });
725
726                // Apply the patch to update the spec, labels, annotations, and owner references
727                match api
728                    .patch(
729                        &instance_name,
730                        &PatchParams::apply("bindy-controller").force(),
731                        &Patch::Apply(&patch),
732                    )
733                    .await
734                {
735                    Ok(_) => {
736                        info!(
737                            "Successfully patched managed instance {}/{} with updated spec",
738                            namespace, instance_name
739                        );
740                        Ok(())
741                    }
742                    Err(patch_err) => {
743                        error!(
744                            "Failed to patch managed instance {}/{}: {}",
745                            namespace, instance_name, patch_err
746                        );
747                        Err(patch_err.into())
748                    }
749                }
750            } else {
751                error!(
752                    "Failed to create managed instance {}/{}: {}",
753                    namespace, instance_name, e
754                );
755                Err(e.into())
756            }
757        }
758    }
759}
760
761/// Delete a single managed `Bind9Instance` resource.
762///
763/// This function is public to allow reuse by `ClusterBind9Provider` reconciler.
764///
765/// # Arguments
766///
767/// * `client` - Kubernetes API client
768/// * `namespace` - Namespace of the instance
769/// * `instance_name` - Name of the instance to delete
770///
771/// # Errors
772///
773/// Returns an error if deletion fails (except for `NotFound` errors, which are treated as success)
774pub async fn delete_managed_instance(
775    client: &Client,
776    namespace: &str,
777    instance_name: &str,
778) -> Result<()> {
779    let api: Api<Bind9Instance> = Api::namespaced(client.clone(), namespace);
780
781    match api.delete(instance_name, &DeleteParams::default()).await {
782        Ok(_) => {
783            info!(
784                "Successfully deleted managed instance {}/{}",
785                namespace, instance_name
786            );
787            Ok(())
788        }
789        Err(e) if e.to_string().contains("NotFound") => {
790            debug!(
791                "Managed instance {}/{} already deleted",
792                namespace, instance_name
793            );
794            Ok(())
795        }
796        Err(e) => {
797            error!(
798                "Failed to delete managed instance {}/{}: {}",
799                namespace, instance_name, e
800            );
801            Err(e.into())
802        }
803    }
804}
805
806/// Delete all `Bind9Instance` resources that reference the given cluster
807///
808/// # Arguments
809///
810/// * `client` - Kubernetes API client
811/// * `namespace` - Namespace containing the instances
812/// * `cluster_name` - Name of the cluster being deleted
813///
814/// # Errors
815///
816/// Returns an error if:
817/// - Failed to list `Bind9Instance` resources
818/// - Failed to delete any `Bind9Instance` resource
819pub(super) async fn delete_cluster_instances(
820    client: &Client,
821    namespace: &str,
822    cluster_name: &str,
823) -> Result<()> {
824    let api: Api<Bind9Instance> = Api::namespaced(client.clone(), namespace);
825
826    info!(
827        "Finding all Bind9Instance resources for cluster {}/{}",
828        namespace, cluster_name
829    );
830
831    // List all instances in the namespace
832    let instances = list_all_paginated(&api, ListParams::default()).await?;
833
834    // Filter instances that reference this cluster
835    let cluster_instances: Vec<_> = instances
836        .into_iter()
837        .filter(|instance| instance.spec.cluster_ref == cluster_name)
838        .collect();
839
840    if cluster_instances.is_empty() {
841        info!(
842            "No Bind9Instance resources found for cluster {}/{}",
843            namespace, cluster_name
844        );
845        return Ok(());
846    }
847
848    info!(
849        "Found {} Bind9Instance resources for cluster {}/{}, deleting...",
850        cluster_instances.len(),
851        namespace,
852        cluster_name
853    );
854
855    // Delete each instance
856    for instance in cluster_instances {
857        let instance_name = instance.name_any();
858        info!(
859            "Deleting Bind9Instance {}/{} (clusterRef: {})",
860            namespace, instance_name, cluster_name
861        );
862
863        match api.delete(&instance_name, &DeleteParams::default()).await {
864            Ok(_) => {
865                info!(
866                    "Successfully deleted Bind9Instance {}/{}",
867                    namespace, instance_name
868                );
869            }
870            Err(e) => {
871                // If the resource is already deleted, treat it as success
872                if e.to_string().contains("NotFound") {
873                    warn!(
874                        "Bind9Instance {}/{} already deleted",
875                        namespace, instance_name
876                    );
877                } else {
878                    error!(
879                        "Failed to delete Bind9Instance {}/{}: {}",
880                        namespace, instance_name, e
881                    );
882                    return Err(e.into());
883                }
884            }
885        }
886    }
887
888    info!(
889        "Successfully deleted all Bind9Instance resources for cluster {}/{}",
890        namespace, cluster_name
891    );
892
893    Ok(())
894}
895
/// Delete handler for `Bind9Cluster` resources (cleanup logic)
///
/// This function is no longer used as deletion is handled by the finalizer in `reconcile_bind9cluster`.
/// Kept for backward compatibility. Both parameters are intentionally unused
/// (underscore-prefixed); the signature is preserved so existing callers keep compiling.
///
/// # Errors
///
/// This function currently never returns an error, but returns `Result` for API consistency.
pub async fn delete_bind9cluster(_client: Client, _cluster: Bind9Cluster) -> Result<()> {
    // Deletion is now handled by the finalizer in reconcile_bind9cluster
    Ok(())
}
908
909#[cfg(test)]
910#[path = "instances_tests.rs"]
911mod instances_tests;