bindy/
bind9_resources.rs

1// Copyright (c) 2025 Erick Bourgeois, firestoned
2// SPDX-License-Identifier: MIT
3
4//! BIND9 Kubernetes resource builders
5//!
6//! This module provides functions to build Kubernetes resources (`Deployment`, `ConfigMap`, `Service`)
7//! for BIND9 instances. All functions are pure and easily testable.
8
9use crate::constants::{
10    API_GROUP_VERSION, BIND9_SERVICE_ACCOUNT, DEFAULT_BIND9_VERSION, DNS_PORT, KIND_BIND9_INSTANCE,
11    LIVENESS_FAILURE_THRESHOLD, LIVENESS_INITIAL_DELAY_SECS, LIVENESS_PERIOD_SECS,
12    LIVENESS_TIMEOUT_SECS, READINESS_FAILURE_THRESHOLD, READINESS_INITIAL_DELAY_SECS,
13    READINESS_PERIOD_SECS, READINESS_TIMEOUT_SECS, RNDC_PORT,
14};
15use crate::crd::{Bind9Cluster, Bind9Instance, ConfigMapRefs, ImageConfig};
16use crate::labels::{
17    APP_NAME_BIND9, COMPONENT_DNS_CLUSTER, COMPONENT_DNS_SERVER, K8S_COMPONENT, K8S_INSTANCE,
18    K8S_MANAGED_BY, K8S_NAME, K8S_PART_OF, MANAGED_BY_BIND9_CLUSTER, MANAGED_BY_BIND9_INSTANCE,
19    PART_OF_BINDY,
20};
21use k8s_openapi::api::{
22    apps::v1::{Deployment, DeploymentSpec},
23    core::v1::{
24        ConfigMap, Container, ContainerPort, EnvVar, EnvVarSource, PodSpec, PodTemplateSpec, Probe,
25        SecretKeySelector, Service, ServiceAccount, ServicePort, ServiceSpec, TCPSocketAction,
26        Volume, VolumeMount,
27    },
28};
29use k8s_openapi::apimachinery::pkg::{
30    apis::meta::v1::{LabelSelector, ObjectMeta, OwnerReference},
31    util::intstr::IntOrString,
32};
33use kube::ResourceExt;
34use std::collections::BTreeMap;
35use tracing::debug;
36
// Embed configuration templates at compile time.
// include_str! bakes the template text into the binary, so the operator
// needs no template files on disk at runtime.
const NAMED_CONF_TEMPLATE: &str = include_str!("../templates/named.conf.tmpl");
const NAMED_CONF_OPTIONS_TEMPLATE: &str = include_str!("../templates/named.conf.options.tmpl");
const RNDC_CONF_TEMPLATE: &str = include_str!("../templates/rndc.conf.tmpl");

// BIND configuration file paths and mount points inside the container.
const BIND_ZONES_PATH: &str = "/etc/bind/zones";
const BIND_CACHE_PATH: &str = "/var/cache/bind";
const BIND_KEYS_PATH: &str = "/etc/bind/keys";
const BIND_NAMED_CONF_PATH: &str = "/etc/bind/named.conf";
const BIND_NAMED_CONF_OPTIONS_PATH: &str = "/etc/bind/named.conf.options";
const BIND_NAMED_CONF_ZONES_PATH: &str = "/etc/bind/named.conf.zones";
const BIND_RNDC_CONF_PATH: &str = "/etc/bind/rndc.conf";

// BIND configuration file names (used as ConfigMap data keys).
const NAMED_CONF_FILENAME: &str = "named.conf";
const NAMED_CONF_OPTIONS_FILENAME: &str = "named.conf.options";
const NAMED_CONF_ZONES_FILENAME: &str = "named.conf.zones";
const RNDC_CONF_FILENAME: &str = "rndc.conf";

// Volume mount names.
const VOLUME_ZONES: &str = "zones";
const VOLUME_CACHE: &str = "cache";
const VOLUME_RNDC_KEY: &str = "rndc-key";
const VOLUME_CONFIG: &str = "config";
const VOLUME_NAMED_CONF: &str = "named-conf";
const VOLUME_NAMED_CONF_OPTIONS: &str = "named-conf-options";
const VOLUME_NAMED_CONF_ZONES: &str = "named-conf-zones";
65
66/// Builds standardized Kubernetes labels for BIND9 instance resources.
67///
68/// Creates labels for resources managed by `Bind9Instance` controller.
69/// Use `build_cluster_labels()` for resources managed by `Bind9Cluster`.
70///
71/// # Arguments
72///
73/// * `instance_name` - Name of the `Bind9Instance` resource
74///
75/// # Returns
76///
77/// A `BTreeMap` of label key-value pairs
78///
79/// # Example
80///
81/// ```rust
82/// use bindy::bind9_resources::build_labels;
83///
84/// let labels = build_labels("my-dns-server");
85/// assert_eq!(labels.get("app").unwrap(), "bind9");
86/// assert_eq!(labels.get("instance").unwrap(), "my-dns-server");
87/// ```
88#[must_use]
89pub fn build_labels(instance_name: &str) -> BTreeMap<String, String> {
90    let mut labels = BTreeMap::new();
91    labels.insert("app".into(), APP_NAME_BIND9.into());
92    labels.insert("instance".into(), instance_name.into());
93    labels.insert(K8S_NAME.into(), APP_NAME_BIND9.into());
94    labels.insert(K8S_INSTANCE.into(), instance_name.into());
95    labels.insert(K8S_COMPONENT.into(), COMPONENT_DNS_SERVER.into());
96    labels.insert(K8S_MANAGED_BY.into(), MANAGED_BY_BIND9_INSTANCE.into());
97    labels.insert(K8S_PART_OF.into(), PART_OF_BINDY.into());
98    labels
99}
100
101/// Builds standardized Kubernetes labels for BIND9 cluster resources.
102///
103/// Creates labels for resources managed by `Bind9Cluster` controller.
104/// Use `build_labels()` for resources managed by `Bind9Instance`.
105///
106/// # Arguments
107///
108/// * `cluster_name` - Name of the `Bind9Cluster` resource
109///
110/// # Returns
111///
112/// A `BTreeMap` of label key-value pairs
113#[must_use]
114pub fn build_cluster_labels(cluster_name: &str) -> BTreeMap<String, String> {
115    let mut labels = BTreeMap::new();
116    labels.insert("app".into(), APP_NAME_BIND9.into());
117    labels.insert("cluster".into(), cluster_name.into());
118    labels.insert(K8S_NAME.into(), APP_NAME_BIND9.into());
119    labels.insert(K8S_INSTANCE.into(), cluster_name.into());
120    labels.insert(K8S_COMPONENT.into(), COMPONENT_DNS_CLUSTER.into());
121    labels.insert(K8S_MANAGED_BY.into(), MANAGED_BY_BIND9_CLUSTER.into());
122    labels.insert(K8S_PART_OF.into(), PART_OF_BINDY.into());
123    labels
124}
125
126/// Builds standardized Kubernetes labels for BIND9 instance resources,
127/// propagating the `managed-by` label from the `Bind9Instance` if it exists.
128///
129/// This function checks if the instance has a `bindy.firestoned.io/managed-by` label.
130/// If it does (indicating the instance is managed by a `Bind9Cluster`), that label
131/// value is propagated to the `app.kubernetes.io/managed-by` label. Otherwise,
132/// it defaults to `Bind9Instance`.
133///
134/// This ensures that when a `Bind9Cluster` creates a `Bind9Instance` with
135/// `managed-by: Bind9Cluster`, all child resources (Deployments, Services) also
136/// get `managed-by: Bind9Cluster`.
137///
138/// # Arguments
139///
140/// * `instance_name` - Name of the `Bind9Instance` resource
141/// * `instance` - The `Bind9Instance` resource to check for management labels
142///
143/// # Returns
144///
145/// A `BTreeMap` of label key-value pairs
146#[must_use]
147pub fn build_labels_from_instance(
148    instance_name: &str,
149    instance: &Bind9Instance,
150) -> BTreeMap<String, String> {
151    use crate::labels::BINDY_MANAGED_BY_LABEL;
152
153    let mut labels = BTreeMap::new();
154    labels.insert("app".into(), APP_NAME_BIND9.into());
155    labels.insert("instance".into(), instance_name.into());
156    labels.insert(K8S_NAME.into(), APP_NAME_BIND9.into());
157    labels.insert(K8S_INSTANCE.into(), instance_name.into());
158    labels.insert(K8S_COMPONENT.into(), COMPONENT_DNS_SERVER.into());
159    labels.insert(K8S_PART_OF.into(), PART_OF_BINDY.into());
160
161    // Check if instance has bindy.firestoned.io/managed-by label
162    // If it does, propagate it to app.kubernetes.io/managed-by
163    let managed_by = instance
164        .metadata
165        .labels
166        .as_ref()
167        .and_then(|labels| labels.get(BINDY_MANAGED_BY_LABEL))
168        .map_or(MANAGED_BY_BIND9_INSTANCE, String::as_str);
169
170    labels.insert(K8S_MANAGED_BY.into(), managed_by.into());
171
172    labels
173}
174
175/// Builds owner references for a resource owned by a `Bind9Instance`
176///
177/// Sets up cascade deletion so that when the `Bind9Instance` is deleted,
178/// all its child resources (`Deployment`, `Service`, `ConfigMap`) are automatically deleted.
179///
180/// # Arguments
181///
182/// * `instance` - The `Bind9Instance` that owns this resource
183///
184/// # Returns
185///
186/// A vector containing a single `OwnerReference` pointing to the instance
187#[must_use]
188pub fn build_owner_references(instance: &Bind9Instance) -> Vec<OwnerReference> {
189    vec![OwnerReference {
190        api_version: API_GROUP_VERSION.to_string(),
191        kind: KIND_BIND9_INSTANCE.to_string(),
192        name: instance.name_any(),
193        uid: instance.metadata.uid.clone().unwrap_or_default(),
194        controller: Some(true),
195        block_owner_deletion: Some(true),
196    }]
197}
198
199/// Builds a Kubernetes `ConfigMap` containing BIND9 configuration files.
200///
201/// Creates a `ConfigMap` with:
202/// - `named.conf` - Main BIND9 configuration
203/// - `named.conf.options` - BIND9 options (recursion, ACLs, DNSSEC, etc.)
204///
205/// If custom `ConfigMaps` are referenced in the cluster or instance spec, this function
206/// will not generate configuration files, as they should be provided by the user.
207///
208/// # Arguments
209///
210/// * `name` - Name for the `ConfigMap` (typically `{instance-name}-config`)
211/// * `namespace` - Kubernetes namespace
212/// * `instance` - `Bind9Instance` spec containing configuration options
213/// * `cluster` - Optional `Bind9Cluster` containing shared configuration
214///
215/// # Returns
216///
217/// A Kubernetes `ConfigMap` resource ready for creation/update, or None if custom `ConfigMaps` are used
218#[must_use]
219pub fn build_configmap(
220    name: &str,
221    namespace: &str,
222    instance: &Bind9Instance,
223    cluster: Option<&Bind9Cluster>,
224    role_allow_transfer: Option<&Vec<String>>,
225) -> Option<ConfigMap> {
226    debug!(
227        name = %name,
228        namespace = %namespace,
229        "Building ConfigMap for Bind9Instance"
230    );
231
232    // Check if custom ConfigMaps are referenced (instance overrides cluster)
233    let config_map_refs = instance
234        .spec
235        .config_map_refs
236        .as_ref()
237        .or_else(|| cluster.and_then(|c| c.spec.common.config_map_refs.as_ref()));
238
239    // If custom ConfigMaps are specified, don't generate a ConfigMap
240    if let Some(refs) = config_map_refs {
241        if refs.named_conf.is_some() || refs.named_conf_options.is_some() {
242            debug!(
243                named_conf_ref = ?refs.named_conf,
244                named_conf_options_ref = ?refs.named_conf_options,
245                "Custom ConfigMaps specified, skipping generation"
246            );
247            // User is providing custom ConfigMaps, so we don't create one
248            return None;
249        }
250    }
251
252    // Generate default configuration
253    let mut data = BTreeMap::new();
254    let labels = build_labels(name);
255
256    // Build named.conf
257    let named_conf = build_named_conf(instance, cluster);
258    data.insert(NAMED_CONF_FILENAME.into(), named_conf);
259
260    // Build named.conf.options
261    let options_conf = build_options_conf(instance, cluster, role_allow_transfer);
262    data.insert(NAMED_CONF_OPTIONS_FILENAME.into(), options_conf);
263
264    // Build rndc.conf (references key file mounted from Secret)
265    data.insert(RNDC_CONF_FILENAME.into(), RNDC_CONF_TEMPLATE.to_string());
266
267    // Note: We do NOT auto-generate named.conf.zones anymore.
268    // Users must explicitly provide a namedConfZones ConfigMap if they want zones support.
269
270    let owner_refs = build_owner_references(instance);
271
272    Some(ConfigMap {
273        metadata: ObjectMeta {
274            name: Some(format!("{name}-config")),
275            namespace: Some(namespace.into()),
276            labels: Some(labels),
277            owner_references: Some(owner_refs),
278            ..Default::default()
279        },
280        data: Some(data),
281        ..Default::default()
282    })
283}
284
285/// Builds a cluster-level shared `ConfigMap` containing BIND9 configuration files.
286///
287/// This `ConfigMap` is shared across all instances in a cluster, containing configuration
288/// from `spec.global`. This eliminates the need for per-instance `ConfigMaps` when all
289/// instances share the same configuration.
290///
291/// # Arguments
292///
293/// * `cluster_name` - Name of the cluster (used for `ConfigMap` naming)
294/// * `namespace` - Kubernetes namespace
295/// * `cluster` - `Bind9Cluster` containing shared configuration
296///
297/// # Returns
298///
299/// A Kubernetes `ConfigMap` resource ready for creation/update
300///
301/// # Errors
302///
303/// Returns an error if configuration generation fails
304pub fn build_cluster_configmap(
305    cluster_name: &str,
306    namespace: &str,
307    cluster: &Bind9Cluster,
308) -> Result<ConfigMap, anyhow::Error> {
309    debug!(
310        cluster_name = %cluster_name,
311        namespace = %namespace,
312        "Building cluster-level shared ConfigMap"
313    );
314
315    // Generate default configuration from cluster spec
316    let mut data = BTreeMap::new();
317    let labels = build_cluster_labels(cluster_name);
318
319    // Build named.conf from cluster
320    let named_conf = build_cluster_named_conf(cluster);
321    data.insert(NAMED_CONF_FILENAME.into(), named_conf);
322
323    // Build named.conf.options from cluster.spec.common.global
324    let options_conf = build_cluster_options_conf(cluster);
325    data.insert(NAMED_CONF_OPTIONS_FILENAME.into(), options_conf);
326
327    // Build rndc.conf (references key file mounted from Secret)
328    data.insert(RNDC_CONF_FILENAME.into(), RNDC_CONF_TEMPLATE.to_string());
329
330    Ok(ConfigMap {
331        metadata: ObjectMeta {
332            name: Some(format!("{cluster_name}-config")),
333            namespace: Some(namespace.into()),
334            labels: Some(labels),
335            ..Default::default()
336        },
337        data: Some(data),
338        ..Default::default()
339    })
340}
341
342/// Build the main named.conf configuration from template
343///
344/// Generates the main BIND9 configuration file with conditional zones include.
345/// The zones include directive is only added if the user provides a `namedConfZones` `ConfigMap`.
346///
347/// # Arguments
348///
349/// * `instance` - `Bind9Instance` spec (checked first for config refs)
350/// * `cluster` - Optional `Bind9Cluster` (fallback for config refs)
351///
352/// # Returns
353///
354/// A string containing the complete named.conf configuration
355fn build_named_conf(instance: &Bind9Instance, cluster: Option<&Bind9Cluster>) -> String {
356    // Check if user provided a custom zones ConfigMap
357    let config_map_refs = instance
358        .spec
359        .config_map_refs
360        .as_ref()
361        .or_else(|| cluster.and_then(|c| c.spec.common.config_map_refs.as_ref()));
362
363    let zones_include = if let Some(refs) = config_map_refs {
364        if refs.named_conf_zones.is_some() {
365            // User provided custom zones file, include it from custom ConfigMap location
366            "\n// Include zones file from user-provided ConfigMap\ninclude \"/etc/bind/named.conf.zones\";\n".to_string()
367        } else {
368            // No zones ConfigMap provided, don't include zones file
369            String::new()
370        }
371    } else {
372        // No config refs at all, don't include zones file
373        String::new()
374    };
375
376    // Build RNDC key includes and key names for controls block
377    // For now, we support a single key per instance (bindy-operator)
378    // Future enhancement: support multiple keys from spec
379    let rndc_key_includes = "include \"/etc/bind/keys/rndc.key\";";
380    let rndc_key_names = "\"bindy-operator\"";
381
382    NAMED_CONF_TEMPLATE
383        .replace("{{ZONES_INCLUDE}}", &zones_include)
384        .replace("{{RNDC_KEY_INCLUDES}}", rndc_key_includes)
385        .replace("{{RNDC_KEY_NAMES}}", rndc_key_names)
386}
387
388/// Build the named.conf.options configuration from template
389///
390/// Generates the BIND9 options configuration file from the instance's config spec.
391/// Includes settings for recursion, ACLs (allow-query, allow-transfer), and DNSSEC.
392///
393/// Priority for configuration values (highest to lowest):
394/// 1. Instance-level settings (`instance.spec.config`)
395/// 2. Role-specific settings (`role_allow_transfer` from cluster primary/secondary spec)
396/// 3. Global cluster settings (`cluster.spec.common.global`)
397/// 4. Defaults (BIND9 defaults or no setting)
398///
399/// # Arguments
400///
401/// * `instance` - `Bind9Instance` spec containing the BIND9 configuration
402/// * `cluster` - Optional `Bind9Cluster` containing global configuration
403/// * `role_allow_transfer` - Role-specific allow-transfer override from cluster spec (primary/secondary)
404///
405/// # Returns
406///
407/// A string containing the complete named.conf.options configuration
408#[allow(clippy::too_many_lines)]
409fn build_options_conf(
410    instance: &Bind9Instance,
411    cluster: Option<&Bind9Cluster>,
412    role_allow_transfer: Option<&Vec<String>>,
413) -> String {
414    let recursion;
415    let mut allow_query = String::new();
416    let allow_transfer;
417    let mut dnssec_validate = String::new();
418
419    // Get global config from cluster if available
420    let global_config = cluster.and_then(|c| c.spec.common.global.as_ref());
421
422    if let Some(config) = &instance.spec.config {
423        // Recursion setting - instance overrides global
424        let recursion_value = if let Some(rec) = config.recursion {
425            if rec {
426                "yes"
427            } else {
428                "no"
429            }
430        } else if let Some(global) = global_config {
431            if global.recursion.unwrap_or(false) {
432                "yes"
433            } else {
434                "no"
435            }
436        } else {
437            "no"
438        };
439        recursion = format!("recursion {recursion_value};");
440
441        // Allow-query ACL - instance overrides global
442        if let Some(acls) = &config.allow_query {
443            if !acls.is_empty() {
444                let acl_list = acls.join("; ");
445                allow_query = format!("allow-query {{ {acl_list}; }};");
446            }
447        } else if let Some(global) = global_config {
448            if let Some(global_acls) = &global.allow_query {
449                if !global_acls.is_empty() {
450                    let acl_list = global_acls.join("; ");
451                    allow_query = format!("allow-query {{ {acl_list}; }};");
452                }
453            }
454        }
455
456        // Allow-transfer ACL - priority: instance config > role-specific > global > no default
457        if let Some(acls) = &config.allow_transfer {
458            // Instance-level config takes highest priority
459            let acl_list = if acls.is_empty() {
460                "none".to_string()
461            } else {
462                acls.join("; ")
463            };
464            allow_transfer = format!("allow-transfer {{ {acl_list}; }};");
465        } else if let Some(role_acls) = role_allow_transfer {
466            // Role-specific override from cluster config (primary/secondary)
467            let acl_list = if role_acls.is_empty() {
468                "none".to_string()
469            } else {
470                role_acls.join("; ")
471            };
472            allow_transfer = format!("allow-transfer {{ {acl_list}; }};");
473        } else if let Some(global) = global_config {
474            // Global cluster settings
475            if let Some(global_acls) = &global.allow_transfer {
476                let acl_list = if global_acls.is_empty() {
477                    "none".to_string()
478                } else {
479                    global_acls.join("; ")
480                };
481                allow_transfer = format!("allow-transfer {{ {acl_list}; }};");
482            } else {
483                allow_transfer = String::new();
484            }
485        } else {
486            // No default - let BIND9 use its own defaults (none)
487            allow_transfer = String::new();
488        }
489
490        // DNSSEC configuration - instance overrides global
491        // Note: dnssec-enable was removed in BIND 9.15+ (DNSSEC is always enabled)
492        // Only dnssec-validation is configurable now
493        if let Some(dnssec) = &config.dnssec {
494            if dnssec.validation.unwrap_or(false) {
495                dnssec_validate = "dnssec-validation yes;".to_string();
496            } else {
497                dnssec_validate = "dnssec-validation no;".to_string();
498            }
499        } else if let Some(global) = global_config {
500            if let Some(global_dnssec) = &global.dnssec {
501                if global_dnssec.validation.unwrap_or(false) {
502                    dnssec_validate = "dnssec-validation yes;".to_string();
503                } else {
504                    dnssec_validate = "dnssec-validation no;".to_string();
505                }
506            }
507        }
508    } else {
509        // No instance config - use global config if available, otherwise defaults
510        if let Some(global) = global_config {
511            // Recursion from global
512            let recursion_value = if global.recursion.unwrap_or(false) {
513                "yes"
514            } else {
515                "no"
516            };
517            recursion = format!("recursion {recursion_value};");
518
519            // Allow-query from global
520            if let Some(acls) = &global.allow_query {
521                if !acls.is_empty() {
522                    let acl_list = acls.join("; ");
523                    allow_query = format!("allow-query {{ {acl_list}; }};");
524                }
525            }
526
527            // Allow-transfer - priority: role-specific > global > no default
528            if let Some(role_acls) = role_allow_transfer {
529                let acl_list = if role_acls.is_empty() {
530                    "none".to_string()
531                } else {
532                    role_acls.join("; ")
533                };
534                allow_transfer = format!("allow-transfer {{ {acl_list}; }};");
535            } else if let Some(global_acls) = &global.allow_transfer {
536                let acl_list = if global_acls.is_empty() {
537                    "none".to_string()
538                } else {
539                    global_acls.join("; ")
540                };
541                allow_transfer = format!("allow-transfer {{ {acl_list}; }};");
542            } else {
543                allow_transfer = String::new();
544            }
545
546            // DNSSEC from global
547            if let Some(dnssec) = &global.dnssec {
548                if dnssec.validation.unwrap_or(false) {
549                    dnssec_validate = "dnssec-validation yes;".to_string();
550                }
551            }
552        } else {
553            // Defaults when no config is specified
554            recursion = "recursion no;".to_string();
555            // No default for allow-transfer - let BIND9 use its own defaults (none)
556            allow_transfer = String::new();
557        }
558    }
559
560    // Perform template substitutions
561    NAMED_CONF_OPTIONS_TEMPLATE
562        .replace("{{RECURSION}}", &recursion)
563        .replace("{{ALLOW_QUERY}}", &allow_query)
564        .replace("{{ALLOW_TRANSFER}}", &allow_transfer)
565        .replace("{{DNSSEC_VALIDATE}}", &dnssec_validate)
566}
567
568/// Build the main named.conf configuration for a cluster from template
569///
570/// Generates the main BIND9 configuration file with conditional zones include.
571/// The zones include directive is only added if the user provides a `namedConfZones` `ConfigMap`.
572///
573/// # Arguments
574///
575/// * `cluster` - `Bind9Cluster` spec (checked for config refs)
576///
577/// # Returns
578///
579/// A string containing the complete named.conf configuration
580fn build_cluster_named_conf(cluster: &Bind9Cluster) -> String {
581    // Check if user provided a custom zones ConfigMap
582    let zones_include = if let Some(refs) = &cluster.spec.common.config_map_refs {
583        if refs.named_conf_zones.is_some() {
584            // User provided custom zones file, include it from custom ConfigMap location
585            "\n// Include zones file from user-provided ConfigMap\ninclude \"/etc/bind/named.conf.zones\";\n".to_string()
586        } else {
587            // No zones ConfigMap provided, don't include zones file
588            String::new()
589        }
590    } else {
591        // No config refs at all, don't include zones file
592        String::new()
593    };
594
595    // Build RNDC key includes and key names for controls block
596    // For now, we support a single key per instance (bindy-operator)
597    // Future enhancement: support multiple keys from spec
598    let rndc_key_includes = "include \"/etc/bind/keys/rndc.key\";";
599    let rndc_key_names = "\"bindy-operator\"";
600
601    NAMED_CONF_TEMPLATE
602        .replace("{{ZONES_INCLUDE}}", &zones_include)
603        .replace("{{RNDC_KEY_INCLUDES}}", rndc_key_includes)
604        .replace("{{RNDC_KEY_NAMES}}", rndc_key_names)
605}
606
607/// Build the named.conf.options configuration for a cluster from template
608///
609/// Generates the BIND9 options configuration file from the cluster's `spec.global` config.
610/// Includes settings for recursion, ACLs (allow-query, allow-transfer), and DNSSEC.
611///
612/// # Arguments
613///
614/// * `cluster` - `Bind9Cluster` containing global configuration
615///
616/// # Returns
617///
618/// A string containing the complete named.conf.options configuration
619#[allow(clippy::too_many_lines)]
620fn build_cluster_options_conf(cluster: &Bind9Cluster) -> String {
621    let recursion;
622    let mut allow_query = String::new();
623    let mut allow_transfer = String::new();
624    let mut dnssec_validate = String::new();
625
626    // Use cluster global config
627    if let Some(global) = &cluster.spec.common.global {
628        // Recursion setting
629        let recursion_value = if global.recursion.unwrap_or(false) {
630            "yes"
631        } else {
632            "no"
633        };
634        recursion = format!("recursion {recursion_value};");
635
636        // allow-query ACL
637        if let Some(aq) = &global.allow_query {
638            if !aq.is_empty() {
639                allow_query = format!(
640                    "allow-query {{ {}; }};",
641                    aq.iter().map(String::as_str).collect::<Vec<_>>().join("; ")
642                );
643            }
644        }
645
646        // allow-transfer ACL
647        if let Some(at) = &global.allow_transfer {
648            if !at.is_empty() {
649                allow_transfer = format!(
650                    "allow-transfer {{ {}; }};",
651                    at.iter().map(String::as_str).collect::<Vec<_>>().join("; ")
652                );
653            }
654        }
655
656        // DNSSEC validation
657        if let Some(dnssec) = &global.dnssec {
658            if dnssec.validation.unwrap_or(false) {
659                dnssec_validate = "dnssec-validation yes;".to_string();
660            } else {
661                dnssec_validate = "dnssec-validation no;".to_string();
662            }
663        }
664    } else {
665        // No global config, use defaults
666        recursion = "recursion no;".to_string();
667    }
668
669    NAMED_CONF_OPTIONS_TEMPLATE
670        .replace("{{RECURSION}}", &recursion)
671        .replace("{{ALLOW_QUERY}}", &allow_query)
672        .replace("{{ALLOW_TRANSFER}}", &allow_transfer)
673        .replace("{{DNSSEC_VALIDATE}}", &dnssec_validate)
674}
675
676/// Builds a Kubernetes Deployment for running BIND9 pods.
677///
678/// Creates a Deployment with:
679/// - BIND9 container using configured or default image
680/// - `ConfigMap` volume mounts for configuration
681/// - `EmptyDir` volumes for zones and cache
682/// - TCP/UDP port 53 exposed
683/// - Liveness and readiness probes
684///
685/// # Arguments
686///
687/// * `name` - Name for the Deployment
688/// * `namespace` - Kubernetes namespace
689/// * `instance` - `Bind9Instance` spec containing replicas, version, etc.
690/// * `cluster` - Optional `Bind9Cluster` containing shared configuration
691///
692/// # Returns
693///
694/// A Kubernetes Deployment resource ready for creation/update
695#[must_use]
696/// Helper struct to hold resolved configuration for a `Bind9Instance` deployment
697struct DeploymentConfig<'a> {
698    image_config: Option<&'a ImageConfig>,
699    config_map_refs: Option<&'a ConfigMapRefs>,
700    version: &'a str,
701    volumes: Option<&'a Vec<Volume>>,
702    volume_mounts: Option<&'a Vec<VolumeMount>>,
703    bindcar_config: Option<&'a crate::crd::BindcarConfig>,
704    configmap_name: String,
705}
706
707/// Extract and resolve deployment configuration from instance and cluster
708fn resolve_deployment_config<'a>(
709    name: &str,
710    instance: &'a Bind9Instance,
711    cluster: Option<&'a Bind9Cluster>,
712    cluster_provider: Option<&'a crate::crd::ClusterBind9Provider>,
713) -> DeploymentConfig<'a> {
714    // Get image config (instance overrides cluster overrides cluster provider)
715    let image_config = instance
716        .spec
717        .image
718        .as_ref()
719        .or_else(|| cluster.and_then(|c| c.spec.common.image.as_ref()))
720        .or_else(|| cluster_provider.and_then(|cp| cp.spec.common.image.as_ref()));
721
722    // Get ConfigMap references (instance overrides cluster overrides cluster provider)
723    let config_map_refs = instance
724        .spec
725        .config_map_refs
726        .as_ref()
727        .or_else(|| cluster.and_then(|c| c.spec.common.config_map_refs.as_ref()))
728        .or_else(|| cluster_provider.and_then(|cp| cp.spec.common.config_map_refs.as_ref()));
729
730    // Get version (instance overrides cluster overrides cluster provider)
731    let version = instance
732        .spec
733        .version
734        .as_deref()
735        .or_else(|| cluster.and_then(|c| c.spec.common.version.as_deref()))
736        .or_else(|| cluster_provider.and_then(|cp| cp.spec.common.version.as_deref()))
737        .unwrap_or(DEFAULT_BIND9_VERSION);
738
739    // Get volumes (instance overrides cluster overrides cluster provider)
740    let volumes = instance
741        .spec
742        .volumes
743        .as_ref()
744        .or_else(|| cluster.and_then(|c| c.spec.common.volumes.as_ref()))
745        .or_else(|| cluster_provider.and_then(|cp| cp.spec.common.volumes.as_ref()));
746
747    // Get volume mounts (instance overrides cluster overrides cluster provider)
748    let volume_mounts = instance
749        .spec
750        .volume_mounts
751        .as_ref()
752        .or_else(|| cluster.and_then(|c| c.spec.common.volume_mounts.as_ref()))
753        .or_else(|| cluster_provider.and_then(|cp| cp.spec.common.volume_mounts.as_ref()));
754
755    // Get bindcar_config (instance overrides cluster global overrides cluster provider global)
756    let bindcar_config = instance
757        .spec
758        .bindcar_config
759        .as_ref()
760        .or_else(|| {
761            cluster.and_then(|c| {
762                c.spec
763                    .common
764                    .global
765                    .as_ref()
766                    .and_then(|g| g.bindcar_config.as_ref())
767            })
768        })
769        .or_else(|| {
770            cluster_provider.and_then(|cp| {
771                cp.spec
772                    .common
773                    .global
774                    .as_ref()
775                    .and_then(|g| g.bindcar_config.as_ref())
776            })
777        });
778
779    // Determine ConfigMap name: use cluster ConfigMap if instance belongs to a cluster
780    let configmap_name = if instance.spec.cluster_ref.is_empty() {
781        // Use instance-specific ConfigMap
782        format!("{name}-config")
783    } else {
784        // Use cluster-level shared ConfigMap
785        format!("{}-config", instance.spec.cluster_ref)
786    };
787
788    DeploymentConfig {
789        image_config,
790        config_map_refs,
791        version,
792        volumes,
793        volume_mounts,
794        bindcar_config,
795        configmap_name,
796    }
797}
798
799pub fn build_deployment(
800    name: &str,
801    namespace: &str,
802    instance: &Bind9Instance,
803    cluster: Option<&Bind9Cluster>,
804    cluster_provider: Option<&crate::crd::ClusterBind9Provider>,
805) -> Deployment {
806    debug!(
807        name = %name,
808        namespace = %namespace,
809        has_cluster = cluster.is_some(),
810        has_cluster_provider = cluster_provider.is_some(),
811        "Building Deployment for Bind9Instance"
812    );
813
814    // Build labels, checking if instance is managed by a cluster
815    let labels = build_labels_from_instance(name, instance);
816    let replicas = instance.spec.replicas.unwrap_or(1);
817    debug!(replicas, "Deployment replica count");
818
819    let config = resolve_deployment_config(name, instance, cluster, cluster_provider);
820
821    let owner_refs = build_owner_references(instance);
822
823    Deployment {
824        metadata: ObjectMeta {
825            name: Some(name.into()),
826            namespace: Some(namespace.into()),
827            labels: Some(labels.clone()),
828            owner_references: Some(owner_refs),
829            ..Default::default()
830        },
831        spec: Some(DeploymentSpec {
832            replicas: Some(replicas),
833            selector: LabelSelector {
834                match_labels: Some(labels.clone()),
835                ..Default::default()
836            },
837            template: PodTemplateSpec {
838                metadata: Some(ObjectMeta {
839                    labels: Some(labels.clone()),
840                    ..Default::default()
841                }),
842                spec: Some(build_pod_spec(
843                    &config.configmap_name,
844                    &format!("{name}-rndc-key"),
845                    config.version,
846                    config.image_config,
847                    config.config_map_refs,
848                    config.volumes,
849                    config.volume_mounts,
850                    config.bindcar_config,
851                )),
852            },
853            ..Default::default()
854        }),
855        ..Default::default()
856    }
857}
858
859/// Builds pod specification with BIND9 container and API sidecar
860///
861/// # Arguments
862/// * `configmap_name` - Name of the `ConfigMap` with BIND9 configuration
863/// * `rndc_secret_name` - Name of the Secret with RNDC keys
864/// * `version` - BIND9 version tag
865/// * `image_config` - Optional custom image configuration
866/// * `config_map_refs` - Optional custom `ConfigMap` references
867/// * `custom_volumes` - Optional custom volumes to add
868/// * `custom_volume_mounts` - Optional custom volume mounts to add
869/// * `bindcar_config` - Optional API sidecar configuration
870#[allow(clippy::too_many_arguments)]
871fn build_pod_spec(
872    configmap_name: &str,
873    rndc_secret_name: &str,
874    version: &str,
875    image_config: Option<&ImageConfig>,
876    config_map_refs: Option<&ConfigMapRefs>,
877    custom_volumes: Option<&Vec<Volume>>,
878    custom_volume_mounts: Option<&Vec<VolumeMount>>,
879    bindcar_config: Option<&crate::crd::BindcarConfig>,
880) -> PodSpec {
881    // Determine image to use
882    let image = if let Some(img_cfg) = image_config {
883        img_cfg
884            .image
885            .clone()
886            .unwrap_or_else(|| format!("internetsystemsconsortium/bind9:{version}"))
887    } else {
888        format!("internetsystemsconsortium/bind9:{version}")
889    };
890
891    // Determine image pull policy
892    let image_pull_policy = image_config
893        .and_then(|cfg| cfg.image_pull_policy.clone())
894        .unwrap_or_else(|| "IfNotPresent".into());
895
896    // BIND9 container
897    let bind9_container = Container {
898        name: "bind9".into(),
899        image: Some(image),
900        image_pull_policy: Some(image_pull_policy),
901        command: Some(vec!["named".into()]),
902        args: Some(vec![
903            "-c".into(),
904            BIND_NAMED_CONF_PATH.into(),
905            "-g".into(), // Run in foreground (required for containers)
906        ]),
907        ports: Some(vec![
908            ContainerPort {
909                name: Some("dns-tcp".into()),
910                container_port: i32::from(DNS_PORT),
911                protocol: Some("TCP".into()),
912                ..Default::default()
913            },
914            ContainerPort {
915                name: Some("dns-udp".into()),
916                container_port: i32::from(DNS_PORT),
917                protocol: Some("UDP".into()),
918                ..Default::default()
919            },
920            ContainerPort {
921                name: Some("rndc".into()),
922                container_port: i32::from(RNDC_PORT),
923                protocol: Some("TCP".into()),
924                ..Default::default()
925            },
926        ]),
927        env: Some(vec![EnvVar {
928            name: "TZ".into(),
929            value: Some("UTC".into()),
930            ..Default::default()
931        }]),
932        volume_mounts: Some(build_volume_mounts(config_map_refs, custom_volume_mounts)),
933        liveness_probe: Some(Probe {
934            tcp_socket: Some(TCPSocketAction {
935                port: IntOrString::Int(i32::from(DNS_PORT)),
936                ..Default::default()
937            }),
938            initial_delay_seconds: Some(LIVENESS_INITIAL_DELAY_SECS),
939            period_seconds: Some(LIVENESS_PERIOD_SECS),
940            timeout_seconds: Some(LIVENESS_TIMEOUT_SECS),
941            failure_threshold: Some(LIVENESS_FAILURE_THRESHOLD),
942            ..Default::default()
943        }),
944        readiness_probe: Some(Probe {
945            tcp_socket: Some(TCPSocketAction {
946                port: IntOrString::Int(i32::from(DNS_PORT)),
947                ..Default::default()
948            }),
949            initial_delay_seconds: Some(READINESS_INITIAL_DELAY_SECS),
950            period_seconds: Some(READINESS_PERIOD_SECS),
951            timeout_seconds: Some(READINESS_TIMEOUT_SECS),
952            failure_threshold: Some(READINESS_FAILURE_THRESHOLD),
953            ..Default::default()
954        }),
955        ..Default::default()
956    };
957
958    // Build image pull secrets if specified
959    let image_pull_secrets = image_config.and_then(|cfg| {
960        cfg.image_pull_secrets.as_ref().map(|secrets| {
961            secrets
962                .iter()
963                .map(|s| k8s_openapi::api::core::v1::LocalObjectReference { name: s.clone() })
964                .collect()
965        })
966    });
967
968    PodSpec {
969        containers: {
970            let mut containers = vec![bind9_container];
971            containers.push(build_api_sidecar_container(
972                bindcar_config,
973                rndc_secret_name,
974            ));
975            containers
976        },
977        volumes: Some(build_volumes(
978            configmap_name,
979            rndc_secret_name,
980            config_map_refs,
981            custom_volumes,
982        )),
983        image_pull_secrets,
984        service_account_name: Some(BIND9_SERVICE_ACCOUNT.into()),
985        ..Default::default()
986    }
987}
988
989/// Build the Bindcar API sidecar container
990///
991/// # Arguments
992///
993/// * `bindcar_config` - Optional Bindcar container configuration from the instance spec
994/// * `rndc_secret_name` - Name of the Secret containing the RNDC key
995///
996/// # Returns
997///
998/// A `Container` configured to run the Bindcar RNDC API sidecar
999fn build_api_sidecar_container(
1000    bindcar_config: Option<&crate::crd::BindcarConfig>,
1001    rndc_secret_name: &str,
1002) -> Container {
1003    // Use defaults if bindcar_config is not provided
1004    let image = bindcar_config
1005        .and_then(|c| c.image.clone())
1006        .unwrap_or_else(|| crate::constants::DEFAULT_BINDCAR_IMAGE.to_string());
1007
1008    let image_pull_policy = bindcar_config
1009        .and_then(|c| c.image_pull_policy.clone())
1010        .unwrap_or_else(|| "IfNotPresent".to_string());
1011
1012    let port = bindcar_config.and_then(|c| c.port).unwrap_or(8080);
1013
1014    let log_level = bindcar_config
1015        .and_then(|c| c.log_level.clone())
1016        .unwrap_or_else(|| "info".to_string());
1017
1018    let resources = bindcar_config.and_then(|c| c.resources.clone());
1019
1020    // Build required environment variables
1021    let mut env_vars = vec![
1022        EnvVar {
1023            name: "BIND_ZONE_DIR".into(),
1024            value: Some(BIND_CACHE_PATH.into()),
1025            ..Default::default()
1026        },
1027        EnvVar {
1028            name: "API_PORT".into(),
1029            value: Some(port.to_string()),
1030            ..Default::default()
1031        },
1032        EnvVar {
1033            name: "RUST_LOG".into(),
1034            value: Some(log_level),
1035            ..Default::default()
1036        },
1037        EnvVar {
1038            name: "BIND_ALLOWED_SERVICE_ACCOUNTS".into(),
1039            value: Some(BIND9_SERVICE_ACCOUNT.into()),
1040            ..Default::default()
1041        },
1042        EnvVar {
1043            name: "RNDC_SECRET".into(),
1044            value_from: Some(EnvVarSource {
1045                secret_key_ref: Some(SecretKeySelector {
1046                    name: rndc_secret_name.to_string(),
1047                    key: "secret".to_string(),
1048                    optional: Some(false),
1049                }),
1050                ..Default::default()
1051            }),
1052            ..Default::default()
1053        },
1054        EnvVar {
1055            name: "RNDC_ALGORITHM".into(),
1056            value_from: Some(EnvVarSource {
1057                secret_key_ref: Some(SecretKeySelector {
1058                    name: rndc_secret_name.to_string(),
1059                    key: "algorithm".to_string(),
1060                    optional: Some(false),
1061                }),
1062                ..Default::default()
1063            }),
1064            ..Default::default()
1065        },
1066    ];
1067
1068    // Add user-provided environment variables if any
1069    if let Some(config) = bindcar_config {
1070        if let Some(user_env_vars) = &config.env_vars {
1071            env_vars.extend(user_env_vars.clone());
1072        }
1073    }
1074
1075    Container {
1076        name: "api".into(),
1077        image: Some(image),
1078        image_pull_policy: Some(image_pull_policy),
1079        ports: Some(vec![ContainerPort {
1080            name: Some("http".into()),
1081            container_port: port,
1082            protocol: Some("TCP".into()),
1083            ..Default::default()
1084        }]),
1085        env: Some(env_vars),
1086        volume_mounts: Some(vec![
1087            VolumeMount {
1088                name: "cache".into(),
1089                mount_path: BIND_CACHE_PATH.into(),
1090                ..Default::default()
1091            },
1092            VolumeMount {
1093                name: "rndc-key".into(),
1094                mount_path: BIND_KEYS_PATH.into(),
1095                read_only: Some(true),
1096                ..Default::default()
1097            },
1098            VolumeMount {
1099                name: VOLUME_CONFIG.into(),
1100                mount_path: BIND_RNDC_CONF_PATH.into(),
1101                sub_path: Some(RNDC_CONF_FILENAME.into()),
1102                ..Default::default()
1103            },
1104        ]),
1105        resources,
1106        ..Default::default()
1107    }
1108}
1109
1110/// Build volume mounts for the BIND9 container
1111///
1112/// Creates volume mounts for:
1113/// - `zones` - `EmptyDir` for zone files
1114/// - `cache` - `EmptyDir` for BIND9 cache
1115/// - `named.conf` - From `ConfigMap` (custom or generated)
1116/// - `named.conf.options` - From `ConfigMap` (custom or generated)
1117/// - `named.conf.zones` - From custom `ConfigMap` (only if `namedConfZones` is specified)
1118///
1119/// # Arguments
1120///
1121/// * `config_map_refs` - Optional references to custom `ConfigMaps`
1122/// * `custom_volume_mounts` - Optional additional volume mounts from instance/cluster spec
1123///
1124/// # Returns
1125///
1126/// A vector of `VolumeMount` objects for the BIND9 container
1127fn build_volume_mounts(
1128    config_map_refs: Option<&ConfigMapRefs>,
1129    custom_volume_mounts: Option<&Vec<VolumeMount>>,
1130) -> Vec<VolumeMount> {
1131    let mut mounts = vec![
1132        VolumeMount {
1133            name: VOLUME_ZONES.into(),
1134            mount_path: BIND_ZONES_PATH.into(),
1135            ..Default::default()
1136        },
1137        VolumeMount {
1138            name: VOLUME_CACHE.into(),
1139            mount_path: BIND_CACHE_PATH.into(),
1140            ..Default::default()
1141        },
1142        VolumeMount {
1143            name: VOLUME_RNDC_KEY.into(),
1144            mount_path: BIND_KEYS_PATH.into(),
1145            read_only: Some(true),
1146            ..Default::default()
1147        },
1148    ];
1149
1150    // Add named.conf mount
1151    if let Some(refs) = config_map_refs {
1152        if let Some(_configmap_name) = &refs.named_conf {
1153            mounts.push(VolumeMount {
1154                name: VOLUME_NAMED_CONF.into(),
1155                mount_path: BIND_NAMED_CONF_PATH.into(),
1156                sub_path: Some(NAMED_CONF_FILENAME.into()),
1157                ..Default::default()
1158            });
1159        } else {
1160            // Use default generated ConfigMap
1161            mounts.push(VolumeMount {
1162                name: VOLUME_CONFIG.into(),
1163                mount_path: BIND_NAMED_CONF_PATH.into(),
1164                sub_path: Some(NAMED_CONF_FILENAME.into()),
1165                ..Default::default()
1166            });
1167        }
1168
1169        if let Some(_configmap_name) = &refs.named_conf_options {
1170            mounts.push(VolumeMount {
1171                name: VOLUME_NAMED_CONF_OPTIONS.into(),
1172                mount_path: BIND_NAMED_CONF_OPTIONS_PATH.into(),
1173                sub_path: Some(NAMED_CONF_OPTIONS_FILENAME.into()),
1174                ..Default::default()
1175            });
1176        } else {
1177            // Use default generated ConfigMap
1178            mounts.push(VolumeMount {
1179                name: VOLUME_CONFIG.into(),
1180                mount_path: BIND_NAMED_CONF_OPTIONS_PATH.into(),
1181                sub_path: Some(NAMED_CONF_OPTIONS_FILENAME.into()),
1182                ..Default::default()
1183            });
1184        }
1185
1186        // Add zones file mount only if user provided a ConfigMap
1187        if let Some(_configmap_name) = &refs.named_conf_zones {
1188            mounts.push(VolumeMount {
1189                name: VOLUME_NAMED_CONF_ZONES.into(),
1190                mount_path: BIND_NAMED_CONF_ZONES_PATH.into(),
1191                sub_path: Some(NAMED_CONF_ZONES_FILENAME.into()),
1192                ..Default::default()
1193            });
1194        }
1195        // Note: No else block - if user doesn't provide zones ConfigMap, we don't mount it
1196    } else {
1197        // No custom ConfigMaps, use default
1198        mounts.push(VolumeMount {
1199            name: VOLUME_CONFIG.into(),
1200            mount_path: BIND_NAMED_CONF_PATH.into(),
1201            sub_path: Some(NAMED_CONF_FILENAME.into()),
1202            ..Default::default()
1203        });
1204        mounts.push(VolumeMount {
1205            name: VOLUME_CONFIG.into(),
1206            mount_path: BIND_NAMED_CONF_OPTIONS_PATH.into(),
1207            sub_path: Some(NAMED_CONF_OPTIONS_FILENAME.into()),
1208            ..Default::default()
1209        });
1210        // Note: No zones mount - users must explicitly provide namedConfZones ConfigMap
1211    }
1212
1213    // Always add rndc.conf mount from default ConfigMap (contains rndc.conf)
1214    mounts.push(VolumeMount {
1215        name: VOLUME_CONFIG.into(),
1216        mount_path: BIND_RNDC_CONF_PATH.into(),
1217        sub_path: Some(RNDC_CONF_FILENAME.into()),
1218        ..Default::default()
1219    });
1220
1221    // Append custom volume mounts from cluster/instance
1222    if let Some(custom_mounts) = custom_volume_mounts {
1223        mounts.extend(custom_mounts.iter().cloned());
1224    }
1225
1226    mounts
1227}
1228
1229/// Build volumes for the BIND9 pod
1230///
1231/// Creates volumes for:
1232/// - `zones` (`EmptyDir`) - Zone files storage
1233/// - `cache` (`EmptyDir`) - BIND9 cache
1234/// - `ConfigMap` volumes (custom or default generated - can be instance or cluster `ConfigMap`)
1235///
1236/// If custom `ConfigMaps` are specified via `config_map_refs`, individual volumes are created
1237/// for each custom `ConfigMap`. If `namedConfZones` is not specified, no zones `ConfigMap` volume
1238/// is created.
1239///
1240/// # Arguments
1241///
1242/// * `configmap_name` - Name of the `ConfigMap` to mount (instance or cluster `ConfigMap`)
1243/// * `config_map_refs` - Optional references to custom `ConfigMaps`
1244/// * `custom_volumes` - Optional additional volumes from instance/cluster spec
1245///
1246/// # Returns
1247///
1248/// A vector of `Volume` objects for the pod spec
1249fn build_volumes(
1250    configmap_name: &str,
1251    rndc_secret_name: &str,
1252    config_map_refs: Option<&ConfigMapRefs>,
1253    custom_volumes: Option<&Vec<Volume>>,
1254) -> Vec<Volume> {
1255    let mut volumes = vec![
1256        Volume {
1257            name: VOLUME_ZONES.into(),
1258            empty_dir: Some(k8s_openapi::api::core::v1::EmptyDirVolumeSource::default()),
1259            ..Default::default()
1260        },
1261        Volume {
1262            name: VOLUME_CACHE.into(),
1263            empty_dir: Some(k8s_openapi::api::core::v1::EmptyDirVolumeSource::default()),
1264            ..Default::default()
1265        },
1266        Volume {
1267            name: VOLUME_RNDC_KEY.into(),
1268            secret: Some(k8s_openapi::api::core::v1::SecretVolumeSource {
1269                secret_name: Some(rndc_secret_name.to_string()),
1270                ..Default::default()
1271            }),
1272            ..Default::default()
1273        },
1274    ];
1275
1276    // Add ConfigMap volumes
1277    if let Some(refs) = config_map_refs {
1278        if let Some(configmap_name) = &refs.named_conf {
1279            volumes.push(Volume {
1280                name: VOLUME_NAMED_CONF.into(),
1281                config_map: Some(k8s_openapi::api::core::v1::ConfigMapVolumeSource {
1282                    name: configmap_name.clone(),
1283                    ..Default::default()
1284                }),
1285                ..Default::default()
1286            });
1287        }
1288
1289        if let Some(configmap_name) = &refs.named_conf_options {
1290            volumes.push(Volume {
1291                name: VOLUME_NAMED_CONF_OPTIONS.into(),
1292                config_map: Some(k8s_openapi::api::core::v1::ConfigMapVolumeSource {
1293                    name: configmap_name.clone(),
1294                    ..Default::default()
1295                }),
1296                ..Default::default()
1297            });
1298        }
1299
1300        if let Some(configmap_name) = &refs.named_conf_zones {
1301            volumes.push(Volume {
1302                name: VOLUME_NAMED_CONF_ZONES.into(),
1303                config_map: Some(k8s_openapi::api::core::v1::ConfigMapVolumeSource {
1304                    name: configmap_name.clone(),
1305                    ..Default::default()
1306                }),
1307                ..Default::default()
1308            });
1309        }
1310
1311        // If any of the named.conf or named.conf.options use defaults, add the config volume
1312        // This ensures volume mounts have a corresponding volume
1313        if refs.named_conf.is_none() || refs.named_conf_options.is_none() {
1314            volumes.push(Volume {
1315                name: VOLUME_CONFIG.into(),
1316                config_map: Some(k8s_openapi::api::core::v1::ConfigMapVolumeSource {
1317                    name: configmap_name.to_string(),
1318                    ..Default::default()
1319                }),
1320                ..Default::default()
1321            });
1322        }
1323    } else {
1324        // No custom ConfigMaps, use default generated one (cluster or instance ConfigMap)
1325        volumes.push(Volume {
1326            name: VOLUME_CONFIG.into(),
1327            config_map: Some(k8s_openapi::api::core::v1::ConfigMapVolumeSource {
1328                name: configmap_name.to_string(),
1329                ..Default::default()
1330            }),
1331            ..Default::default()
1332        });
1333    }
1334
1335    // Append custom volumes from cluster/instance
1336    if let Some(custom_vols) = custom_volumes {
1337        volumes.extend(custom_vols.iter().cloned());
1338    }
1339
1340    volumes
1341}
1342
1343/// Builds a Kubernetes Service for exposing BIND9 DNS ports.
1344///
1345/// Creates a Service exposing:
1346/// - TCP port 53 (for zone transfers and large queries)
1347/// - UDP port 53 (for standard DNS queries)
1348/// - HTTP port 80 (mapped to bindcar API port)
1349///
1350/// Custom service configuration includes both spec fields and metadata annotations.
1351/// These are merged with defaults, allowing partial customization while maintaining
1352/// safe defaults for unspecified fields.
1353///
1354/// # Arguments
1355///
1356/// * `name` - Name for the Service
1357/// * `namespace` - Kubernetes namespace
1358/// * `instance` - The `Bind9Instance` that owns this Service
1359/// * `custom_config` - Optional custom `ServiceConfig` with spec and annotations to merge with defaults
1360///
1361/// # Returns
1362///
1363/// A Kubernetes Service resource ready for creation/update
1364///
1365/// # Example
1366///
1367/// ```rust,no_run
1368/// use bindy::bind9_resources::build_service;
1369/// use bindy::crd::{Bind9Instance, ServiceConfig};
1370/// use std::collections::BTreeMap;
1371///
1372/// # fn example(instance: Bind9Instance) {
1373/// let mut annotations = BTreeMap::new();
1374/// annotations.insert("metallb.universe.tf/address-pool".to_string(), "my-pool".to_string());
1375///
1376/// let config = ServiceConfig {
1377///     annotations: Some(annotations),
1378///     spec: None,
1379/// };
1380///
1381/// let service = build_service("dns-primary", "dns-system", &instance, Some(&config));
1382/// # }
1383/// ```
1384#[must_use]
1385pub fn build_service(
1386    name: &str,
1387    namespace: &str,
1388    instance: &Bind9Instance,
1389    custom_config: Option<&crate::crd::ServiceConfig>,
1390) -> Service {
1391    // Build labels, checking if instance is managed by a cluster
1392    let labels = build_labels_from_instance(name, instance);
1393    let owner_refs = build_owner_references(instance);
1394
1395    // Get API port from instance spec, default to 8080
1396    let api_port = instance
1397        .spec
1398        .bindcar_config
1399        .as_ref()
1400        .and_then(|c| c.port)
1401        .unwrap_or(8080);
1402
1403    // Build default service spec
1404    let mut default_spec = ServiceSpec {
1405        selector: Some(labels.clone()),
1406        ports: Some(vec![
1407            ServicePort {
1408                name: Some("dns-tcp".into()),
1409                port: i32::from(DNS_PORT),
1410                target_port: Some(IntOrString::Int(i32::from(DNS_PORT))),
1411                protocol: Some("TCP".into()),
1412                ..Default::default()
1413            },
1414            ServicePort {
1415                name: Some("dns-udp".into()),
1416                port: i32::from(DNS_PORT),
1417                target_port: Some(IntOrString::Int(i32::from(DNS_PORT))),
1418                protocol: Some("UDP".into()),
1419                ..Default::default()
1420            },
1421            ServicePort {
1422                name: Some("http".into()),
1423                port: 80,
1424                target_port: Some(IntOrString::Int(api_port)),
1425                protocol: Some("TCP".into()),
1426                ..Default::default()
1427            },
1428        ]),
1429        type_: Some("ClusterIP".into()),
1430        ..Default::default()
1431    };
1432
1433    // Extract custom spec and annotations from service config
1434    let (custom_spec, custom_annotations) = custom_config.map_or((None, None), |config| {
1435        (config.spec.as_ref(), config.annotations.as_ref())
1436    });
1437
1438    // Merge custom spec if provided
1439    if let Some(custom) = custom_spec {
1440        merge_service_spec(&mut default_spec, custom);
1441    }
1442
1443    // Build metadata with optional annotations
1444    let mut metadata = ObjectMeta {
1445        name: Some(name.into()),
1446        namespace: Some(namespace.into()),
1447        labels: Some(labels),
1448        owner_references: Some(owner_refs),
1449        ..Default::default()
1450    };
1451
1452    // Apply custom annotations if provided
1453    if let Some(annotations) = custom_annotations {
1454        metadata.annotations = Some(annotations.clone());
1455    }
1456
1457    Service {
1458        metadata,
1459        spec: Some(default_spec),
1460        ..Default::default()
1461    }
1462}
1463
1464/// Builds a Kubernetes `ServiceAccount` for BIND9 pods.
1465///
1466/// Creates a `ServiceAccount` that will be used by BIND9 pods for authentication
1467/// to the bindcar API sidecar. This enables service-to-service authentication
1468/// using Kubernetes service account tokens.
1469///
1470/// # Arguments
1471///
1472/// * `namespace` - The namespace where the `ServiceAccount` will be created
1473/// * `instance` - The `Bind9Instance` that owns this `ServiceAccount`
1474///
1475/// # Returns
1476///
1477/// A `ServiceAccount` configured for BIND9 pods
1478///
1479/// # Example
1480///
1481/// ```rust,no_run
1482/// use bindy::bind9_resources::build_service_account;
1483/// use bindy::crd::Bind9Instance;
1484///
1485/// # fn example(instance: Bind9Instance) {
1486/// let service_account = build_service_account("dns-system", &instance);
1487/// assert_eq!(service_account.metadata.name, Some("bind9".to_string()));
1488/// # }
1489/// ```
1490#[must_use]
1491pub fn build_service_account(namespace: &str, _instance: &Bind9Instance) -> ServiceAccount {
1492    // IMPORTANT: ServiceAccount is SHARED across all Bind9Instance resources in the namespace.
1493    // Do NOT set ownerReferences, as multiple instances would conflict (only one can have Controller=true).
1494    // Do NOT use instance-specific labels like managed-by, as multiple instances would conflict during Server-Side Apply.
1495    // The ServiceAccount will be cleaned up manually or via namespace deletion.
1496
1497    // Use static labels that don't vary between instances
1498    let mut labels = BTreeMap::new();
1499    labels.insert(K8S_NAME.into(), APP_NAME_BIND9.into());
1500    labels.insert(K8S_COMPONENT.into(), COMPONENT_DNS_SERVER.into());
1501    labels.insert(K8S_PART_OF.into(), PART_OF_BINDY.into());
1502
1503    ServiceAccount {
1504        metadata: ObjectMeta {
1505            name: Some(BIND9_SERVICE_ACCOUNT.into()),
1506            namespace: Some(namespace.into()),
1507            labels: Some(labels),
1508            owner_references: None, // Shared resource - no owner
1509            ..Default::default()
1510        },
1511        ..Default::default()
1512    }
1513}
1514
1515/// Merge custom service spec fields into the default spec
1516///
1517/// Only updates fields that are explicitly specified in the custom spec.
1518/// This allows partial customization while preserving defaults for other fields.
1519///
1520/// The `selector` and `ports` fields are never overridden to ensure the service
1521/// correctly routes traffic to the BIND9 pods.
1522fn merge_service_spec(default: &mut ServiceSpec, custom: &ServiceSpec) {
1523    // Merge type
1524    if let Some(ref type_) = custom.type_ {
1525        default.type_ = Some(type_.clone());
1526    }
1527
1528    // Merge loadBalancerIP
1529    if let Some(ref lb_ip) = custom.load_balancer_ip {
1530        default.load_balancer_ip = Some(lb_ip.clone());
1531    }
1532
1533    // Merge sessionAffinity
1534    if let Some(ref affinity) = custom.session_affinity {
1535        default.session_affinity = Some(affinity.clone());
1536    }
1537
1538    // Merge sessionAffinityConfig
1539    if let Some(ref config) = custom.session_affinity_config {
1540        default.session_affinity_config = Some(config.clone());
1541    }
1542
1543    // Merge clusterIP
1544    if let Some(ref cluster_ip) = custom.cluster_ip {
1545        default.cluster_ip = Some(cluster_ip.clone());
1546    }
1547
1548    // Merge externalTrafficPolicy
1549    if let Some(ref policy) = custom.external_traffic_policy {
1550        default.external_traffic_policy = Some(policy.clone());
1551    }
1552
1553    // Merge loadBalancerSourceRanges
1554    if let Some(ref ranges) = custom.load_balancer_source_ranges {
1555        default.load_balancer_source_ranges = Some(ranges.clone());
1556    }
1557
1558    // Merge externalIPs
1559    if let Some(ref ips) = custom.external_ips {
1560        default.external_ips = Some(ips.clone());
1561    }
1562
1563    // Merge loadBalancerClass
1564    if let Some(ref class) = custom.load_balancer_class {
1565        default.load_balancer_class = Some(class.clone());
1566    }
1567
1568    // Merge healthCheckNodePort
1569    if let Some(port) = custom.health_check_node_port {
1570        default.health_check_node_port = Some(port);
1571    }
1572
1573    // Merge publishNotReadyAddresses
1574    if let Some(publish) = custom.publish_not_ready_addresses {
1575        default.publish_not_ready_addresses = Some(publish);
1576    }
1577
1578    // Merge allocateLoadBalancerNodePorts
1579    if let Some(allocate) = custom.allocate_load_balancer_node_ports {
1580        default.allocate_load_balancer_node_ports = Some(allocate);
1581    }
1582
1583    // Merge internalTrafficPolicy
1584    if let Some(ref policy) = custom.internal_traffic_policy {
1585        default.internal_traffic_policy = Some(policy.clone());
1586    }
1587
1588    // Merge ipFamilies
1589    if let Some(ref families) = custom.ip_families {
1590        default.ip_families = Some(families.clone());
1591    }
1592
1593    // Merge ipFamilyPolicy
1594    if let Some(ref policy) = custom.ip_family_policy {
1595        default.ip_family_policy = Some(policy.clone());
1596    }
1597
1598    // Merge clusterIPs
1599    if let Some(ref ips) = custom.cluster_ips {
1600        default.cluster_ips = Some(ips.clone());
1601    }
1602
1603    // Note: We intentionally don't merge ports or selector as they need to match
1604    // the deployment configuration to ensure traffic is routed correctly.
1605}