rancher / terraform-provider-rancher2

Terraform Rancher2 provider
https://www.terraform.io/docs/providers/rancher2/
Mozilla Public License 2.0

[RFE] Example of usage: rancher2_etcd_backup Resource #1329

Open junkiebev opened 3 months ago

junkiebev commented 3 months ago

This may be more of a question than an RFE - the documentation is a bit unclear to me. I want to create an etcd backup of my Terraform-provisioned cluster before it changes, any time it changes, and have Terraform wait for that backup to finish before it applies any changes to the cluster's configuration.

resource "rancher2_cluster" "this" {
  name        = var.cluster_name
  description = "Terraform"
  labels      = var.cluster_labels
  rke_config {
    # REDACTED
    network {
      plugin = "canal"
    }
    ingress {
      provider = "none"
    }
    services {
      etcd {
        creation  = "6h"
        retention = "24h"
        backup_config {
          s3_backup_config {
            access_key  = var.S3_ACCESS_KEY
            bucket_name = var.s3_bucket_name
            endpoint    = var.s3_endpoint
            folder      = "nonprod/${var.cluster_name}"
            region      = var.s3_bucket_region
            secret_key  = var.S3_SECRET_KEY
          }
          enabled        = var.backup_enabled
          retention      = var.backup_retention
          interval_hours = var.backup_interval
        }
      }
      kube_api {
        pod_security_policy = var.pod_security_policy_enabled
        audit_log {
          configuration {
            path = var.audit_log_path
          }
          enabled = var.audit_log_enabled
        }
      }
      kubelet {
        extra_args = {
          "max-pods" = "150"
        }
      }
    }
    monitoring {
      provider = "metrics-server"
    }
  }
  cluster_auth_endpoint {
    enabled = true
  }
  lifecycle {
    ignore_changes = [cluster_monitoring_input]
    prevent_destroy = true
  }
  #depends_on = [rancher2_etcd_backup.pre-change-backup]
}

resource "rancher2_etcd_backup" "pre-change-backup" {
  backup_config {
    s3_backup_config {
      access_key  = var.S3_ACCESS_KEY
      bucket_name = var.s3_bucket_name
      endpoint    = var.s3_endpoint
      folder      = "nonprod/${var.cluster_name}"
      region      = var.s3_bucket_region
      secret_key  = var.S3_SECRET_KEY
    }
  }
  manual     = true
  cluster_id = rancher2_cluster.this.id
  name       = "terraform-triggered"
  lifecycle {
    replace_triggered_by = [rancher2_cluster.this]
  }
}

That's what I have, but according to the plan output, this is what the result would be:

# rancher2_etcd_backup.pre-change-backup will be created
  + resource "rancher2_etcd_backup" "pre-change-backup" {
      + annotations  = (known after apply)
      + cluster_id   = "REDACTED"
      + filename     = (known after apply)
      + id           = (known after apply)
      + labels       = (known after apply)
      + manual       = true
      + name         = "terraform-triggered"
      + namespace_id = (known after apply)

      + backup_config {
          + enabled        = true
          + interval_hours = 12
          + retention      = 6
          + safe_timestamp = false
          + timeout        = (known after apply)

          + s3_backup_config {
              + access_key  = (sensitive value)
              + bucket_name = "REDACTED"
              + endpoint    = "REDACTED"
              + folder      = "nonprod/REDACTED"
              + region      = "REDACTED"
              + secret_key  = (sensitive value)
            }
        }
    }

Am I going about this the wrong way? I'm not quite able to parse the documentation.
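
For reference, one documented Terraform pattern that might be worth experimenting with (only an untested sketch, not a confirmed fix): drive replace_triggered_by from a terraform_data resource whose input is a fingerprint of the variables feeding the cluster spec, instead of referencing rancher2_cluster.this directly. The resource name terraform_data.cluster_spec and the particular variables hashed below are assumptions for illustration.

resource "terraform_data" "cluster_spec" {
  # Hypothetical fingerprint of the inputs that should trigger a fresh
  # pre-change backup; which variables belong here is an assumption.
  input = sha1(jsonencode({
    labels   = var.cluster_labels
    max_pods = "150"
  }))
}

resource "rancher2_etcd_backup" "pre_change_backup_sketch" {
  cluster_id = rancher2_cluster.this.id
  name       = "terraform-triggered"
  manual     = true

  backup_config {
    s3_backup_config {
      access_key  = var.S3_ACCESS_KEY
      secret_key  = var.S3_SECRET_KEY
      bucket_name = var.s3_bucket_name
      endpoint    = var.s3_endpoint
      folder      = "nonprod/${var.cluster_name}"
      region      = var.s3_bucket_region
    }
  }

  lifecycle {
    # Replaced whenever the fingerprint above changes, rather than whenever
    # any attribute of rancher2_cluster.this changes.
    replace_triggered_by = [terraform_data.cluster_spec]
  }
}

This only avoids churning the backup on computed cluster attributes; it still does not force the backup to complete before the cluster update, because the backup keeps depending on the cluster through cluster_id, which is the same cycle the commented-out depends_on runs into above.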