terraform-ibm-modules / terraform-ibm-cluster

Terraform modules to create and work with IBM Kubernetes clusters and Red Hat OpenShift clusters on IBM Cloud (Classic, VPC)
https://cloud.ibm.com/docs/containers
Apache License 2.0

Can't see taints after running apply #81

Open · seancallanan opened this issue 2 years ago

seancallanan commented 2 years ago

I have set taints with the configure-classic-worker-pool module:

module "dataplane_mzr_cluster_base_node_workers" {
  source             = "terraform-ibm-modules/cluster/ibm//modules/configure-classic-worker-pool"
  version            = "1.5.0"
  cluster_name       = module.dataplane_mzr_cluster.container_cluster_id
  worker_pool_name   = local.config.dataplane-base-node-pool.pool_name
  worker_nodes       = local.config.dataplane-base-node-pool.pool_size
  worker_zones       = zipmap(local.datacentersArray, module.vlans)
  flavor             = local.config.dataplane-base-node-pool.machine_type
  resource_group_id  = module.cs_resource_group.resource_group_id
  wait_till_albs     = local.config.dataplane-base-node-pool.wait_till_albs
  hardware           = local.config.dataplane-base-node-pool.hardware
  encrypt_local_disk = true
  labels             = local.config.dataplane-base-node-pool.labels
  taints             = local.config.dataplane-base-node-pool.taints
  create_timeout     = "3h"
  update_timeout     = "3h"
  delete_timeout     = "60m"
}
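
For reference, the module's taints input presumably expects a list of key/value/effect objects along these lines (a sketch inferred from the config below, not the module's verbatim variable definition):

variable "taints" {
  description = "Taints to apply to the worker pool (key/value/effect triples)"
  type = list(object({
    key    = string
    value  = string
    effect = string
  }))
  default = []
}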

Config looks like this:

  "dataplane-base-node-pool": {
    "pool_name": "base-node",
    "tags": ["cs"],
    "hardware": "dedicated",
    "machine_type": "mb3c.16x64",
    "pool_size": 2,
    "wait_till_albs": "false",
    "labels": {
      "dedicated": "internal",
      "worker": "base-node",
      "kata-install": true
    },
    "taints": [{
      "key": "gpfs",
      "value": "true",
      "effect": "NoSchedule"
    }, {
      "key": "gpfs",
      "value": "true",
      "effect": "NoExecute"
    }]
  },
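
For context, a JSON file like this would presumably be wired into local.config with something along these lines (the file name and path are assumptions, not from the issue):

locals {
  # Hypothetical: decode the JSON config shown above into local.config.
  # "config.json" is an assumed file name.
  config = jsondecode(file("${path.module}/config.json"))
}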

The state looks like this:


# module.dataplane_mzr_cluster_base_node_workers.ibm_container_worker_pool.pool:
resource "ibm_container_worker_pool" "pool" {
    cluster                 = "c6ia2b8w044cqdqtr3mg"
    disk_encryption         = true
    hardware                = "dedicated"
    id                      = "c6ia2b8w044cqdqtr3mg/c6ia2b8w044cqdqtr3mg-80081e8"
    labels                  = {
        "dedicated"    = "internal"
        "kata-install" = "true"
        "worker"       = "base-node"
    }
    machine_type            = "mb3c.16x64"
    resource_controller_url = "https://cloud.ibm.com/kubernetes/clusters"
    resource_group_id       = "7cd3a974a52246b39c575de7346b770e"
    size_per_zone           = 2
    state                   = "active"
    worker_pool_id          = "c6ia2b8w044cqdqtr3mg-80081e8"
    worker_pool_name        = "base-node"
    zones                   = []

    taints {
        effect = "NoExecute"
        key    = "gpfs"
        value  = "true"
    }
    taints {
        effect = "NoSchedule"
        key    = "gpfs"
        value  = "true"
    }

    timeouts {
        update = "3h"
    }
}

It looks like the taints get into the state, but if I run kubectl describe node I don't see the taints on the nodes.
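
For anyone debugging the same gap, one quick way to compare the Terraform state against the live cluster is to print each node's taints directly (a debugging sketch; output depends on your cluster):

# Print each node name followed by its taints, if any
kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.taints}{"\n"}{end}'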