# terraform-provider-rancher2
Hello, I am trying to provision my EKS cluster using Terraform. My cluster has many node_groups, so I am using a `dynamic` block in Terraform. However, despite using dynamic blocks, every time I add or delete a node_group the order changes and Terraform wants to destroy and recreate node_groups. After some hours, I came to think it may be an issue with the provider (since the same pattern works fine with other resources, like the AWS provider). Here is my code:
```hcl
variable "clusters" {
  type = list(object({
    cluster_name                            = string
    vpc_id                                  = string
    description                             = optional(string)
    enable_network_policy                   = optional(string)
    default_pod_security_policy_template_id = optional(string)
    enable_cluster_monitoring               = optional(bool)
    cluster_monitoring_input                = optional(list(map(string)))
    cloud_credential_id                     = string
    kms_key                                 = optional(string)
    region                                  = string
    eks_version                             = string
    logging_types                           = optional(list(string))
    eks_subnets                             = list(string)
    private_access                          = optional(string)
    public_access                           = optional(string)
    node_groups = list(object({
      name          = string
      instance_type = optional(string)
      desired_size  = optional(number)
      max_size      = optional(number)
      disk_size     = optional(number)
      subnets       = list(string)
      ec2_ssh_key   = string
      labels        = optional(map(string))
      tags          = optional(map(string))
      user_data     = optional(string)
    }))
  }))
}
resource "rancher2_cluster" "this" {
  for_each = {
    for cluster in var.clusters : cluster.cluster_name => cluster
  }
  name                                    = each.value.cluster_name
  description                             = each.value.description
  enable_network_policy                   = each.value.enable_network_policy
  default_pod_security_policy_template_id = each.value.default_pod_security_policy_template_id
  enable_cluster_monitoring               = each.value.enable_cluster_monitoring

  dynamic "cluster_monitoring_input" {
    # coalesce() guards against a null optional(bool), which would error
    # when used as a ternary condition
    for_each = coalesce(each.value.enable_cluster_monitoring, false) ? [{}] : []
    content {
      answers = each.value.cluster_monitoring_input
    }
  }

  eks_config_v2 {
    name                = each.value.cluster_name
    cloud_credential_id = each.value.cloud_credential_id
    imported            = false
    kms_key             = each.value.kms_key == "" ? null : each.value.kms_key
    region              = each.value.region
    kubernetes_version  = each.value.eks_version
    logging_types       = each.value.logging_types
    subnets             = each.value.eks_subnets
    security_groups     = [aws_security_group.this[each.key].id]
    service_role        = aws_iam_role.this[each.key].name
    tags                = merge(local.common_tags, { "Name" = each.value.cluster_name })
    private_access      = each.value.private_access
    public_access       = each.value.public_access

    dynamic "node_groups" {
      for_each = each.value.node_groups

      content {
        name         = node_groups.value.name
        # optional(number) / optional(map(string)) attributes default to null,
        # not "", so use coalesce() / direct references instead of comparing
        # against an empty string
        desired_size = coalesce(node_groups.value.desired_size, 3)
        max_size     = coalesce(node_groups.value.max_size, 3)
        subnets      = node_groups.value.subnets
        labels       = node_groups.value.labels
        launch_template {
          id      = aws_launch_template.this[node_groups.value.name].id
          name    = aws_launch_template.this[node_groups.value.name].name
          version = aws_launch_template.this[node_groups.value.name].latest_version
        }
        resource_tags = merge(local.common_tags, { "Name" = each.value.cluster_name }, node_groups.value.tags)
      }
    }
  }
}
```
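For comparison, the same inventory managed as standalone AWS resources does not reorder, because `for_each` instances are tracked in state by their key rather than by list position. A minimal sketch of that pattern (the `local.all_node_groups` flattening of `var.clusters` and the `aws_iam_role.node` reference are hypothetical):

```hcl
# Sketch only: local.all_node_groups is a hypothetical flattening of
# var.clusters into one list of node group objects, each carrying its
# cluster_name. State addresses are keyed by name, so adding or removing
# one entry never shifts the others.
resource "aws_eks_node_group" "example" {
  for_each = { for ng in local.all_node_groups : ng.name => ng }

  cluster_name    = each.value.cluster_name
  node_group_name = each.value.name
  node_role_arn   = aws_iam_role.node[each.key].arn # hypothetical role
  subnet_ids      = each.value.subnets

  scaling_config {
    desired_size = coalesce(each.value.desired_size, 3)
    max_size     = coalesce(each.value.max_size, 3)
    min_size     = 1
  }
}
```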
Coming back to the `rancher2_cluster` resource: as you can see in the screenshot, when applying, Terraform tries to change the node_group `ops` to `example` and to create a new node_group `ops`, instead of just creating the new node_group `example`.
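A possible partial mitigation (a sketch, not a confirmed fix) is to make the generated block order deterministic by iterating over a map keyed by node group name, since `for_each` over a map iterates in lexical key order. Note that the provider still appears to compare `node_groups` entries by position, so an alphabetically earlier addition can still shift the later entries:

```hcl
    # Sketch of a partial mitigation: key the iteration by node group name
    # so the emitted block order no longer depends on the order of entries
    # in var.clusters. Maps iterate in lexical key order.
    dynamic "node_groups" {
      for_each = { for ng in each.value.node_groups : ng.name => ng }

      content {
        name         = node_groups.value.name
        desired_size = coalesce(node_groups.value.desired_size, 3)
        max_size     = coalesce(node_groups.value.max_size, 3)
        subnets      = node_groups.value.subnets
        labels       = node_groups.value.labels
        # launch_template etc. as in the original block above
      }
    }
```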