rough-night-37588
09/02/2024, 1:05 PM
root@ip-172-27-78-142:~# /var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get ds -n kube-system
NAME                           DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR                                AGE
aws-cloud-controller-manager   0         0         0       0            0           node-role.kubernetes.io/control-plane=true   91m
We are provisioning this via Terraform; the full block that creates the RKE2 cluster, together with the add-on manifest that installs the AWS external cloud controller manager, is below. Please help me understand why the DaemonSet is not running. As far as I can tell, the tolerations are not getting applied.
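Before the config, a couple of checks that should narrow down why DESIRED is 0 (a minimal sketch, assuming the same RKE2 kubeconfig path used above; with a DaemonSet, an untolerated NoExecute taint such as RKE2's node-role.kubernetes.io/etcd=true:NoExecute removes a node from the DESIRED count entirely):

# List each node's taints; a control-plane node that also runs etcd
# typically carries taints the DaemonSet pods must tolerate.
/var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml \
  get nodes -o custom-columns='NAME:.metadata.name,TAINTS:.spec.taints'

# Print the tolerations that actually landed on the DaemonSet spec,
# to confirm whether valuesContent made it through to the chart.
/var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml \
  -n kube-system get ds aws-cloud-controller-manager \
  -o jsonpath='{.spec.template.spec.tolerations}'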
resource "rancher2_cluster_v2" "cluster" {
name = var.cluster_name
kubernetes_version = var.kubernetes_version
depends_on = [
]
local_auth_endpoint {
enabled = true
}
rke_config {
additional_manifest = <<EOF
apiVersion: <http://helm.cattle.io/v1|helm.cattle.io/v1>
kind: HelmChart
metadata:
name: aws-cloud-controller-manager
namespace: kube-system
spec:
chart: aws-cloud-controller-manager
repo: <https://kubernetes.github.io/cloud-provider-aws>
bootstrap: true
targetNamespace: kube-system
valuesContent: |-
hostNetworking: true
resources:
requests:
cpu: 200m
dnsPolicy: Default
clusterRoleName : system:cloud-controller-manager
roleBindingName: cloud-controller-manager:apiserver-authentication-reader
serviceAccountName: cloud-controller-manager
roleName: extension-apiserver-authentication-reader
extraVolumes: []
extraVolumeMounts: []
image:
repository: <http://registry.k8s.io/provider-aws/cloud-controller-manager|registry.k8s.io/provider-aws/cloud-controller-manager>
tag: v1.28.1
tolerations:
- effect: NoSchedule
key: <http://node.cloudprovider.kubernetes.io/uninitialized|node.cloudprovider.kubernetes.io/uninitialized>
value: "true"
- effect: NoSchedule
key: <http://node-role.kubernetes.io/master|node-role.kubernetes.io/master>
- effect: NoSchedule
key: <http://node-role.kubernetes.io/controlplane|node-role.kubernetes.io/controlplane>
value: "true"
- effect: NoExecute
key: <http://node-role.kubernetes.io/etcd|node-role.kubernetes.io/etcd>
value: "true"
nodeSelector:
<http://node-role.kubernetes.io/control-plane|node-role.kubernetes.io/control-plane>: "true"
args:
- --configure-cloud-routes=false
- --v=2
- --cloud-provider=aws
- --use-service-account-credentials=true
clusterRoleRules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- '*'
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- ""
resources:
- services
verbs:
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services/status
verbs:
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- get
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- update
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- get
- list
- watch
- update
- apiGroups:
- <http://coordination.k8s.io|coordination.k8s.io>
resources:
- leases
verbs:
- create
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- serviceaccounts/token
verbs:
- create
EOF
etcd {
snapshot_retention = 240
s3_config {
bucket = module.base.etcd_bucket_id
folder = "cluster-backups"
endpoint = "s3.${data.aws_region.current.name}.<http://amazonaws.com|amazonaws.com>"
region = data.aws_region.current.name
}
}
upgrade_strategy {
worker_concurrency = var.max_unavailable_worker
}
machine_global_config = <<EOF
cloud-provider-name: aws
disable:
- rke2-ingress-nginx
- rke2-metrics-server
cni: "canal"
kubelet-arg:
- cloud-provider=external
- kube-reserved=cpu=250m,memory=256Mi
kube-apiserver-arg:
- cloud-provider=external
- anonymous-auth=false
kube-controller-manager-arg:
- cloud-provider=external
EOF
}
}
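It may also be worth confirming that the valuesContent block survives the Terraform heredoc with its YAML nesting intact; if keys like tolerations end up at the wrong nesting level, the chart can still install but silently ignore them. A sketch of how to compare, assuming the helm CLI is available on the server node and that helm-controller names the release after the HelmChart object:

# Dump the HelmChart object as helm-controller sees it; tolerations
# should appear nested inside spec.valuesContent.
/var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml \
  -n kube-system get helmchart aws-cloud-controller-manager -o yaml

# Show the values the release was actually installed with, for comparison.
helm --kubeconfig /etc/rancher/rke2/rke2.yaml \
  -n kube-system get values aws-cloud-controller-manager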