# general
Nov 18 15:36:12 platform-k8s-s2 sh[336379]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service
Nov 18 15:36:12 platform-k8s-s2 sh[336380]: Failed to get unit file state for nm-cloud-setup.service: No such file or directory
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Starting k3s v1.25.3+k3s1 (f2585c16)"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=warning msg="Cluster CA certificate is not trusted by the host CA bundle, but the token does not include a CA hash. Use the full token from the server's node-token file to enable Cluster CA validation."
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Managed etcd cluster not yet initialized"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=warning msg="Cluster CA certificate is not trusted by the host CA bundle, but the token does not include a CA hash. Use the full token from the server's node-token file to enable Cluster CA validation."
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Reconciling bootstrap data between datastore and disk"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Tunnel server egress proxy mode: agent"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Tunnel server egress proxy waiting for runtime core to become available"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Running kube-apiserver --advertise-address=10.0.0.2 --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --auth>
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind>
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/se>
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher>
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="To join server node to cluster: k3s server -s https://15.204.183.2:6443 -t ${SERVER_NODE_TOKEN}"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="To join agent node to cluster: k3s agent -s https://15.204.183.2:6443 -t ${AGENT_NODE_TOKEN}"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Wrote kubeconfig /home/poktscan/.kube/config"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Run: k3s kubectl"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="certificate CN=platform-k8s-s2 signed by CN=k3s-server-ca@1668785558: notBefore=2022-11-18 15:32:38 +0000 UTC notAfter=2023-11-18 15:36:12 +0000 UTC"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="certificate CN=system:node:platform-k8s-s2,O=system:nodes signed by CN=k3s-client-ca@1668785558: notBefore=2022-11-18 15:32:38 +0000 UTC notAfter=2023-11-18 15:36:12 +0000 UTC"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Module overlay was already loaded"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Module nf_conntrack was already loaded"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Module br_netfilter was already loaded"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Module iptable_nat was already loaded"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Logging containerd to /var/lib/rancher/k3s/agent/containerd/containerd.log"
Nov 18 15:36:12 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:12Z" level=info msg="Running containerd -c /var/lib/rancher/k3s/agent/etc/containerd/config.toml -a /run/k3s/containerd/containerd.sock --state /run/k3s/containerd --root /var/lib/rancher/k3s/agent/containerd"
Nov 18 15:36:13 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:13Z" level=info msg="Containerd is now running"
Nov 18 15:36:13 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:13Z" level=info msg="Connecting to proxy" url="wss://127.0.0.1:6443/v1-k3s/connect"
Nov 18 15:36:13 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:13Z" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webh>
Nov 18 15:36:13 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:13Z" level=info msg="Handling backend connection request [platform-k8s-s2]"
Nov 18 15:36:13 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:13Z" level=info msg="Adding member platform-k8s-s2-c527e62e=https://10.0.0.2:2380 to etcd cluster [=https://15.204.183.2:2380 platform-k8s-s1-c6f632ab=https://10.0.0.1:2380]"
Nov 18 15:36:13 platform-k8s-s2 k3s[336383]: {"level":"warn","ts":"2022-11-18T15:36:13.531Z","logger":"etcd-client","caller":"v3@v3.5.3-k3s1/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc00013f6c0/10.0.0.1:2379","attempt":0,"error">
Nov 18 15:36:13 platform-k8s-s2 k3s[336383]: time="2022-11-18T15:36:13Z" level=fatal msg="ETCD join failed: etcdserver: too many learner members in cluster"
Nov 18 15:36:13 platform-k8s-s2 systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE
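The fatal message above ("too many learner members in cluster") typically means an earlier join attempt left a stale learner in the etcd member list. A minimal sketch for inspecting membership from Server1, assuming etcdctl is installed and the default k3s embedded-etcd certificate paths (adjust if your layout differs):

# Run on Server1; the certificate paths are the k3s defaults and may differ on your install.
export ETCDCTL_API=3
etcdctl \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt \
  --cert=/var/lib/rancher/k3s/server/tls/etcd/server-client.crt \
  --key=/var/lib/rancher/k3s/server/tls/etcd/server-client.key \
  member list -w table
# A leftover learner can then be removed by its ID before retrying the join:
# etcdctl ... member remove <member-id>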
CMD on Server1:
./k3s-install.sh \
  server \
  --token="Iun8bRN3RAtCI8vidHMSA76f" \
  --write-kubeconfig="/home/user/.kube/config" \
  --write-kubeconfig-mode="644" \
  --node-ip="10.0.0.1" \
  --node-external-ip="<node public ip>" \
  --advertise-address="10.0.0.1" \
  --kube-proxy-arg="proxy-mode=ipvs" \
  --disable="traefik,local-storage" \
  --cluster-init \
  --tls-san="<fixed-registration-ip>" \
  --tls-san="10.0.0.1" \
  --tls-san="10.0.0.2"
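Before running the join on Server2, it can help to confirm that Server1 has finished initializing the embedded etcd cluster and reports Ready; a quick check, assuming the kubeconfig path from the flags above:

# Run on Server1 after the install script returns.
export KUBECONFIG=/home/user/.kube/config
kubectl get nodes -o wide
# Optionally confirm etcd came up cleanly:
sudo journalctl -u k3s --no-pager | grep -i etcd | tail -n 20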
CMD on Server2:
./k3s-install.sh \
  server \
  --token="Iun8bRN3RAtCI8vidHMSA76f" \
  --write-kubeconfig="/home/user/.kube/config" \
  --write-kubeconfig-mode="644" \
  --server="https://<fixed-registration-ip>:6443" \
  --node-ip="10.0.0.2" \
  --node-external-ip="<node public ip>" \
  --advertise-address="10.0.0.2" \
  --kube-proxy-arg="proxy-mode=ipvs" \
  --disable="traefik,local-storage" \
  --tls-san="<fixed-registration-ip>" \
  --tls-san="10.0.0.1" \
  --tls-san="10.0.0.2"
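If the join keeps failing with the learner error, one possible recovery path (a sketch based on the default layout, not a verified fix for this cluster) is to remove the stale learner on Server1 with the etcdctl example above, wipe Server2's local etcd state, and let it rejoin:

# Run on Server2; /var/lib/rancher/k3s/server/db holds the embedded etcd data by default.
sudo systemctl stop k3s
sudo rm -rf /var/lib/rancher/k3s/server/db
sudo systemctl start k3s      # retries the join against <fixed-registration-ip>
sudo journalctl -u k3s -f     # watch for the new member to finish promoting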