bright-fireman-42144
07/29/2022, 7:22 PMbright-fireman-42144
07/29/2022, 7:33 PMhelm get values rancher -n cattle-system
hostname: rancher.company.com
rancherImageTag: v2.3.5
What I get:
USER-SUPPLIED VALUES:
bootstrapPassword: admin
hostname: rancher.0.0.0.0.sslip.io
replicas: 1
IP is an actual IP (removed for obscurity)acceptable-ram-2309
07/29/2022, 11:15 PMDOMException: Failed to construct 'Worker': Script at 'https://releases.rancher.com/dashboard/latest/785bf7a.worker.js' cannot be accessed from origin 'https://rancher.company.com'.
Firefox, which doesn’t appear to care about cross-origin issues on web workers, keeps on keepin’ on. Coworkers said this has happened before. Anybody else seeing it, or know who to ping when this happens?calm-egg-14566
07/30/2022, 3:25 AMrke up
it failed with different error messages. Here is my configuration of cluster.yml
# maintainer: nick.reva@snap.com
nodes: # !DO_NOT_UPDATE
- address: 192.168.50.101
port: "22"
internal_address: 192.168.50.101
role:
- controlplane
- etcd
- worker
hostname_override: node1
user: root
docker_socket: /var/run/docker.sock
ssh_key: ""
ssh_key_path: ~/.ssh/id_rsa
ssh_cert: ""
ssh_cert_path: ""
labels: {}
taints: []
#- address: 192.168.50.102
# port: "22"
# internal_address: 192.168.50.102
# role:
# - worker
# hostname_override: node2
# user: root
# docker_socket: /var/run/docker.sock
# ssh_key: ""
# ssh_key_path: ~/.ssh/id_rsa
# ssh_cert: ""
# ssh_cert_path: ""
# labels: {}
# taints: []
services:
etcd:
image: ""
extra_args: {}
extra_binds: []
extra_env: []
win_extra_args: {}
win_extra_binds: []
win_extra_env: []
external_urls: []
ca_cert: ""
cert: ""
key: ""
path: ""
uid: 52034
gid: 52034
snapshot: null
retention: ""
creation: ""
backup_config: null
kube-api:
image: ""
extra_args: {}
extra_binds: []
extra_env: []
win_extra_args: {}
win_extra_binds: []
win_extra_env: []
service_cluster_ip_range: 10.43.0.0/16
service_node_port_range: ""
pod_security_policy: true
always_pull_images: false
secrets_encryption_config:
enabled: true
audit_log:
enabled: true
admission_configuration:
event_rate_limit:
enabled: true
kube-controller:
image: ""
extra_args: #TODO
feature-gates: RotateKubeletServerCertificate=true
extra_binds: []
extra_env: []
win_extra_args: {}
win_extra_binds: []
win_extra_env: []
cluster_cidr: 10.42.0.0/16
service_cluster_ip_range: 10.43.0.0/16
scheduler:
image: ""
extra_args: {}
extra_binds: []
extra_env: []
win_extra_args: {}
win_extra_binds: []
win_extra_env: []
kubelet:
image: ""
extra_args: #TODO
feature-gates: RotateKubeletServerCertificate=true
protect-kernel-defaults: "true"
tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
extra_binds: []
extra_env: []
win_extra_args: {}
win_extra_binds: []
win_extra_env: []
cluster_domain: cluster.local
infra_container_image: ""
cluster_dns_server: 10.43.0.10
fail_swap_on: false
generate_serving_certificate: true
kubeproxy:
image: ""
extra_args: {}
extra_binds: []
extra_env: []
win_extra_args: {}
win_extra_binds: []
win_extra_env: []
network:
plugin: calico # !DO_NOT_UPDATE
options: {}
mtu: 0
node_selector: {}
update_strategy: null
tolerations: [] # !DO_NOT_UPDATE
authentication: # !DO_NOT_UPDATE
strategy: x509 # !DO_NOT_UPDATE
sans: []
webhook: null
addons: |
---
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: default-psp-role
namespace: ingress-nginx
rules:
- apiGroups:
- extensions
resourceNames:
- default-psp
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: default-psp-rolebinding
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: default-psp-role
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:serviceaccounts
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:authenticated
---
apiVersion: v1
kind: Namespace
metadata:
name: cattle-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: default-psp-role
namespace: cattle-system
rules:
- apiGroups:
- extensions
resourceNames:
- default-psp
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: default-psp-rolebinding
namespace: cattle-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: default-psp-role
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:serviceaccounts
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:authenticated
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: restricted
spec:
requiredDropCapabilities:
- NET_RAW
privileged: false
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
fsGroup:
rule: RunAsAny
runAsUser:
rule: MustRunAsNonRoot
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- emptyDir
- secret
- persistentVolumeClaim
- downwardAPI
- configMap
- projected
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: psp:restricted
rules:
- apiGroups:
- extensions
resourceNames:
- restricted
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: psp:restricted
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:restricted
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:serviceaccounts
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:authenticated
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: tiller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: tiller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: tiller
namespace: kube-system
addons_include: []
system_images: # !DO_NOT_UPDATE
etcd: rancher/coreos-etcd:v3.4.14-rancher1
alpine: rancher/rke-tools:v0.1.72
nginx_proxy: rancher/rke-tools:v0.1.72
cert_downloader: rancher/rke-tools:v0.1.72
kubernetes_services_sidecar: rancher/rke-tools:v0.1.72
kubedns: rancher/k8s-dns-kube-dns:1.15.10
dnsmasq: rancher/k8s-dns-dnsmasq-nanny:1.15.10
kubedns_sidecar: rancher/k8s-dns-sidecar:1.15.10
kubedns_autoscaler: rancher/cluster-proportional-autoscaler:1.8.1
coredns: rancher/coredns-coredns:1.8.0
coredns_autoscaler: rancher/cluster-proportional-autoscaler:1.8.1
nodelocal: rancher/k8s-dns-node-cache:1.15.13
kubernetes: rancher/hyperkube:v1.20.4-rancher1
flannel: rancher/coreos-flannel:v0.13.0-rancher1
flannel_cni: rancher/flannel-cni:v0.3.0-rancher6
calico_node: rancher/calico-node:v3.17.2
calico_cni: rancher/calico-cni:v3.17.2
calico_controllers: rancher/calico-kube-controllers:v3.17.2
calico_ctl: rancher/calico-ctl:v3.17.2
calico_flexvol: rancher/calico-pod2daemon-flexvol:v3.17.2
canal_node: rancher/calico-node:v3.17.2
canal_cni: rancher/calico-cni:v3.17.2
canal_controllers: rancher/calico-kube-controllers:v3.17.2
canal_flannel: rancher/coreos-flannel:v0.13.0-rancher1
canal_flexvol: rancher/calico-pod2daemon-flexvol:v3.17.2
weave_node: weaveworks/weave-kube:2.8.1
weave_cni: weaveworks/weave-npc:2.8.1
pod_infra_container: rancher/pause:3.2
ingress: rancher/nginx-ingress-controller:nginx-0.43.0-rancher1
ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
metrics_server: rancher/metrics-server:v0.4.1
windows_pod_infra_container: rancher/kubelet-pause:v0.1.6
aci_cni_deploy_container: noiro/cnideploy:5.1.1.0.1ae238a
aci_host_container: noiro/aci-containers-host:5.1.1.0.1ae238a
aci_opflex_container: noiro/opflex:5.1.1.0.1ae238a
aci_mcast_container: noiro/opflex:5.1.1.0.1ae238a
aci_ovs_container: noiro/openvswitch:5.1.1.0.1ae238a
aci_controller_container: noiro/aci-containers-controller:5.1.1.0.1ae238a
aci_gbp_server_container: noiro/gbp-server:5.1.1.0.1ae238a
aci_opflex_server_container: noiro/opflex-server:5.1.1.0.1ae238a
ssh_key_path: ~/.ssh/id_rsa
ssh_cert_path: ""
ssh_agent_auth: false
authorization:
mode: rbac # !DO_NOT_UPDATE
options: {}
ignore_docker_version: null
kubernetes_version: "" # !DO_NOT_UPDATE
private_registries: []
ingress:
provider: ""
options: {}
node_selector: {}
extra_args: {}
dns_policy: ""
extra_envs: []
extra_volumes: []
extra_volume_mounts: []
update_strategy: null
http_port: 0
https_port: 0
network_mode: ""
tolerations: [] # !DO_NOT_UPDATE
default_backend: null
default_http_backend_priority_class_name: ""
nginx_ingress_controller_priority_class_name: ""
cluster_name: "" # !DO_NOT_UPDATE
cloud_provider:
name: ""
prefix_path: ""
win_prefix_path: ""
addon_job_timeout: 30
bastion_host:
address: ""
port: ""
user: ""
ssh_key: ""
ssh_key_path: ""
ssh_cert: ""
ssh_cert_path: ""
monitoring:
provider: ""
options: {}
node_selector: {}
update_strategy: null
replicas: null
tolerations: [] # !DO_NOT_UPDATE
metrics_server_priority_class_name: ""
restore:
restore: false
snapshot_name: ""
rotate_encryption_key: false
dns: null
Here are the errors I’ve found
first ran
FATA[0096] Failed to get job complete status for job rke-network-plugin-deploy-job in namespace kube-system
second ran
ERRO[0091] Host node1 failed to report Ready status with error: host node1 not ready
INFO[0091] [controlplane] Processing controlplane hosts for upgrade 1 at a time
INFO[0091] Processing controlplane host node1
INFO[0091] [controlplane] Now checking status of node node1, try #1
INFO[0096] [controlplane] Now checking status of node node1, try #2
INFO[0101] [controlplane] Now checking status of node node1, try #3
INFO[0106] [controlplane] Now checking status of node node1, try #4
INFO[0111] [controlplane] Now checking status of node node1, try #5
ERRO[0116] Failed to upgrade hosts: node1 with error [host node1 not ready]
FATA[0116] [controlPlane] Failed to upgrade Control Plane: [[host node1 not ready]]
Please advise and Thank you in advance. 🙏rough-barista-65361
07/30/2022, 7:38 AMdamp-australia-67861
07/30/2022, 5:59 PMwitty-jelly-95845
07/31/2022, 9:27 AMbright-fireman-42144
07/31/2022, 2:16 PMbright-fireman-42144
07/31/2022, 3:15 PMbright-fireman-42144
07/31/2022, 7:00 PMbright-fireman-42144
07/31/2022, 8:23 PMbright-fireman-42144
07/31/2022, 10:02 PMsteep-painter-35886
08/01/2022, 10:29 AMnumerous-midnight-66907
08/01/2022, 11:34 AM---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: traefik-crd
namespace: kube-system
spec:
chart: https://%{KUBERNETES_API}%/static/charts/traefik-crd-10.19.300.tgz
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: traefik
namespace: kube-system
spec:
chart: https://%{KUBERNETES_API}%/static/charts/traefik-10.19.300.tgz
set:
global.systemDefaultRegistry: ""
"ssl.insecureSkipVerify": "true" # Our changes
"serversTransport.insecureSkipVerify": "true" # Our changes
valuesContent: |-
ssl.insecureSkipVerify: "true" # Our changes
globalArguments:
rbac:
enabled: true
ports:
websecure:
tls:
enabled: true
podAnnotations:
prometheus.io/port: "8082"
prometheus.io/scrape: "true"
providers:
kubernetesIngress:
publishedService:
enabled: true
http:
tls:
insecureSkipVerify: false
priorityClassName: "system-cluster-critical"
image:
name: "rancher/mirrored-library-traefik"
tag: "2.6.2"
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- key: "<http://node-role.kubernetes.io/control-plane|node-role.kubernetes.io/control-plane>"
operator: "Exists"
effect: "NoSchedule"
- key: "<http://node-role.kubernetes.io/master|node-role.kubernetes.io/master>"
operator: "Exists"
effect: "NoSchedule"
service:
ipFamilyPolicy: "PreferDualStack"
numerous-midnight-66907
08/01/2022, 11:35 AM--no-deploy traefik
numerous-midnight-66907
08/01/2022, 11:35 AMnumerous-midnight-66907
08/01/2022, 11:36 AMdazzling-smartphone-16726
08/01/2022, 3:52 PMbored-nest-98612
08/01/2022, 6:14 PMhundreds-evening-84071
08/01/2022, 7:43 PMmagnificent-king-20947
08/01/2022, 9:46 PM1 $ kubectl -n cattle-system logs cattle-cluster-agent-5d6bc5fdbb-f6mwt
[INFO snipped]
2 time="2022-07-28T22:35:47Z" level=info msg="Listening on /tmp/log.sock"
3 time="2022-07-28T22:35:47Z" level=info msg="Rancher agent version v2.6.6 is starting"
4 time="2022-07-28T22:35:47Z" level=info msg="Connecting to <wss://rancher.xxx.com/v3/connect/register> with token starting with rhssppswh4dhlkbd4qcwxjw67b9"
5 time="2022-07-28T22:35:47Z" level=info msg="Connecting to proxy" url="<wss://rancher.xxx.com/v3/connect/register>"
6 time="2022-07-28T22:41:18Z" level=error msg="Remotedialer proxy error" error="websocket: close 1006 (abnormal closure): unexpected EOF"
7 time="2022-07-28T22:41:28Z" level=info msg="Connecting to <wss://rancher.xxx.com/v3/connect> with token starting with rhssppswh4dhlkbd4qcwxjw67b9"
8 time="2022-07-28T22:41:28Z" level=info msg="Connecting to proxy" url="<wss://rancher.xxx.com/v3/connect>"
9 time="2022-07-28T22:41:42Z" level=error msg="Remotedialer proxy error" error="websocket: close 1006 (abnormal closure): unexpected EOF"
10 time="2022-07-28T22:41:52Z" level=info msg="Connecting to <wss://rancher.xxx.com/v3/connect> with token starting with rhssppswh4dhlkbd4qcwxjw67b9"
11 time="2022-07-28T22:41:52Z" level=info msg="Connecting to proxy" url="<wss://rancher.xxx.com/v3/connect>"
I've verified network path is clear and the reg token is correct. Any ideas what this 1006
error is?narrow-xylophone-23021
08/01/2022, 11:15 PMnarrow-xylophone-23021
08/01/2022, 11:16 PMshy-jackal-31085
08/01/2022, 11:52 PMnarrow-action-40502
08/02/2022, 9:10 AMfancy-hospital-64829
08/01/2022, 6:50 PM=> ERROR [ 7/18] RUN echo "fuck" 0.4s
------
> [ 7/18] RUN echo "fuck":
#0 0.407 -c: 0: cannot open echo "fuck": No such file
------
error: failed to solve: executor failed running [/bin/sh -c echo "fuck"]: exit code: 2
(nearly) unchanged docker file, updated rancher desktop to 1.5 today using apple silicon mac. Any ideas, how and why? Switched back to docker desktop for now ...future-account-50371
07/27/2022, 8:10 AMmany-evening-49066
08/02/2022, 10:07 AMfreezing-secretary-51819
08/02/2022, 11:17 AMbusy-rose-13046
08/02/2022, 12:03 PM