Requirements
-
Windows 11 WSL2 with systemd enabled
-
Turn off swap
- shutdown the firewall
Init k8s on linux machine
- Install k8s
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
|
# Install Docker and the Kubernetes packages (kubelet/kubeadm/kubectl 1.25.8)
# from the Aliyun mirror.
sudo apt-get update
sudo apt-get install -y docker.io
sudo apt-get install -y apt-transport-https ca-certificates curl
# /etc/apt/keyrings is root-owned: mkdir needs sudo or it silently fails for
# a normal user.
sudo mkdir -p /etc/apt/keyrings/
# -fsSL: fail on HTTP errors instead of piping an error page into apt-key.
sudo curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add -
#sudo curl -fsSLo /etc/apt/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
#echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
# A plain '>' redirect is performed by the invoking (non-root) shell and is
# denied on /etc/apt; route the write through 'sudo tee' instead.
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
sudo apt-get update
sudo apt-get install -y kubelet=1.25.8-00 kubeadm=1.25.8-00 kubectl=1.25.8-00
# Pin the versions so unattended upgrades cannot skew the cluster.
sudo apt-mark hold kubelet kubeadm kubectl
|
- Install tailscale and tailscale up
1
2
|
# Install Tailscale and bring this node onto the tailnet.
curl -fsSL https://tailscale.com/install.sh | sh
# 'tailscale up' talks to the root-owned tailscaled socket, so it needs sudo.
sudo tailscale up
|
- Click the link printed on the console, then log into Tailscale. This action will add the node to the network.
- Execute the following bash file to append `--node-ip=$(tailscale ip)` to the `ExecStart=/usr/bin/kubelet` line located in /etc/systemd/system/kubelet.service.d/10-kubeadm.conf.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
|
#!/bin/bash
# Append --node-ip=<tailscale0 IPv4> to kubelet's ExecStart line so the node
# registers with its Tailscale address instead of the default interface.
set -euo pipefail

# First 'inet ' entry of the tailscale0 interface = its IPv4 address.
IP=$(ifconfig tailscale0 | grep 'inet ' | awk '{ print $2}')
[ -n "$IP" ] || { echo "error: could not determine tailscale0 IP" >&2; exit 1; }

# systemd drop-in that defines how kubelet is launched.
CONF_FILE="/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"

# Keep a backup before editing in place.
cp "$CONF_FILE" "$CONF_FILE.bak"

# Insert the flag right after the kubelet binary path.
# NOTE: not idempotent — running this twice appends --node-ip twice;
# restore from the .bak first if re-running.
sed -i "s|ExecStart=/usr/bin/kubelet|ExecStart=/usr/bin/kubelet --node-ip=$IP|" "$CONF_FILE"

# Reload unit definitions and restart kubelet to pick up the new flag.
systemctl daemon-reload
systemctl restart kubelet
|
- Init the cluster
1
|
# Init the control plane, advertising the Tailscale address.
# 'tailscale ip' with no flags prints BOTH the IPv4 and IPv6 addresses (one
# per line), which would mangle the flag; -4 yields a single IPv4 address.
sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address=$(tailscale ip -4)
|
- Init the config and flannel
1
2
3
4
5
6
|
# Make kubectl usable for the current (non-root) user.
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
# /opt/cni/bin is root-owned: both the mkdir and the extraction need sudo,
# otherwise they fail with "Permission denied" for a normal user.
sudo mkdir -p /opt/cni/bin
# -f: fail on HTTP errors so a 404 page is not saved as the tarball.
curl -fLO https://github.com/containernetworking/plugins/releases/download/v1.2.0/cni-plugins-linux-amd64-v1.2.0.tgz
sudo tar -C /opt/cni/bin -xzf cni-plugins-linux-amd64-v1.2.0.tgz
|
- Use the kube-flannel.yaml below to create kube-flannel.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
|
# kube-flannel.yml — flannel CNI manifest (flannel v0.22.2), adjusted for
# this guide:
#   - net-conf Network matches the kubeadm --pod-network-cidr (192.168.0.0/16)
#   - flanneld is pinned to the Tailscale interface (--iface=tailscale0)
---
apiVersion: v1
kind: Namespace
metadata:
  labels:
    k8s-app: flannel
    pod-security.kubernetes.io/enforce: privileged
  name: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: flannel
  name: flannel
  namespace: kube-flannel
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: flannel
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - clustercidrs
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: flannel
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "192.168.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
kind: ConfigMap
metadata:
  labels:
    app: flannel
    k8s-app: flannel
    tier: node
  name: kube-flannel-cfg
  namespace: kube-flannel
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: flannel
    k8s-app: flannel
    tier: node
  name: kube-flannel-ds
  namespace: kube-flannel
spec:
  selector:
    matchLabels:
      app: flannel
      k8s-app: flannel
  template:
    metadata:
      labels:
        app: flannel
        k8s-app: flannel
        tier: node
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      containers:
      - args:
        - --ip-masq
        - --kube-subnet-mgr
        # Route flannel traffic over Tailscale instead of the default NIC.
        - --iface=tailscale0
        command:
        - /opt/bin/flanneld
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        image: docker.io/flannel/flannel:v0.22.2
        name: kube-flannel
        resources:
          requests:
            cpu: 100m
            memory: 50Mi
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
            - NET_RAW
          privileged: false
        volumeMounts:
        - mountPath: /run/flannel
          name: run
        - mountPath: /etc/kube-flannel/
          name: flannel-cfg
        - mountPath: /run/xtables.lock
          name: xtables-lock
      hostNetwork: true
      initContainers:
      - args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        command:
        - cp
        image: docker.io/flannel/flannel-cni-plugin:v1.2.0
        name: install-cni-plugin
        volumeMounts:
        - mountPath: /opt/cni/bin
          name: cni-plugin
      - args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        command:
        - cp
        image: docker.io/flannel/flannel:v0.22.2
        name: install-cni
        volumeMounts:
        - mountPath: /etc/cni/net.d
          name: cni
        - mountPath: /etc/kube-flannel/
          name: flannel-cfg
      priorityClassName: system-node-critical
      serviceAccountName: flannel
      tolerations:
      - effect: NoSchedule
        operator: Exists
      volumes:
      - hostPath:
          path: /run/flannel
        name: run
      - hostPath:
          path: /opt/cni/bin
        name: cni-plugin
      - hostPath:
          path: /etc/cni/net.d
        name: cni
      - configMap:
          name: kube-flannel-cfg
        name: flannel-cfg
      - hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
        name: xtables-lock
|
- Create flannel and test ready status
1
2
3
4
|
# Deploy the flannel CNI from the manifest created above.
kubectl create --filename=kube-flannel.yml
# Allow scheduling on the control-plane node (single-node setup); the second
# taint name covers older releases that still label the node "master".
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
kubectl taint nodes --all node-role.kubernetes.io/master-
# Verify the node reaches Ready and check which IP it registered with.
kubectl get nodes --output=wide
|
Test windows node
- Install k8s
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
|
# Install Docker and the Kubernetes packages (kubelet/kubeadm/kubectl 1.25.8)
# from the Aliyun mirror — same setup as the control-plane node.
sudo apt-get update
sudo apt-get install -y docker.io
sudo apt-get install -y apt-transport-https ca-certificates curl
# /etc/apt/keyrings is root-owned: mkdir needs sudo or it silently fails for
# a normal user.
sudo mkdir -p /etc/apt/keyrings/
# -fsSL: fail on HTTP errors instead of piping an error page into apt-key.
sudo curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add -
#sudo curl -fsSLo /etc/apt/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
#echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
# A plain '>' redirect is performed by the invoking (non-root) shell and is
# denied on /etc/apt; route the write through 'sudo tee' instead.
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
sudo apt-get update
sudo apt-get install -y kubelet=1.25.8-00 kubeadm=1.25.8-00 kubectl=1.25.8-00
# Pin the versions so unattended upgrades cannot skew the cluster.
sudo apt-mark hold kubelet kubeadm kubectl
|
- Install tailscale and tailscale up
1
2
|
# Install Tailscale and bring this node onto the tailnet.
curl -fsSL https://tailscale.com/install.sh | sh
# 'tailscale up' talks to the root-owned tailscaled socket, so it needs sudo.
sudo tailscale up
|
- Click the link printed on the console, then log into Tailscale. This action will add the node to the network.
- Execute the following bash file to append `--node-ip=$(tailscale ip)` to the `ExecStart=/usr/bin/kubelet` line within /etc/systemd/system/kubelet.service.d/10-kubeadm.conf.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
|
#!/bin/bash
# Append --node-ip=<tailscale0 IPv4> to kubelet's ExecStart line so the node
# registers with its Tailscale address instead of the default interface.
set -euo pipefail

# First 'inet ' entry of the tailscale0 interface = its IPv4 address.
# BUG FIX: the interface was misspelled 'taiscale0', which made ifconfig
# fail and left IP empty.
IP=$(ifconfig tailscale0 | grep 'inet ' | awk '{ print $2}')
[ -n "$IP" ] || { echo "error: could not determine tailscale0 IP" >&2; exit 1; }

# systemd drop-in that defines how kubelet is launched.
CONF_FILE="/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"

# Keep a backup before editing in place.
cp "$CONF_FILE" "$CONF_FILE.bak"

# Insert the flag right after the kubelet binary path.
# NOTE: not idempotent — running this twice appends --node-ip twice;
# restore from the .bak first if re-running.
sed -i "s|ExecStart=/usr/bin/kubelet|ExecStart=/usr/bin/kubelet --node-ip=$IP|" "$CONF_FILE"

# Reload unit definitions and restart kubelet to pick up the new flag.
systemctl daemon-reload
systemctl restart kubelet
|
- Use the ‘join’ command to connect to the cluster.
1
|
# Join this worker to the cluster's API server on port 6443.
# NOTE(review): $(tailscale ip) executes on THIS worker and therefore expands
# to the worker's OWN Tailscale address (and prints both IPv4 and IPv6 lines).
# The address here must be the CONTROL PLANE's Tailscale IPv4 address — e.g.
# the output of 'tailscale ip -4' on the master — confirm and substitute.
# The token and discovery hash are printed by 'kubeadm init' on the master.
sudo kubeadm join $(tailscale ip):6443 --token wnfwin.s06rxcw825l0rt5x --discovery-token-ca-cert-hash sha256:54e9355b485979fefe28ff5d762ef9f58e5386bb36f560b8f2b8905daebe975b
|
https://stackoverflow.com/questions/49112336/container-runtime-network-not-ready-cni-config-uninitialized
11
Stopping and disabling AppArmor and restarting the containerd service on that node will solve the issue.
1
2
3
|
# Fix for "container runtime network not ready: cni config uninitialized":
# stop and disable AppArmor, then restart containerd on the affected node.
# (The 'root@node:~#' prompts from the quoted transcript were removed so the
# lines are directly executable; sudo added for non-root shells.)
sudo systemctl stop apparmor
sudo systemctl disable apparmor
sudo systemctl restart containerd.service
|
Share
Improve this answer
Follow
answered Sep 21, 2022 at 21:06
https://stackoverflow.com/users/1425867/aniketgole
AniketGole91922 gold badges1212 silver badges2323 bronze badges
-
This, this one works. tnx. – Rm4n
Apr 5 at 13:06
-
thanks this unlocked it, – kubeadm 1.26.3 Calico CNI Containerd Docker.io – Would be great to know the reason why. – setrar
Apr 10 at 22:47
-
This did it for me. kubernetesVersion 1.27.3 using flannel. Followed by a systemctl restart kubelet. – sm0ke21
Jul 10 at 18:11
-
@setrar AppArmor is a Linux kernel security module that allows the system administrator to restrict programs’ capabilities with per-program profiles. Profiles can allow capabilities like network access, raw socket access, and the permission to read, write, or execute files on matching paths, need to configure AppArmor if you want to allow k8s services – AniketGole
Sep 21 at 11:59
https://github.com/NVIDIA/k8s-device-plugin/issues/332
docker pull registry.gitlab.com/nvidia/kubernetes/device-plugin/staging/k8s-device-plugin:8b416016
apt-get install net-tools inetutils-ping openssh-server samba samba-common git vim curl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
|
# Root-shell variant of the install block (no sudo prefixes needed).
apt-get update
apt install -y docker.io
apt-get install -y apt-transport-https ca-certificates curl
mkdir -p /etc/apt/keyrings/
# -fsSL: fail on HTTP errors instead of piping an error page into apt-key.
curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
#sudo curl -fsSLo /etc/apt/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
#echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet=1.25.8-00 kubeadm=1.25.8-00 kubectl=1.25.8-00
# Pin the versions so unattended upgrades cannot skew the cluster.
apt-mark hold kubelet kubeadm kubectl
|
1
2
3
4
5
6
7
|
# Launch a privileged Ubuntu Focal container with the device files and host
# state directories needed to experiment with systemd/tailscale inside it.
docker run -t --privileged \
  -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
  -v /dev/net/tun:/dev/net/tun \
  -v /var/lib:/var/lib \
  robertdebock/ubuntu:focal
|