Deploying Kubernetes v1.28.4 with kubeadm (single master node)

Environment:

# OS: CentOS Stream release 8 / Rocky Linux release 8.8
# containerd version: 1.7.11
# Kubernetes version: v1.28.4
# K8s master node IP: 192.168.2.175
# K8s worker node IP: 192.168.3.62
# Network plugin: flannel
# kube-proxy forwarding mode: ipvs
# Kubernetes package source: Alibaba Cloud mirror
# service-cidr: 10.96.0.0/16
# pod-network-cidr: 10.244.0.0/16

Preparation:

Perform the following steps on all nodes.

1. Tune kernel parameters

# Edit /etc/sysctl.conf and add the following lines
vim /etc/sysctl.conf
vm.swappiness = 0
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
# Apply the settings
sysctl -p
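Note that the net.bridge.* keys only exist once the br_netfilter module is loaded (step 5 below); if sysctl -p complains before the reboot, load the module first. A minimal sketch:

# Load br_netfilter now so the net.bridge.* keys resolve before the reboot
modprobe br_netfilter
sysctl -p
# Spot-check the values
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables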

2. Disable swap

swapoff -a && sysctl -w vm.swappiness=0
# Edit /etc/fstab so swap is no longer mounted at boot
vi /etc/fstab
# Comment out (or remove) the swap line, e.g.:
# /dev/mapper/centos-swap swap swap defaults 0 0
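Alternatively, a sed one-liner can comment out every swap entry; this is a sketch, so check /etc/fstab afterwards:

# Comment out all non-comment lines that mention swap
sed -ri 's/^[^#].*\bswap\b.*/#&/' /etc/fstab
grep swap /etc/fstab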

3. Set SELinux to disabled mode

setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config

4. Enable cgroup v2 (skip this if you upgrade kernels frequently; otherwise the system may fail to boot after a kernel upgrade and only start from the old kernel)

grubby \
--update-kernel=ALL \
--args="systemd.unified_cgroup_hierarchy=1"
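After the reboot in step 6, you can confirm that the unified cgroup v2 hierarchy is active:

# Prints "cgroup2fs" when cgroup v2 is mounted at /sys/fs/cgroup
stat -fc %T /sys/fs/cgroup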

5. Load kernel modules

cat <<EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
nf_conntrack
EOF

cat <<EOF | tee /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
EOF
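The files above are only read at boot by systemd-modules-load. To load the modules immediately without waiting for the reboot, something like:

# Load the modules now; the *.conf files make this persistent across boots
modprobe -a overlay br_netfilter nf_conntrack
modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh
# Confirm they are loaded
lsmod | grep -E 'overlay|br_netfilter|ip_vs'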

6. Reboot to apply the settings above

reboot

7. Install dependencies

dnf install -y dnf-utils ipvsadm telnet wget net-tools conntrack ipset jq iptables curl sysstat libseccomp socat nfs-utils fuse fuse-devel

Install containerd

1. Download containerd

cd /tmp
wget https://github.com/containerd/containerd/releases/download/v1.7.11/containerd-1.7.11-linux-amd64.tar.gz
tar -xvf containerd-1.7.11-linux-amd64.tar.gz
cd bin
cp -pdr * /usr/bin/

2. Download crictl

cd /tmp
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.28.0/crictl-v1.28.0-linux-amd64.tar.gz
tar -xvf crictl-v1.28.0-linux-amd64.tar.gz
cp -pdr crictl /usr/bin/

3. Download the CNI plugins

cd /tmp
mkdir -pv /opt/cni/bin
# Use a dedicated directory so this does not collide with the "bin" dir extracted for containerd earlier
mkdir -p cni-plugins
cd cni-plugins
wget https://github.com/containernetworking/plugins/releases/download/v1.4.0/cni-plugins-linux-amd64-v1.4.0.tgz
tar -xvf cni-plugins-linux-amd64-v1.4.0.tgz
rm -f cni-plugins-linux-amd64-v1.4.0.tgz
cp -pdr * /opt/cni/bin/

4. Download runc

cd /tmp
wget https://github.com/opencontainers/runc/releases/download/v1.1.10/runc.amd64
chmod +x runc.amd64
cp -pdr runc.amd64 /usr/bin/runc
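A quick sanity check that every runtime piece is in place and at the expected version:

containerd --version
runc --version
crictl --version
ls /opt/cni/bin/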

Configure containerd

1. Generate and edit the containerd config

mkdir -p /etc/containerd/
# Generate the default config
containerd config default > /etc/containerd/config.toml
# Edit /etc/containerd/config.toml:
# 1) Point sandbox_image at a domestic mirror, changing
#      sandbox_image = "registry.k8s.io/pause:3.8"
#    to
#      sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.8"
# 2) Switch the runc cgroup driver to systemd:

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
...
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
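Both edits can also be scripted. This is a sketch against the default config.toml generated above, so verify the result; note that the kubeadm init transcript later in this post warns that kubeadm v1.28 itself prefers pause:3.9.

# Sketch: apply the two edits non-interactively (check the output afterwards)
sed -i 's#registry.k8s.io/pause:3.8#registry.aliyuncs.com/google_containers/pause:3.8#' /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
grep -E 'sandbox_image|SystemdCgroup' /etc/containerd/config.toml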

2. Create the containerd systemd unit

cat <<EOF | tee /usr/lib/systemd/system/containerd.service
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target

[Service]

ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
KillMode=process
Delegate=yes
LimitNOFILE=655350
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity

[Install]
WantedBy=multi-user.target
EOF

3. Bind-mount the containerd data directory

mkdir -p /var/lib/containerd/
mkdir -p /apps/containerd/ # change this to a path on your large data disk

# Add a bind mount to /etc/fstab
echo "/apps/containerd /var/lib/containerd none defaults,bind,nofail 0 0" >>/etc/fstab

systemctl daemon-reload
# Mount it
mount -a
# Verify the mount
[root@k8s-master-1 containerd]# mount | grep containerd
/dev/vda3 on /var/lib/containerd type xfs (rw,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota)

4. Enable and start containerd

systemctl enable containerd.service --now

5. Check that the service is running

systemctl status containerd.service 

6. Check that files appear on the data disk, which confirms the bind mount is working

ll /apps/containerd/

7. Create the crictl config

cat <<EOF | tee /etc/crictl.yaml
runtime-endpoint: "unix:///var/run/containerd/containerd.sock"
image-endpoint: "unix:///var/run/containerd/containerd.sock"
timeout: 10
debug: false
pull-image-on-create: true
disable-pull-on-run: false
EOF

8. Verify the config took effect

[root@k8s-master-1 containerd]# crictl info|  grep sandboxImage
"sandboxImage": "registry.aliyuncs.com/google_containers/pause:3.8",
[root@k8s-master-1 containerd]# crictl info| grep SystemdCgroup
"SystemdCgroup": true

Install kubelet, kubeadm, and kubectl

1. Only the node binaries tarball is needed

cd /tmp
wget https://dl.k8s.io/v1.28.4/kubernetes-node-linux-amd64.tar.gz

tar -xvf kubernetes-node-linux-amd64.tar.gz
cd kubernetes/node/bin
cp -pdr kubeadm kubectl kubelet /usr/bin/
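Verify the binaries are on PATH and report the expected version:

kubeadm version -o short
kubelet --version
kubectl version --client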

Configure kubelet

1. Bind-mount the kubelet data directory

mkdir /var/lib/kubelet
mkdir /apps/kubelet
# Add a bind mount to /etc/fstab
echo "/apps/kubelet /var/lib/kubelet none defaults,bind,nofail 0 0" >>/etc/fstab

systemctl daemon-reload
# Mount it
mount -a

2. Verify the mount

[root@k8s-master-1]# mount | grep kubelet
/dev/vda3 on /var/lib/kubelet type xfs (rw,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota)

3. Create the kubelet drop-in config

mkdir -p  /usr/lib/systemd/system/kubelet.service.d/
cat <<EOF | tee /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/sysconfig/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet \$KUBELET_KUBECONFIG_ARGS \$KUBELET_CONFIG_ARGS \$KUBELET_KUBEADM_ARGS \$KUBELET_EXTRA_ARGS
EOF
cat <<EOF | tee /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS=
EOF

4. Create kubelet.service

cat <<EOF | tee /usr/lib/systemd/system/kubelet.service
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=https://kubernetes.io/docs/
Wants=network-online.target
After=network-online.target

[Service]
ExecStart=/usr/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF

5. Reload systemd

systemctl daemon-reload

Initialize the Kubernetes master

kubeadm init --apiserver-advertise-address=0.0.0.0 \
--apiserver-cert-extra-sans=127.0.0.1 \
--image-repository=registry.aliyuncs.com/google_containers \
--ignore-preflight-errors=all \
--service-cidr=10.96.0.0/16 \
--pod-network-cidr=10.244.0.0/16 \
--cri-socket=unix:///var/run/containerd/containerd.sock
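As the transcript below suggests, the control-plane images can be pre-pulled so init itself runs faster; a sketch using the same repository and CRI socket:

kubeadm config images pull \
  --image-repository=registry.aliyuncs.com/google_containers \
  --kubernetes-version=v1.28.4 \
  --cri-socket=unix:///var/run/containerd/containerd.sock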

# Init transcript
[root@k8s-master-1 apps]# kubeadm init --apiserver-advertise-address=0.0.0.0 \
> --apiserver-cert-extra-sans=127.0.0.1 \
> --image-repository=registry.aliyuncs.com/google_containers \
> --ignore-preflight-errors=all \
> --service-cidr=10.96.0.0/16 \
> --pod-network-cidr=10.244.0.0/16 \
> --ignore-preflight-errors=all \
> --cri-socket=unix:///var/run/containerd/containerd.sock
[init] Using Kubernetes version: v1.28.4
[preflight] Running pre-flight checks
[WARNING FileExisting-tc]: tc not found in system path
[WARNING Hostname]: hostname "k8s-master-1" could not be reached
[WARNING Hostname]: hostname "k8s-master-1": lookup k8s-master-1 on 192.168.2.84:53: no such host
[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
W1211 17:02:12.360608 38189 checks.go:835] detected that the sandbox image "registry.aliyuncs.com/google_containers/pause:3.8" of the container runtime is inconsistent with that used by kubeadm. It is recommended that using "registry.aliyuncs.com/google_containers/pause:3.9" as the CRI sandbox image.
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master-1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.2.175 127.0.0.1]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.2.175 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.2.175 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 8.504053 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: j03vax.ed9rursqoz27olk6
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.2.175:6443 --token j03vax.ed9rursqoz27olk6 \
--discovery-token-ca-cert-hash sha256:8aa5fb17b6909dce425d1e8bafd41e85beeb18bfda6bba4025ff36662c2774f2
# Enable kubelet at boot
systemctl enable kubelet.service
# Check service status
systemctl status kubelet.service

# Troubleshooting
journalctl -u kubelet
# Check cluster status
[root@k8s-master-1 apps]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE   ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   ok
# List the cluster pods
[root@k8s-master-1 apps]# kubectl get pod -A
NAMESPACE     NAME                                   READY   STATUS    RESTARTS   AGE
kube-system   coredns-66f779496c-dk8sr               0/1     Pending   0          4m40s
kube-system   coredns-66f779496c-vmqcl               0/1     Pending   0          4m40s
kube-system   etcd-k8s-master-1                      1/1     Running   1          4m52s
kube-system   kube-apiserver-k8s-master-1            1/1     Running   1          4m57s
kube-system   kube-controller-manager-k8s-master-1   1/1     Running   1          4m52s
kube-system   kube-proxy-rmc6j                       1/1     Running   0          4m40s
kube-system   kube-scheduler-k8s-master-1            1/1     Running   1          4m53s

# Switch kube-proxy to ipvs mode
kubectl -n kube-system edit cm kube-proxy
    logging:
      flushFrequency: 0
      options:
        json:
          infoBufferSize: "0"
      verbosity: 0
    metricsBindAddress: ""
    mode: "ipvs" # set the mode to ipvs
    nodePortAddresses: null
# Restart kube-proxy so the change takes effect
kubectl -n kube-system delete pod kube-proxy-rmc6j
# Check that the kube-ipvs0 interface was created
[root@k8s-master-1 apps]# ip a | grep kube-ipvs0
3: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default
    inet 10.96.0.1/32 scope global kube-ipvs0
    inet 10.96.0.10/32 scope global kube-ipvs0
# Inspect the ipvs rules
[root@k8s-master-1 apps]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 192.168.2.175:6443           Masq    1      0          0
TCP  10.96.0.10:53 rr
TCP  10.96.0.10:9153 rr
UDP  10.96.0.10:53 rr
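To double-check that the proxier really switched, kube-proxy also reports its mode on the metrics port (10249, bound to localhost with the default metricsBindAddress; this assumes that default was kept):

curl 127.0.0.1:10249/proxyMode
# expected output: ipvs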

Join the worker node

kubeadm join 192.168.2.175:6443 --token j03vax.ed9rursqoz27olk6 \
--discovery-token-ca-cert-hash sha256:8aa5fb17b6909dce425d1e8bafd41e85beeb18bfda6bba4025ff36662c2774f2

[root@k8s-node-3 tmp]# kubeadm join 192.168.2.175:6443 --token j03vax.ed9rursqoz27olk6 \
> --discovery-token-ca-cert-hash sha256:8aa5fb17b6909dce425d1e8bafd41e85beeb18bfda6bba4025ff36662c2774f2
[preflight] Running pre-flight checks
[WARNING FileExisting-tc]: tc not found in system path
[WARNING Hostname]: hostname "k8s-node-3" could not be reached
[WARNING Hostname]: hostname "k8s-node-3": lookup k8s-node-3 on 192.168.2.84:53: no such host
[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

# Enable kubelet at boot
systemctl enable kubelet.service
# Check service status
systemctl status kubelet.service

# Troubleshooting
journalctl -u kubelet

# On the master, list the nodes
[root@k8s-master-1 tmp]# kubectl get nodes
NAME           STATUS     ROLES           AGE   VERSION
k8s-master-1   NotReady   control-plane   31m   v1.28.4
k8s-node-3     NotReady   <none>          88s   v1.28.4

Deploy the flannel CNI

cat <<EOF | kubectl create -f -
---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    k8s-app: flannel
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: flannel
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - clustercidrs
  verbs:
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: flannel
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: flannel
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    k8s-app: flannel
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
    k8s-app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        image: docker.io/flannel/flannel-cni-plugin:v1.2.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        image: docker.io/flannel/flannel:v0.22.3
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: docker.io/flannel/flannel:v0.22.3
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
EOF
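Once the manifest is applied, watch the flannel pods roll out and the nodes flip from NotReady to Ready:

kubectl -n kube-flannel get pods -o wide
kubectl get nodes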

Cluster tests

[root@k8s-master-1 tmp]# kubectl get pod -A
NAMESPACE      NAME                                   READY   STATUS    RESTARTS   AGE
kube-flannel   kube-flannel-ds-5mvkj                  1/1     Running   0          57s
kube-flannel   kube-flannel-ds-qccq4                  1/1     Running   0          56s
kube-system    coredns-66f779496c-dk8sr               1/1     Running   0          35m
kube-system    coredns-66f779496c-vmqcl               1/1     Running   0          35m
kube-system    etcd-k8s-master-1                      1/1     Running   1          35m
kube-system    kube-apiserver-k8s-master-1            1/1     Running   1          35m
kube-system    kube-controller-manager-k8s-master-1   1/1     Running   1          35m
kube-system    kube-proxy-65tbm                       1/1     Running   0          5m51s
kube-system    kube-proxy-rg882                       1/1     Running   0          27m
kube-system    kube-scheduler-k8s-master-1            1/1     Running   1          35m
# DNS test
dig @10.96.0.10 www.qq.com
cat <<EOF | kubectl create -f -
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: net-tools
  labels:
    k8s-app: net-tools
spec:
  selector:
    matchLabels:
      k8s-app: net-tools
  template:
    metadata:
      labels:
        k8s-app: net-tools
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoExecute
        operator: Exists
      containers:
      - name: net-tools
        image: juestnow/net-tools
        command:
        - /bin/sh
        - "-c"
        - set -e -x; tail -f /dev/null
        resources:
          limits:
            memory: 30Mi
          requests:
            cpu: 50m
            memory: 20Mi
      dnsConfig:
        options:
        - name: single-request-reopen
EOF
[root@k8s-master-1 tmp]# kubectl get pod
NAME              READY   STATUS              RESTARTS   AGE
net-tools-8wxnf   0/1     ContainerCreating   0          18s
net-tools-bxdns   0/1     ContainerCreating   0          18s

[root@k8s-master-1 tmp]# kubectl get pod
NAME              READY   STATUS    RESTARTS   AGE
net-tools-8wxnf   1/1     Running   0          105s
net-tools-bxdns   1/1     Running   0          105s
[root@k8s-master-1 tmp]# kubectl exec -ti net-tools-8wxnf -- /bin/sh
/ # ping www.qq.com
PING www.qq.com (121.14.77.221): 56 data bytes
64 bytes from 121.14.77.221: seq=0 ttl=51 time=7.157 ms
^C
--- www.qq.com ping statistics ---
1 packets transmitted, 1 packets received, 0% packet loss
round-trip min/avg/max = 7.157/7.157/7.157 ms
# In-cluster connectivity
/ # nc -vz kubernetes 443
kubernetes (10.96.0.1:443) open
/ # curl -k https://kubernetes
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "forbidden: User \"system:anonymous\" cannot get path \"/\"",
  "reason": "Forbidden",
  "details": {},
  "code": 403
}/ #
# Internal DNS resolution works
# The cluster network is functioning correctly
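When testing is finished, leave the pod shell and remove the test DaemonSet:

# Leave the pod shell, then clean up the net-tools DaemonSet
exit
kubectl delete ds net-tools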