systemctl daemon-reload
# Mount all filesystems from fstab
mount -a
# Check that the mount is in place
[root@k8s-master-1 containerd]# mount | grep containerd
/dev/vda3 on /var/lib/containerd type xfs (rw,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota)
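For `mount -a` to pick the volume up, /etc/fstab needs a matching entry; a minimal sketch, assuming the same device, mount point, and filesystem shown in the output above:

# /etc/fstab entry (device and options assumed from the mount output above)
/dev/vda3  /var/lib/containerd  xfs  defaults  0 0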
# Run the command to generate the kube-vip static Pod manifest
kube-vip manifest pod \
    --interface $INTERFACE \
    --vip $VIP \
    --controlplane \
    --arp \
    --leaderElection | tee /etc/kubernetes/manifests/kube-vip.yaml
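The command above expects $INTERFACE and $VIP to be set (and $KVVERSION is used in the next step); a sketch with example values — the interface name is an assumption, while the VIP and version match values used elsewhere in this walkthrough:

export VIP=192.168.3.251      # control-plane virtual IP; matches --control-plane-endpoint below
export INTERFACE=eth0         # assumed NIC name; confirm with `ip addr`
export KVVERSION=v0.6.4       # kube-vip version, referenced by the sed command below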
# Change the image pull policy and switch the kube-vip image to a mirror
sed -i 's/Always/IfNotPresent/g' /etc/kubernetes/manifests/kube-vip.yaml
sed -i "s#ghcr.io/kube-vip/kube-vip:v0.6.4#docker.io/juestnow/kube-vip:$KVVERSION#g" /etc/kubernetes/manifests/kube-vip.yaml
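A quick sanity check that both substitutions took effect before kubelet picks the manifest up:

grep -E 'image:|imagePullPolicy' /etc/kubernetes/manifests/kube-vip.yaml
# expected: imagePullPolicy: IfNotPresent and the docker.io/juestnow/kube-vip image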
# Initialization process
[root@k8s-master-1 tmp]# kubeadm init --apiserver-advertise-address=0.0.0.0 \
  --apiserver-cert-extra-sans=127.0.0.1 \
  --kubernetes-version 1.28.2 \
  --image-repository=registry.aliyuncs.com/google_containers \
  --ignore-preflight-errors=all \
  --service-cidr=10.96.0.0/16 \
  --pod-network-cidr=10.244.0.0/16 \
  --upload-certs \
  --control-plane-endpoint=192.168.3.251 \
  --cri-socket=unix:///var/run/containerd/containerd.sock
[init] Using Kubernetes version: v1.28.2
[preflight] Running pre-flight checks
	[WARNING Hostname]: hostname "k8s-master-1" could not be reached
	[WARNING Hostname]: hostname "k8s-master-1": lookup k8s-master-1 on 192.168.2.84:53: no such host
	[WARNING FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables does not exist
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
W0111 19:59:33.239720  811848 checks.go:835] detected that the sandbox image "registry.aliyuncs.com/google_containers/pause:3.6" of the container runtime is inconsistent with that used by kubeadm. It is recommended that using "registry.aliyuncs.com/google_containers/pause:3.9" as the CRI sandbox image.
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local rocky] and IPs [10.96.0.1 192.168.2.175 192.168.3.251 127.0.0.1]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost k8s-master-1] and IPs [192.168.2.175 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost k8s-master-1] and IPs [192.168.2.175 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests".
This can take up to 4m0s
[apiclient] All control plane components are healthy after 7.563962 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
c6a80e1929786899137bb0a765323fa0cb7c14fb8c0bedb61a0eaf1583a13abd
[mark-control-plane] Marking the node rocky as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node rocky as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: u9ryln.7f9t2ih8v1es5d79
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
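  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config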
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/
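Since the init above used --pod-network-cidr=10.244.0.0/16, flannel's default pod CIDR, flannel is one straightforward option; a minimal sketch (this is the manifest URL published in flannel's README — verify it against the version you want to pin):

kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml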
You can now join any number of the control-plane node running the following command on each as root:
Please note that the certificate-key gives access to cluster sensitive data, keep it secret! As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use "kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
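If the two-hour window has passed, fresh join credentials can be regenerated on any existing control-plane node with standard kubeadm commands:

# Re-upload the control-plane certificates and print a new certificate key
kubeadm init phase upload-certs --upload-certs
# Print a fresh worker join command (token + CA cert hash)
kubeadm token create --print-join-command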
Then you can join any number of worker nodes by running the following on each as root:
[root@k8s-master-2 tmp]# kubeadm join 192.168.3.251:6443 --token vx5j0a.7n1jgk7cj7hffkmy \
> 	--discovery-token-ca-cert-hash sha256:6055c9951d7d92d1243006e973a41a375b71b8e20ae4ccdf35ac4a7edfd4531a \
> 	--control-plane --certificate-key 5da3036c3748773980d0cc9ee4352ace20f6b3a5fbee5a5aad2a9ff0bba3ccd2
W0111 20:26:07.335263    8470 initconfiguration.go:120] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/var/run/cri-docker/cri-docker.sock". Please update your configuration!
[preflight] Running pre-flight checks
	[WARNING Hostname]: hostname "k8s-master-2" could not be reached
	[WARNING Hostname]: hostname "k8s-master-2": lookup k8s-master-2 on 192.168.2.84:53: no such host
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[download-certs] Downloading the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[download-certs] Saving the certificates to the folder: "/etc/kubernetes/pki"
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master-2 localhost] and IPs [192.168.2.176 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master-2 localhost] and IPs [192.168.2.176 127.0.0.1 ::1]
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master-2 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.2.176 192.168.3.251 127.0.0.1]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Checking that the etcd cluster is healthy
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[etcd] Announced new etcd member joining to the existing etcd cluster
[etcd] Creating static Pod manifest for "etcd"
[etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
[kubelet-check] Initial timeout of 40s passed.
The 'update-status' phase is deprecated and will be removed in a future release. Currently it performs no operation
[mark-control-plane] Marking the node k8s-master-2 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master-2 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
This node has joined the cluster and a new control plane instance was created:
* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.
To start administering your cluster from this node, you need to run the following as a regular user:
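  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config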
kubeadm join 192.168.3.251:6443 --token vx5j0a.7n1jgk7cj7hffkmy \
	--discovery-token-ca-cert-hash sha256:6055c9951d7d92d1243006e973a41a375b71b8e20ae4ccdf35ac4a7edfd4531a

[root@k8s-node-1 ~]# kubeadm join 192.168.3.251:6443 --token vx5j0a.7n1jgk7cj7hffkmy \
> 	--discovery-token-ca-cert-hash sha256:6055c9951d7d92d1243006e973a41a375b71b8e20ae4ccdf35ac4a7edfd4531a
W0112 09:18:43.791610  356308 initconfiguration.go:120] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/var/run/cri-docker/cri-docker.sock". Please update your configuration!
[preflight] Running pre-flight checks
	[WARNING Hostname]: hostname "k8s-node-1" could not be reached
	[WARNING Hostname]: hostname "k8s-node-1": lookup k8s-node-1 on 192.168.2.84:53: no such host
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
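The next step assumes every node runs a local haproxy on port 8443 that forwards to all three apiservers; a minimal sketch of the relevant haproxy.cfg section, with backend server names assumed (the IPs match the three masters used above):

# /etc/haproxy/haproxy.cfg (sketch; names are illustrative)
frontend k8s-apiserver
    bind *:8443
    mode tcp
    default_backend k8s-masters

backend k8s-masters
    mode tcp
    option tcp-check
    balance roundrobin
    server k8s-master-1 192.168.2.175:6443 check
    server k8s-master-2 192.168.2.176:6443 check
    server k8s-master-3 192.168.2.177:6443 check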
# On the node, check that the port is listening
[root@k8s-master-1 ~]# ss -tnlp | grep 8443
LISTEN 0      4096      *:8443      *:*      users:(("haproxy",pid=829813,fd=7))

# Rewrite the server address in the kubeconfig files
# master1 node
sed -i 's/192.168.2.175:6443/127.0.0.1:8443/g' /etc/kubernetes/controller-manager.conf
sed -i 's/192.168.2.175:6443/127.0.0.1:8443/g' /etc/kubernetes/scheduler.conf
sed -i 's/192.168.3.251:6443/127.0.0.1:8443/g' /etc/kubernetes/kubelet.conf
# The controller-manager and scheduler kubeconfigs point at each node's own IP
# master2 node
sed -i 's/192.168.2.176:6443/127.0.0.1:8443/g' /etc/kubernetes/controller-manager.conf
sed -i 's/192.168.2.176:6443/127.0.0.1:8443/g' /etc/kubernetes/scheduler.conf
sed -i 's/192.168.3.251:6443/127.0.0.1:8443/g' /etc/kubernetes/kubelet.conf
# master3 node
sed -i 's/192.168.2.177:6443/127.0.0.1:8443/g' /etc/kubernetes/controller-manager.conf
sed -i 's/192.168.2.177:6443/127.0.0.1:8443/g' /etc/kubernetes/scheduler.conf
sed -i 's/192.168.3.251:6443/127.0.0.1:8443/g' /etc/kubernetes/kubelet.conf
# All worker nodes
sed -i 's/192.168.3.251:6443/127.0.0.1:8443/g' /etc/kubernetes/kubelet.conf
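The edited kubeconfigs are only read at startup, so the components must be restarted to pick up the new address; a sketch of one common approach — restart the kubelet, then bounce the static control-plane Pods by moving their manifests out and back:

# Restart kubelet so it re-reads kubelet.conf
systemctl restart kubelet

# controller-manager and scheduler only read their kubeconfig at startup;
# moving the static Pod manifests away and back forces kubelet to recreate them
mv /etc/kubernetes/manifests/kube-controller-manager.yaml /tmp/ && sleep 5 \
  && mv /tmp/kube-controller-manager.yaml /etc/kubernetes/manifests/
mv /etc/kubernetes/manifests/kube-scheduler.yaml /tmp/ && sleep 5 \
  && mv /tmp/kube-scheduler.yaml /etc/kubernetes/manifests/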