安装部署Cobbler3 依赖

安装epel-release

1
dnf install epel-release

开启 cobbler 源

1
2
dnf config-manager --set-enabled epel-modular
dnf module enable cobbler:3.3

安装cobbler

1
2
3
4
dnf install cobbler grub2-efi-x64.x86_64 grub2-pc.x86_64  \
grub2-pc-modules grub2-efi-x64-modules grub2-efi-aa64-modules \
ipxe-bootimgs-aarch64 ipxe-bootimgs-x86 syslinux yum-utils \
pykickstart fence-agents-all dnsmasq

配置cobbler

配置依赖组件

1
2
3
4
mkdir -p /var/lib/cobbler/loaders && cp -f /usr/share/syslinux/{ldlinux.c32,libcom32.c32,libutil.c32,menu.c32,pxelinux.0} /var/lib/cobbler/loaders
sed -i "s/#ServerName www.example.com:80/ServerName 192.168.2.49:80/" /etc/httpd/conf/httpd.conf
sed -i "s/bind-interfaces/# bind-interfaces/g" /etc/dnsmasq.conf
sed -i "s/interface=lo/# interface=lo/g" /etc/dnsmasq.conf

配置cobbler

1
2
3
sed -i "s/127.0.0.1/192.168.2.49/g" /etc/cobbler/settings.yaml
sed -i "s/restart_dhcp: true/restart_dhcp: false/g" /etc/cobbler/settings.yaml
sed -i "s/restart_dns: true/restart_dns: false/g" /etc/cobbler/settings.yaml

settings.yaml 配置后结果

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
default_password_crypted: "$1$xxxxxxxx$yyyyyyyyyyyyyyyyyyyyyy"    #执行 openssl passwd -1 <密码> 生成加密字符串,替换此默认值
#(可选)开启ipxe,一般用于部署exsi
enable_ipxe: true
enable_menu: true
#配置使用本机dhcp服务
manage_dhcp: false
#(可选)开启DHCPv6服务
manage_dhcp_v6: false
#关闭cobbler自带的DHCPv4管理(本文使用dnsmasq的proxyDHCP,地址由上级路由下发)
manage_dhcp_v4: false
#设置DHCPv4下发的tftp服务器地址,这里的地址是手动配置的静态IPv4地址
next_server_v4: 192.168.2.49
#(可选)设置DHCPv6下发的tftp服务器地址,这里的地址是手动配置的静态IPv6地址
next_server_v6: "fdaa::1155"
#配置使用本机tftp服务
manage_tftpd: true
restart_dhcp: false
restart_dns: false
#配置cobbler的http等服务监听的地址,这里设置为本机IPv4地址
server: 192.168.2.49

bind_master: 192.168.2.49

配置dnsmasq 使用上级路由dhcp

1
2
3
4
5
6
7
8
/etc/dnsmasq.d/dhcp
port=0
log-dhcp
dhcp-range=192.168.2.0,proxy
dhcp-boot=pxelinux.0
pxe-service=x86PC,"Network Boot",pxelinux
#enable-tftp
#tftp-root=/var/lib/tftpboot

启动cobbler及相关程序

1
2
3
4
5
6
7
8
9
10
# 开启开机启动并启动
systemctl enable tftp.service --now
systemctl enable cobblerd.service --now
systemctl enable dnsmasq.service --now
systemctl enable httpd.service --now
# 重启
systemctl restart tftp.service
systemctl restart cobblerd.service
systemctl restart dnsmasq.service
systemctl restart httpd.service

导入镜像到cobbler

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
# 导入CentOS-7-x86_64-DVD-2207-02.iso
mkdir /mnt/centos/
mount -t iso9660 -r -o ro,loop CentOS-7-x86_64-DVD-2207-02.iso /mnt/centos/
# 创建 ks centos-7.9.2009-x86_64.ks

/var/lib/cobbler/templates/centos-7.9.2009-x86_64.ks
# 下面文件内容

#Kickstart Configurator by Qist
#platform=x86, AMD64, or Intel EM64T
#System language
lang en_US
#System keyboard
keyboard us
#System timezone
timezone Asia/Shanghai
#Root password
rootpw --iscrypted $default_password_crypted
#rootpw --iscrypted $1$ops-node$7hqdpgEmIE7Z0RbtQkxW20
#Use text mode install
text
#Install OS instead of upgrade
install
#Use NFS installation Media
url --url=$tree
#url --url=http://192.168.2.49/cblr/links/centos-7.9.2009-x86_64
#System bootloader configuration
bootloader --location=mbr --timeout 1 --append "console=tty0 console=ttyS0,115200 net.ifnames=0 biosdevname=0 quiet systemd.show_status=yes"
#Clear the Master Boot Record
zerombr
#Partition clearing information
clearpart --all --initlabel
#Disk partitioning information
#autopart
part /boot --fstype xfs --size 1024
#part swap --size 16384
part / --fstype xfs --size 1 --grow
#System authorization information
auth --useshadow --enablemd5
#Network information
$SNIPPET('network_config')
#network --bootproto=dhcp --device=eth0 --onboot=on
# Reboot after installation
reboot
#Firewall configuration
firewall --disabled
#SELinux configuration
selinux --disabled
#Do not configure XWindows
skipx

%pre
$SNIPPET('log_ks_pre')
$SNIPPET('kickstart_start')
$SNIPPET('pre_install_network_config')
# Enable installation monitoring
$SNIPPET('pre_anamon')
%end

#Package install information
%packages
@ base
@ core
tree
ntpdate
net-tools
wget
vim
mailx
%end

%post
rm -f /root/*.ks
rm -f /root/*.cfg
%end

# 导入镜像到cobbler
cobbler import --path=/mnt/centos --name=centos-7.9.2009 --autoinstall=centos-7.9.2009-x86_64.ks --arch=x86_64

# bootloader 没配置网卡为ethx ks 文件有配置就不需要执行下面这句
cobbler profile edit --name=centos-7.9.2009-x86_64 --kernel-options='net.ifnames=0 biosdevname=0'

# 创建tree变量 不创建 默认ks 文件不能使用 tree 变量
cobbler distro edit --name=centos-7.9.2009-x86_64 --autoinstall-meta="tree=http://@@http_server@@/cblr/links/centos-7.9.2009-x86_64"

# 同步cobbler 配置
cobbler sync
# 删除已导入cobbler的镜像(名称需与导入时自动生成的 <name>-<arch> 一致)
cobbler profile remove --name=centos-7.9.2009-x86_64
cobbler distro remove --name=centos-7.9.2009-x86_64

1、为docker私有仓库导入账户密码信息:

1
kubectl create secret docker-registry myregistrykey --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL

转换标准输出:

1
kubectl get secret myregistrykey --output="jsonpath={.data.\.dockerconfigjson}" | base64 -d

2、查看信息是否生成:

1
kubectl get secrets myregistrykey

3、为访问kubernetes.api创建一个用户

编辑一个yaml格式文件:

1
2
3
4
5
apiVersion: v1
kind: ServiceAccount
metadata:
name: build-robot
automountServiceAccountToken: false

通过 kubectl create -f file.yaml 生成 用户 build-robot

通过 kubectl get serviceaccount build-robot -o yaml 查看用户信息

可以顺便创建一个API令牌:

1
2
3
4
5
6
7
apiVersion: v1
kind: Secret
metadata:
name: build-robot-secret
annotations:
kubernetes.io/service-account.name: build-robot
type: kubernetes.io/service-account-token

token可通过 kubectl get secret build-robot-secret -o yaml 获取(或通过describe命令获取)

4、接下来我们将之前的”myregistrykey” 加入build-robot用户,让之后在使用此账户时,可pull私有仓库的加密资源

(1)

1
kubectl get serviceaccounts build-robot -o yaml > ./ro.yaml

(2 )

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
kubectl get serviceaccounts default -o yaml > ./sa.yaml
$ cat ro.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: 2018-04-03T22:02:39Z
name: build-robot
namespace: default
resourceVersion: "243024"
selfLink: /api/v1/namespaces/default/serviceaccounts/build-robot
uid: 052fb0f4-3d50-11e5-b066-42010af0d7b6
secrets:
- name: build-robot-token

$ vi sa.yaml
[editor session not shown]
[delete line with key "resourceVersion"]
[add lines with "secrets:"]
[add lines with "imagePullSecret:"]

$ cat sa.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: 2018-04-03T22:02:39Z
name: build-robot
namespace: default
selfLink: /api/v1/namespaces/default/serviceaccounts/build-robot
uid: 052fb0f4-3d50-11e5-b066-42010af0d7b6
secrets:
- name: build-robot-token
imagePullSecrets:
- name: myregistrykey
$ kubectl replace serviceaccount build-robot -f ./sa.yaml

这样在生成pod的yaml文件的编辑中,可使用如下账户在default空间下时,会自动调用myregistrykey

1
2
3
serviceAccount: build-robot

serviceAccountName: build-robot

或者在pod的yaml文件中,spec.imagepullsecrets下,添加- name: myregistrykey来忽略账户自带的key,直接pull自己私有仓库的项目:

1
2
3
4
5
6
7
8
9
10
apiVersion: v1
kind: Pod
metadata:
name: private-reg
spec:
containers:
- name: private-reg-container
image: <your-private-image>
imagePullSecrets:
- name: myregistrykey

配置 default 账号 自动拉取 镜像

1、为docker私有仓库导入账户密码信息:

1
kubectl create secret docker-registry myregistrykey --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL

转换标准输出:

1
kubectl get secret myregistrykey --output="jsonpath={.data.\.dockerconfigjson}" | base64 -d

2、查看信息是否生成:

1
kubectl get secrets myregistrykey

可以顺便创建一个API令牌:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
apiVersion: v1
kind: Secret
metadata:
name: default-secret
annotations:
kubernetes.io/service-account.name: default
type: kubernetes.io/service-account-token
token可通过 kubectl get secret default-secret -o yaml 获取(或通过describe命令获取)

kubectl get serviceaccounts default -o yaml > ./sa.yaml

cat sa.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: "2023-04-23T05:36:00Z"
name: default
namespace: default
resourceVersion: "337"
uid: 1a3039ec-c43e-404e-9fa7-0fda9c736693


$ vi sa.yaml
[editor session not shown]
[delete line with key "resourceVersion"]
[add lines with "secrets:"]
[add lines with "imagePullSecret:"]

$ cat sa.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: 2023-04-23T05:36:00Z
name: default
namespace: default
selfLink: /api/v1/namespaces/default/serviceaccounts/default
uid: 1a3039ec-c43e-404e-9fa7-0fda9c736693
secrets:
- name: default-token
imagePullSecrets:
- name: myregistrykey
$ kubectl apply -f ./sa.yaml

这样在生成pod的yaml文件的编辑中,可使用如下账户在default空间下时,会自动调用myregistrykey

1
2
3
serviceAccount: default

serviceAccountName: default

或者在pod的yaml文件中,spec.imagepullsecrets下,添加- name: myregistrykey来忽略账户自带的key,直接pull自己私有仓库的项目:

1
2
3
4
5
6
7
8
9
10
apiVersion: v1
kind: Pod
metadata:
name: private-reg
spec:
containers:
- name: private-reg-container
image: <your-private-image>
imagePullSecrets:
- name: myregistrykey

For kubeadm provisioned clusters

1
kubeadm alpha certs check-expiration

For all clusters

1
openssl x509 -noout -dates -in /etc/kubernetes/pki/apiserver.crt

更新过期时间

方法1: 使用 kubeadm 升级集群自动轮换证书

1
kubeadm upgrade apply --certificate-renewal v1.15.0

方法2: 使用 kubeadm 手动生成并替换证书

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
# Step 1): Backup old certs and kubeconfigs
mkdir /etc/kubernetes.bak
cp -r /etc/kubernetes/pki/ /etc/kubernetes.bak
cp /etc/kubernetes/*.conf /etc/kubernetes.bak
kubeadm config view > kubeadm.yaml
# Step 2): Renew all certs
kubeadm alpha certs renew all --config kubeadm.yaml

# Step 3): Renew all kubeconfigs
kubeadm alpha kubeconfig user --client-name=admin
kubeadm alpha kubeconfig user --org system:masters --client-name kubernetes-admin > /etc/kubernetes/admin.conf
kubeadm alpha kubeconfig user --client-name system:kube-controller-manager > /etc/kubernetes/controller-manager.conf
kubeadm alpha kubeconfig user --org system:nodes --client-name system:node:$(hostname) > /etc/kubernetes/kubelet.conf
kubeadm alpha kubeconfig user --client-name system:kube-scheduler > /etc/kubernetes/scheduler.conf

# Another way to renew kubeconfigs
# kubeadm init phase kubeconfig all --config kubeadm.yaml

# Step 4): Copy certs/kubeconfigs and restart Kubernetes services etcd 证书排除

方法3: 非 kubeadm 集群

非 kubeadm 集群请参考 配置 CA 并创建 TLS 证书 重新生成证书,并重启各个 Kubernetes 服务。
kubelet 证书自动轮换
Kubelet 从 v1.8.0 开始支持证书轮换,当证书过期时,可以自动生成新的密钥,并从 Kubernetes API 申请新的证书。

证书轮换的开启方法如下:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15

# Step 1): Config kube-controller-manager
# 注:RotateKubeletClientCertificate 新版本已经GA,可以不配置
# (行尾续行符 \ 之后不能再跟注释,否则命令会被截断)
kube-controller-manager --experimental-cluster-signing-duration=87600h \
--feature-gates=RotateKubeletClientCertificate=true \
...

# Step 2): Config RBAC
# Refer https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#approval

# Step 3): Config Kubelet
# 注:RotateKubeletClientCertificate 新版本已经GA,可以不配置
# (行尾续行符 \ 之后不能再跟注释,否则命令会被截断)
kubelet --feature-gates=RotateKubeletClientCertificate=true \
--cert-dir=/var/lib/kubelet/pki \
--rotate-certificates \
--rotate-server-certificates \
...

撤销证书:
Kubernetes 目前还不支持通过 Certificate Revocation List (CRL) 来撤销证书。所以,目前撤销证书的唯一方法就是使用新的 CA 重新生成所有证书,然后再重启所有服务。

原因是客户环境为双网卡环境,对内和对外有两个不同的网段,因为前期的部署 [那肯定不是我部署的,是我部署,我也不一定注意的到],因为本机路由不对,没有走对外的网卡,而加入控制节点的时候,没有指定 ip,导致走的默认路由,后期发现了问题,现在需要重新生成证书来修复 etcd 和 apiserver 因为修改 ip 而引发的一系列问题
正片开始
证书的修改,必须要 apiserver 服务可用
备份 kubernetes 目录

cp -r /etc/kubernetes{,-bak}

查看证书内的 ip

for i in $(find /etc/kubernetes/pki -type f -name "*.crt");do echo ${i} && openssl x509 -in ${i} -text | grep 'DNS:';done

可以看到,只有 apiserver 和 etcd 的证书里面是包含了 ip 的

1
2
3
4
5
6
7
8
9
10
11
12
13
/etc/kubernetes/pki/ca.crt
/etc/kubernetes/pki/front-proxy-ca.crt
/etc/kubernetes/pki/etcd/ca.crt
/etc/kubernetes/pki/etcd/server.crt
DNS:master-03, DNS:localhost, IP Address:192.168.11.135, IP Address:127.0.0.1, IP Address:0:0:0:0:0:0:0:1
/etc/kubernetes/pki/etcd/healthcheck-client.crt
/etc/kubernetes/pki/etcd/peer.crt
DNS:master-03, DNS:localhost, IP Address:192.168.11.135, IP Address:127.0.0.1, IP Address:0:0:0:0:0:0:0:1
/etc/kubernetes/pki/apiserver.crt
DNS:master-03, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:lb-vip, IP Address:10.96.0.1, IP Address:192.168.11.135
/etc/kubernetes/pki/apiserver-kubelet-client.crt
/etc/kubernetes/pki/front-proxy-client.crt
/etc/kubernetes/pki/apiserver-etcd-client.crt

生成集群配置

1
2
3
4
5
6
kubeadm config view > /root/kubeadm.yaml

上面报错请用下面命令

kubectl get cm -n kube-system kubeadm-config -o=jsonpath="{.data.ClusterConfiguration}" >/root/kubeadm.yaml

增加 ip

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
vim kubeadm.yaml
apiServer:
extraArgs:
authorization-mode: Node,RBAC
timeoutForControlPlane: 4m0s
# 增加下面的配置
certSANs:
- 192.168.11.131
- 192.168.11.134
- 192.168.11.136
# 增加上面的配置
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: lb-vip:6443
controllerManager: {}
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/etcd
# 增加下面的配置
serverCertSANs:
- 192.168.11.131
- 192.168.11.135
- 192.168.11.136
peerCertSANs:
- 192.168.11.131
- 192.168.11.135
- 192.168.11.136
# 增加上面的配置
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.17.3
networking:
dnsDomain: cluster.local
podSubnet: 172.10.0.0/16
serviceSubnet: 10.96.0.0/12
scheduler: {}

删除原有的证书

需要保留 ca ,sa,front-proxy 这三个证书

1
2
rm -rf /etc/kubernetes/pki/{apiserver*,front-proxy-client*}
rm -rf /etc/kubernetes/pki/etcd/{healthcheck*,peer*,server*}

重新生成证书
kubeadm init phase certs all --config /root/kubeadm.yaml

再次查看证书内的 ip

for i in $(find /etc/kubernetes/pki -type f -name "*.crt");do echo ${i} && openssl x509 -in ${i} -text | grep 'DNS:';done

这里可以得到验证,不会覆盖之前证书内已经有的 ip,会将新的 ip 追加到后面

1
2
3
4
5
6
7
8
9
10
11
12
13
/etc/kubernetes/pki/etcd/ca.crt
/etc/kubernetes/pki/etcd/server.crt
DNS:master-02, DNS:localhost, IP Address:192.168.11.134, IP Address:127.0.0.1, IP Address:0:0:0:0:0:0:0:1, IP Address:192.168.11.131, IP Address:192.168.11.134, IP Address:192.168.11.136
/etc/kubernetes/pki/etcd/peer.crt
DNS:master-02, DNS:localhost, IP Address:192.168.11.134, IP Address:127.0.0.1, IP Address:0:0:0:0:0:0:0:1, IP Address:192.168.11.131, IP Address:192.168.11.134, IP Address:192.168.11.136
/etc/kubernetes/pki/etcd/healthcheck-client.crt
/etc/kubernetes/pki/ca.crt
/etc/kubernetes/pki/front-proxy-ca.crt
/etc/kubernetes/pki/apiserver.crt
DNS:master-02, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:lb-vip, IP Address:10.96.0.1, IP Address:192.168.11.134, IP Address:192.168.11.131, IP Address:192.168.11.134, IP Address:192.168.11.136
/etc/kubernetes/pki/apiserver-kubelet-client.crt
/etc/kubernetes/pki/front-proxy-client.crt
/etc/kubernetes/pki/apiserver-etcd-client.crt

将配置更新到 configmap 中
这样,以后有升级,或者增加其他 ip 时,也会将配置的 CertSANs 的 ip 保留下来,方便以后删减

kubeadm init phase upload-config kubeadm --config kubeadm.yaml