k8s 更换 SSL 证书
背景
公司 k8s 集群使用的 SSL 证书过期, 执行任何命令都报错: Unable to connect to the server: x509: certificate has expired or is not yet valid: current time 2023-05-23T16:22:32+08:00 is after 2023-05-19T05:40:00Z
核心命令
# Inspect an SSL certificate's details (issuer, validity dates, SANs)
cfssl-certinfo -cert CERT_NAME
更换 SSL 证书过程
kubeadm 部署的 k8s 集群 更新 SSL 证书流程
# 1. Check the expiry time of every certificate the cluster uses
kubeadm certs check-expiration
# 2. Back up /etc/kubernetes on ALL nodes before renewing anything
cp -a /etc/kubernetes /etc/kubernetes.old
# 3. Renew the certificates. 'all' is required — bare 'kubeadm certs renew'
#    only prints help. Pass a single cert name instead to renew just one.
kubeadm certs renew all
# 4. Refresh the admin kubeconfig — the old one embeds the expired client cert
mv "$HOME/.kube/config" "$HOME/.kube/config.old"
cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
chown "$(id -u):$(id -g)" "$HOME/.kube/config"
# 0600, not 0644: this file carries cluster-admin credentials
chmod 600 "$HOME/.kube/config"
# 5. Restart etcd, kube-apiserver, kube-controller-manager and kube-scheduler
#    so they pick up the renewed certificates (for static pods, kubelet
#    recreates them when the manifests are moved out of
#    /etc/kubernetes/manifests and back)
kubeasz 部署的 k8s 集群 更新 SSL 证书流程
1. 备份 SSL 证书
# 1. Back up the expired certs and kubeconfigs (run on the deployment host).
#    Fixed: original created "ssl-exoired-backup-..." (typo) and then never
#    copied into it — files landed in backup/ root instead.
BACKUP_DIR="/etc/kubeasz/clusters/test-k8s/backup/ssl-expired-backup-$(date +%F)"
mkdir -p "${BACKUP_DIR}"
cp -a /etc/kubeasz/clusters/test-k8s/*.kubeconfig /etc/kubeasz/clusters/test-k8s/ssl "${BACKUP_DIR}/"
2. 确认 ca 证书有效以及配置文件
# Confirm the CA certificate itself is still valid (only the leaf certs expired)
cd /etc/kubeasz/clusters/test-k8s/ssl
/etc/kubeasz/bin/cfssl-certinfo -cert ca.pem | grep not_after
# Confirm the validity period this CA stamps onto newly signed certs is sane
# CERT_EXPIRY: validity period for certificates signed by this CA
cat /etc/kubeasz/clusters/test-k8s/ssl/ca-config.json
{ "signing": { "default": { "expiry": "{{ CERT_EXPIRY }}" }, "profiles": { "kubernetes": { "usages": [ "signing", "key encipherment", "server auth", "client auth" ], "expiry": "{{ CERT_EXPIRY }}" }, "kcfg": { "usages": [ "signing", "key encipherment", "client auth" ], "expiry": "438000h" } } } }
2.1 可选,修改 CA 签发证书的有效期
cd /etc/kubeasz/clusters/test-k8s/
# 1. Edit the template config (used the next time kubeasz regenerates certs)
vim config.yaml
CA_EXPIRY: "876000h"
CERT_EXPIRY: "48000h"
# 2. Edit the live signing config that cfssl reads right now
vim ssl/ca-config.json
...
"expiry": "{{ CERT_EXPIRY }}"
...
# 3. Optional: change the CA certificate's own validity period
vim ssl/ca-csr.json
...
"ca": {
"expiry": "876000h"
}
...
3. 更新 etcd SSL
# etcd SSL
cd /etc/kubeasz/clusters/test-k8s/ssl
# Make sure the etcd CSR request file still exists
cat etcd-csr.json
# Generate a new etcd SSL certificate
# (fixed '生产'→generate typo and the '//' in the cfssljson path)
/etc/kubeasz/bin/cfssl gencert \
  -ca=/etc/kubeasz/clusters/test-k8s/ssl/ca.pem \
  -ca-key=/etc/kubeasz/clusters/test-k8s/ssl/ca-key.pem \
  -config=/etc/kubeasz/clusters/test-k8s/ssl/ca-config.json \
  -profile=kubernetes etcd-csr.json | /etc/kubeasz/bin/cfssljson -bare etcd
# Verify the new certificate's expiry
/etc/kubeasz/bin/cfssl-certinfo -cert etcd.pem | grep 'not_after'
# Distribute the new certificate and restart etcd one node at a time
# (sequential restart keeps quorum)
ETCD_NODE_IP="192.168.20.101
192.168.20.102
192.168.20.104"
for ip in ${ETCD_NODE_IP}  # unquoted on purpose: word-split into one IP per iteration
do
  # fixed: scp now uses root@ like the ssh below (they targeted different users)
  scp etcd-key.pem etcd.pem root@${ip}:/etc/kubernetes/ssl/
  ssh root@${ip} systemctl restart etcd.service
done
# Verify cluster health from an etcd node
export NODE_IPS="192.168.20.101 192.168.20.102 192.168.20.104"
for ip in ${NODE_IPS}; do
  ETCDCTL_API=3 etcdctl \
    --endpoints=https://${ip}:2379 \
    --cacert=/etc/kubernetes/ssl/ca.pem \
    --cert=/etc/kubernetes/ssl/etcd.pem \
    --key=/etc/kubernetes/ssl/etcd-key.pem \
    endpoint health
done
4. 更新 master 服务使用的 SSL 证书
api-server
cd /etc/kubeasz/clusters/test-k8s/ssl
# Make sure the CSR request file exists
cat kubernetes-csr.json
# Generate the new apiserver certificate (fixed '//' in the cfssljson path)
/etc/kubeasz/bin/cfssl gencert \
  -ca=/etc/kubeasz/clusters/test-k8s/ssl/ca.pem \
  -ca-key=/etc/kubeasz/clusters/test-k8s/ssl/ca-key.pem \
  -config=/etc/kubeasz/clusters/test-k8s/ssl/ca-config.json \
  -profile=kubernetes kubernetes-csr.json | /etc/kubeasz/bin/cfssljson -bare kubernetes
# Verify the new certificate's expiry
/etc/kubeasz/bin/cfssl-certinfo -cert kubernetes.pem | grep 'not_after'
# Distribute to the master nodes (services are restarted later,
# after all master certs have been replaced)
K8S_MASTER_IP="192.168.20.111
192.168.20.112
192.168.20.113"
for ip in ${K8S_MASTER_IP}
do
  scp kubernetes.pem kubernetes-key.pem root@${ip}:/etc/kubernetes/ssl
done
api-server 之 aggregator api
cd /etc/kubeasz/clusters/test-k8s/ssl
# Make sure the CSR request file exists
cat aggregator-proxy-csr.json
# Generate the new aggregator-proxy certificate (fixed '//' in the cfssljson path)
/etc/kubeasz/bin/cfssl gencert \
  -ca=/etc/kubeasz/clusters/test-k8s/ssl/ca.pem \
  -ca-key=/etc/kubeasz/clusters/test-k8s/ssl/ca-key.pem \
  -config=/etc/kubeasz/clusters/test-k8s/ssl/ca-config.json \
  -profile=kubernetes aggregator-proxy-csr.json | /etc/kubeasz/bin/cfssljson -bare aggregator-proxy
# Verify the new certificate's expiry
/etc/kubeasz/bin/cfssl-certinfo -cert aggregator-proxy.pem | grep 'not_after'
# Distribute to the master nodes
K8S_MASTER_IP="192.168.20.111
192.168.20.112
192.168.20.113"
for ip in ${K8S_MASTER_IP}
do
  scp aggregator-proxy.pem aggregator-proxy-key.pem root@${ip}:/etc/kubernetes/ssl
done
controller-manager
cd /etc/kubeasz/clusters/test-k8s/ssl
# Make sure the CSR request file exists
cat kube-controller-manager-csr.json
# Generate the new certificate (fixed '//' in the cfssljson path)
/etc/kubeasz/bin/cfssl gencert \
  -ca=/etc/kubeasz/clusters/test-k8s/ssl/ca.pem \
  -ca-key=/etc/kubeasz/clusters/test-k8s/ssl/ca-key.pem \
  -config=/etc/kubeasz/clusters/test-k8s/ssl/ca-config.json \
  -profile=kubernetes kube-controller-manager-csr.json | /etc/kubeasz/bin/cfssljson -bare kube-controller-manager
# Verify the new certificate's expiry
/etc/kubeasz/bin/cfssl-certinfo -cert kube-controller-manager.pem | grep 'not_after'
# Embed the new cert/key into kube-controller-manager.kubeconfig
# (--embed-certs=true inlines the PEM data instead of referencing file paths)
kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=/etc/kubeasz/clusters/test-k8s/ssl/kube-controller-manager.pem \
  --client-key=/etc/kubeasz/clusters/test-k8s/ssl/kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=/etc/kubeasz/clusters/test-k8s/kube-controller-manager.kubeconfig
# Push the refreshed kubeconfig to every master node
K8S_MASTER_IP="192.168.20.111
192.168.20.112
192.168.20.113"
for ip in ${K8S_MASTER_IP}
do
  scp /etc/kubeasz/clusters/test-k8s/kube-controller-manager.kubeconfig root@${ip}:/etc/kubernetes/
done
kube-scheduler
# kube-scheduler: re-issue its client cert and refresh its kubeconfig
CLUSTER_DIR=/etc/kubeasz/clusters/test-k8s
cd "${CLUSTER_DIR}/ssl"
# Make sure the CSR request file exists
cat kube-scheduler-csr.json
# Issue a replacement certificate signed by the cluster CA
/etc/kubeasz/bin/cfssl gencert \
  -ca="${CLUSTER_DIR}/ssl/ca.pem" \
  -ca-key="${CLUSTER_DIR}/ssl/ca-key.pem" \
  -config="${CLUSTER_DIR}/ssl/ca-config.json" \
  -profile=kubernetes kube-scheduler-csr.json | /etc/kubeasz/bin/cfssljson -bare kube-scheduler
# Check the new expiry date
/etc/kubeasz/bin/cfssl-certinfo -cert kube-scheduler.pem | grep 'not_after'
# Embed the new cert/key into kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
  --client-certificate="${CLUSTER_DIR}/ssl/kube-scheduler.pem" \
  --client-key="${CLUSTER_DIR}/ssl/kube-scheduler-key.pem" \
  --embed-certs=true \
  --kubeconfig="${CLUSTER_DIR}/kube-scheduler.kubeconfig"
# Push the refreshed kubeconfig to every master node
for ip in 192.168.20.111 192.168.20.112 192.168.20.113
do
  scp "${CLUSTER_DIR}/kube-scheduler.kubeconfig" root@${ip}:/etc/kubernetes/
done
重启所有master服务
# Restart every control-plane service so they load the renewed certificates
for ip in 192.168.20.111 192.168.20.112 192.168.20.113
do
  ssh root@${ip} systemctl restart kube-apiserver.service kube-controller-manager.service kube-scheduler.service
done
5. 更新 node 组件使用的 SSL 证书
kubelet
# kubelet: every node has its own certificate, so each step loops per node
cd /etc/kubeasz/clusters/test-k8s/ssl
# Make sure each node's CSR request file exists
K8S_NODE_IP="192.168.20.111
192.168.20.112
192.168.20.113
192.168.20.121
192.168.20.122
192.168.20.123"
for ip in ${K8S_NODE_IP}
do
  if [[ ! -f "${ip}-kubelet-csr.json" ]]
  then
    echo "${ip}-kubelet-csr.json 不存在"
  fi
done
# Re-issue the kubelet certificate for every node
# (fixed '//' path typos; fixed the error check — bare $? only reflected
# cfssljson, the last stage of the pipeline, not cfssl itself)
for ip in ${K8S_NODE_IP}
do
  /etc/kubeasz/bin/cfssl gencert \
    -ca=/etc/kubeasz/clusters/test-k8s/ssl/ca.pem \
    -ca-key=/etc/kubeasz/clusters/test-k8s/ssl/ca-key.pem \
    -config=/etc/kubeasz/clusters/test-k8s/ssl/ca-config.json \
    -profile=kubernetes ${ip}-kubelet-csr.json | /etc/kubeasz/bin/cfssljson -bare ${ip}-kubelet
  rc=("${PIPESTATUS[@]}")  # capture both stages' exit codes immediately
  if [[ ${rc[0]} -ne 0 || ${rc[1]} -ne 0 ]]
  then
    echo "${ip}-kubelet 更新 SSL 证书失败"
  fi
done
# Spot-check a few of the new certificates
/etc/kubeasz/bin/cfssl-certinfo -cert 192.168.20.112-kubelet.pem | grep 'not_after'
# Embed each node's new cert/key into its own kubelet kubeconfig
for ip in ${K8S_NODE_IP}
do
  kubectl config set-credentials system:node:${ip} \
    --client-certificate=/etc/kubeasz/clusters/test-k8s/ssl/${ip}-kubelet.pem \
    --embed-certs=true \
    --client-key=/etc/kubeasz/clusters/test-k8s/ssl/${ip}-kubelet-key.pem \
    --kubeconfig=/etc/kubeasz/clusters/test-k8s/${ip}-kubelet.kubeconfig
done
# Distribute the kubeconfig and cert/key to every node
for ip in ${K8S_NODE_IP}
do
  scp /etc/kubeasz/clusters/test-k8s/${ip}-kubelet.kubeconfig root@${ip}:/etc/kubernetes/kubelet.kubeconfig
  scp /etc/kubeasz/clusters/test-k8s/ssl/${ip}-kubelet.pem root@${ip}:/etc/kubernetes/ssl/kubelet.pem
  scp /etc/kubeasz/clusters/test-k8s/ssl/${ip}-kubelet-key.pem root@${ip}:/etc/kubernetes/ssl/kubelet-key.pem
done
kube-proxy
# kube-proxy shares one client certificate across all nodes
CLUSTER_DIR=/etc/kubeasz/clusters/test-k8s
cd "${CLUSTER_DIR}/ssl"
# Make sure the CSR request file exists
cat kube-proxy-csr.json
# Issue the replacement certificate
/etc/kubeasz/bin/cfssl gencert \
  -ca="${CLUSTER_DIR}/ssl/ca.pem" \
  -ca-key="${CLUSTER_DIR}/ssl/ca-key.pem" \
  -config="${CLUSTER_DIR}/ssl/ca-config.json" \
  -profile=kubernetes kube-proxy-csr.json | /etc/kubeasz/bin/cfssljson -bare kube-proxy
# Embed the new cert/key into kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
  --client-certificate="${CLUSTER_DIR}/ssl/kube-proxy.pem" \
  --client-key="${CLUSTER_DIR}/ssl/kube-proxy-key.pem" \
  --embed-certs=true \
  --kubeconfig="${CLUSTER_DIR}/kube-proxy.kubeconfig"
# Copy the refreshed kubeconfig to every node
for ip in 192.168.20.111 192.168.20.112 192.168.20.113 192.168.20.121 192.168.20.122 192.168.20.123
do
  scp "${CLUSTER_DIR}/kube-proxy.kubeconfig" root@${ip}:/etc/kubernetes/kube-proxy.kubeconfig
done
重启各个节点上的 kubelet、kube-proxy 服务
# Restart kubelet and kube-proxy on every node so the new certs take effect
for ip in 192.168.20.111 192.168.20.112 192.168.20.113 192.168.20.121 192.168.20.122 192.168.20.123
do
  ssh root@${ip} systemctl restart kubelet.service kube-proxy.service
done
6. 更新网络附件 calico 使用的 SSL证书
# calico talks to etcd directly (see the etcd-* secret keys below),
# so it needs its own etcd client certificate
cd /etc/kubeasz/clusters/test-k8s/ssl
# Make sure the CSR request file exists
cat calico-csr.json
# Generate the new certificate (normalized pipe spacing to match the other sections)
/etc/kubeasz/bin/cfssl gencert \
  -ca=/etc/kubeasz/clusters/test-k8s/ssl/ca.pem \
  -ca-key=/etc/kubeasz/clusters/test-k8s/ssl/ca-key.pem \
  -config=/etc/kubeasz/clusters/test-k8s/ssl/ca-config.json \
  -profile=kubernetes calico-csr.json | /etc/kubeasz/bin/cfssljson -bare calico
# Verify the new certificate's expiry
/etc/kubeasz/bin/cfssl-certinfo -cert calico.pem | grep 'not_after'
# Delete the old calico-etcd-secrets ('|| echo' keeps going if it is absent)
kubectl -n kube-system delete secrets calico-etcd-secrets || echo "NotFound"
# Recreate calico-etcd-secrets from the new cert
cd /etc/kubeasz/clusters/test-k8s/ssl
kubectl create secret generic -n kube-system calico-etcd-secrets \
  --from-file=etcd-ca=ca.pem \
  --from-file=etcd-key=calico-key.pem \
  --from-file=etcd-cert=calico.pem
# Distribute the calico cert to every node
K8S_NODE_IP="192.168.20.111
192.168.20.112
192.168.20.113
192.168.20.121
192.168.20.122
192.168.20.123"
for ip in ${K8S_NODE_IP}
do
  scp calico-key.pem calico.pem root@${ip}:/etc/calico/ssl
done
# Rolling-restart calico so it picks up the refreshed secret and certs
kubectl rollout restart deployment calico-kube-controllers -n kube-system
kubectl rollout restart daemonset calico-node -n kube-system
7. 重新生成管理员(admin)用户以及其他用户使用的 kubeconfig
admin 用户
cd /etc/kubeasz/clusters/test-k8s/ssl
# Make sure the CSR request file exists
cat admin-csr.json
# Generate the new admin client certificate (fixed '//' in the cfssljson path)
/etc/kubeasz/bin/cfssl gencert \
  -ca=/etc/kubeasz/clusters/test-k8s/ssl/ca.pem \
  -ca-key=/etc/kubeasz/clusters/test-k8s/ssl/ca-key.pem \
  -config=/etc/kubeasz/clusters/test-k8s/ssl/ca-config.json \
  -profile=kubernetes admin-csr.json | /etc/kubeasz/bin/cfssljson -bare admin
# Verify the new certificate's expiry
/etc/kubeasz/bin/cfssl-certinfo -cert admin.pem | grep 'not_after'
# Embed the new cert/key into kubectl.kubeconfig
kubectl config set-credentials admin \
  --client-certificate=/etc/kubeasz/clusters/test-k8s/ssl/admin.pem \
  --embed-certs=true \
  --client-key=/etc/kubeasz/clusters/test-k8s/ssl/admin-key.pem \
  --kubeconfig=/etc/kubeasz/clusters/test-k8s/kubectl.kubeconfig
cp -a /etc/kubeasz/clusters/test-k8s/kubectl.kubeconfig ~/.kube/config
# Smoke test: fails with an x509 error if anything above went wrong
kubectl get nodes
(可选)各节点上的 kubectl.kubeconfig
# Skip this step if the nodes have no local kubeconfig configured.
# New kubeasz versions do not put a kubectl.kubeconfig on the nodes;
# old versions gave every node an admin-privileged kubectl.kubeconfig.
# (Removed the extra set-credentials against kubectl.kubeconfig that was here:
# it duplicated, argument for argument, the command already run in the admin step.)
# Re-embed the admin cert/key into one kubeconfig per node
K8S_NODE_IP="192.168.20.111
192.168.20.112
192.168.20.113"
for ip in ${K8S_NODE_IP}
do
  kubectl config set-credentials admin \
    --client-certificate=/etc/kubeasz/clusters/test-k8s/ssl/admin.pem \
    --embed-certs=true \
    --client-key=/etc/kubeasz/clusters/test-k8s/ssl/admin-key.pem \
    --kubeconfig=/etc/kubeasz/clusters/test-k8s/${ip}-kubectl.kubeconfig
done
# Distribute the kubeconfig to each node.
# Fixed: the original target '~/config' is not read by kubectl —
# it looks for ~/.kube/config (confirm no node relies on the old path).
for ip in ${K8S_NODE_IP}
do
  ssh root@${ip} mkdir -p /root/.kube
  scp /etc/kubeasz/clusters/test-k8s/${ip}-kubectl.kubeconfig root@${ip}:/root/.kube/config
done