二进制方式搭建Kubernetes 1.19.3高可用集群(四)——部署worker节点和网络插件

发布时间:2020-11-03 21:25:14阅读:(514)

本文将介绍通过二进制方式部署kubelet、kube-proxy组件以及网络组件calico的安装

部署kubelet(worker节点)

创建bootstrap配置文件

将kubeadm放到bin目录下,方便执行命令
# Put kubeadm on PATH so we can mint bootstrap tokens from here.
cp master/kubeadm /usr/local/bin

# Work from the directory where the admin certificates were generated earlier.
cd target/admin

# Create a bootstrap token for the kubelet TLS bootstrapping flow.
export BOOTSTRAP_TOKEN=$(kubeadm token create --description kubelet-bootstrap-token --groups system:bootstrappers:worker --kubeconfig kube.config)

# Assemble kubelet-bootstrap.kubeconfig step by step.
readonly BOOTSTRAP_KUBECONFIG=kubelet-bootstrap.kubeconfig
readonly APISERVER=https://10.0.50.254:6443

# Cluster parameters (embed the CA so the kubeconfig is self-contained).
kubectl config set-cluster kubernetes --certificate-authority=../ca.pem --embed-certs=true --server="${APISERVER}" --kubeconfig="${BOOTSTRAP_KUBECONFIG}"

# Client credentials: authenticate with the bootstrap token.
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig="${BOOTSTRAP_KUBECONFIG}"

# Context tying cluster and user together, then make it the default.
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig="${BOOTSTRAP_KUBECONFIG}"
kubectl config use-context default --kubeconfig="${BOOTSTRAP_KUBECONFIG}"

# Create the config and cert directories on every worker node, then push
# the bootstrap kubeconfig and the cluster CA to each of them.
for node in 10.0.60.101 10.0.60.102 10.0.60.103; do
  ssh "root@${node}" "mkdir -p /etc/kubernetes"
  ssh "root@${node}" "mkdir -p /etc/kubernetes/pki"
  scp kubelet-bootstrap.kubeconfig "${node}:/etc/kubernetes/kubelet-bootstrap.kubeconfig"
  scp ../ca.pem "root@${node}:/etc/kubernetes/pki/"
done

创建kubelet配置文件

#注意,修改address为各节点IP,clusterDNS为service cidr的第二个地址
# Render the per-node KubeletConfiguration (kubelet.config.k8s.io/v1beta1).
# One file per worker is required: duplicate this heredoc for each node and
# change "address" to that node's IP. "clusterDNS" is the second address of
# the service CIDR (10.0.120.2 here).
# NOTE(review): webhook authN/authZ requires the kubelet to reach the
# apiserver; anonymous access is disabled, client certs validated via ca.pem.
cat > kubelet.config-60-101.json<<EOF
{
"kind": "KubeletConfiguration",
"apiVersion": "kubelet.config.k8s.io/v1beta1",
"authentication": {
"x509": {
"clientCAFile": "/etc/kubernetes/pki/ca.pem"
},
"webhook": {
"enabled": true,
"cacheTTL": "2m0s"
},
"anonymous": {
"enabled": false
}
},
"authorization": {
"mode": "Webhook",
"webhook": {
"cacheAuthorizedTTL": "5m0s",
"cacheUnauthorizedTTL": "30s"
}
},
"address": "10.0.60.101",
"port": 10250,
"readOnlyPort": 10255,
"cgroupDriver": "systemd",
"hairpinMode": "promiscuous-bridge",
"serializeImagePulls": false,
"featureGates": {
"RotateKubeletClientCertificate": true,
"RotateKubeletServerCertificate": true
},
"clusterDomain": "cluster.local.",
"clusterDNS": ["10.0.120.2"]
}
EOF

# Push each node-specific kubelet config, renaming it on the remote side to
# the path the systemd unit expects (/etc/kubernetes/kubelet.config.json).
for octet in 101 102 103; do
  scp "kubelet.config-60-${octet}.json" "root@10.0.60.${octet}:/etc/kubernetes/kubelet.config.json"
done

创建kubelet服务文件

# Render the kubelet systemd unit. The kubelet bootstraps itself:
# on first start only --bootstrap-kubeconfig exists; after the CSR is
# approved the kubelet writes its real kubeconfig/certs into --cert-dir
# and /etc/kubernetes/kubelet.kubeconfig.
# NOTE(review): assumes the kubelet binary was installed to
# /opt/kubernetes/bin in an earlier part of this series — confirm.
cat > kubelet.service<<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \\
--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \\
--cert-dir=/etc/kubernetes/pki \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--config=/etc/kubernetes/kubelet.config.json \\
--network-plugin=cni \\
--pod-infra-container-image=k8s.gcr.io/pause-amd64:3.1 \\
--alsologtostderr=true \\
--logtostderr=false \\
--log-dir=/var/log/kubernetes \\
--v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# Distribute the kubelet unit file to every worker node.
# FIX: the original copied to 10.0.60.101 three times, leaving .102 and
# .103 without a kubelet.service — corrected to cover all three nodes.
scp kubelet.service root@10.0.60.101:/etc/systemd/system/
scp kubelet.service root@10.0.60.102:/etc/systemd/system/
scp kubelet.service root@10.0.60.103:/etc/systemd/system/

启动服务(在各worker节点执行)

kubelet 启动时查找配置的 --kubeconfig 文件是否存在,如果不存在则使用 --bootstrap-kubeconfig 向 kube-apiserver 发送证书签名请求 (CSR)。kube-apiserver 收到 CSR 请求后,对其中的 Token 进行认证(事先使用 kubeadm 创建的 token),认证通过后将请求的 user 设置为 system:bootstrap:&lt;token-id&gt;,group 设置为 system:bootstrappers,这就是Bootstrap Token Auth。

# Authorize the bootstrappers group to submit CSRs (run once, on a master).
# Without this binding the kubelets' bootstrap requests are rejected.
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers

# Start the kubelet (run on every worker node).
mkdir -p /var/lib/kubelet
systemctl daemon-reload && systemctl enable kubelet && systemctl start kubelet

# On a master, approve the pending bootstrap CSRs (<name> comes from `kubectl get csr`).
kubectl get csr
kubectl certificate approve <name>

# Check the kubelet service status.
service kubelet status

# Tail the logs. cni-related errors are expected at this point because the
# network plugin has not been deployed yet.
journalctl -f -u kubelet

部署kube-proxy(worker节点)

创建证书和私钥

# Generate the kube-proxy client certificate. CN "system:kube-proxy" maps
# to the built-in system:node-proxier RBAC role, so no extra binding is
# needed for kube-proxy to talk to the apiserver.
cd target && mkdir proxy && cd proxy
cat > kube-proxy-csr.json<<EOF
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Hangzhou",
"L": "Hangzhou",
"O": "k8s",
"OU": "System"
}
]
}
EOF
# Sign with the cluster CA created earlier (../ca.pem / ../ca-key.pem),
# producing kube-proxy.pem and kube-proxy-key.pem.
cfssl gencert -ca=../ca.pem -ca-key=../ca-key.pem -config=../ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

创建kubeconfig文件

# Build kube-proxy.kubeconfig from the freshly issued client certificate,
# then distribute it to every worker node.
proxy_cfg=kube-proxy.kubeconfig
kubectl config set-cluster kubernetes --certificate-authority=../ca.pem --embed-certs=true --server=https://10.0.50.254:6443 --kubeconfig="${proxy_cfg}"
kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig="${proxy_cfg}"
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig="${proxy_cfg}"
kubectl config use-context default --kubeconfig="${proxy_cfg}"

for node in 10.0.60.101 10.0.60.102 10.0.60.103; do
  scp "${proxy_cfg}" "root@${node}:/etc/kubernetes/"
done

创建kube-proxy.config.yaml

# Render the per-node KubeProxyConfiguration. Duplicate this heredoc per
# worker and change bindAddress / healthzBindAddress / metricsBindAddress
# to that node's IP. clusterCIDR must match the pod network (calico pool).
# FIX: "kubeconfig" must be nested under "clientConnection" — the original
# had it at top level (YAML indentation lost), which kube-proxy rejects.
cat > kube-proxy.config-60-101.yaml<<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 10.0.60.101
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 172.19.0.0/16
healthzBindAddress: 10.0.60.101:10256
kind: KubeProxyConfiguration
metricsBindAddress: 10.0.60.101:10249
mode: "ipvs"
EOF

# Push each node-specific kube-proxy config, renaming it on the remote side
# to the path the systemd unit expects (/etc/kubernetes/kube-proxy.config.yaml).
for octet in 101 102 103; do
  scp "kube-proxy.config-60-${octet}.yaml" "root@10.0.60.${octet}:/etc/kubernetes/kube-proxy.config.yaml"
done

创建service文件

# Render the kube-proxy systemd unit. All runtime settings come from the
# node-local /etc/kubernetes/kube-proxy.config.yaml distributed above.
# NOTE(review): assumes the kube-proxy binary was installed to
# /opt/kubernetes/bin in an earlier part of this series — confirm.
cat > kube-proxy.service<<EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \\
--config=/etc/kubernetes/kube-proxy.config.yaml \\
--alsologtostderr=true \\
--logtostderr=false \\
--log-dir=/var/log/kubernetes \\
--v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Install the kube-proxy unit file on every worker node.
for node in 10.0.60.101 10.0.60.102 10.0.60.103; do
  scp kube-proxy.service "root@${node}:/etc/systemd/system/"
done

启动服务

# Create the working and log directories (run on each worker node).
mkdir -p /var/lib/kube-proxy && mkdir -p /var/log/kubernetes

# Enable and start the service.
systemctl daemon-reload && systemctl enable kube-proxy && systemctl start kube-proxy

# Check the service status.
service kube-proxy status

# Tail the logs.
journalctl -f -u kube-proxy

部署CNI插件-calico

因为部分镜像下载速度较慢,所以我这里打包好了,可以直接导入

链接: https://pan.baidu.com/s/15oZvg2N8uxJED5y1jRmszw
密码: a0q0
# Ship the pre-packaged CNI image tarball to every worker and load it into
# the local docker image cache.
for node in 10.0.60.101 10.0.60.102 10.0.60.103; do
  scp cni-image.tar "root@${node}:/tmp"
  ssh "root@${node}" "docker load -i /tmp/cni-image.tar"
done

部署calico和coredns(找一台master节点执行)

# Download the calico v3.16 manifest and the coredns addon template.
# NOTE(review): both need the edits described below before being applied.
wget https://docs.projectcalico.org/v3.16/manifests/calico.yaml
wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dns/coredns/coredns.yaml.base -O coredns.yaml

#修改calico.yaml
...
- name: CALICO_IPV4POOL_CIDR
value: "172.19.0.0/16" #修改pod ip段
- name: FELIX_IPINIPENABLED # 添加这个配置,关闭felix_ipinip
value: "false"
- name: CALICO_IPV4POOL_IPIP
value: "off" # 这里修改为off,关闭ipip模式

#修改coredns.yaml
...
__DNS__DOMAIN__ 修改为 cluster.local
__DNS__MEMORY__LIMIT__ 修改为170Mi
__DNS__SERVER__ 修改为10.0.120.2(与前文 kubelet 配置中的 clusterDNS 保持一致)

# Apply the edited manifests (run on a master node).
kubectl apply -f calico.yaml

kubectl apply -f coredns.yaml
# Verify that all system components and pods come up across namespaces.
kubectl get all --all-namespaces

部分配置文件,详见GITEE

标签:k8s

发表评论

评论列表(有0条评论514人围观)
暂无评论