
Installing a multi-master high-availability cluster directly with kubeadm

Preface

There are two earlier posts: one installs a single-master cluster, and the other converts that single master into a multi-node cluster. This post installs a multi-master cluster from the very beginning, which is simpler.

Environment preparation

  • CentOS 7
  • Three master machines plus one virtual IP
    • 10.4.7.10 (VIP, managed by keepalived)
    • 10.4.7.12
    • 10.4.7.13
    • 10.4.7.14

Failover test

  • Shut down one master: the cluster keeps working.
  • Shut down two masters: the cluster becomes unreachable. etcd only serves requests while a majority (quorum) of its members are alive; with three members the quorum is two, so losing two leaves just one. If the cluster must survive down to a single machine, run extra etcd members on other machines so a quorum can still be formed (see the health-check sketch below).
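
As a hedged sketch of checking quorum health, the snippet below queries etcd from inside the kubeadm-managed etcd static pod; the pod name follows kubeadm's etcd-<node-name> convention and the certificate paths are the kubeadm defaults, so adjust both to your environment:

# Check the health of every etcd member in the cluster.
# etcd-k8s-master-1.host.com is an assumed pod name; list yours with:
#   kubectl get pods -n kube-system | grep etcd
kubectl -n kube-system exec etcd-k8s-master-1.host.com -- etcdctl \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key \
  endpoint health --cluster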

If kubeadm was installed before, any existing flannel state must be removed first

  • If you don't, you will hit errors like: Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "35ca94dad9ed14fefba648542cd6a0849f7ed643ee9bae3edfadd09e782740a0" network for pod "dashboard-metrics-scraper-856586f554-8j4zq": networkPlugin cni failed to set up pod "dashboard-metrics-scraper-856586f554-8j4zq_kubernetes-dashboard" network: failed to delegate add: failed to set bridge addr: "cni0" already has an IP address different from 10.244.4.1/24
[root@localhost ~]# rm -rf /etc/cni/net.d
[root@localhost ~]# ipvsadm --clear 
[root@localhost ~]# ip link set cni0 down && ip link set flannel.1 down
[root@localhost ~]# ip link delete cni0 && ip link delete flannel.1
[root@localhost ~]# systemctl restart containerd && systemctl restart kubelet
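
A quick sanity check that the cleanup actually took effect:

# The old bridge and VXLAN interfaces should be gone
ip link show cni0 2>/dev/null || echo "cni0 removed"
ip link show flannel.1 2>/dev/null || echo "flannel.1 removed"
# IPVS rule table and CNI config directory should be empty
ipvsadm -Ln
ls /etc/cni/net.d 2>/dev/null || echo "no leftover CNI configs"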

Note: the base environment below is installed only on one template machine; the other two machines are then cloned from it. Each clone needs the following changes:

1. Change the IP address: vi /etc/sysconfig/network-scripts/ifcfg-ens33

2. Change the hostname: hostnamectl set-hostname k8s-master-2.host.com

3. Install keepalived

Disable the firewall, swap, and SELinux

# Disable SELinux
[root@localhost ~]# getenforce 
Enforcing
# Disable temporarily
[root@localhost ~]# setenforce 0
[root@localhost ~]# getenforce 
Permissive
# Disable permanently (takes effect after reboot)
[root@localhost ~]# vi /etc/selinux/config
...
SELINUX=disabled
...

# Disable swap temporarily
swapoff -a
# Disable permanently (or open /etc/fstab with vi yourself and comment out the swap line)
sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab
# Verify swap is off: the Swap row in the output should be all zeros
free -m

# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld

Install package repositories and base tools

[root@localhost ~]# yum install -y epel-release 
[root@localhost ~]# yum install -y bind-utils telnet iftop unzip net-tools make  cyrus-sasl-devel wget lrzsz dos2unix sysstat  ipvsadm

Set hostnames and hosts entries

# Run on each of the three machines in turn
[root@localhost ~]# hostnamectl set-hostname k8s-master-1.host.com
[root@localhost ~]# bash
# On the other two machines respectively:
hostnamectl set-hostname k8s-master-2.host.com
hostnamectl set-hostname k8s-master-3.host.com
# Add these entries to /etc/hosts on all three machines (unnecessary later once a self-hosted bind9 DNS is set up)
[root@k8s-master-1 ~]# vim /etc/hosts
10.4.7.12 k8s-master-1.host.com
10.4.7.13 k8s-master-2.host.com
10.4.7.14 k8s-master-3.host.com

Pass bridged IPv4 traffic to iptables chains

[root@k8s-master-1 ~]# yum install -y bridge-utils.x86_64
[root@k8s-master-1 ~]# modprobe  br_netfilter
[root@k8s-master-1 ~]#  cat <<EOF | tee /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@k8s-master-1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
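
Note that the br_netfilter module loaded with modprobe above does not survive a reboot. A minimal sketch to make it persistent, assuming systemd's standard modules-load.d mechanism on CentOS 7:

# Load br_netfilter automatically at boot
cat <<EOF | tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF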

Enable IPVS

lsmod |grep ip_vs
cat > /root/ipvs.sh <<'eof'
#!/bin/bash
# Load every IPVS module shipped with the running kernel
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for i in $(ls $ipvs_mods_dir|grep -o "^[^.]*")
do
  # Only modprobe modules that modinfo can resolve
  /sbin/modinfo -F filename $i &>/dev/null
  if [ $? -eq 0 ];then
    /sbin/modprobe $i
  fi
done
eof
chmod +x /root/ipvs.sh 
sh /root/ipvs.sh 
lsmod |grep ip_vs
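
The IPVS modules are likewise lost on reboot. One hedged option is to list them in modules-load.d as well (the set below is the one commonly used for kube-proxy on CentOS 7; trim it to what the script actually loaded):

# Load the IPVS modules automatically at boot
cat <<EOF | tee /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF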

Install Docker

# Dependencies
yum install yum-utils device-mapper-persistent-data lvm2 -y
# Add the stable repository
yum-config-manager --add-repo \
  https://download.docker.com/linux/centos/docker-ce.repo
# List the available versions
yum list docker-ce --showduplicates | sort -r
# Install the 20.10 release (docker-ce 20.10 requires containerd.io >= 1.4.1, so don't pin an older build)
yum install docker-ce-20.10.9-3.el7 docker-ce-cli-20.10.9 containerd.io -y
# Configure a registry mirror
mkdir -p /etc/docker
# systemd here because kubelet requires the cgroup driver to be systemd
tee /etc/docker/daemon.json << "eof"
{
  "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
eof
# Enable Docker at boot and (re)start it
systemctl enable docker && systemctl restart docker
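
Since kubelet requires the systemd cgroup driver, it is worth confirming that the daemon picked up the setting from daemon.json:

# Should print: Cgroup Driver: systemd
docker info 2>/dev/null | grep -i "cgroup driver"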

Install kubelet, kubeadm, and kubectl (template machine setup ends here)

[root@k8s-master-1 ~]#  cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
[root@k8s-master-1 ~]#  yum install -y kubelet-1.22.3 kubeadm-1.22.3 kubectl-1.22.3 --disableexcludes=kubernetes
# Enable kubelet at boot
[root@k8s-master-1 ~]# systemctl enable kubelet.service
[root@k8s-master-1 ~]# kubectl version --short
Client Version: v1.22.3
# (the Server Version line appears only once the control plane is up)

Install the master nodes

Install the primary keepalived

[root@k8s-master-1 ~]# yum install -y keepalived
[root@k8s-master-1 ~]# tee /etc/keepalived/check_port.sh << 'EOF'
#!/bin/bash
# keepalived port-check script: exit 1 when nothing listens on the given port
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
        PORT_PROCESS=`ss -lnt|grep $CHK_PORT |wc -l`
        if [ $PORT_PROCESS -eq 0 ];then
                echo "Port $CHK_PORT Is Not Used,End."
                exit 1
        fi
else
        echo "Check Port Cant Be Empty!"
fi
EOF
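
You can sanity-check the script by hand before keepalived uses it. At this stage nothing listens on 6443 yet, so an exit code of 1 is expected; it becomes 0 once kube-apiserver is up:

[root@k8s-master-1 ~]# chmod +x /etc/keepalived/check_port.sh
[root@k8s-master-1 ~]# sh /etc/keepalived/check_port.sh 6443; echo "exit=$?"
Port 6443 Is Not Used,End.
exit=1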

# Primary keepalived config. Note: replace the NIC name ens33 with your own; check it with: ip addr |grep "ens"
[root@k8s-master-1 ~]# tee /etc/keepalived/keepalived.conf << "EOF"
! Configuration File for keepalived
global_defs {
   router_id 10.4.7.12
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 6443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 10.4.7.12
    nopreempt
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.4.7.10
    }
}
EOF

[root@k8s-master-1 ~]# systemctl start keepalived
[root@k8s-master-1 ~]# systemctl enable keepalived
[root@k8s-master-1 ~]# systemctl status keepalived
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2021-11-22 11:26:32 CST; 3s ago
[root@k8s-master-1 ~]# ip addr |grep "7.10"
    inet 10.4.7.10/32 scope global ens33
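
Once the backup instance from the later section is running, a hedged failover test is to stop keepalived on this node and confirm the VIP moves over:

# On k8s-master-1: simulate a failure
systemctl stop keepalived
# On k8s-master-2: the VIP should appear within a few advert intervals
ip addr | grep "7.10"
# On k8s-master-1: restore the service afterwards
systemctl start keepalived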

  • Run the following on k8s-master-1 (10.4.7.12)

Method 1: run kubeadm init directly

# Note: --control-plane-endpoint must point at the virtual VIP, and this exact flag is required; without it kubeadm will not print the control-plane join hint "You can now join any number of the control-plane node running the following command on each as root:"
[root@k8s-master-1 ~]# kubeadm init  --kubernetes-version=v1.22.3  \
--image-repository=registry.aliyuncs.com/google_containers \
--service-cidr=172.17.0.0/16 \
--pod-network-cidr=10.244.0.0/16 \
--upload-certs \
--control-plane-endpoint 10.4.7.10:6443

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 10.4.7.10:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:9dc732b42271a5f0f436d046aa1b94d850a336def4837d1ebc0d706b359875a3 \
	--control-plane --certificate-key 979b06fc4d9be0d94cea2519200fe32045f82f4eb4f096026b57b12f27a6b959

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.4.7.10:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:9dc732b42271a5f0f436d046aa1b94d850a336def4837d1ebc0d706b359875a3 

[root@k8s-master-1 ~]# mkdir -p $HOME/.kube
[root@k8s-master-1 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master-1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
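
A quick sanity check at this point; the node will report NotReady until the CNI plugin is installed further below:

[root@k8s-master-1 ~]# kubectl get nodes
[root@k8s-master-1 ~]# kubectl get pods -n kube-system -o wide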

Method 2: create the cluster from a config file

[root@k8s-master-1 ~]# systemctl enable kubelet.service
# Note: the last IP in the config below is the keepalived virtual IP set up above
[root@k8s-master-1 ~]# kubeadm config print init-defaults > kubeadm-master.config
# Note the controlPlaneEndpoint: 10.4.7.10:6443 field; without it kubeadm will not print the control-plane join hint "You can now join any number of the control-plane node running the following"
[root@k8s-master-1 ~]# vim kubeadm-master.config
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # Change to the primary node's IP; note: for a multi-master setup this is set to the VIP address
  advertiseAddress: 10.4.7.10
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  imagePullPolicy: IfNotPresent
  name: k8s-master-1
  taints: null
---
apiServer:
  certSANs:
  - api.k8s.local
  - k8s-master-1
  - k8s-master-2
  - k8s-master-3
  - homelab-0-11
  - 10.4.7.10
  - 10.4.7.12
  - 10.4.7.13
  - 10.4.7.14
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 10.4.7.10:6443
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
# Switch the image repository to the Aliyun mirror
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.22.0
networking:
  # Make sure the pod subnet does not overlap with the host network (flannel's default subnet is used here); if the hosts already use this range, it must be changed
  podSubnet: 10.244.0.0/16
  dnsDomain: cluster.local
  serviceSubnet: 172.17.0.0/16
scheduler: {}
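
Before initializing for real, you can optionally dry-run the config to catch typos; kubeadm prints what it would do without changing the machine:

[root@k8s-master-1 ~]# kubeadm init --config=kubeadm-master.config --dry-run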

# List the images that will be downloaded
[root@k8s-master-1 ~]# kubeadm config images list --config kubeadm-master.config
registry.aliyuncs.com/google_containers/kube-apiserver:v1.22.3
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.22.3
registry.aliyuncs.com/google_containers/kube-scheduler:v1.22.3
registry.aliyuncs.com/google_containers/kube-proxy:v1.22.3
registry.aliyuncs.com/google_containers/pause:3.5
registry.aliyuncs.com/google_containers/etcd:3.5.0-0
registry.aliyuncs.com/google_containers/coredns:v1.8.4
# Pull the images
[root@k8s-master-1 ~]# kubeadm config images pull --config kubeadm-master.config
# Step 2: initialize Kubernetes
[root@k8s-master-1 ~]# kubeadm init --config=kubeadm-master.config --upload-certs | tee kubeadm-init.log
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config


You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 10.4.7.10:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:9dc732b42271a5f0f436d046aa1b94d850a336def4837d1ebc0d706b359875a3 \
	--control-plane --certificate-key 979b06fc4d9be0d94cea2519200fe32045f82f4eb4f096026b57b12f27a6b959

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.4.7.10:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:9dc732b42271a5f0f436d046aa1b94d850a336def4837d1ebc0d706b359875a3 

[root@k8s-master-1 ~]# mkdir -p $HOME/.kube
[root@k8s-master-1 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master-1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

Clone two VMs from the template and join them as masters

  • Change the IP address
[root@k8s-master-2 ~]# vim /etc/sysconfig/network-scripts/ifcfg-ens33 
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=d4320ba8-3daa-4535-b3be-0998e1679922
DEVICE=ens33
ONBOOT=yes
IPADDR=10.4.7.13
GATEWAY=10.4.7.2
NETMASK=255.255.255.0
DNS1=223.5.5.5
[root@k8s-master-1 ~]# hostnamectl set-hostname k8s-master-2.host.com
[root@k8s-master-1 ~]# bash
[root@k8s-master-2 ~]# 

Install the backup keepalived

[root@k8s-master-2 ~]# yum install -y keepalived
[root@k8s-master-2 ~]# tee /etc/keepalived/check_port.sh << 'EOF'
#!/bin/bash
# keepalived port-check script: exit 1 when nothing listens on the given port
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
        PORT_PROCESS=`ss -lnt|grep $CHK_PORT |wc -l`
        if [ $PORT_PROCESS -eq 0 ];then
                echo "Port $CHK_PORT Is Not Used,End."
                exit 1
        fi
else
        echo "Check Port Cant Be Empty!"
fi
EOF
[root@k8s-master-2 ~]# chmod +x /etc/keepalived/check_port.sh
[root@k8s-master-2 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
   router_id 10.4.7.13
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 6443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 251
    priority 90
    advert_int 1
    mcast_src_ip 10.4.7.13
    nopreempt
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.4.7.10
    }
}
[root@k8s-master-2 ~]# systemctl start keepalived && systemctl enable keepalived
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@k8s-master-2 ~]# systemctl status keepalived
[root@k8s-master-2 ~]# kubeadm join 10.4.7.10:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:9dc732b42271a5f0f436d046aa1b94d850a336def4837d1ebc0d706b359875a3 \
	--control-plane --certificate-key 979b06fc4d9be0d94cea2519200fe32045f82f4eb4f096026b57b12f27a6b959

[root@k8s-master-2 ~]# mkdir -p $HOME/.kube
[root@k8s-master-2 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master-2 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
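
If you join the third master more than two hours after kubeadm init, the uploaded certificate key will have expired, as the init output warned. A hedged sketch for regenerating the join credentials on an existing master:

# Re-upload the control-plane certs and print a fresh certificate key
[root@k8s-master-1 ~]# kubeadm init phase upload-certs --upload-certs
# Print a fresh worker join command; for a master, append
# --control-plane --certificate-key <key-from-above>
[root@k8s-master-1 ~]# kubeadm token create --print-join-command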

Install the flannel CNI plugin

[root@k8s-master-2 ~]#  kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created

[root@k8s-master-2 ~]# kubectl get nodes
NAME                    STATUS   ROLES                  AGE     VERSION
k8s-master-2.host.com   Ready    control-plane,master   8m23s   v1.22.3
k8s-master-3.host.com   Ready    control-plane,master   5m34s   v1.22.3
node                    Ready    control-plane,master   25m     v1.22.3
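
If a node stays NotReady, a hedged first check is whether the flannel DaemonSet pods came up (this manifest version deploys them into kube-system):

[root@k8s-master-2 ~]# kubectl get pods -n kube-system -l app=flannel -o wide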

Install the dashboard

[root@k8s-master-2 ~]# kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml
# Expose the service externally: near the bottom of the file, change type: ClusterIP to type: NodePort
[root@k8s-master-2 ~]#  kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
  type: NodePort
status:
  loadBalancer: {}
[root@k8s-master-2 ~]# kubectl get svc  -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   172.17.45.130   <none>        8000/TCP        55s
kubernetes-dashboard        NodePort    172.17.103.90   <none>        443:32369/TCP   55s

# Access https://10.4.7.10:32369 in a local browser

Get a login token

[root@k8s-master-1 ~]# kubectl create serviceaccount dashboard-admin-sa
serviceaccount/dashboard-admin-sa created
[root@k8s-master-1 ~]# kubectl create clusterrolebinding dashboard-admin-sa  --clusterrole=cluster-admin --serviceaccount=default:dashboard-admin-sa
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin-sa created
[root@k8s-master-1 ~]# kubectl get secrets
NAME                             TYPE                                  DATA   AGE
dashboard-admin-sa-token-2xsjt   kubernetes.io/service-account-token   3      21s
default-token-4kxdm              kubernetes.io/service-account-token   3      77m
[root@k8s-master-1 ~]# kubectl describe secret dashboard-admin-sa-token-2xsjt
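
The token sits in the Data section of that describe output. Alternatively, a hedged one-liner to extract just the token (the secret name is the one listed above):

[root@k8s-master-1 ~]# kubectl get secret dashboard-admin-sa-token-2xsjt \
  -o jsonpath='{.data.token}' | base64 -d; echo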
