
Converting a kubeadm-installed k8s cluster from a single-master deployment to multiple masters for high availability

Preface

The previous article installed k8s, but with only a single master node, so we now need to deploy multiple masters to make the control plane highly available.

Note: if keepalived with a virtual IP was already in place before running kubeadm init xxx, the output of installing the first master already tells you how to join additional masters; in that case you can jump straight to the [Run on the new master node] section below and skip everything else. This article covers the case where the cluster was originally installed with a single master node and is now being converted to multiple master nodes.

Machine nodes

  • master
    • 192.168.0.8 (virtual IP)
    • 192.168.0.9
    • 192.168.0.10
    • 192.168.0.11
  • node
    • 192.168.0.12
    • 192.168.0.13

Install bind9

  • Previously, name resolution between the machines relied on editing /etc/hosts, but as the number of machines grows that approach does not scale, so we build our own DNS server for name resolution.
  • Run the following on the 192.168.0.11 machine
  • Note that in my setup nginx is installed on 192.168.0.20; from now on, domain-name traffic is forwarded to that machine
[root@homelab-0-11 ~]#  yum install bind -y

# Edit the /etc/named.conf file and change the following settings
[root@homelab-0-11 ~]#   vi /etc/named.conf 

listen-on port 53 { 192.168.0.11; };
delete the line: listen-on-v6 port 53 { ::1; };
allow-query     { any; };
forwarders      { 192.168.0.1; };   # upstream DNS to forward to; adjust for your network

recursion yes;
dnssec-enable no;
dnssec-validation no;

[root@homelab-0-11 ~]# named-checkconf
  • Add the host.com and home.com domains
# Append to the end of the file
vi /etc/named.rfc1912.zones 
# Paste the following content
zone "host.com" IN {
        type master;
        file "host.com.zone";
        allow-update { 192.168.0.11; };
};

zone "home.com" IN {
        type master;
        file "home.com.zone";
        allow-update { 192.168.0.11; };
};
  • Create the host.com/home.com zone files and configure the subdomain records
[root@homelab-0-11 ~]# vim /var/named/host.com.zone
$TTL 600 ; 10 minutes
@       IN SOA  dns.host.com. dnsadmin.host.com. (
                                        2021111901      ; serial
                                        1D      ; refresh
                                        1H      ; retry
                                        1W      ; expire
                                        3H )    ; minimum
        NS      dns.host.com.
$TTL 60 ; 1 minute
dns     A       192.168.0.11
homelab-0-8 A     192.168.0.8
homelab-0-9 A     192.168.0.9
homelab-0-10 A     192.168.0.10
homelab-0-11 A     192.168.0.11
homelab-0-12  A     192.168.0.12
homelab-0-13  A     192.168.0.13
k8s-manage  A     192.168.0.20

[root@homelab-0-11 ~]# vim /var/named/home.com.zone
$TTL 600 ; 10 minutes
@       IN SOA  dns.home.com. dnsadmin.home.com. (
                                        2021111901      ; serial
                                        1D      ; refresh
                                        1H      ; retry
                                        1W      ; expire
                                        3H )    ; minimum
        NS      dns.home.com.
$TTL 60 ; 1 minute
dns     A       192.168.0.11
k8s A 192.168.0.20
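
Before configuring the clients, check the zone files and bring named up; a minimal sketch (the dig check assumes the bind-utils package is installed):

# Check the zone files and the main config
[root@homelab-0-11 ~]# named-checkzone host.com /var/named/host.com.zone
[root@homelab-0-11 ~]# named-checkzone home.com /var/named/home.com.zone
# Start named and enable it at boot
[root@homelab-0-11 ~]# systemctl start named
[root@homelab-0-11 ~]# systemctl enable named
# Test resolution against the new DNS server; it should return 192.168.0.10
[root@homelab-0-11 ~]# dig -t A homelab-0-10.host.com @192.168.0.11 +short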

Configure each machine to use 192.168.0.11 as its DNS server

  • Machines that need to be changed
    • 192.168.0.10
    • 192.168.0.11
    • 192.168.0.12
    • 192.168.0.13
# Note: the key must be DNS1, not DNS. Also, IPADDR=192.168.0.10 differs per machine; it is that machine's own IP address
[root@k8s-master-10 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens192
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens192
UUID=59af4bd9-469d-464f-af1e-fac11ad0f869
DEVICE=ens192
ONBOOT=yes
IPADDR=192.168.0.10
NETMASK=255.255.255.0
GATEWAY=192.168.0.1
DNS1=192.168.0.11

[root@k8s-master-10 ~]# service network restart
Restarting network (via systemctl):                        [  OK  ]
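
With the DNS setting applied, a quick check from any machine confirms that names in the host.com zone resolve (a minimal sketch):

[root@k8s-master-10 ~]# cat /etc/resolv.conf
nameserver 192.168.0.11
[root@k8s-master-10 ~]# ping -c 1 homelab-0-11.host.com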

Load the ipvs kernel modules

  • Machines to run this on: all master nodes
lsmod |grep ip_vs
cat > /root/ipvs.sh <<'eof'
#!/bin/bash
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for i in $(ls $ipvs_mods_dir|grep -o "^[^.]*")
do
  /sbin/modinfo -F filename $i &>/dev/null
  if [ $? -eq 0 ];then
    /sbin/modprobe $i
  fi
done
eof
chmod +x /root/ipvs.sh 
sh /root/ipvs.sh 
lsmod |grep ip_vs
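
Modules loaded with modprobe do not survive a reboot. One common way to make the load persistent (an optional extra, sketched here) is to call the script from rc.local:

chmod +x /etc/rc.d/rc.local
echo "sh /root/ipvs.sh" >> /etc/rc.d/rc.local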

Install keepalived

[root@homelab-0-11 ~]# yum install -y keepalived
[root@homelab-0-11 ~]#  tee /etc/keepalived/check_port.sh << 'EOF'
#!/bin/bash
# keepalived port-check script
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
        PORT_PROCESS=`ss -lnt|grep $CHK_PORT |wc -l`
        if [ $PORT_PROCESS -eq 0 ];then
                echo "Port $CHK_PORT Is Not Used,End."
                exit 1
        fi
else
        echo "Check Port Cannot Be Empty!"
        exit 1
EOF

[root@homelab-0-11 ~]# chmod +x /etc/keepalived/check_port.sh
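
The script can be sanity-checked by hand before keepalived uses it; on the first master, where kube-apiserver already listens on 6443, the exit code should be 0:

[root@homelab-0-11 ~]# sh /etc/keepalived/check_port.sh 6443; echo $?
0
[root@homelab-0-11 ~]# sh /etc/keepalived/check_port.sh 16443; echo $?
Port 16443 Is Not Used,End.
1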

# Configure the primary keepalived. Note the interface ens192: change ens192 to the NIC of the current machine, which you can find with ifconfig | grep ens
[root@homelab-0-11 ~]# cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
   router_id 192.168.0.11
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 6443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state MASTER
    interface ens192
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 192.168.0.11
     nopreempt
   authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        192.168.0.8
    }
}
EOF

# Configure the backup keepalived on the other machine(s)
cat > /etc/keepalived/keepalived.conf <<'eof'
! Configuration File for keepalived
global_defs {
    router_id 192.168.0.9
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 6443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    virtual_router_id 251
    mcast_src_ip 192.168.0.9
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        192.168.0.8
    }
}
eof

# Start keepalived
[root@homelab-0-11 ~]# systemctl start keepalived
[root@homelab-0-11 ~]# systemctl enable keepalived
[root@homelab-0-11 ~]# systemctl status keepalived
# Output like the following means the VIP was bound successfully
[root@homelab-0-11 ~]# ip addr |grep "192.168.0.8"
    inet 192.168.0.8/32 scope global ens33
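
Since kube-apiserver is already running on this node, the VIP should also answer API requests; a quick check (assuming the kubeadm default that /healthz is readable anonymously) looks like this:

[root@homelab-0-11 ~]# curl -k https://192.168.0.8:6443/healthz
ok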

Add master nodes

Important note

Because the previous chapter used the command kubeadm init --kubernetes-version=v1.22.3 xxx rather than kubeadm init --config=kubeadm-master.config, the kubeadm config contents and the certificates now need to be updated.

Update the kubeadm config

  • Run on homelab-0-11 (192.168.0.11) to generate the kubeadm.yaml configuration
[root@homelab-0-11 ~]# kubectl -n kube-system get configmap kubeadm-config -o jsonpath='{.data.ClusterConfiguration}' > kubeadm.yaml

# Edit kubeadm.yaml and add the certSANs section: api.k8s.local is required, the other entries are the master node names and master node IPs; controlPlaneEndpoint is the virtual IP (VIP) address
[root@homelab-0-11 ~]# cat kubeadm.yaml
apiServer:
  certSANs:
  - api.k8s.local
  - k8s-master-9
  - k8s-master-10
  - homelab-0-11
  - 192.168.0.8
  - 192.168.0.9
  - 192.168.0.10
  - 192.168.0.11
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.0.8:6443
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.22.3
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 172.17.0.0/16
scheduler: {}
  • Apply kubeadm.yaml: regenerate the apiserver certificate and update the kubeadm-config ConfigMap
# Move the existing apiserver certificate out of the way
[root@homelab-0-11 ~]#  mv /etc/kubernetes/pki/apiserver.{crt,key} ~
# Regenerate the certificate
[root@homelab-0-11 ~]# kubeadm init phase certs apiserver --config kubeadm.yaml

# Restart the apiserver (kill the container; kubelet recreates the static pod)
[root@homelab-0-11 ~]# docker ps | grep kube-apiserver | grep -v pause
a7c4a76b1a35   e64579b7d886                                        "kube-apiserver --ad…"   19 minutes ago   Up 19 minutes
[root@homelab-0-11 ~]# docker kill a7c4a76b1a35
  • Check that the certificate was updated successfully
# If the certificate prints and the certSANs entries appear under X509v3 Subject Alternative Name, the update succeeded
[root@homelab-0-11 ~]# openssl x509 -in /etc/kubernetes/pki/apiserver.crt -text
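
To look at just the SAN list, the same openssl output can be filtered like this:

[root@homelab-0-11 ~]# openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text | grep -A 2 "Subject Alternative Name"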

# Upload the new kubeadm config to the cluster
[root@homelab-0-11 ~]# kubeadm init phase upload-config kubeadm --config  kubeadm.yaml
# Check that the update succeeded
[root@homelab-0-11 ~]# kubectl -n kube-system get configmap kubeadm-config -o yaml

Update all IPs to the virtual IP

# Update the kubelet config
$ vi /etc/kubernetes/kubelet.conf
......
    server: https://192.168.0.8:6443
  name: kubernetes
......
$ systemctl restart kubelet
# Update controller-manager
$ vi /etc/kubernetes/controller-manager.conf
......
    server: https://192.168.0.8:6443
  name: kubernetes
......
# Restart
$ docker kill $(docker ps | grep kube-controller-manager | \
grep -v pause | cut -d' ' -f1)
# Update the scheduler
$ vi /etc/kubernetes/scheduler.conf
......
    server: https://192.168.0.8:6443
  name: kubernetes
......
# Restart
$ docker kill $(docker ps | grep kube-scheduler | grep -v pause | \
cut -d' ' -f1)

# Then update kube-proxy
$ kubectl -n kube-system edit cm kube-proxy
......
  kubeconfig.conf: |-
    apiVersion: v1
    kind: Config
    clusters:
    - cluster:
        certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        server: https://192.168.0.8:6443
      name: default

# Update cluster-info
$ kubectl -n kube-public edit cm cluster-info
......
    server: https://192.168.0.8:6443
  name: ""
......
$  kubectl cluster-info
Kubernetes master is running at https://192.168.0.8:6443

Copy the new certificates to the master node being added

  • Run on 192.168.0.11 (the existing master node)
[root@homelab-0-11 ~]# ssh root@192.168.0.10 mkdir -p /etc/kubernetes/pki/etcd
root@192.168.0.10's password:
[root@homelab-0-11 ~]# scp /etc/kubernetes/admin.conf root@192.168.0.10:/etc/kubernetes
root@192.168.0.10's password:
admin.conf                                                               100% 5636     2.7MB/s   00:00
[root@homelab-0-11 ~]# scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@192.168.0.10:/etc/kubernetes/pki
root@192.168.0.10's password:
ca.crt                                                                   100% 1099     1.1MB/s   00:00
ca.key                                                                   100% 1675     1.8MB/s   00:00
sa.key                                                                   100% 1679     1.9MB/s   00:00
sa.pub                                                                   100%  451   313.4KB/s   00:00
front-proxy-ca.crt                                                       100% 1115     1.7MB/s   00:00
front-proxy-ca.key                                                       100% 1675     1.7MB/s   00:00
[root@homelab-0-11 ~]# scp /etc/kubernetes/pki/etcd/ca.* root@192.168.0.10:/etc/kubernetes/pki/etcd
root@192.168.0.10's password:
ca.crt                                                                   100% 1086   835.4KB/s   00:00
ca.key                                                                   100% 1679   876.2KB/s   00:00
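
If several masters are being added, the same copies can be scripted; a rough sketch (assuming SSH access to each target host) is:

for host in 192.168.0.9 192.168.0.10; do
  ssh root@$host "mkdir -p /etc/kubernetes/pki/etcd"
  scp /etc/kubernetes/admin.conf root@$host:/etc/kubernetes/
  scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@$host:/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/etcd/ca.* root@$host:/etc/kubernetes/pki/etcd/
done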
  • On 192.168.0.11 (master node), generate the join command
[root@homelab-0-11 ~]# kubeadm token create --print-join-command
kubeadm join 192.168.0.11:6443 --token 2oc17r.39qeqqay8em4kqkw --discovery-token-ca-cert-hash sha256:0c8c150eaa555314ca4a5a466fff69523ee97f83470de0d2585cdb0a48a333db

# Get the certificate key
[root@homelab-0-11 ~]#  kubeadm init phase upload-certs --upload-certs
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
05d40723bfbcec62bb0be63646659c925626ba0ce0c9cfc1c6620e74b2ccc638
  • Run on the new master node (the base environment must already be prepared; it was installed in the previous chapter):
    • docker
    • kubeadm kubelet kubectl
# Join as a master node; --certificate-key comes from the command above
[root@k8s-master-10 ~]# kubeadm join 192.168.0.8:6443 --token 2oc17r.39qeqqay8em4kqkw \
  --discovery-token-ca-cert-hash sha256:0c8c150eaa555314ca4a5a466fff69523ee97f83470de0d2585cdb0a48a333db \
 --control-plane \
 --certificate-key 3c8cd39d57d9564f67295124dc4cdf4bd14c6a6f4751f496c493d3876c5f3631
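
Before running kubectl on the new master, put the admin kubeconfig in place (the join output prints the same hint):

[root@k8s-master-10 ~]# mkdir -p $HOME/.kube
[root@k8s-master-10 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master-10 ~]# chown $(id -u):$(id -g) $HOME/.kube/config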
  • Verify that the join succeeded
[root@k8s-master-10 ~]# kubectl get nodes
NAME                     STATUS   ROLES                  AGE    VERSION
homelab-0-11.host.com    Ready    control-plane,master   2d1h   v1.22.3
homelab-0-12.host.com    Ready    node                   47h    v1.22.3
homelab-0-13.host.com    Ready    node                   47h    v1.22.3
k8s-master-10.host.com   Ready    control-plane,master   19m    v1.22.3

[root@k8s-master-10 ~]# kubectl cluster-info
Kubernetes control plane is running at https://192.168.0.8:6443
CoreDNS is running at https://192.168.0.8:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
  • This does not need to be executed (it regenerates the ~/.kube/config file)
[root@homelab-0-11 ~]# kubeadm init phase kubeconfig all --config=kubeadm.yaml
[root@homelab-0-11 ~]# cp -i /etc/kubernetes/admin.conf ~/.kube/config
  • Verify high availability (see the commands after this list)
    • Update the ~/.kube/config file
    • Run ip addr | grep "192.168.0.8" on the two master nodes; the one that prints a result currently holds the VIP, the other prints nothing.
    • Shut down the machine that had output; running ip addr | grep "192.168.0.8" on the other machine will now show a result.
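
Put together, the check looks roughly like this (the VIP simply moves to the surviving node):

# On each master: who currently holds the VIP?
[root@homelab-0-11 ~]# ip addr | grep "192.168.0.8"
    inet 192.168.0.8/32 scope global ens192
[root@k8s-master-10 ~]# ip addr | grep "192.168.0.8"

# Power off the VIP holder, then check again on the other master
[root@k8s-master-10 ~]# ip addr | grep "192.168.0.8"
    inet 192.168.0.8/32 scope global ens192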

Troubleshooting

Problem 1: after shutting down the 192.168.0.10 machine, kube-apiserver on 192.168.0.11 becomes unreachable

Running netstat -ntlp | grep 6443 shows that nothing is listening on port 6443.

The errors look like this:

[root@k8s-master-11 ~]# ip addr |grep "0.8"
    inet 192.168.0.8/32 scope global ens192
[root@k8s-master-11 ~]# kubectl get nodes
Error from server: etcdserver: request timed out

[root@homelab-0-11 ~]# netstat -ntlp |grep 6443
[root@homelab-0-11 ~]# docker ps -a |grep kube-apiser |grep -v pause
ec1deb02141e   e64579b7d886                                        "kube-apiserver --ad…"   About a minute ago   Exited (1) 42 seconds ago                  k8s_kube-apiserver_kube-apiserver-homelab-0-11.host.com_kube-system_883357c0f66de01a1dadb11f4a1c77a7_42
[root@homelab-0-11 ~]# docker logs ec1deb02141e
I1120 05:24:50.103102       1 server.go:553] external host was not specified, using 192.168.0.11
I1120 05:24:50.104422       1 server.go:161] Version: v1.22.2
I1120 05:24:50.859075       1 shared_informer.go:240] Waiting for caches to sync for node_authorizer
I1120 05:24:50.861025       1 plugins.go:158] Loaded 12 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,MutatingAdmissionWebhook.
I1120 05:24:50.861040       1 plugins.go:161] Loaded 11 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,CertificateSubjectRestriction,ValidatingAdmissionWebhook,ResourceQuota.
I1120 05:24:50.866679       1 plugins.go:158] Loaded 12 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,MutatingAdmissionWebhook.
I1120 05:24:50.866780       1 plugins.go:161] Loaded 11 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,CertificateSubjectRestriction,ValidatingAdmissionWebhook,ResourceQuota.
W1120 05:24:50.875414       1 clientconn.go:1326] [core] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 127.0.0.1 <nil> 0 <nil>}. Err: connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W1120 05:24:51.861498       1 clientconn.go:1326] [core] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 127.0.0.1 <nil> 0 <nil>}. Err: connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W1120 05:24:51.876788       1 clientconn.go:1326] [core] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 127.0.0.1 <nil> 0 <nil>}. Err: connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W1120 05:24:52.863109       1 clientconn.go:1326] [core] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 127.0.0.1 <nil> 0 <nil>}. Err: connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W1120 05:24:53.314584       1 clientconn.go:1326] [core] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 127.0.0.1 <nil> 0 <nil>}. Err: connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W1120 05:24:54.155817       1 clientconn.go:1326] [core] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 127.0.0.1 <nil> 0 <nil>}. Err: connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W1120 05:24:56.314865       1 clientconn.go:1326] [core] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 127.0.0.1 <nil> 0 <nil>}. Err: connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W1120 05:24:56.382216       1 clientconn.go:1326] [core] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 127.0.0.1 <nil> 0 <nil>}. Err: connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W1120 05:24:59.783759       1 clientconn.go:1326] [core] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 127.0.0.1 <nil> 0 <nil>}. Err: connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
W1120 05:25:01.091320       1 clientconn.go:1326] [core] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 127.0.0.1 <nil> 0 <nil>}. Err: connection error: desc = "transport: Error while dialing dial tcp 127.0.0.1:2379: connect: connection refused". Reconnecting...
Error: context deadline exceeded
  • Looking at the etcd member list and the etcd logs, the cause is that with the master node down, etcd could not elect another member as leader, so it could not come back up.
  • https://github.com/kubernetes/kubeadm/issues/1300
  • Fix: exec into etcd manually and remove the dead master member from the cluster.
[root@k8s-master-9 ~]# docker ps |grep "etcd"
f6841fb4e9b6   004811815584                                        "etcd --advertise-cl…"   2 minutes ago    Up 2 minutes              k8s_etcd_etcd-k8s-master-9.host.com_kube-system_f89ab40a8da9261ec122eed5dad7c274_4
4a6c9e7cb0e2   registry.aliyuncs.com/google_containers/pause:3.5   "/pause"                 20 minutes ago   Up 20 minutes             k8s_POD_etcd-k8s-master-9.host.com_kube-system_f89ab40a8da9261ec122eed5dad7c274_0

[root@k8s-master-9 ~]# docker logs f6841fb4e9b6
k8s-master-9.host.com ClientURLs:[https://192.168.0.9:2379]}","request-path":"/0/members/86b9aa6d7be9c51/attributes","publish-timeout":"7s","error":"etcdserver: request timed out"}
{"level":"warn","ts":"2021-11-20T09:23:02.309Z","caller":"rafthttp/probing_status.go:68","msg":"prober detected unhealthy status","round-tripper-name":"ROUND_TRIPPER_SNAPSHOT","remote-peer-id":"cb18584c4f4dbfc","rtt":"0s","error":"dial tcp 192.168.0.11:2380: connect: connection refused"}
{"level":"warn","ts":"2021-11-20T09:23:02.312Z","caller":"rafthttp/probing_status.go:68","msg":"prober detected unhealthy status","round-tripper-name":"ROUND_TRIPPER_RAFT_MESSAGE","remote-peer-id":"cb18584c4f4dbfc","rtt":"0s","error":"dial tcp 192.168.0.11:2380: connect: connection refused"}
{"level":"warn","ts":"2021-11-20T09:23:02.313Z","caller":"rafthttp/probing_status.go:68","msg":"prober detected unhealthy status","round-tripper-name":"ROUND_TRIPPER_SNAPSHOT","remote-peer-id":"4cacee0edb1ab8eb","rtt":"0s","error":"dial tcp 192.168.0.10:2380: connect: no route to host"}
{"level":"warn","ts":"2021-11-20T09:23:02.313Z","caller":"rafthttp/probing_status.go:68","msg":"prober detected unhealthy status","round-tripper-name":"ROUND_TRIPPER_RAFT_MESSAGE","remote-peer-id":"4cacee0edb1ab8eb","rtt":"0s","error":"dial tcp 192.168.0.10:2380: connect: no route to host"}
{"level":"info","ts":"2021-11-20T09:23:03.253Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 is starting a new election at term 16"}
{"level":"info","ts":"2021-11-20T09:23:03.253Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 became pre-candidate at term 16"}
{"level":"info","ts":"2021-11-20T09:23:03.253Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 received MsgPreVoteResp from 86b9aa6d7be9c51 at term 16"}
{"level":"info","ts":"2021-11-20T09:23:03.253Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 [logterm: 16, index: 215685] sent MsgPreVote request to cb18584c4f4dbfc at term 16"}
{"level":"info","ts":"2021-11-20T09:23:03.253Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 [logterm: 16, index: 215685] sent MsgPreVote request to 4cacee0edb1ab8eb at term 16"}
{"level":"info","ts":"2021-11-20T09:23:04.753Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 is starting a new election at term 16"}
{"level":"info","ts":"2021-11-20T09:23:04.753Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 became pre-candidate at term 16"}
{"level":"info","ts":"2021-11-20T09:23:04.753Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 received MsgPreVoteResp from 86b9aa6d7be9c51 at term 16"}
{"level":"info","ts":"2021-11-20T09:23:04.753Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 [logterm: 16, index: 215685] sent MsgPreVote request to cb18584c4f4dbfc at term 16"}
{"level":"info","ts":"2021-11-20T09:23:04.753Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 [logterm: 16, index: 215685] sent MsgPreVote request to 4cacee0edb1ab8eb at term 16"}
{"level":"info","ts":"2021-11-20T09:23:06.253Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 is starting a new election at term 16"}
{"level":"info","ts":"2021-11-20T09:23:06.253Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 became pre-candidate at term 16"}
{"level":"info","ts":"2021-11-20T09:23:06.253Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 received MsgPreVoteResp from 86b9aa6d7be9c51 at term 16"}
{"level":"info","ts":"2021-11-20T09:23:06.254Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 [logterm: 16, index: 215685] sent MsgPreVote request to cb18584c4f4dbfc at term 16"}
{"level":"info","ts":"2021-11-20T09:23:06.254Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"86b9aa6d7be9c51 [logterm: 16, index: 215685] sent MsgPreVote request to 4cacee0edb1ab8eb at term 16"}
{"level":"warn","ts":"2021-11-20T09:23:06.845Z","caller":"etcdhttp/metrics.go:166","msg":"serving /health false; no leader"}
{"level":"warn","ts":"2021-11-20T09:23:06.846Z","caller":"etcdhttp/metrics.go:78","msg":"/health error","output":"{\"health\":\"false\",\"reason\":\"RAFT NO LEADER\"}","status-code":503}

[root@k8s-master-9 ~]# docker exec -it f6841fb4e9b6 sh
# export ETCDCTL_API=3
# alias etcdctl='etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key'
# etcdctl member list
81823df8357bcc71, started, k8s-portal-master3, https://10.3.175.167:2380, https://10.3.175.167:2379
9d7d493298ff2c5f, started, k8s-portal-master1, https://10.3.175.165:2380, https://10.3.175.165:2379
fac8c4b57ce3b0af, started, k8s-portal-master2, https://10.3.175.166:2380, https://10.3.175.166:2379
# etcdctl member remove 9d7d493298ff2c5f
Member 9d7d493298ff2c5f removed from cluster bd092b6d7796dffd
# etcdctl member list
81823df8357bcc71, started, k8s-portal-master3, https://10.3.175.167:2380, https://10.3.175.167:2379
fac8c4b57ce3b0af, started, k8s-portal-master2, https://10.3.175.166:2380, https://10.3.175.166:2379
#
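
After removing the stale member, the remaining members can be checked from inside the same etcd container, using the same etcdctl alias:

# etcdctl endpoint health
# etcdctl member list --write-out=table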
