
k8s Basic Installation (Part 1)

Preface

  • Windows 10 host
  • VMware Workstation 16
  • 5 CentOS VMs, 2 CPU / 2 GB RAM / 50 GB disk each

Design Architecture

(architecture diagram)

Network Settings

  • VMware NAT network settings
    • Subnet: 10.4.7.0
    • Gateway: 10.4.7.254
(screenshot: VMware NAT network settings)

Network Adapter Settings

Set the automatic metric of the VMnet8 adapter to 10.

(screenshot: VMnet8 adapter settings)

CentOS 7.6 Template Machine

  • uname -a (the kernel must be newer than 3.8)
    • Linux centos7 3.10.0-1127.19.1.el7.x86_64 #1
[root@centos7 ~]# getenforce 
Disabled
[root@centos7 ~]# systemctl stop firewalld
[root@centos7 ~]# yum install -y epel-release bind-utils telnet iftop unzip net-tools make gcc gcc-c++ python-devel python3-devel cyrus-sasl-devel wget lrzsz dos2unix sysstat 
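If you want the firewall/SELinux state to survive reboots on the template machine, both can also be disabled permanently. A minimal sketch, assuming that is acceptable for this lab setup (SELinux comes up again in the Harbor section below):

systemctl disable firewalld                                    # keep firewalld off after reboot
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config   # disable SELinux permanently; takes effect after a reboot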

Create the 5 Machines

  • 2 CPU / 2 GB RAM / 50 GB disk each
  • IP addresses:
    • 10.4.7.11
    • 10.4.7.12
    • 10.4.7.21
    • 10.4.7.22
    • 10.4.7.200
  • Set the IP address on each machine
[root@hdss-7-12 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33 
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=1f42c2e3-7fe9-4d31-a3c9-b314dd2d3653
DEVICE=ens33
ONBOOT=yes
IPADDR=10.4.7.12
GATEWAY=10.4.7.254
NETMASK=255.255.255.0
DNS1=10.4.7.254
  • Set the hostname on each machine
hostnamectl set-hostname hdss-7-11.host.com
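Run the matching command on each of the other machines. A sketch, assuming the hostnames follow the IP plan above:

hostnamectl set-hostname hdss-7-12.host.com    # on 10.4.7.12
hostnamectl set-hostname hdss-7-21.host.com    # on 10.4.7.21
hostnamectl set-hostname hdss-7-22.host.com    # on 10.4.7.22
hostnamectl set-hostname hdss-7-200.host.com   # on 10.4.7.200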

Install bind9

  • Install bind9 on 10.4.7.11
yum install bind -y
vi /etc/named.conf 

listen-on port 53 { 10.4.7.11; };
Remove the line: listen-on-v6 port 53 { ::1; };
allow-query     { any; };
forwarders      { 10.4.7.254; };

recursion yes;
dnssec-enable no;
dnssec-validation no;

[root@hdss-7-11 ~]# named-checkconf

Configure bind9 Zones

Append the following at the end:

[root@hdss-7-11 ~]# vi /etc/named.rfc1912.zones 
zone "host.com" IN {
        type master;
        file "host.com.zone";
        allow-update { 10.4.7.11; };
};

zone "od.com" IN {
        type master;
        file "od.com.zone";
        allow-update { 10.4.7.11; };
};

Configure the Zone Data Files

[root@hdss-7-11 ~]# vi /var/named/host.com.zone
$TTL 600 ; 10 minutes
@	IN SOA	dns.host.com. dnsadmin.host.com. (
					2021091901	; serial
					1D	; refresh
					1H	; retry
					1W	; expire
					3H )	; minimum
	NS	dns.host.com.
$TTL 60 ; 1 minute
dns	A       10.4.7.11
HDSS7-11  A     10.4.7.11
HDSS7-12  A     10.4.7.12
HDSS7-21  A     10.4.7.21
HDSS7-22  A     10.4.7.22
HDSS7-200  A     10.4.7.200


[root@hdss-7-11 ~]# vi /var/named/od.com.zone
$TTL 600 ; 10 minutes
@	IN SOA	dns.od.com. dnsadmin.od.com. (
					2021091901	; serial
					1D	; refresh
					1H	; retry
					1W	; expire
					3H )	; minimum
	NS	dns.od.com.
$TTL 60 ; 1 minute
dns	A	10.4.7.11
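Before starting named, the two zone files can be checked individually. A quick sketch using bind's own named-checkzone:

named-checkzone host.com /var/named/host.com.zone
named-checkzone od.com /var/named/od.com.zone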

[root@hdss-7-11 ~]# named
[root@hdss-7-11 ~]# named-checkconf 
[root@hdss-7-11 ~]# systemctl start named
[root@hdss-7-11 ~]# systemctl enable named
[root@hdss-7-11 ~]# netstat -tnlp |grep 53
tcp        0      0 10.4.7.11:53            0.0.0.0:*               LISTEN      1943/named          
tcp        0      0 127.0.0.1:953           0.0.0.0:*               LISTEN      1943/named          
tcp6       0      0 ::1:53                  :::*                    LISTEN      1943/named          
tcp6       0      0 ::1:953                 :::*                    LISTEN      1943/named     

Point Every Machine's DNS at 10.4.7.11

[root@hdss-7-11 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33 
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=1f42c2e3-7fe9-4d31-a3c9-b314dd2d3653
DEVICE=ens33
ONBOOT=yes
IPADDR=10.4.7.11
GATEWAY=10.4.7.254
NETMASK=255.255.255.0
DNS1=10.4.7.11

[root@hdss-7-11 ~]# systemctl restart network

[root@hdss-7-11 ~]# cat /etc/resolv.conf
# Generated by NetworkManager
search host.com
nameserver 10.4.7.11

Test DNS Resolution

[root@hdss-7-11 ~]# dig -t A hdss7-200.host.com @10.4.7.11 +short
10.4.7.200

[root@hdss-7-12 ~]# ping hdss7-200.host.com
PING HDSS7-200.host.com (10.4.7.200) 56(84) bytes of data.
64 bytes from 10.4.7.200 (10.4.7.200): icmp_seq=1 ttl=64 time=0.931 ms
64 bytes from 10.4.7.200 (10.4.7.200): icmp_seq=2 ttl=64 time=0.710 ms
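The od.com zone can be tested the same way. A quick sketch (at this point the zone only contains the dns record, so it should resolve to 10.4.7.11):

dig -t A dns.od.com @10.4.7.11 +short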

Set the VMnet8 Gateway on the Windows Host

(screenshot: VMnet8 gateway settings on Windows)

SSL Self-Signed Certificates

Installed on 10.4.7.200

curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo



[root@hdss-7-200 ~]# cd /opt/
[root@hdss-7-200 opt]# mkdir certs

[root@hdss-7-200 opt]# cd certs/
[root@hdss-7-200 certs]# pwd
/opt/certs

Certificate Configuration File

[root@hdss-7-200 certs]# vi ca-csr.json
{
	"CN": "kubernetes",

	"hosts": [],
	"key": {
		"algo": "rsa",
		"size": 2048
	},
	"names": [{
		"C": "CN",
		"ST": "BeiJing",
		"L": "BeiJing",
		"O": "od",
		"OU": "ops"
	}],
	"ca": {
		"expiry": "175200h"
	}
}
# Generate the CA certificate
[root@hdss-7-200 certs]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
2021/09/20 10:17:22 [INFO] generating a new CA key and certificate from CSR
2021/09/20 10:17:22 [INFO] generate received request
2021/09/20 10:17:22 [INFO] received CSR
2021/09/20 10:17:22 [INFO] generating key: rsa-2048
2021/09/20 10:17:22 [INFO] encoded CSR
2021/09/20 10:17:22 [INFO] signed certificate with serial number 188746031813236842140510726410636693959243331288
[root@hdss-7-200 certs]# ls
ca.csr  ca-csr.json  ca-key.pem  ca.pem
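The new CA can be inspected with the cfssl-certinfo binary downloaded above, or with openssl. A minimal sketch:

cfssl-certinfo -cert ca.pem            # show subject, validity and serial number of the CA
openssl x509 -in ca.pem -noout -dates  # quick check of the expiry dates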

Install Docker

  • Target machines:
    • 10.4.7.21
    • 10.4.7.22
    • 10.4.7.200
# Docker can also be installed directly with the commands below (taken from shell history)
[root@hdss-7-200 certs]# history 
 423  yum install -y yum-utils device-mapper-persistent-data lvm2
  424  yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
  425  yum list docker-ce --showduplicates | sort -r
  426  sudo yum install docker-ce-17.12.0.ce

[root@hdss-7-200 certs]# systemctl start docker
[root@hdss-7-200 certs]# systemctl enable  docker
  • Configure /etc/docker/daemon.json
mkdir -p /data/docker /etc/docker
# Note: the bip value below differs per machine — use 172.7.21.1/24 on 10.4.7.21 and 172.7.22.1/24 on 10.4.7.22
[root@hdss-7-21 certs]# cat /etc/docker/daemon.json
{
    "registry-mirrors": ["https://registry.docker-cn.com", "https://docker.mirrors.ustc.edu.cn"],
    "storage-driver": "overlay2",
    "graph": "/data/docker",
    "insecure-registries": ["registry.access.redhat.com", "quay.io", "harbor.od.com"],
    "bip": "172.7.21.1/24",
    "exec-opts": ["native.cgroupdriver=systemd"],
    "live-restore": true
}
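After writing daemon.json, Docker must be restarted for the settings to take effect. A sketch for applying and verifying the per-host bip, assuming Docker was installed and started as above:

systemctl restart docker
docker info | grep -iE 'root dir|cgroup driver'   # expect /data/docker and systemd
ip addr show docker0                              # on 10.4.7.21 the docker0 bridge should carry 172.7.21.1/24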

Deploy Docker Harbor

  • Deployed on 10.4.7.200
mkdir -p /opt/src
cd /opt/src
wget https://github.com/goharbor/harbor/releases/download/v2.3.2/harbor-offline-installer-v2.3.2.tgz
tar xf harbor-offline-installer-v2.3.2.tgz -C  /opt
mv harbor harbor-v2.3.2
ln -s /opt/harbor-v2.3.2 /opt/harbor
  • Configure harbor.yml
[root@hdss-7-200 harbor]# cp harbor.yml.tmpl  harbor.yml

[root@hdss-7-200 harbor]# vim harbor.yml

hostname: harbor.od.com
http:
  port: 180
#https:
  # https port for harbor, default is 443
  #port: 443
  # The path of cert and key files for nginx
  #certificate: /your/certificate/path
  #private_key: /your/private/key/path
data_volume: /data/harbor

[root@hdss-7-200 harbor]#  yum install -y docker-compose
[root@hdss-7-200 harbor]# ./install.sh 

[root@hdss-7-200 harbor]# docker-compose ps
      Name                     Command               State             Ports          
--------------------------------------------------------------------------------------
harbor-core         /harbor/entrypoint.sh            Up                               
harbor-db           /docker-entrypoint.sh 96 13      Up                               
harbor-jobservice   /harbor/entrypoint.sh            Up                               
harbor-log          /bin/sh -c /usr/local/bin/ ...   Up      127.0.0.1:1514->10514/tcp
harbor-portal       nginx -g daemon off;             Up                               
nginx               nginx -g daemon off;             Up      0.0.0.0:180->8080/tcp    
redis               redis-server /etc/redis.conf     Up                               
registry            /home/harbor/entrypoint.sh       Up                               
registryctl         /home/harbor/start.sh            Up  

Configure nginx

  491  yum install nginx -y
  492  systemctl start nginx
  493  systemctl enable nginx
[root@hdss-7-200 harbor]# cat /etc/nginx/conf.d/harbor.od.com.conf
server {
    listen 80;

    client_max_body_size 1000M;
    server_name harbor.od.com;

    location / {

        proxy_pass http://127.0.0.1:180;
        proxy_connect_timeout 180;
        proxy_send_timeout 180;
        proxy_read_timeout 180;
        proxy_set_header Host $host;
        proxy_set_header  X-Forwarded-For $proxy_add_x_forwarded_for;
        access_log /var/log/nginx/harbor_access.log;
        error_log /var/log/nginx/harbor_error.log;
    }
}
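After adding the vhost, validate and reload nginx. A quick sketch:

nginx -t                  # check the configuration syntax
systemctl reload nginx    # pick up /etc/nginx/conf.d/harbor.od.com.conf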

Add a DNS Record for Harbor

  • Operate on 10.4.7.11
# On older bind9 versions you also need to roll the serial number forward (i.e. change it to 2021091902	; serial); my version does not require it

[root@hdss-7-11 ~]# cat /var/named/od.com.zone
$TTL 600 ; 10 minutes
@	IN SOA	dns.od.com. dnsadmin.od.com. (
					2021091901	; serial
					1D	; refresh
					1H	; retry
					1W	; expire
					3H )	; minimum
	NS	dns.od.com.
$TTL 60 ; 1 minute
dns	A	10.4.7.11
harbor  A       10.4.7.200 
[root@hdss-7-11 ~]# systemctl restart named
[root@hdss-7-11 ~]# dig -t A harbor.od.com +short
10.4.7.200
[root@hdss-7-200 harbor]# curl harbor.od.com

# If you get a 502 error, SELinux needs to be disabled
[root@hdss-7-200 harbor]# sestatus  
SELinux status:                 enabled
SELinuxfs mount:                /sys/fs/selinux
SELinux root directory:         /etc/selinux
Loaded policy name:             targeted
Current mode:                   enforcing
Mode from config file:          enforcing
Policy MLS status:              enabled
Policy deny_unknown status:     allowed
Max kernel policy version:      31

# Disable temporarily
[root@hdss-7-200 harbor]# setenforce 0
# Disable permanently (requires a reboot)
vim /etc/selinux/config
Change: SELINUX=disabled
  • Log in to harbor.od.com from Windows
(screenshot: Harbor login page)
  • Postscript: a problem I hit — after changing harbor.yml, I could no longer log in
harbor-db           /docker-entrypoint.sh 96 13      Restarting   
[root@hdss-7-200 harbor]# tail -f /var/log/harbor/postgresql.log 
Sep 20 11:45:23 172.17.0.1 postgresql[2569]: 
Sep 20 11:45:23 172.17.0.1 postgresql[2569]: Data page checksums are disabled.
Sep 20 11:45:23 172.17.0.1 postgresql[2569]: 
Sep 20 11:45:23 172.17.0.1 postgresql[2569]: initdb: error: directory "/var/lib/postgresql/data/pg13" exists but is not empty
Sep 20 11:45:23 172.17.0.1 postgresql[2569]: If you want to create a new database system, either remove or empty
Sep 20 11:45:23 172.17.0.1 postgresql[2569]: the directory "/var/lib/postgresql/data/pg13" or run initdb
Sep 20 11:45:23 172.17.0.1 postgresql[2569]: with an argument other than "/var/lib/postgresql/data/pg13".
  • Fix
    • /data/harbor/ is the path configured as data_volume in harbor.yml
rm -rf /data/harbor/*
Re-run ./install.sh
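A slightly safer variant of the fix is to stop the Harbor containers before wiping the data directory. A sketch, assuming data_volume is /data/harbor as configured above:

cd /opt/harbor
docker-compose down      # stop all Harbor containers first
rm -rf /data/harbor/*    # this destroys all existing Harbor data
./install.sh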

Install etcd

  • Target machines: 10.4.7.12 / 10.4.7.21 / 10.4.7.22
  • Sign an SSL certificate for etcd on 10.4.7.200
[root@hdss-7-200 certs]# vi ca-config.json
[root@hdss-7-200 certs]# cat ca-config.json
{
    "signing": {
        "default": {
            "expiry": "175200h"
        },
        "profiles": {
            "server": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth"
                ]
            },
            "client": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}

[root@hdss-7-200 certs]# vi etcd-peer-csr.json
[root@hdss-7-200 certs]# cat etcd-peer-csr.json 
{
    "CN": "k8s-etcd",
    "hosts": [
        "10.4.7.11",
        "10.4.7.12",
        "10.4.7.21",
        "10.4.7.22"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
      {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}
  • Issue the etcd certificate
[root@hdss-7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json | cfssljson -bare  etcd-peer
2021/09/20 15:33:20 [INFO] generate received request
2021/09/20 15:33:20 [INFO] received CSR
2021/09/20 15:33:20 [INFO] generating key: rsa-2048
2021/09/20 15:33:20 [INFO] encoded CSR
2021/09/20 15:33:20 [INFO] signed certificate with serial number 183926383270581447982895516920781576082225689733
2021/09/20 15:33:20 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

[root@hdss-7-200 certs]# ll
total 36
-rw-r--r--. 1 root root  836 Sep 20 15:15 ca-config.json
-rw-r--r--. 1 root root  997 Sep 20 10:17 ca.csr
-rw-r--r--. 1 root root  215 Sep 20 10:16 ca-csr.json
-rw-------. 1 root root 1679 Sep 20 10:17 ca-key.pem
-rw-r--r--. 1 root root 1346 Sep 20 10:17 ca.pem
-rw-r--r--. 1 root root 1062 Sep 20 15:33 etcd-peer.csr
-rw-r--r--. 1 root root  361 Sep 20 15:30 etcd-peer-csr.json
-rw-------. 1 root root 1679 Sep 20 15:33 etcd-peer-key.pem
-rw-r--r--. 1 root root 1428 Sep 20 15:33 etcd-peer.pem
[root@hdss-7-12 opt]# mkdir -p /opt/src
[root@hdss-7-12 opt]# cd /opt/src/
# Do not use the latest version; v3.1.20 is recommended: https://github.com/etcd-io/etcd/releases/tag/v3.1.20
[root@hdss-7-12 src]# wget https://github.com/etcd-io/etcd/releases/download/v3.1.20/etcd-v3.1.20-linux-amd64.tar.gz
[root@hdss-7-12 src]# tar xvf etcd-v3.1.20-linux-amd64.tar.gz -C /opt/
[root@hdss-7-12 src]# cd ../
[root@hdss-7-12 opt]# mv etcd-v3.1.20-linux-amd64 etcd-v3.1.20
[root@hdss-7-12 opt]# ln -s /opt/etcd-v3.1.20 /opt/etcd
[root@hdss-7-12 opt]# mkdir -p /opt/etcd/certs /data/etcd /data/logs/etcd-server
[root@hdss-7-12 opt]# cd etcd/certs/
[root@hdss-7-12 certs]# scp hdss7-200:/opt/certs/ca.pem .
[root@hdss-7-12 certs]# scp hdss7-200:/opt/certs/etcd-peer* .
[root@hdss-7-12 certs]# ls -lrt
total 12
-rw-r--r--. 1 root root 1346 Sep 20 15:37 ca.pem
-rw-r--r--. 1 root root 1428 Sep 20 15:37 etcd-peer.pem
-rw-------. 1 root root 1679 Sep 20 15:38 etcd-peer-key.pem

  • Create the etcd startup script
[root@hdss-7-12 etcd] cat > /opt/etcd/etcd-server-startup.sh <<'eof'
#!/bin/sh
./etcd --name etcd-server-7-12 \
       --data-dir /data/etcd/etcd-server \
       --listen-peer-urls https://10.4.7.12:2380 \
       --listen-client-urls https://10.4.7.12:2379,http://127.0.0.1:2379 \
       --quota-backend-bytes 8589934592 \
       --initial-advertise-peer-urls https://10.4.7.12:2380 \
       --advertise-client-urls https://10.4.7.12:2379,http://127.0.0.1:2379 \
       --initial-cluster  etcd-server-7-12=https://10.4.7.12:2380,etcd-server-7-21=https://10.4.7.21:2380,etcd-server-7-22=https://10.4.7.22:2380 \
       --ca-file ./certs/ca.pem \
       --cert-file ./certs/etcd-peer.pem \
       --key-file ./certs/etcd-peer-key.pem \
       --client-cert-auth  \
       --trusted-ca-file ./certs/ca.pem \
       --peer-ca-file ./certs/ca.pem \
       --peer-cert-file ./certs/etcd-peer.pem \
       --peer-key-file ./certs/etcd-peer-key.pem \
       --peer-client-cert-auth \
       --peer-trusted-ca-file ./certs/ca.pem \
       --log-output stdout
eof
  • Parameter notes
--name                          # node name
--data-dir                      # data directory
--listen-peer-urls              # peer (cluster) listen URLs
--listen-client-urls            # client listen URLs
--quota-backend-bytes           # backend DB size limit; the default is 2G, and once the data reaches 2G writes are refused until old history is compacted, so it is better to set the size up front — 8G is the recommended value, which is what 8589934592 above corresponds to
--initial-advertise-peer-urls   # peer (cluster) advertise URLs
--advertise-client-urls         # client advertise URLs
--initial-cluster               # initial cluster membership (all three etcd members)
  • Set ownership and permissions for etcd
[root@hdss-7-12 etcd]# chmod +x /opt/etcd/etcd-server-startup.sh
[root@hdss-7-12 etcd]# useradd -s /sbin/nologin -M etcd
[root@hdss-7-12 etcd]# chown -R etcd.etcd /opt/etcd-v3.1.20/
[root@hdss-7-12 etcd]# chown -R etcd.etcd /data/etcd/
[root@hdss-7-12 etcd]# chown -R etcd.etcd /data/logs/etcd-server/
[root@hdss-7-12 etcd]# yum install supervisor -y
  • Start etcd with supervisord
[root@hdss-7-12 etcd]# yum install supervisor -y
[root@hdss-7-12 etcd]# systemctl start supervisord
[root@hdss-7-12 etcd]# systemctl enable supervisord
[root@hdss-7-12 etcd]# cat /etc/supervisord.d/etcd-server.ini 
[program:etcd-server-7-12]
command=/opt/etcd/etcd-server-startup.sh                        ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/opt/etcd                                             ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; retstart at unexpected quit (default: true)
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=etcd                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log           ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)
[root@hdss-7-12 etcd]# supervisorctl update
[root@hdss-7-12 etcd]# supervisorctl status 
etcd-server-7-12                 RUNNING   pid 2901, uptime 0:00:36

[root@hdss-7-12 etcd]# netstat -ntlp |grep etcd
tcp        0      0 10.4.7.12:2379          0.0.0.0:*               LISTEN      2902/./etcd         
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      2902/./etcd         
tcp        0      0 10.4.7.12:2380          0.0.0.0:*               LISTEN      2902/./etcd
  • Install the other two machines (10.4.7.21 / 10.4.7.22) following the same steps
  • Note: update the IP addresses in etcd-server-startup.sh and the program name in the supervisor config
  412  yum install supervisor -y
  413  systemctl enable supervisord
  414  systemctl start supervisord
  
  418  mkdri -p /opt/src
  419   mkdir-p /opt/src
  420  mkdir -p /opt/src
  421  cd /opt/src/
  422  wget https://github.com/etcd-io/etcd/releases/download/v3.1.20/etcd-v3.1.20-linux-amd64.tar.gz
  423  ls
  424  tar xvf etcd-v3.1.20-linux-amd64.tar.gz -C /opt/
  425  ls
  426  cd ../
  427  ll
  428   mv etcd-v3.1.20-linux-amd64 etcd-v3.1.20
  429  ln -s /opt/etcd-v3.1.20 /opt/etcd
  430  mkdir -p /opt/etcd/certs /data/etcd /data/logs/etcd-server
  431  cd etcd
  432  ls
  433  scp -r hdss7-12:/opt/etcd/etcd-server-startup.sh .
  434  scp -r hdss7-12:/opt/etcd/certs .
  435  ls
  436  vi etcd-server-startup.sh 
  437  chmod +x /opt/etcd/etcd-server-startup.sh
  438  useradd -s /sbin/nologin -M etcd
  439  chown -R etcd.etcd /opt/etcd-v3.1.20/
  440  chown -R etcd.etcd /data/etcd/
  441  chown -R etcd.etcd /data/logs/etcd-server/
  442  systemctl start supervisord
  443  vim /etc/supervisord.d/etcd-server.ini 
  444  supervisorctl update
  445  vim /etc/supervisord.d/etcd-server.ini 
  446  supervisorctl update
  447  supervisorctl status
  • Verify the cluster
[root@hdss-7-22 etcd]# ./etcdctl member list
988139385f78284: name=etcd-server-7-22 peerURLs=https://10.4.7.22:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.22:2379 isLeader=false
5a0ef2a004fc4349: name=etcd-server-7-21 peerURLs=https://10.4.7.21:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.21:2379 isLeader=false
f4a0cb0a765574a8: name=etcd-server-7-12 peerURLs=https://10.4.7.12:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.12:2379 isLeader=true
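Overall cluster health can also be checked with etcdctl (etcd v3.1 defaults to the v2 API, where cluster-health is available). A quick sketch:

./etcdctl cluster-health    # each member should report healthy, ending with "cluster is healthy"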

Deploy the Control-Plane apiserver

  • Download version v1.15.2
    • https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.15.md#downloads-for-v1152
    • https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.15.md#server-binaries-10
  • Sign the certificates on hdss7-200
    • Generate the client certificate used by the apiserver
[root@hdss-7-200 certs]# cat > /opt/certs/client-csr.json <<'eof'
{
    "CN": "k8s-node",
    "hosts": [
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "GuangZhou",
            "L": "GuangZhou",
            "O": "k8s",
            "OU": "opt"
        }
    ]
}
eof
[root@hdss-7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json | cfssljson -bare client
2021/09/20 22:00:31 [INFO] generate received request
2021/09/20 22:00:31 [INFO] received CSR
2021/09/20 22:00:31 [INFO] generating key: rsa-2048
2021/09/20 22:00:31 [INFO] encoded CSR
2021/09/20 22:00:31 [INFO] signed certificate with serial number 705334988911680099129328185890273104558098933168
2021/09/20 22:00:31 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

[root@hdss-7-200 certs]# ls -lrt
total 40
-rw-r--r--. 1 root root  215 Sep 20 10:16 ca-csr.json
-rw-r--r--. 1 root root 1346 Sep 20 10:17 ca.pem
-rw-------. 1 root root 1679 Sep 20 10:17 ca-key.pem
-rw-r--r--. 1 root root  997 Sep 20 10:17 ca.csr
-rw-r--r--. 1 root root  836 Sep 20 15:15 ca-config.json
-rw-r--r--. 1 root root  361 Sep 20 15:30 etcd-peer-csr.json
-rw-r--r--. 1 root root 1428 Sep 20 15:33 etcd-peer.pem
-rw-------. 1 root root 1679 Sep 20 15:33 etcd-peer-key.pem
-rw-r--r--. 1 root root 1062 Sep 20 15:33 etcd-peer.csr
-rw-r--r--. 1 root root  284 Sep 20 21:57 client-csr.json
-rw-r--r--. 1 root root 1371 Sep 20 22:00 client.pem
-rw-------. 1 root root 1675 Sep 20 22:00 client-key.pem
-rw-r--r--. 1 root root  997 Sep 20 22:00 client.csr

  • Generate the apiserver server certificate

cat > /opt/certs/apiserver-csr.json <<'eof'
{
    "CN": "k8s-apiserver",
    "hosts": [
        "127.0.0.1",
        "192.168.0.1",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local",
        "10.4.7.10",
        "10.4.7.21",
        "10.4.7.22",
        "10.4.7.23"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "k8s",
            "OU": "opt"
        }
    ]
}
eof


[root@hdss-7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json |cfssljson -bare apiserver

[root@hdss-7-200 certs]# ls api* -lrt
-rw-r--r--. 1 root root  567 Sep 20 22:02 apiserver-csr.json
-rw-r--r--. 1 root root 1602 Sep 20 22:02 apiserver.pem
-rw-------. 1 root root 1675 Sep 20 22:02 apiserver-key.pem
-rw-r--r--. 1 root root 1253 Sep 20 22:02 apiserver.csr
  • Work on hdss7-21
wget https://dl.k8s.io/v1.15.2/kubernetes-server-linux-amd64.tar.gz
tar xvf kubernetes-server-linux-amd64.tar.gz -C /opt

mv kubernetes kubernetes-v1.15.2
ln -s /opt/kubernetes-v1.15.2 /opt/kubernetes

  • Copy the certificates over
mkdir /opt/kubernetes/server/bin/certs
mkdir /opt/kubernetes/server/bin/conf
scp hdss7-200:/opt/certs/ca.pem /opt/kubernetes/server/bin/certs
scp hdss7-200:/opt/certs/ca-key.pem /opt/kubernetes/server/bin/certs
scp hdss7-200:/opt/certs/client.pem /opt/kubernetes/server/bin/certs
scp hdss7-200:/opt/certs/client-key.pem /opt/kubernetes/server/bin/certs
scp hdss7-200:/opt/certs/apiserver.pem /opt/kubernetes/server/bin/certs
scp hdss7-200:/opt/certs/apiserver-key.pem /opt/kubernetes/server/bin/certs
  • Create the audit policy configuration file
cat > /opt/kubernetes/server/bin/conf/audit.yaml <<'eof'
apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]

  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]

  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]

  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"

  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]

  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]

  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.

  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"
eof
  • Create the startup script
cat > /opt/kubernetes/server/bin/kube-apiserver.sh <<'eof'
#!/bin/bash
./kube-apiserver \
  --apiserver-count 2 \
  --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
  --audit-policy-file ./conf/audit.yaml \
  --authorization-mode RBAC \
  --client-ca-file ./certs/ca.pem \
  --requestheader-client-ca-file ./certs/ca.pem \
  --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
  --etcd-cafile ./certs/ca.pem \
  --etcd-certfile ./certs/client.pem \
  --etcd-keyfile ./certs/client-key.pem \
  --etcd-servers https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
  --service-account-key-file ./certs/ca-key.pem \
  --service-cluster-ip-range 192.168.0.0/16 \
  --service-node-port-range 3000-29999 \
  --target-ram-mb=1024 \
  --kubelet-client-certificate ./certs/client.pem \
  --kubelet-client-key ./certs/client-key.pem \
  --log-dir  /data/logs/kubernetes/kube-apiserver \
  --tls-cert-file ./certs/apiserver.pem \
  --tls-private-key-file ./certs/apiserver-key.pem \
  --allow-privileged \
  --v 2
eof
mkdir -p /data/logs/kubernetes/kube-apiserver
chmod +x /opt/kubernetes/server/bin/kube-apiserver.sh 
  • Create the supervisor config file
cat > /etc/supervisord.d/kube-apiserver.ini <<eof
[program:kube-apiserver-7-21]
command=/opt/kubernetes/server/bin/kube-apiserver.sh            ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                            ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; retstart at unexpected quit (default: true)
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log        ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)
eof
  • Check that the service started successfully
[root@hdss7-21 bin]# supervisorctl update
[root@hdss7-21 bin]# supervisorctl status
etcd-server-7-21                 RUNNING   pid 1116, uptime 1:29:22
kube-apiserver-7-21              RUNNING   pid 2117, uptime 0:06:45
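To confirm the apiserver is actually serving, check its listening ports. A sketch, assuming kube-apiserver 1.15 defaults (HTTPS on 6443 and an insecure localhost port 8080, since the startup script does not override them):

netstat -ntlp | grep kube-apiserver       # expect 0.0.0.0:6443 and 127.0.0.1:8080
curl -s http://127.0.0.1:8080/healthz     # should return "ok" via the insecure localhost port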

Configure the Layer-4 Reverse Proxy

Hosts: hdss7-11, hdss7-12

Be sure to disable SELinux first, otherwise nginx fails with: nginx: [emerg] bind() to 0.0.0.0:80 failed (13: Permission denied)

  • Install nginx and keepalived
yum install nginx -y
# Install this if needed — I hit the error: unknown directive "stream" in /etc/nginx/nginx.conf:86; fix: https://serverfault.com/questions/858067/unknown-directive-stream-in-etc-nginx-nginx-conf86
yum install nginx-mod-stream -y
yum install -y keepalived
  • Configure nginx
cat >> /etc/nginx/nginx.conf <<'eof'
stream {
    upstream kube-apiserver {
        server 10.4.7.21:6443     max_fails=3 fail_timeout=30s;
        server 10.4.7.22:6443     max_fails=3 fail_timeout=30s;
    }
    server {
        listen 7443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}
eof

# Check the configuration for errors
nginx -t
  • Add the port-check script
cat > /etc/keepalived/check_port.sh <<'EOF'
#!/bin/bash
# keepalived port-check script
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
        PORT_PROCESS=`ss -lnt|grep $CHK_PORT |wc -l`
        if [ $PORT_PROCESS -eq 0 ];then
                echo "Port $CHK_PORT Is Not Used,End."
                exit 1
        fi
else
        echo "Check Port Cant Be Empty!"
fi
EOF
chmod +x /etc/keepalived/check_port.sh
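The script can be exercised by hand before keepalived uses it. A quick sketch (7443 is the nginx stream port configured above):

/etc/keepalived/check_port.sh 7443; echo $?    # 0 when something listens on 7443, 1 otherwise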
  • Configure keepalived on the master
cat > /etc/keepalived/keepalived.conf <<eof
! Configuration File for keepalived

global_defs {
   router_id 10.4.7.11

}

vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 10.4.7.11
    nopreempt

    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
         chk_nginx
    }
    virtual_ipaddress {
        10.4.7.10
    }
}
eof
  • Configure keepalived on the backup
cat > /etc/keepalived/keepalived.conf <<'eof'
! Configuration File for keepalived

global_defs {
    router_id 10.4.7.12
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 251
    mcast_src_ip 10.4.7.12
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.4.7.10
    }
}
eof
  • Start the services and verify
systemctl start nginx keepalived 
systemctl enable nginx keepalived 
systemctl status keepalived


[root@hdss-7-12 ~]# netstat -ntlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 10.4.7.12:2379          0.0.0.0:*               LISTEN      1146/./etcd         
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      1146/./etcd         
tcp        0      0 10.4.7.12:2380          0.0.0.0:*               LISTEN      1146/./etcd         
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      1713/nginx: master  
tcp        0      0 0.0.0.0:7443            0.0.0.0:*               LISTEN      1713/nginx: master  
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      928/sshd            
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      1137/master         
tcp6       0      0 :::80                   :::*                    LISTEN      1713/nginx: master  
tcp6       0      0 :::22                   :::*                    LISTEN      928/sshd            
tcp6       0      0 ::1:25                  :::*                    LISTEN      1137/master         
[root@hdss-7-12 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:ce:52:44 brd ff:ff:ff:ff:ff:ff
    inet 10.4.7.12/24 brd 10.4.7.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 10.4.7.10/32 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::9c3c:c02:f95d:c178/64 scope link tentative noprefixroute dadfailed 
       valid_lft forever preferred_lft forever
    inet6 fe80::6d28:5a36:8ccc:2f2f/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
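Failover can be tested by stopping nginx on whichever node currently holds the VIP and watching the address move. A sketch, assuming keepalived is running on both hdss7-11 and hdss7-12:

systemctl stop nginx                      # on the node holding 10.4.7.10: chk_nginx fails and its priority drops by 20
ip addr show ens33 | grep 10.4.7.10       # on the other node: the VIP should now appear here
systemctl start nginx                     # restore nginx afterwards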
