0%

服务器规划

注:首次搭建时服务器规格设置为1C1G,发现服务器内存和CPU基本占满,集群启动失败,因此下列服务器规格为2C2G

节点 ip 组件
Master 11.0.1.128 kube-apiserver、kube-controller-manager、kube-scheduler、etcd1、kube-proxy、kubelet、docker
Node1 11.0.1.129 etcd2、kube-proxy、kubelet、docker
Node2 11.0.1.130 etcd3、kube-proxy、kubelet、docker
组件 版本
kubernetes 1.20.15
docker 19.03.9
etcd 3.4.9

环境配置

注:如下命令每台服务器均执行

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
# 关闭防火墙
[root@k8s-master ~]# systemctl stop firewalld
[root@k8s-master ~]# systemctl disable firewalld

# 关闭selinux
# 临时关闭(立即生效)
[root@k8s-master ~]# setenforce 0
# 永久关闭(重启后生效;锚定SELINUX=行,避免误改配置文件中的注释说明)
[root@k8s-master ~]# sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config

# 关闭swap
# 永久关闭
sed -ri 's/.*swap.*/#&/' /etc/fstab

# 设置主机名
hostnamectl set-hostname hostname
# 例如:hostnamectl set-hostname k8s-master

# 添加hosts
[root@k8s-master ~]# cat >> /etc/hosts << EOF
11.0.1.128 k8s-master
11.0.1.129 k8s-node1
11.0.1.130 k8s-node2
EOF

# 将桥接的IPv4流量传递到iptables的链
[root@k8s-master ~]# cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# 加载生效
[root@k8s-master ~]# modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf

# 开启时钟同步
[root@k8s-master ~]# yum install -y ntpdate
[root@k8s-master ~]# ntpdate time.windows.com

部署ETCD

master、node1、node2均部署etcd,可容忍一台etcd宕机

ca证书制作

如下步骤在master节点操作

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
# 1、证书工具下载
# master节点执行如下命令,获取cfssl证书制作工具,如下载失败,可直接访问下方的地址下载对应文件后,再通过FTP上传
[root@k8s-master ~]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@k8s-master ~]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@k8s-master ~]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@k8s-master ~]# chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
[root@k8s-master ~]# mv cfssl_linux-amd64 /usr/local/bin/cfssl
[root@k8s-master ~]# mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
[root@k8s-master ~]# mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

# 2、生成ca证书
# 证书知识介绍:https://www.cnblogs.com/hahaha111122222/p/16016351.html
[root@k8s-master ~]# mkdir -p ~/TLS/{etcd,k8s} # 创建证书的工作目录
[root@k8s-master ~]# cd ~/TLS/etcd

# (1)、自签CA
[root@k8s-master etcd]# cat > ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"www": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF

# (2)生成ca证书的相关配置信息
[root@k8s-master etcd]# cat > ca-csr.json << EOF
{
"CN": "etcd CA",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing"
}
]
}
EOF

# (3)生成证书,得到ca.pem和ca-key.pem文件
# 使用cfssl可以得到三个文件:*.pem(公钥)、*-key.pem(私钥)和*.csr
[root@k8s-master etcd]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

使用自签发的CA签发etcd证书

如下步骤在master节点操作

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
# 1、生成etcd证书配置信息,文件中hosts字段的IP为所有etcd节点的集群内部通信IP,一个都不能少!为了方便后期扩容可以多写几个预留的IP。
[root@k8s-master etcd]# cat > server-csr.json << EOF
{
"CN": "etcd",
"hosts": [
"11.0.1.128",
"11.0.1.129",
"11.0.1.130",
"11.0.1.131",
"11.0.1.132"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing"
}
]
}
EOF

# 2、生成证书,生成server.pem和server-key.pem文件
[root@k8s-master etcd]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server

安装etcd

先在MASTER节点执行,再将相关配置文件copy到其他node上

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
# 1、创建工作目录
[root@k8s-master etcd]# mkdir -p /opt/etcd/{bin,cfg,ssl}
[root@k8s-master etcd]# cd /opt/etcd

# 2、将etcd压缩包上传到/opt/etcd目录下,并解压
[root@k8s-master etcd]# tar zxvf etcd-v3.4.9-linux-amd64.tar.gz

# 3、将bin文件移动到工作目录的bin目录下
[root@k8s-master etcd]# mv ./etcd-v3.4.9-linux-amd64/{etcd,etcdctl} ./bin

# 4、创建etcd配置文件
[root@k8s-master etcd]# cat > ./cfg/etcd.conf << EOF
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://11.0.1.128:2380"
ETCD_LISTEN_CLIENT_URLS="https://11.0.1.128:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://11.0.1.128:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://11.0.1.128:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://11.0.1.128:2380,etcd-2=https://11.0.1.129:2380,etcd-3=https://11.0.1.130:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
# 说明
# ETCD_NAME:节点名称,集群中唯一
# ETCD_DATA_DIR:数据目录
# ETCD_LISTEN_PEER_URLS:集群通信监听地址
# ETCD_LISTEN_CLIENT_URLS:客户端访问监听地址
# ETCD_INITIAL_ADVERTISE_PEER_URLS:集群通告地址
# ETCD_ADVERTISE_CLIENT_URLS:客户端通告地址
# ETCD_INITIAL_CLUSTER:集群节点地址
# ETCD_INITIAL_CLUSTER_TOKEN:集群Token
# ETCD_INITIAL_CLUSTER_STATE:加入集群的当前状态,new是新集群,existing表示加入已有集群

# 5、systemd管理etcd
[root@k8s-master etcd]# cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd \
--cert-file=/opt/etcd/ssl/server.pem \
--key-file=/opt/etcd/ssl/server-key.pem \
--peer-cert-file=/opt/etcd/ssl/server.pem \
--peer-key-file=/opt/etcd/ssl/server-key.pem \
--trusted-ca-file=/opt/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem \
--logger=zap
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# 6、证书拷贝
[root@k8s-master etcd]# cp ~/TLS/etcd/ca*pem ~/TLS/etcd/server*pem /opt/etcd/ssl/

# 将master 生成的所有文件拷贝到node1 和node2
[root@k8s-master etcd]# cd /opt/etcd/cfg/
[root@k8s-master cfg]# scp -r /opt/etcd/ root@k8s-node1:/opt/
[root@k8s-master cfg]# scp -r /opt/etcd/ root@k8s-node2:/opt/
[root@k8s-master cfg]# scp /usr/lib/systemd/system/etcd.service root@k8s-node1:/usr/lib/systemd/system/
[root@k8s-master cfg]# scp /usr/lib/systemd/system/etcd.service root@k8s-node2:/usr/lib/systemd/system/

# !!!注:需要将拷贝过去的/opt/etcd/cfg/etcd.conf修改,如下为node1上的配置,node2也要同步修改
#[Member]
ETCD_NAME="etcd-2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://11.0.1.129:2380"
ETCD_LISTEN_CLIENT_URLS="https://11.0.1.129:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://11.0.1.129:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://11.0.1.129:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://11.0.1.128:2380,etcd-2=https://11.0.1.129:2380,etcd-3=https://11.0.1.130:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"



# 7、启动etcd服务并设置开机自动启动(3台服务器一起执行)
[root@k8s-master cfg]# systemctl daemon-reload
[root@k8s-master cfg]# systemctl start etcd
[root@k8s-master cfg]# systemctl enable etcd

# 8、查看etcd集群状态
[root@k8s-master ~]# /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://11.0.1.128:2379,https://11.0.1.129:2379,https://11.0.1.130:2379" endpoint health

https://11.0.1.129:2379 is healthy: successfully committed proposal: took = 10.331196ms
https://11.0.1.128:2379 is healthy: successfully committed proposal: took = 12.055311ms
https://11.0.1.130:2379 is healthy: successfully committed proposal: took = 13.703839ms

部署docker

下载链接:https://download.docker.com/linux/static/stable/x86_64/docker-19.03.9.tgz

如下步骤在三台服务器均执行

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
[root@k8s-master ~]# mkdir /opt/docker

#1、将docker压缩包上传到docker目录下并解压
[root@k8s-master docker]# cd /opt/docker
[root@k8s-master docker]# tar zxvf docker-19.03.9.tgz && mv ./docker/* /usr/bin && rm -rf ./docker

# 2、使用systemd管理docker
[root@k8s-master docker]# cat > /usr/lib/systemd/system/docker.service << EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
EOF

# 3、创建docker配置文件,配置阿里云镜像仓库地址
[root@k8s-master docker]# mkdir /etc/docker
[root@k8s-master docker]# cat > /etc/docker/daemon.json << EOF
{
"registry-mirrors": ["https://3uwngq1j.mirror.aliyuncs.com"]
}
EOF

# 4、启动docker
[root@k8s-master docker]# systemctl daemon-reload && systemctl start docker && systemctl status docker && systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.

部署k8s

ca证书制作

k8s通信和ETCD使用两套不同的证书体系,因此需要重新生成ca证书,仅在master节点执行

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
[root@k8s-master ~]# cd ~/TLS/k8s
# 1、自签CA
[root@k8s-master k8s]# cat > ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
# 2、生成ca证书请求信息
# CA机构的请求配置,用于生成一对公私钥,公钥作为根证书用于验证签名,私钥用于给其他NODE节点的证书签名以及对信息的加密
[root@k8s-master k8s]# cat > ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
# 3、制作ca证书
[root@k8s-master k8s]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

使用自签发的CA签发kube-apiserver证书

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
# 1、创建证书使用者相关信息,即server-csr.json
[root@k8s-master k8s]# cat > server-csr.json << EOF
{
"CN": "kubernetes",
"hosts": [
"10.0.0.1",
"127.0.0.1",
"11.0.1.128",
"11.0.1.129",
"11.0.1.130",
"11.0.1.131",
"11.0.1.132",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
注:上述文件hosts字段中IP为所有Master/LB/VIP IP,一个都不能少!为了方便后期扩容可以多写几个预留的IP。
# 2、签发服务端证书
# 生成server证书,得到server.csr、server-key.pem和 server.pem
[root@k8s-master k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

下载k8s

1
2
3
4
5
6
7
8
9
10
# 下载k8s安装包并解压,
# 下载地址:https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.20.md,本文使用的是v1.20.15
[root@k8s-master k8s]# mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
# 下载后上传到/opt/kubernetes目录下
[root@k8s-master k8s]# cd /opt/kubernetes/
[root@k8s-master kubernetes]# tar zxvf kubernetes-server-linux-amd64.tar.gz
# 将二进制文件拷贝到bin目录下
[root@k8s-master kubernetes]# cd kubernetes/server/bin
[root@k8s-master bin]# cp kube-apiserver kube-scheduler kube-controller-manager /opt/kubernetes/bin
[root@k8s-master bin]# cp kubectl /usr/bin/

部署kube-apiserver

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
# 1、创建kube-apiserver配置文件
[root@k8s-master bin]# cat > /opt/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--etcd-servers=https://11.0.1.128:2379,https://11.0.1.129:2379,https://11.0.1.130:2379 \\
--bind-address=11.0.1.128 \\
--secure-port=6443 \\
--advertise-address=11.0.1.128 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-32767 \\
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \\
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--service-account-issuer=api \\
--service-account-signing-key-file=/opt/kubernetes/ssl/server-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \\
--requestheader-client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--proxy-client-cert-file=/opt/kubernetes/ssl/server.pem \\
--proxy-client-key-file=/opt/kubernetes/ssl/server-key.pem \\
--requestheader-allowed-names=kubernetes \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--enable-aggregator-routing=true \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
EOF
# 注释
上面两个\ \ 第一个是转义符,第二个是换行符,使用转义符是为了使用EOF保留换行符。
--logtostderr:启用日志
--v:日志等级
--log-dir:日志目录
--etcd-servers:etcd集群地址
--bind-address:监听地址
--secure-port:https安全端口
--advertise-address:集群通告地址
--allow-privileged:启用授权
--service-cluster-ip-range:Service虚拟IP地址段
--enable-admission-plugins:准入控制模块
--authorization-mode:认证授权,启用RBAC授权和节点自管理
--enable-bootstrap-token-auth:启用TLS bootstrap机制
--token-auth-file:bootstrap token文件
--service-node-port-range:Service nodeport类型默认分配端口范围
--kubelet-client-xxx:apiserver访问kubelet客户端证书
--tls-xxx-file:apiserver https证书
1.20版本必须加的参数:--service-account-issuer,--service-account-signing-key-file
--etcd-xxxfile:连接Etcd集群证书
--audit-log-xxx:审计日志
启动聚合层相关配置:--requestheader-client-ca-file,--proxy-client-cert-file,--proxy-client-key-file,--requestheader-allowed-names,--requestheader-extra-headers-prefix,--requestheader-group-headers,--requestheader-username-headers,--enable-aggregator-routing

# 2、将证书拷贝到工作目录
[root@k8s-master ssl]# cp ~/TLS/k8s/ca*pem ~/TLS/k8s/server*pem /opt/kubernetes/ssl/

# 3、创建上述配置文件中 token 文件
# 格式:token,用户名,UID,用户组
# token 也可自行生成替换:使用命令生成token:head -c 16 /dev/urandom | od -An -t x | tr -d ' '
[root@k8s-master ssl]# cat > /opt/kubernetes/cfg/token.csv << EOF
eca2a85b0310f5ed2472d0aff3cc5068,kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF


# 4、systemd 管理kube-apiserver
[root@k8s-master ssl]# cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

# 5、启动kube-apiserver服务并设置开机启动
[root@k8s-master ssl]# systemctl daemon-reload && systemctl start kube-apiserver && systemctl enable kube-apiserver && systemctl status kube-apiserver
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.

部署kube-controller-manager

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
# 1、创建配置文件
[root@k8s-master bin]# cat > /opt/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect=true \\
--kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--cluster-signing-duration=87600h0m0s"
EOF
# 注释
--kubeconfig:连接apiserver配置文件
--leader-elect:当该组件启动多个时,自动选举(HA)
--cluster-signing-cert-file/--cluster-signing-key-file:自动为kubelet颁发证书的CA,与apiserver保持一致

# 2、生成kubeconfig文件
# 切换目录
[root@k8s-master cfg]# cd ~/TLS/k8s
# 创建证书请求文件
[root@k8s-master k8s]# cat > kube-controller-manager-csr.json << EOF
{
"CN": "system:kube-controller-manager",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF

# 生成证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
# 生成kubeconfig文件(直接执行如下sh命令)
KUBE_CONFIG="/opt/kubernetes/cfg/kube-controller-manager.kubeconfig"
KUBE_APISERVER="https://11.0.1.128:6443"

kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials kube-controller-manager \
--client-certificate=./kube-controller-manager.pem \
--client-key=./kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-controller-manager \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

# 3、systemd管理controll-manager
[root@k8s-master k8s]# cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
# 4、启动并设置开机启动
[root@k8s-master k8s]# systemctl daemon-reload && systemctl start kube-controller-manager && systemctl enable kube-controller-manager && systemctl status kube-controller-manager

Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.

部署kube-scheduler

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
# 1、创建配置文件
[root@k8s-master k8s]# cat > /opt/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect \\
--kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig \\
--bind-address=127.0.0.1"
EOF
# 注释:
--kubeconfig:连接apiserver配置文件
--leader-elect:当该组件启动多个时,自动选举(HA)
# 2、生成kubeconfig文件
# 切换目录
[root@k8s-master k8s]# cd ~/TLS/k8s
# 创建证书请求文件
[root@k8s-master k8s]# cat > kube-scheduler-csr.json << EOF
{
"CN": "system:kube-scheduler",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
# 生成证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
# 生成kubeconfig文件
KUBE_CONFIG="/opt/kubernetes/cfg/kube-scheduler.kubeconfig"
KUBE_APISERVER="https://11.0.1.128:6443"

kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials kube-scheduler \
--client-certificate=./kube-scheduler.pem \
--client-key=./kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-scheduler \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

# 3、systemd管理scheduler
[root@k8s-master k8s]# cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

# 4、启动并设置开机启动
[root@k8s-master k8s]# systemctl daemon-reload && systemctl start kube-scheduler && systemctl enable kube-scheduler && systemctl status kube-scheduler

Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.

配置kubectl

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
# 1、生成kubectl连接集群的证书
[root@k8s-master k8s]# cat > admin-csr.json <<EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
# 2、生成证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

# 3、生成kubeconfig文件
mkdir /root/.kube

KUBE_CONFIG="/root/.kube/config"
KUBE_APISERVER="https://11.0.1.128:6443"

kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials cluster-admin \
--client-certificate=./admin.pem \
--client-key=./admin-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=cluster-admin \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
# 4、通过kubectl工具查看当前集群组件状态
[root@k8s-master k8s]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
etcd-1 Healthy {"health":"true"}
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}

授权kubelet-bootstrap用户允许请求证书

1
2
3
4
5
[root@k8s-master k8s]# kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap

clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created

部署node

准备工作

1
2
3
4
5
6
7
8
9
# 创建工作目录,node节点执行
[root@node1 ~]# mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
# 将kubelet和kube-proxy从/opt/kubernetes/kubernetes/server/bin拷贝过来
# master节点执行:
[root@k8s-master bin]# cd /opt/kubernetes/kubernetes/server/bin
[root@k8s-master bin]# cp ./kubelet ./kube-proxy /opt/kubernetes/bin/
# 将kubelet和kube-proxy从master节点拷贝到node节点
[root@k8s-master bin]# scp kubelet kube-proxy root@k8s-node1:/opt/kubernetes/bin
[root@k8s-master bin]# scp kubelet kube-proxy root@k8s-node2:/opt/kubernetes/bin

部署kubelet

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
# 先在master节点部署之后,在拷贝到node上
# 1、创建配置文件
# 注意,需要使用docker登录阿里云镜像仓库,并将pause镜像获取到个人仓库,pod-infra-container-image再配置为该镜像。(三台Node上均需要登录阿里云镜像仓库)
# 登录镜像仓库
docker login --username=wbl1996 registry.cn-hangzhou.aliyuncs.com
# 拉取镜像
docker pull registry.cn-hangzhou.aliyuncs.com/wbl_k8s/pause:3.2
# 打标签
docker tag 80d28bedfe5d pause:3.2
# 创建配置文件
cat > /opt/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--hostname-override=k8s-master \\
--network-plugin=cni \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=pause:3.2"
EOF
# 注释:
--hostname-override:显示名称,集群中唯一
--network-plugin:启用CNI
--kubeconfig:空路径,会自动生成,后面用于连接apiserver
--bootstrap-kubeconfig:首次启动向apiserver申请证书
--config:配置参数文件
--cert-dir:kubelet证书生成目录
--pod-infra-container-image:管理Pod网络容器的镜像

# 2、创建配置参数文件
cat > /opt/kubernetes/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
  - 10.0.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF

# 3、生成kubelet初次加入集群引导kubeconfig文件
KUBE_CONFIG="/opt/kubernetes/cfg/bootstrap.kubeconfig"
KUBE_APISERVER="https://11.0.1.128:6443" # apiserver IP:PORT
TOKEN="eca2a85b0310f5ed2472d0aff3cc5068" # 与token.csv里保持一致

# 生成 kubelet bootstrap kubeconfig 配置文件
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials "kubelet-bootstrap" \
--token=${TOKEN} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user="kubelet-bootstrap" \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

# 4、systemd管理kubelet
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# 5、启动并设置开机自启
systemctl daemon-reload && systemctl start kubelet && systemctl enable kubelet && systemctl status kubelet

批准kubelet证书申请并加入集群

1
2
3
4
5
6
7
8
9
10
11
12
# 查看kubelet证书请求
[root@k8s-master bin]# kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-RTvoDxqN5eC9CT_Mbu66tzD96bgOKH6mFFAaIYlLkOk 35s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending

# 批准申请
kubectl certificate approve node-csr-RTvoDxqN5eC9CT_Mbu66tzD96bgOKH6mFFAaIYlLkOk

# 查看节点。由于还没有部署网络插件,因此目前为NotReady状态
[root@k8s-master bin]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master NotReady <none> 6s v1.20.15

部署kube-proxy

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
# 1、创建配置文件
cat > /opt/kubernetes/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--config=/opt/kubernetes/cfg/kube-proxy-config.yml"
EOF

# 2、创建配置参数文件
cat > /opt/kubernetes/cfg/kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
hostnameOverride: k8s-master
clusterCIDR: 10.0.0.0/24
EOF

# 3、生成kube-proxy.kubeconfig文件
# 生成kube-proxy证书
# 切换工作目录
cd ~/TLS/k8s

# 创建证书请求文件
cat > kube-proxy-csr.json << EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF

# 生成证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
生成kubeconfig文件:
KUBE_CONFIG="/opt/kubernetes/cfg/kube-proxy.kubeconfig"
KUBE_APISERVER="https://11.0.1.128:6443"

kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials kube-proxy \
--client-certificate=./kube-proxy.pem \
--client-key=./kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

# 4、systemd管理kube-proxy
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# 5、启动并设置开机启动
systemctl daemon-reload && systemctl start kube-proxy && systemctl enable kube-proxy && systemctl status kube-proxy

部署网络插件

1
2
3
4
5
6
# 使用Calico作为k8s集群的网络插件
# 将calico的yaml配置文件上传到/opt/kubernetes/cfg目录
# 部署Calico,calico版本要和k8s的版本匹配
# calico 3.19
# https://docs.projectcalico.org/archive/v3.19/manifests/calico.yaml
[root@k8s-master cfg]# kubectl apply -f calico.yaml

授权kube-apiserver访问kubelet

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
# 应用场景:例如kubectl logs
cat > apiserver-to-kubelet-rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
      - pods/log
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
EOF

kubectl apply -f apiserver-to-kubelet-rbac.yaml

增加work node

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
# 1、将master节点相关的文件拷贝到node节点
scp -r /opt/kubernetes/ root@k8s-node1:/opt/
scp -r /opt/kubernetes/ root@k8s-node2:/opt/

scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@k8s-node1:/usr/lib/systemd/system
scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@k8s-node2:/usr/lib/systemd/system

scp /opt/kubernetes/ssl/ca.pem root@k8s-node1:/opt/kubernetes/ssl
scp /opt/kubernetes/ssl/ca.pem root@k8s-node2:/opt/kubernetes/ssl

# 2、删除NODE节点上master自动生成的证书
rm -f /opt/kubernetes/cfg/kubelet.kubeconfig
rm -f /opt/kubernetes/ssl/kubelet*

# 3、修改配置文件中的主机名称
vi /opt/kubernetes/cfg/kubelet.conf
--hostname-override=k8s-node1

vi /opt/kubernetes/cfg/kube-proxy-config.yml
hostnameOverride: k8s-node1

# 4、启动kubelet和kube-proxy并设置开机启动
systemctl daemon-reload && systemctl start kubelet kube-proxy && systemctl enable kubelet kube-proxy && systemctl status kubelet kube-proxy

# 5、在Master上批准新Node kubelet证书申请
# 查看证书请求
[root@k8s-master ~]# kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-5Xhn34f8O6RSef5UcDkfuU8dIkY--FiGwSCTfezmIYo 2m33s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
node-csr-RTvoDxqN5eC9CT_Mbu66tzD96bgOKH6mFFAaIYlLkOk 3d kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
node-csr-fC6rUV4KFHgNBIqMJtgwDkmD_3y59jBf6UcgVdkAuWU 47s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
# 授权请求
kubectl certificate approve node-csr-5Xhn34f8O6RSef5UcDkfuU8dIkY--FiGwSCTfezmIYo
kubectl certificate approve node-csr-fC6rUV4KFHgNBIqMJtgwDkmD_3y59jBf6UcgVdkAuWU



部署nginx测试

部署dashboard

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
# 将kubernetes-dashboard.yaml上传到/opt/kubernetes/cfg目录下
# https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.0/aio/deploy/recommended.yaml
# 注:获取的配置文件中Service未指定type,因此默认按照ClusterIP来部署,为了在浏览器可以访问,将Service改为如下内容

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort # 增加NodePort类型
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001 # 增加nodePort
  selector:
    k8s-app: kubernetes-dashboard

kubectl apply -f kubernetes-dashboard.yaml


# 创建ServiceAccount并绑定默认cluster-admin管理员集群角色
[root@k8s-master cfg]# kubectl create sa dashboard-admin -n kube-system
serviceaccount/dashboard-admin created

# 创建clusterrolebinding,将clusterrole和serviceaccount绑定
[root@k8s-master cfg]# kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin created
# 查看Pod和svc状态
kubectl get pods,svc -n kubernetes-dashboard
# 通过浏览器访问https://master_ip:30001
# 若edge浏览器无法访问,则直接在该页面按thisisunsafe即可自动刷新
# 获取token
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
# 将token粘贴到上述页面中即可

部署CoreDNS

未完待续…