Environment preparation
Software versions
- OS: Ubuntu 22.04.3 LTS
- Kernel: 5.15.0-88-generic
- kubernetes: v1.27.8
- etcd: v3.5.9
- containerd: v1.6.23
- calico: v3.26.4
Production recommendations:
Use a Kubernetes patch release above .5 (for example 1.27.6 or later), so early patch-level bugs are already fixed.
Run the etcd cluster independently, on 5 dedicated servers.
Use 5 k8s-master servers, or run 3 and reserve addresses for future expansion.
This guide deploys 3 masters and 2 workers; the masters also act as worker nodes (kubelet and kube-proxy installed). Because hardware is limited, the apiserver and etcd share the same servers (not recommended in production).
IP planning
Internal network IP plan
hostname | IP/VIP | software |
---|---|---|
k8s-master01 | 192.168.77.81 | etcd、apiserver、scheduler、controller-manager、kubelet、kube-proxy |
k8s-master02 | 192.168.77.82 | etcd、apiserver、scheduler、controller-manager、kubelet、kube-proxy |
k8s-master03 | 192.168.77.83 | etcd、apiserver、scheduler、controller-manager、kubelet、kube-proxy |
k8s-node01 | 192.168.77.86 | kubelet、kube-proxy |
k8s-node02 | 192.168.77.87 | kubelet、kube-proxy |
haproxy01 | 192.168.77.101 | haproxy、keepalived |
haproxy02 | 192.168.77.102 | haproxy、keepalived |
VIP | 192.168.77.100 | |
Cluster-internal IP plan
Kubernetes Service CIDR: 10.96.0.0/16
Kubernetes Pod CIDR: 10.244.0.0/16
Kubernetes CoreDNS IP: 10.96.0.10
Configure a static IP
root@k8s-master01:~# cat /etc/netplan/00-installer-config.yaml
# This is the network config written by 'subiquity'
network:
ethernets:
ens160:
dhcp4: false
addresses: [192.168.77.81/24]
nameservers:
addresses:
- 223.5.5.5
routes:
- to: default
via: 192.168.77.1
version: 2
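Apply the netplan configuration so the static address takes effect (run this from the console, since the SSH session may drop when the IP changes):
netplan apply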
Configure /etc/hosts resolution
cat >> /etc/hosts << EOF
192.168.77.81 k8s-master01
192.168.77.82 k8s-master02
192.168.77.83 k8s-master03
192.168.77.86 k8s-node01
192.168.77.87 k8s-node02
EOF
Set the hostname
hostnamectl set-hostname k8s-master01
bash
Set the default editor to vim
EDITOR_OPTION=3 # vim.basic
sudo update-alternatives --config editor <<< $EDITOR_OPTION
Configure passwordless sudo
Ubuntu disables root SSH login by default, so operations are performed over SSH as a regular user with sudo.
visudo
Edit line 47:
# Members of the admin group may gain root privileges
%admin ALL=(ALL) NOPASSWD: ALL
The sunday user is used here. Note that passwordless sudo only takes effect after switching user with su -
or logging out of the terminal and back in.
groupadd admin
usermod -aG admin sunday
Configure an apt mirror
sed -i 's@archive.ubuntu.com@mirrors.aliyun.com@g' /etc/apt/sources.list
apt-get update
Install required packages
apt-get update
apt-get install -y psmisc ca-certificates curl net-tools gnupg lsb-release nfs-kernel-server apt-transport-https telnet lvm2 gcc bash-completion ntpdate ipvsadm ipset sysstat conntrack libseccomp2 chrony
Configure SSH key-based login
Run on master01
This assumes the SSH user on every server is sunday with password 123456; change these to match your environment.
apt-get install -y expect
ssh-keygen -t rsa -P "" -f /root/.ssh/id_rsa
export user=sunday
export pass=123456
name=(k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02)
for host in ${name[@]};do expect -c "
spawn ssh-copy-id -i /root/.ssh/id_rsa.pub $user@$host
expect {
\"*yes/no*\" {send \"yes\r\"; exp_continue}
\"*password*\" {send \"$pass\r\"; exp_continue}
\"*Password*\" {send \"$pass\r\";}
}";
done
Host-key confirmation on the first SSH connection (sshpass alternative to the expect loop above)
apt install -y sshpass
export SSHPASS="123456"
user=sunday
name=(k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02)
for host in ${name[@]};do
sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $user@$host
done
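Optionally verify that key-based login now works everywhere; a minimal check assuming the same user and host list as above:
for host in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02; do
  ssh -o BatchMode=yes sunday@$host hostname   # BatchMode fails fast instead of prompting for a password
done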
Disable the firewall
systemctl disable --now ufw.service
Disable swap
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
swapoff -a
sysctl -w vm.swappiness=0
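Confirm swap is now off (both commands should report no swap in use):
swapon --show        # no output means no active swap devices
free -h | grep -i swap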
Configure chrony time synchronization
timedatectl set-timezone Asia/Shanghai
timedatectl set-local-rtc 0
echo "LC_TIME=en_DK.UTF-8" >> /etc/default/locale # 24小时制 切换用户生效 su - 或重新连接
apt-get install -y chrony
Server-side configuration (k8s-master01)
ntp="ntp.aliyun.com"
sed -i "s@^pool ntp.ubuntu.com.*@pool $ntp iburst@g" /etc/chrony/chrony.conf
# Restart the service
systemctl restart chronyd
# Verify from the client
chronyc sources -v
Client configuration
Point chrony on the other servers at this NTP server (k8s-master01).
ntp="192.168.77.81"
sed -i "s@^pool@#pool@g" /etc/chrony/chrony.conf
sed -i "s@^#pool ntp.ubuntu.com.*@server $ntp iburst@g" /etc/chrony/chrony.conf
# Restart the service
systemctl restart chronyd
# Verify from the client
chronyc sources -v
# Check the state of the time sources
chronyc sourcestats -v
Configure limits
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
Load the IPVS modules
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
systemctl restart systemd-modules-load.service
Check that the modules loaded
root@k8s-master01:~# lsmod |grep -e ip_vs -e nf_conntrack
ip_vs_sh 16384 0
ip_vs_wrr 16384 0
ip_vs_rr 16384 0
ip_vs 176128 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 172032 1 ip_vs
nf_defrag_ipv6 24576 2 nf_conntrack,ip_vs
nf_defrag_ipv4 16384 1 nf_conntrack
libcrc32c 16384 5 nf_conntrack,btrfs,xfs,raid456,ip_vs
Kernel parameter configuration
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.swappiness = 0
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_watches = 89100
fs.file-max = 52706963
fs.nr_open = 52706963
net.netfilter.nf_conntrack_max = 2310720
net.ipv4.tcp_keepalive_time = 300
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 655535
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
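The file above is only read at boot unless sysctl is reloaded; the net.bridge.* keys also require the br_netfilter module (made persistent in the containerd section below), so load it once here before applying:
modprobe br_netfilter   # provides the net.bridge.bridge-nf-call-* keys
sysctl --system         # re-reads every file under /etc/sysctl.d/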
Kernel upgrade
On older kernels, upgrade to 4.18 or newer.
The Ubuntu 22.04.3 kernel is recent enough, so no upgrade is needed:
$ uname -r
5.15.0-88-generic
Resolve the port 53 conflict
systemd-resolved listens on port 53, which conflicts with CoreDNS and prevents it from running, so the stub listener must be disabled.
Back up the original file
mv /etc/systemd/resolved.conf /etc/systemd/resolved.conf.old
cat > /etc/systemd/resolved.conf << EOF
[Resolve]
#DNS=223.5.5.5
# stop listening on port 53
DNSStubListener=no
EOF
# Restart the service so the change takes effect
systemctl restart systemd-resolved
Software download URLs
https://github.com/containerd/containerd/releases/download/v1.6.23/cri-containerd-1.6.23-linux-amd64.tar.gz
https://dl.k8s.io/v1.27.8/kubernetes-server-linux-amd64.tar.gz
https://github.com/etcd-io/etcd/releases/download/v3.5.9/etcd-v3.5.9-linux-amd64.tar.gz
Install the etcd cluster
Download and extract etcd
etcd is deployed here on the three master01-03 nodes; dedicated machines are recommended for the etcd cluster.
wget https://github.com/etcd-io/etcd/releases/download/v3.5.9/etcd-v3.5.9-linux-amd64.tar.gz
tar -zxvf etcd-v3.5.9-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-v3.5.9-linux-amd64/etcd{,ctl}
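Check that the binaries are in place and report the expected version:
etcd --version
etcdctl version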
etcd certificate configuration
Directory layout
- etcd certificate generation directory: /opt/pki/etcd
- etcd certificate runtime directory: /etc/etcd/ssl
etcd certificate config files
Create the etcd certificate config files
mkdir -p /opt/pki/etcd /etc/etcd/ssl
cd /opt/pki/etcd
# Create the CA signing config
cat > /opt/pki/etcd/ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "876000h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "876000h"
}
}
}
}
EOF
# Create etcd-ca-csr.json, the CSR config for the etcd CA
cat > /opt/pki/etcd/etcd-ca-csr.json << EOF
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Guangdong",
"L": "Guangzhou",
"O": "etcd",
"OU": "Etcd Security"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
# Create etcd-csr.json, the certificate signing request (CSR) for the etcd server/peer certificate
cat > /opt/pki/etcd/etcd-csr.json << EOF
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Guangdong",
"L": "Guangzhou",
"O": "etcd",
"OU": "Etcd Security"
}
]
}
EOF
Generate the etcd certificates
Install the certificate tooling
apt install -y golang-cfssl
Generate the etcd CA certificate
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /opt/pki/etcd/etcd-ca
Generate the etcd server certificate; every etcd node's IP and hostname must be listed in the hostname option.
cfssl gencert -ca=/opt/pki/etcd/etcd-ca.pem -ca-key=/opt/pki/etcd/etcd-ca-key.pem -config=ca-config.json --hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,192.168.77.81,192.168.77.82,192.168.77.83 -profile=kubernetes etcd-csr.json |cfssljson -bare /opt/pki/etcd/etcd
Generated certificate files
root@sunday:/opt/pki/etcd# ls -la
total 36
drwxr-xr-x 2 root root 186 Dec 16 16:31 .
drwxr-xr-x 3 root root 18 Dec 16 16:31 ..
-rw-r--r-- 1 root root 294 Dec 16 16:30 ca-config.json
-rw-r--r-- 1 root root 1050 Dec 16 16:31 etcd-ca.csr
-rw-r--r-- 1 root root 249 Dec 16 16:30 etcd-ca-csr.json
-rw------- 1 root root 1679 Dec 16 16:31 etcd-ca-key.pem
-rw-r--r-- 1 root root 1318 Dec 16 16:31 etcd-ca.pem
-rw-r--r-- 1 root root 1131 Dec 16 16:31 etcd.csr
-rw-r--r-- 1 root root 210 Dec 16 16:30 etcd-csr.json
-rw------- 1 root root 1675 Dec 16 16:31 etcd-key.pem
-rw-r--r-- 1 root root 1464 Dec 16 16:31 etcd.pem
Distribute the etcd certificates and binaries
Copy the etcd certificates to /etc/etcd/ssl on each master node
name=(k8s-master01 k8s-master02 k8s-master03)
user=sunday
for host in ${name[@]}; do \
echo $host
ssh $user@$host "sudo mkdir -p /etc/etcd/ssl"; \
rsync --rsync-path="sudo rsync" /opt/pki/etcd/{etcd-ca.pem,etcd-key.pem,etcd.pem} $user@$host:/etc/etcd/ssl/;
rsync --rsync-path="sudo rsync" /usr/local/bin/{etcd,etcdctl} $user@$host:/usr/local/bin/;
done
etcd service configuration
etcd node 01 (k8s-master01) configuration file
mkdir -p /data/etcd
cat > /etc/etcd/etcd.config.yml << EOF
name: 'k8s-master01' # replace with master01's hostname
data-dir: /data/etcd
wal-dir: /data/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.77.81:2380' # replace with master01's IP
listen-client-urls: 'https://192.168.77.81:2379,http://127.0.0.1:2379' # master01's IP plus the loopback address
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.77.81:2380' # replace with master01's IP
advertise-client-urls: 'https://192.168.77.81:2379' # replace with master01's IP
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.77.81:2380,k8s-master02=https://192.168.77.82:2380,k8s-master03=https://192.168.77.83:2380' # all cluster hostnames and IPs
initial-cluster-token: 'etcd-k8s-cluster' # cluster token
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/etcd/ssl/etcd.pem'
key-file: '/etc/etcd/ssl/etcd-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/etcd/ssl/etcd.pem'
key-file: '/etc/etcd/ssl/etcd-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
etcd node 02 (k8s-master02) configuration file
mkdir -p /data/etcd
cat > /etc/etcd/etcd.config.yml << EOF
name: 'k8s-master02' # replace with master02's hostname
data-dir: /data/etcd
wal-dir: /data/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.77.82:2380' # replace with master02's IP
listen-client-urls: 'https://192.168.77.82:2379,http://127.0.0.1:2379' # master02's IP plus the loopback address
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.77.82:2380' # replace with master02's IP
advertise-client-urls: 'https://192.168.77.82:2379' # replace with master02's IP
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.77.81:2380,k8s-master02=https://192.168.77.82:2380,k8s-master03=https://192.168.77.83:2380' # all cluster hostnames and IPs
initial-cluster-token: 'etcd-k8s-cluster' # cluster token
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/etcd/ssl/etcd.pem'
key-file: '/etc/etcd/ssl/etcd-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/etcd/ssl/etcd.pem'
key-file: '/etc/etcd/ssl/etcd-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
etcd node 03 (k8s-master03) configuration file
mkdir -p /data/etcd
cat > /etc/etcd/etcd.config.yml << EOF
name: 'k8s-master03' # replace with master03's hostname
data-dir: /data/etcd
wal-dir: /data/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.77.83:2380' # replace with master03's IP
listen-client-urls: 'https://192.168.77.83:2379,http://127.0.0.1:2379' # master03's IP plus the loopback address
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.77.83:2380' # replace with master03's IP
advertise-client-urls: 'https://192.168.77.83:2379' # replace with master03's IP
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.77.81:2380,k8s-master02=https://192.168.77.82:2380,k8s-master03=https://192.168.77.83:2380' # all cluster hostnames and IPs
initial-cluster-token: 'etcd-k8s-cluster' # cluster token
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/etcd/ssl/etcd.pem'
key-file: '/etc/etcd/ssl/etcd-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/etcd/ssl/etcd.pem'
key-file: '/etc/etcd/ssl/etcd-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
Configure the systemd service unit
cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF
Start the service and enable it at boot
systemctl daemon-reload
systemctl enable --now etcd
systemctl status etcd
Check the etcd cluster status
root@k8s-master01:~# etcdctl --endpoints="192.168.77.81:2379,192.168.77.82:2379,192.168.77.83:2379" --cacert=/etc/etcd/ssl/etcd-ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint status --write-out=table
+--------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+--------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 192.168.77.81:2379 | 4f618a57e2e4f983 | 3.5.9 | 20 kB | true | false | 5 | 15 | 15 | |
| 192.168.77.82:2379 | c74a110d8f902a2a | 3.5.9 | 20 kB | false | false | 5 | 15 | 15 | |
| 192.168.77.83:2379 | 53179a9063ddf0cc | 3.5.9 | 20 kB | false | false | 5 | 15 | 15 | |
+--------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
Install the high-availability services
haproxy
Install keepalived and haproxy
apt install -y keepalived haproxy
The haproxy configuration is identical on both nodes
cat > /etc/haproxy/haproxy.cfg <<EOF
global
maxconn 2000
ulimit-n 16384
log 127.0.0.1 local0 err
stats timeout 30s
defaults
log global
mode http
option httplog
timeout connect 5000
timeout client 50000
timeout server 50000
timeout http-request 15s
timeout http-keep-alive 15s
listen monitor
bind 0.0.0.0:8100
mode http
stats enable
stats uri /
stats refresh 5s
frontend k8s-master
bind 0.0.0.0:8443
mode tcp
option tcplog
tcp-request inspect-delay 5s
default_backend k8s-master
backend k8s-master
mode tcp
option tcplog
option tcp-check
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
server k8s-master01 192.168.77.81:6443 check
server k8s-master02 192.168.77.82:6443 check
server k8s-master03 192.168.77.83:6443 check
EOF
keepalived
The keepalived configuration differs per node; mind each node's IP and interface name.
keepalived01
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state MASTER
interface ens192
mcast_src_ip 192.168.77.101
virtual_router_id 61
priority 100
nopreempt
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.77.100
}
track_script {
chk_apiserver
}
}
EOF
keepalived02
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP
interface ens192
mcast_src_ip 192.168.77.102
virtual_router_id 61
priority 90
nopreempt
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.77.100
}
track_script {
chk_apiserver
}
}
EOF
Health-check script
cat > /etc/keepalived/check_apiserver.sh << \EOF
#!/bin/bash
err=0
for k in $(seq 1 3)
do
check_code=$(pgrep haproxy)
if [[ $check_code == "" ]]; then
err=$(expr $err + 1)
sleep 1
continue
else
err=0
break
fi
done
if [[ $err != "0" ]]; then
echo "systemctl stop keepalived"
/usr/bin/systemctl stop keepalived
exit 1
else
exit 0
fi
EOF
chmod +x /etc/keepalived/check_apiserver.sh
Start haproxy and keepalived
systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived
systemctl restart haproxy keepalived
Test the high availability
ping 192.168.77.100
Shut down the MASTER node and confirm the VIP fails over to the BACKUP node.
Note: the apiserver is not deployed yet, so telnet to port 8443 will fail because there is nothing listening on port 6443 to forward to.
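A simple way to confirm failover (the interface name is taken from the keepalived configuration above; adjust it if yours differs):
# On haproxy01/haproxy02, see which node currently holds the VIP
ip addr show ens192 | grep 192.168.77.100
# Stop keepalived on the node holding the VIP and confirm it moves to the other node
systemctl stop keepalived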
Install containerd
The masters also act as worker nodes here, so containerd is installed on them as well.
Configure containerd kernel parameters
cat > /etc/sysctl.d/99-kubernetes-cri.conf << EOF
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl --system
Configure the kernel modules containerd requires
cat > /etc/modules-load.d/containerd.conf <<EOF
overlay
br_netfilter
EOF
systemctl restart systemd-modules-load.service
Download and extract
wget https://github.com/containerd/containerd/releases/download/v1.6.23/cri-containerd-1.6.23-linux-amd64.tar.gz
tar zxf cri-containerd-1.6.23-linux-amd64.tar.gz -C /
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml
Edit /etc/containerd/config.toml
vim /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml
.....
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"
.....
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
...
SystemdCgroup = true # important: without this, pods keep restarting
....
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://xxxxx.mirror.aliyuncs.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
endpoint = ["gcr.m.daocloud.io"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
endpoint = ["k8s-gcr.m.daocloud.io"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
endpoint = ["quay.m.daocloud.io"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.k8s.io"]
endpoint = ["k8s.m.daocloud.io"]
Start the service and enable it at boot
systemctl enable --now containerd
systemctl status containerd
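The cri-containerd bundle also ships crictl; a quick CRI-level sanity check (a minimal sketch, assuming the default socket path configured above):
cat > /etc/crictl.yaml << EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
EOF
crictl info | head -n 20   # should report the CRI runtime status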
Distribute containerd to the other servers
name=(k8s-master02 k8s-master03 k8s-node01 k8s-node02)
user=sunday
for host in ${name[@]}; do \
echo $host;
ssh $user@$host "sudo mkdir -p /etc/containerd /opt/cni/bin /opt/containerd /etc/cni/net.d"; \
rsync --rsync-path="sudo rsync" /usr/local/sbin/runc $user@$host:/usr/local/sbin/; \
rsync --rsync-path="sudo rsync" /usr/local/bin/* $user@$host:/usr/local/bin/; \
rsync --rsync-path="sudo rsync" /etc/systemd/system/containerd.service $user@$host:/etc/systemd/system/; \
rsync --rsync-path="sudo rsync" /etc/containerd/config.toml $user@$host:/etc/containerd/; \
rsync --rsync-path="sudo rsync" /etc/sysctl.d/99-kubernetes-cri.conf $user@$host:/etc/sysctl.d/;
done
Kubernetes master components
Download and install
wget https://dl.k8s.io/v1.27.8/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
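Verify the extracted binaries:
kubectl version --client
kubelet --version
kube-apiserver --version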
Distribute the master components
masters='k8s-master02 k8s-master03'
user=sunday
for i in $masters; do
echo $i;
rsync --rsync-path="sudo rsync" /usr/local/bin/kube{ctl,-apiserver,-controller-manager,-scheduler} $user@$i:/usr/local/bin/;
done
Distribute the node components
nodes='k8s-master02 k8s-master03 k8s-node01 k8s-node02'
user=sunday
for i in $nodes; do
rsync --rsync-path="sudo rsync" /usr/local/bin/kube{let,-proxy} $user@$i:/usr/local/bin/;
done
Kubernetes certificate configuration
Directory layout
- k8s certificate generation directory: /opt/pki/k8s
- k8s certificate runtime directory: /etc/kubernetes/pki
Certificate config files
mkdir -p /opt/pki/k8s /etc/kubernetes/pki
# Create the CA signing config
cat > /opt/pki/k8s/ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "876000h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "876000h"
}
}
}
}
EOF
# CA certificate CSR config
cat > /opt/pki/k8s/ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Guangdong",
"L": "Guangzhou",
"O": "Kubernetes",
"OU": "Kubernetes-manual"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
# apiserver server certificate CSR config
cat > /opt/pki/k8s/apiserver-csr.json << EOF
{
"CN": "kube-apiserver",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Guangdong",
"L": "Guangzhou",
"O": "Kubernetes",
"OU": "Kubernetes-manual"
}
]
}
EOF
# kube-controller-manager certificate CSR config
cat > /opt/pki/k8s/kube-controller-manager-csr.json << EOF
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Guangdong",
"L": "Guangzhou",
"O": "system:kube-controller-manager",
"OU": "Kubernetes-manual"
}
]
}
EOF
# kube-scheduler certificate CSR config
cat > /opt/pki/k8s/kube-scheduler-csr.json << EOF
{
"CN": "system:kube-scheduler",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Guangdong",
"L": "Guangzhou",
"O": "system:kube-scheduler",
"OU": "Kubernetes-manual"
}
]
}
EOF
# cluster administrator certificate CSR config
cat > /opt/pki/k8s/admin-csr.json << EOF
{
"CN": "admin",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Guangdong",
"L": "Guangzhou",
"O": "system:masters",
"OU": "Kubernetes-manual"
}
]
}
EOF
# front-proxy (aggregation layer) CA config
cat > /opt/pki/k8s/front-proxy-ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"ca": {
"expiry": "876000h"
}
}
EOF
# front-proxy client certificate CSR config
cat > /opt/pki/k8s/front-proxy-client-csr.json << EOF
{
"CN": "front-proxy-client",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
Generate the certificates
Notes on the apiserver certificate hostnames
The certificate hostname list must contain every master IP and the VIP, so plan the master nodes and VIP ahead of time and include a few spare IPs reserved for future masters.
The certificate must also contain the first Service IP and the cluster's default service names, otherwise it has to be regenerated later.
The IPs and hostnames included are shown in the command below.
10.96.0.1 is the first IP of the Service CIDR.
cd /opt/pki/k8s   # the CSR files below are referenced by relative path
# Generate the CA certificate
cfssl gencert -initca /opt/pki/k8s/ca-csr.json | cfssljson -bare /opt/pki/k8s/ca
# Generate the apiserver server certificate
cfssl gencert -ca=/opt/pki/k8s/ca.pem -ca-key=/opt/pki/k8s/ca-key.pem -config=/opt/pki/k8s/ca-config.json -hostname=k8s-master01,k8s-master02,k8s-master03,k8s-master04,k8s-master05,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,127.0.0.1,10.96.0.1,192.168.77.81,192.168.77.82,192.168.77.83,192.168.77.84,192.168.77.85,192.168.77.100 -profile=kubernetes /opt/pki/k8s/apiserver-csr.json | cfssljson -bare /opt/pki/k8s/apiserver
# apiserver aggregation (front-proxy) certificates, a limited-privilege identity used by extension API services
cfssl gencert -initca /opt/pki/k8s/front-proxy-ca-csr.json | cfssljson -bare /opt/pki/k8s/front-proxy-ca
cfssl gencert -ca=/opt/pki/k8s/front-proxy-ca.pem -ca-key=/opt/pki/k8s/front-proxy-ca-key.pem -config=/opt/pki/k8s/ca-config.json -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /opt/pki/k8s/front-proxy-client
# Generate the controller-manager certificate
cfssl gencert -ca=/opt/pki/k8s/ca.pem -ca-key=/opt/pki/k8s/ca-key.pem -config=/opt/pki/k8s/ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare /opt/pki/k8s/controller-manager
# Generate the scheduler certificate
cfssl gencert -ca=/opt/pki/k8s/ca.pem -ca-key=/opt/pki/k8s/ca-key.pem -config=/opt/pki/k8s/ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare /opt/pki/k8s/scheduler
# Generate the cluster administrator certificate
cfssl gencert -ca=/opt/pki/k8s/ca.pem -ca-key=/opt/pki/k8s/ca-key.pem -config=/opt/pki/k8s/ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare /opt/pki/k8s/admin
cp -r /opt/pki/k8s/*.pem /etc/kubernetes/pki/
Kubeconfig files
controller-manager kubeconfig
Haproxy load-balances the apiserver here, so --server is set to vip:port (192.168.77.100:8443); without a high-availability setup, point it at a single apiserver's IP and port (default 6443) instead.
# Configure the cluster entry
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.77.100:8443 --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# Set the user context
kubectl config set-context system:kube-controller-manager@kubernetes --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# Set the user credentials
kubectl config set-credentials system:kube-controller-manager --client-certificate=/etc/kubernetes/pki/controller-manager.pem --client-key=/etc/kubernetes/pki/controller-manager-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# Use this context by default
kubectl config use-context system:kube-controller-manager@kubernetes --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
kube-scheduler kubeconfig
# Configure the cluster entry
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.77.100:8443 --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# Set the user context
kubectl config set-context system:kube-scheduler@kubernetes --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# Set the user credentials
kubectl config set-credentials system:kube-scheduler --client-certificate=/etc/kubernetes/pki/scheduler.pem --client-key=/etc/kubernetes/pki/scheduler-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# Use this context by default
kubectl config use-context system:kube-scheduler@kubernetes --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Cluster administrator kubeconfig
# Configure the cluster entry
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.77.100:8443 --kubeconfig=/etc/kubernetes/admin.kubeconfig
# Set the user context
kubectl config set-context kubernetes-admin@kubernetes --cluster=kubernetes --user=kubernetes-admin --kubeconfig=/etc/kubernetes/admin.kubeconfig
# Set the user credentials
kubectl config set-credentials kubernetes-admin --client-certificate=/etc/kubernetes/pki/admin.pem --client-key=/etc/kubernetes/pki/admin-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/admin.kubeconfig
# Use this context by default
kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
Keys for ServiceAccount token signing
# ServiceAccount key pair
openssl genrsa -out /opt/pki/k8s/sa.key 2048
openssl rsa -in /opt/pki/k8s/sa.key -pubout -out /opt/pki/k8s/sa.pub
cp /opt/pki/k8s/{sa.key,sa.pub} /etc/kubernetes/pki/
Distribute the certificates and kubeconfigs
mkdir -p /etc/kubernetes/pki/
cp -r /opt/pki/k8s/*.pem /etc/kubernetes/pki/
user=sunday
for i in k8s-master02 k8s-master03;do
ssh $user@$i "sudo mkdir -p /etc/kubernetes/pki/";
rsync --rsync-path="sudo rsync" /etc/kubernetes/pki/*.{pem,key,pub} $user@$i:/etc/kubernetes/pki/;
rsync --rsync-path="sudo rsync" /etc/kubernetes/*.kubeconfig $user@$i:/etc/kubernetes/;
done
Create the directories the cluster needs (on every node)
mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
kube-apiserver service configuration
cat > /usr/lib/systemd/system/kube-apiserver.service << \EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--v=2 \
--allow-privileged=true \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--advertise-address=192.168.77.81 \
--service-cluster-ip-range=10.96.0.0/16 \
--service-node-port-range=30000-32767 \
--etcd-servers=https://192.168.77.81:2379,https://192.168.77.82:2379,https://192.168.77.83:2379 \
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/pki/sa.pub \
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
--authorization-mode=Node,RBAC \
--enable-bootstrap-token-auth=true \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
--requestheader-allowed-names=aggregator \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-username-headers=X-Remote-User
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF
Start kube-apiserver and enable it at boot
systemctl daemon-reload
systemctl enable --now kube-apiserver.service
systemctl status kube-apiserver.service
kube-controller-manager service configuration
cat > /usr/lib/systemd/system/kube-controller-manager.service << \EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
--v=2 \
--root-ca-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
--service-account-private-key-file=/etc/kubernetes/pki/sa.key \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
--leader-elect=true \
--use-service-account-credentials=true \
--node-monitor-grace-period=40s \
--node-monitor-period=5s \
--controllers=*,bootstrapsigner,tokencleaner \
--allocate-node-cidrs=true \
--cluster-cidr=10.244.0.0/16 \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--node-cidr-mask-size=24
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
Start kube-controller-manager
systemctl daemon-reload
systemctl enable --now kube-controller-manager
systemctl status kube-controller-manager
kube-scheduler service configuration
cat > /usr/lib/systemd/system/kube-scheduler.service << \EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-scheduler \
--v=2 \
--leader-elect=true \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
Start kube-scheduler
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler
systemctl status kube-scheduler
Configure the admin kubeconfig
mkdir -p ~/.kube
cp /etc/kubernetes/admin.kubeconfig ~/.kube/config
Check the current cluster status
root@k8s-master01:~# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
etcd-0 Healthy
scheduler Healthy ok
etcd-2 Healthy
etcd-1 Healthy
root@k8s-master01:~# kubectl get --raw='/readyz?verbose'
[+]ping ok
[+]log ok
[+]etcd ok
[+]etcd-readiness ok
[+]informer-sync ok
[+]poststarthook/start-kube-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/rbac/bootstrap-roles ok
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-deprecated-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]shutdown ok
readyz check passed
Kubernetes node components
kube-proxy certificate
# Generate the certificate
cat > /opt/pki/k8s/kube-proxy-csr.json << EOF
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Guangdong",
"L": "Guangzhou",
"O": "system:kube-proxy",
"OU": "Kubernetes-manual"
}
]
}
EOF
cfssl gencert -ca=/opt/pki/k8s/ca.pem -ca-key=/opt/pki/k8s/ca-key.pem -config=/opt/pki/k8s/ca-config.json -profile=kubernetes /opt/pki/k8s/kube-proxy-csr.json | cfssljson -bare /opt/pki/k8s/kube-proxy
cp -r /opt/pki/k8s/kube-proxy*.pem /etc/kubernetes/pki/
kube-proxy kubeconfig
# Configure the cluster entry
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.77.100:8443 --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
# Set the user credentials
kubectl config set-credentials kube-proxy --client-certificate=/etc/kubernetes/pki/kube-proxy.pem --client-key=/etc/kubernetes/pki/kube-proxy-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
# Set the user context
kubectl config set-context kube-proxy@kubernetes --cluster=kubernetes --user=kube-proxy --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
# Use this context by default
kubectl config use-context kube-proxy@kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
TLS bootstrap user and permissions
Create the bootstrap objects on master01
Generate the bootstrap token
TOKEN_ID=$(openssl rand -hex 3)
TOKEN_SECRET=$(openssl rand -hex 8)
BOOTSTRAP_TOKEN=${TOKEN_ID}.${TOKEN_SECRET}
echo $BOOTSTRAP_TOKEN # e.g. 9afe5c.07be6c9e50a9dd5
TLS bootstrap kubeconfig, used so node certificates are issued automatically
# Configure the cluster entry
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.77.100:8443 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# Set the user credentials
kubectl config set-credentials tls-bootstrap-token-user --token=${BOOTSTRAP_TOKEN} --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# Set the user context
kubectl config set-context tls-bootstrap-token-user@kubernetes --cluster=kubernetes --user=tls-bootstrap-token-user --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# Use this context by default
kubectl config use-context tls-bootstrap-token-user@kubernetes --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
- Note: the token-id and token-secret in bootstrap-secret.yaml must match the token generated above (i.e. 9afe5c.07be6c9e50a9dd5 in this example); the manifest below uses the shell variables directly, so keep the same shell session.
mkdir -p /etc/kubernetes/yaml
cat > /etc/kubernetes/yaml/bootstrap-secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
name: bootstrap-token-${TOKEN_ID}
namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
description: "The default bootstrap token generated by 'kubelet '."
token-id: ${TOKEN_ID}
token-secret: ${TOKEN_SECRET}
usage-bootstrap-authentication: "true"
usage-bootstrap-signing: "true"
auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
EOF
cat > /etc/kubernetes/yaml/kubelet-bootstrap-rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubelet-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-autoapprove-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-autoapprove-certificate-rotation
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:kube-apiserver-to-kubelet
rules:
- apiGroups:
- ""
resources:
- nodes/proxy
- nodes/stats
- nodes/log
- nodes/spec
- nodes/metrics
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:kube-apiserver
namespace: ""
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-apiserver-to-kubelet
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kube-apiserver
EOF
kubectl create -f /etc/kubernetes/yaml/bootstrap-secret.yaml
kubectl create -f /etc/kubernetes/yaml/kubelet-bootstrap-rbac.yaml
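Confirm the bootstrap token secret and the RBAC bindings exist:
kubectl get secret -n kube-system | grep bootstrap-token
kubectl get clusterrolebinding kubelet-bootstrap node-autoapprove-bootstrap node-autoapprove-certificate-rotation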
kubelet service configuration
cat > /usr/lib/systemd/system/kubelet.service << \EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
mkdir -p /etc/systemd/system/kubelet.service.d
cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf << \EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--container-runtime-endpoint=unix:///run/containerd/containerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yaml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
EOF
clusterDNS is set to 10.96.0.10, the CoreDNS service IP planned earlier.
cat > /etc/kubernetes/kubelet-conf.yaml << \EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 2m0s
enabled: true
x509:
clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 5m0s
cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
imagefs.available: 15%
memory.available: 100Mi
nodefs.available: 10%
nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF
Distribute certificates and configuration
user=sunday
for i in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do
ssh $user@$i "sudo mkdir -p /etc/kubernetes /etc/systemd/system/kubelet.service.d";
rsync --rsync-path="sudo rsync" /opt/pki/k8s/{ca.pem,ca-key.pem,front-proxy-ca.pem,kube-proxy.pem,kube-proxy-key.pem} $user@$i:/etc/kubernetes/pki/;
rsync --rsync-path="sudo rsync" /etc/kubernetes/{kubelet.kubeconfig,kube-proxy.kubeconfig} $user@$i:/etc/kubernetes/;
rsync --rsync-path="sudo rsync" /etc/kubernetes/kubelet-conf.yaml $user@$i:/etc/kubernetes/kubelet-conf.yaml;
rsync --rsync-path="sudo rsync" /etc/systemd/system/kubelet.service.d/10-kubelet.conf $user@$i:/etc/systemd/system/kubelet.service.d/10-kubelet.conf;
rsync --rsync-path="sudo rsync" /usr/lib/systemd/system/kubelet.service $user@$i:/usr/lib/systemd/system/kubelet.service;
done
Start the service and enable it at boot
systemctl daemon-reload
systemctl enable --now kubelet
systemctl status kubelet
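Once kubelet is running on every node, TLS bootstrap should register the nodes automatically; check the CSRs and node list from master01 (nodes stay NotReady until the CNI plugin is installed later):
kubectl get csr
kubectl get nodes -o wide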
kube-proxy service configuration
# systemd unit file (all nodes)
cat > /usr/lib/systemd/system/kube-proxy.service << \EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-proxy \
--config=/etc/kubernetes/kube-proxy.yaml \
--v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
clusterCIDR: 10.244.0.0/16 is the cluster network range and must match the Pod CIDR.
cat > /etc/kubernetes/kube-proxy.yaml << \EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
acceptContentTypes: ""
burst: 10
contentType: application/vnd.kubernetes.protobuf
kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
qps: 5
clusterCIDR: 10.244.0.0/16
configSyncPeriod: 15m0s
conntrack:
maxPerCore: 32768
min: 131072
tcpCloseWaitTimeout: 1h0m0s
tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
masqueradeBit: 14
minSyncPeriod: 0s
syncPeriod: 30s
ipvs:
minSyncPeriod: 5s
scheduler: "rr"
syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
EOF
Distribute the configuration files
user=sunday
for i in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do
ssh $user@$i "sudo mkdir -p /etc/kubernetes";
rsync --rsync-path="sudo rsync" /etc/kubernetes/{bootstrap-kubelet.kubeconfig,kubelet-conf.yaml} $user@$i:/etc/kubernetes/;
rsync --rsync-path="sudo rsync" /etc/kubernetes/kube-proxy.yaml $user@$i:/etc/kubernetes/kube-proxy.yaml;
rsync --rsync-path="sudo rsync" /usr/lib/systemd/system/kube-proxy.service $user@$i:/usr/lib/systemd/system/kube-proxy.service;
done
Start the service and enable it at boot
systemctl daemon-reload
systemctl enable --now kube-proxy
systemctl status kube-proxy
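Verify that kube-proxy came up in IPVS mode (the metrics endpoint reports the active mode, and ipvsadm should list virtual servers for the Service IPs):
curl -s 127.0.0.1:10249/proxyMode   # expected output: ipvs
ipvsadm -Ln | head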
Taint the master nodes so that ordinary pods are not scheduled onto them:
kubectl taint nodes k8s-master01 k8s-master02 k8s-master03 node-role.kubernetes.io/control-plane:NoSchedule
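Confirm the taint is present:
kubectl describe nodes k8s-master01 k8s-master02 k8s-master03 | grep -i taints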
Install CoreDNS
git clone https://github.com/coredns/deployment.git
cd deployment/kubernetes
./deploy.sh -s -i 10.96.0.10 | kubectl apply -f -
[root@k8s-master01 kubernetes]# kubectl get po -n kube-system -l k8s-app=kube-dns
NAME READY STATUS RESTARTS AGE
coredns-6f4b4bd8fb-v26qh 1/1 Running 0 47s
Install the Calico network plugin
Calico is used as the network plugin, in the default IPIP mode (run on master01 only).
wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/calico.yaml
# The following settings in calico.yaml need to be modified
vim calico.yaml
- name: CALICO_IPV4POOL_CIDR
value: "10.244.0.0/16"
- name: IP_AUTODETECTION_METHOD
value: "interface=eth.*|ens.*"
kubectl apply -f calico.yaml
# Optional: only needed to clear stale CNI/iptables state left over from a previous attempt
rm -rf /etc/cni/net.d/*
ifconfig tunl0 down && ip link delete tunl0
modprobe -r ipip && modprobe ipip
iptables -v -t nat -F && iptables -v -t mangle -F && iptables -v -F && iptables -v -X
root@k8s-master01:~# kubectl get pod -n kube-system # wait for the pods to become Running (5-10 minutes)
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-6849cf9bcf-d6qjc 0/1 Pending 0 26s
calico-node-2gbw8 0/1 Init:0/3 0 26s
calico-node-9fcrk 0/1 Init:0/3 0 26s
calico-node-br4tp 0/1 Init:0/3 0 26s
calico-node-kd78d 0/1 Init:0/3 0 26s
calico-node-lr5wj 0/1 Init:0/3 0 26s
Install CoreDNS (alternative: apply the full manifest manually instead of the deploy.sh method above)
CoreDNS provides DNS resolution inside the cluster.
cat > coredns.yaml << \EOF
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
app.kubernetes.io/name: coredns
spec:
# replicas: not specified here:
# 1. Default is 1.
# 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
app.kubernetes.io/name: coredns
template:
metadata:
labels:
k8s-app: kube-dns
app.kubernetes.io/name: coredns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
kubernetes.io/os: linux
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.9.4
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
app.kubernetes.io/name: coredns
spec:
selector:
k8s-app: kube-dns
app.kubernetes.io/name: coredns
# must be the CoreDNS cluster IP planned above
clusterIP: 10.96.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
EOF
# Apply the manifest
kubectl apply -f coredns.yaml
Install metrics-server
metrics-server collects in-cluster resource metrics; once it is installed, commands such as kubectl top node show CPU and memory usage.
cat > metrics-server.yaml<<\EOF
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- nodes/metrics
verbs:
- get
- apiGroups:
- ""
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: metrics-server
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: metrics-server
namespaces:
- kube-system
topologyKey: kubernetes.io/hostname
containers:
- args:
- --cert-dir=/tmp
- --kubelet-insecure-tls
- --secure-port=4443
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem # 增加证书
- --requestheader-username-headers=X-Remote-User # 增加头信息
- --requestheader-group-headers=X-Remote-Group
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
image: registry.aliyuncs.com/google_containers/metrics-server:v0.6.4
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
# mount the certificates
- mountPath: /etc/kubernetes/pki
name: ca-ssl
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
- name: ca-ssl
hostPath:
path: /etc/kubernetes/pki
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: metrics-server
namespace: kube-system
spec:
minAvailable: 1
selector:
matchLabels:
k8s-app: metrics-server
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
EOF
kubectl apply -f metrics-server.yaml
root@k8s-master01:~# kubectl top nodes
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
k8s-master01 224m 11% 2761Mi 73%
k8s-master02 255m 12% 2271Mi 60%
k8s-master03 201m 10% 2153Mi 57%
k8s-node01 83m 4% 1799Mi 47%
k8s-node02 138m 6% 1787Mi 47%
Install the dashboard
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
# Modify the following section of recommended.yaml
....
32 kind: Service
33 apiVersion: v1
34 metadata:
35 labels:
36 k8s-app: kubernetes-dashboard
37 name: kubernetes-dashboard
38 namespace: kubernetes-dashboard
39 spec:
40 type: NodePort # change the Service type
41 ports:
42 - port: 443
43 targetPort: 8443
44 nodePort: 30443 # add a NodePort
45 selector:
46 k8s-app: kubernetes-dashboard
....
# Apply the manifest
kubectl apply -f recommended.yaml
Create a dashboard login user
cat > dashboard_user.yaml <<\EOF
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
EOF
kubectl apply -f dashboard_user.yaml
# Generate a token for logging in to the dashboard
kubectl create token admin-user -n kubernetes-dashboard
eyJhbGciOiJSUzI1NiIsImtpZCI6IkNnb050YnBvM1R0a1A4eV9QYnpEcXhM
# Dashboard URL: https://{NodeIP}:30443
Cluster verification
[root@k8s-master01 ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
etcd-2 Healthy {"health":"true","reason":""}
controller-manager Healthy ok
etcd-1 Healthy {"health":"true","reason":""}
etcd-0 Healthy {"health":"true","reason":""}
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01   Ready   <none>   19h   v1.27.11
k8s-master02   Ready   <none>   17h   v1.27.11
k8s-master03   Ready   <none>   17h   v1.27.11
k8s-node01     Ready   <none>   17h   v1.27.11
k8s-node02     Ready   <none>   17h   v1.27.11
Test DNS with busybox (busybox:1.28 is used because its nslookup output is reliable for this test)
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
CoreDNS test
[root@k8s-master01 ~]# kubectl exec -it busybox -- nslookup kubernetes
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
[root@k8s-master01 ~]# kubectl exec -it busybox -- ping -c2 www.baidu.com
PING www.baidu.com (14.215.177.38): 56 data bytes
64 bytes from 14.215.177.38: seq=0 ttl=49 time=9.445 ms
64 bytes from 14.215.177.38: seq=1 ttl=49 time=9.524 ms
Test that the nginx Service and pod-to-pod networking work
cat > nginx_deploy.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: nginx:alpine
name: nginx
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx
spec:
selector:
app: nginx
type: NodePort
ports:
- protocol: TCP
port: 80
targetPort: 80
nodePort: 30001
EOF
kubectl apply -f nginx_deploy.yaml
[root@k8s-master01 ~]# kubectl get svc,pod -owide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/kubernetes   ClusterIP   10.96.0.1        <none>   443/TCP        45h   <none>
service/nginx        NodePort    10.102.100.128   <none>   80:30001/TCP   16m   app=nginx
NAME                         READY   STATUS    RESTARTS   AGE   IP             NODE           NOMINATED NODE   READINESS GATES
pod/busybox                  1/1     Running   0          24m   172.18.195.3   k8s-master03   <none>           <none>
pod/nginx-6fb79bc456-s84wf   1/1     Running   0          16m   172.25.92.66   k8s-master02   <none>           <none>
service_ip=10.102.100.128
pod_ip=172.25.92.66
for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02; do
echo $i
ssh sunday@$i curl -s $service_ip | grep "using nginx" # nginx svc ip
ssh sunday@$i curl -s $pod_ip | grep "using nginx" # pod ip
done
Access the NodePort through the VIP
[root@k8s-master01 ~]# curl -I 192.168.77.100:30001
HTTP/1.1 200 OK
Server: nginx/1.23.2
Date: Thu, 20 Oct 2022 12:22:07 GMT
Content-Type: text/html
Content-Length: 615
Last-Modified: Wed, 19 Oct 2022 10:28:53 GMT
Connection: keep-alive
ETag: "634fd165-267"
Accept-Ranges: bytes
Install shell command completion
apt install bash-completion -y
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc