
Building a K8s Cluster on ubuntu-24.04-server-live (Complete Edition)


Check for and apply system updates:

sudo apt-get update && sudo apt-get upgrade

Set the hostname (run the matching command on each node)

sudo hostnamectl hostname master
sudo hostnamectl hostname node-a
sudo hostnamectl hostname node-b

Install NFS utilities

sudo apt-get install nfs4-acl-tools nfs-common -y

qemu-guest-agent (needed on VMs; skip on physical machines)

sudo apt-get install qemu-guest-agent -y

# enable at boot, start, and check status

sudo systemctl enable qemu-guest-agent && sudo systemctl start qemu-guest-agent && sudo systemctl status qemu-guest-agent

Edit the hosts file

sudo vim /etc/hosts


10.10.10.120    master  MASTER
10.10.10.122    node-a NODE-A
10.10.10.124    node-b NODE-B
10.10.10.130    gitlab  GITLAB

Disable the firewall

sudo systemctl stop ufw.service
sudo systemctl disable ufw.service

Set permissive iptables policies

sudo iptables -P INPUT ACCEPT
sudo iptables -P FORWARD ACCEPT
sudo iptables -L -n

Disable the swap partition

sudo swapoff -a
sudo vim /etc/fstab   # comment out any swap line

cat /etc/fstab 
# /etc/fstab: static file system information.
#
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
# <file system> <mount point>   <type>  <options>   <dump>  <pass>
# /dev/disk/by-uuid/b37ae6c4-bf8d-4f7c-8500-9a8f108b8c5f none swap sw 0 0
# / was on /dev/sda4 during curtin installation
/dev/disk/by-uuid/8db6546c-34c9-4077-9f6c-ed47e1d5769b / ext4 defaults 0 1
# /boot was on /dev/sda2 during curtin installation
/dev/disk/by-uuid/155150cb-0911-4ec5-9870-d34c175a5a8f /boot ext4 defaults 0 1
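
As an alternative to hand-editing, a one-liner sketch that comments out any uncommented swap entry (assumes GNU sed):

sudo sed -ri 's/^([^#].*\sswap\s)/#\1/' /etc/fstab
free -h   # the Swap row should show 0B once swapoff -a has run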

File-descriptor limits

ulimit -SHn 65535


sudo vim /etc/security/limits.conf

* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited

ulimit -a

Install the time-sync service

sudo apt-get install chrony

Back up the original config

sudo mv /etc/chrony/chrony.conf /etc/chrony/chrony.conf.bak

Write a new config file

sudo vim /etc/chrony/chrony.conf


server ntp1.aliyun.com iburst
server ntp.aliyun.com iburst
stratumweight 0
driftfile /var/lib/chrony/drift
rtcsync
makestep 10 3
bindcmdaddress 127.0.0.1
bindcmdaddress ::1
keyfile /etc/chrony/chrony.keys
logchange 0.5
logdir /var/log/chrony

Configure the time zone

sudo ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime

chronyc sources -v
Start the chronyd service

# restart the time-sync service and enable it at boot
sudo systemctl restart chronyd && sudo systemctl enable chronyd

Check the time
timedatectl

GRUB kernel parameters (disable NUMA)

sudo vim /etc/default/grub

Edit the line: GRUB_CMDLINE_LINUX="numa=off"
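
On Ubuntu the change takes effect only after regenerating the GRUB config and rebooting:

sudo update-grub
sudo reboot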

Kernel tuning (sysctl)

sudo vim /etc/sysctl.d/k8s_better.conf

net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720

sudo modprobe br_netfilter
sudo modprobe nf_conntrack
sudo lsmod | grep conntrack
sudo sysctl -p /etc/sysctl.d/k8s_better.conf

Passwordless SSH login

ssh-keygen -t rsa   # press Enter three times to generate the key pair

ssh-copy-id xxxxxxxxx
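
For example, to push the key to all three hosts from the /etc/hosts list above (a sketch; adjust the hostnames to your environment):

for h in master node-a node-b; do
    ssh-copy-id "$h"
done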

Install system dependencies:

sudo apt-get install -y conntrack ipvsadm ipset jq iptables curl sysstat wget net-tools git

Enable IPVS forwarding

sudo modprobe br_netfilter
sudo mkdir -p /etc/sysconfig/modules/

sudo vim /etc/sysconfig/modules/ipvs.modules


#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack

sudo chmod 755 /etc/sysconfig/modules/ipvs.modules
sudo bash /etc/sysconfig/modules/ipvs.modules
sudo lsmod | grep -e ip_vs -e nf_conntrack
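
Note that /etc/sysconfig/modules is a RHEL convention and is not executed automatically on Ubuntu; a sketch of the systemd-native way to load these modules at boot:

cat <<EOF | sudo tee /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
br_netfilter
EOF
sudo systemctl restart systemd-modules-load.service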

Install Helm

Official site: https://helm.sh

Download

wget https://get.helm.sh/helm-v3.16.3-linux-amd64.tar.gz
tar -zxvf helm-v3.16.3-linux-amd64.tar.gz
cd linux-amd64/
sudo cp helm /usr/local/bin/
helm version
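
A quick smoke test, assuming outbound network access (the bitnami repository is only an example):

helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm search repo bitnami/nginx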

ETCD

Install etcd

Download etcd-v3.5.17-linux-amd64, move the bundled etcd, etcdctl, and etcdutl binaries into /usr/local/bin/, and create the etcd data directory.

Create the etcd user and set permissions

sudo groupadd -r etcd
sudo useradd -r -g etcd -s /sbin/nologin etcd
sudo mkdir -p /var/lib/etcd
sudo chown -R etcd:etcd /var/lib/etcd

Configure the systemd service

sudo vim /etc/systemd/system/etcd.service

Configuration on master (10.10.10.120):

[Unit]
Description=etcd key-value store
Documentation=https://etcd.io
After=network.target

[Service]
User=etcd
ExecStart=/usr/local/bin/etcd \
--name master \
--data-dir /var/lib/etcd \
--listen-client-urls https://10.10.10.120:2379 \
--advertise-client-urls https://10.10.10.120:2379 \
--listen-peer-urls https://10.10.10.120:2380 \
--initial-advertise-peer-urls https://10.10.10.120:2380 \
--initial-cluster master=https://10.10.10.120:2380,node-a=https://10.10.10.122:2380,node-b=https://10.10.10.124:2380 \
--initial-cluster-token my-etcd-cluster \
--initial-cluster-state new \
--client-cert-auth \
--trusted-ca-file=/etc/kubernetes/pki/etcd/etcd-ca.pem \
--cert-file=/etc/kubernetes/pki/etcd/etcd-server.pem \
--key-file=/etc/kubernetes/pki/etcd/etcd-server-key.pem \
--peer-client-cert-auth \
--peer-trusted-ca-file=/etc/kubernetes/pki/etcd/etcd-ca.pem \
--peer-cert-file=/etc/kubernetes/pki/etcd/etcd-server.pem \
--peer-key-file=/etc/kubernetes/pki/etcd/etcd-server-key.pem
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target

Configuration on node-a (10.10.10.122):

[Unit]
Description=etcd key-value store
Documentation=https://etcd.io
After=network.target

[Service]
User=etcd
ExecStart=/usr/local/bin/etcd \
--name node-a \
--data-dir /var/lib/etcd \
--listen-client-urls https://10.10.10.122:2379 \
--advertise-client-urls https://10.10.10.122:2379 \
--listen-peer-urls https://10.10.10.122:2380 \
--initial-advertise-peer-urls https://10.10.10.122:2380 \
--initial-cluster master=https://10.10.10.120:2380,node-a=https://10.10.10.122:2380,node-b=https://10.10.10.124:2380 \
--initial-cluster-token my-etcd-cluster \
--initial-cluster-state new \
--client-cert-auth \
--trusted-ca-file=/etc/kubernetes/pki/etcd/etcd-ca.pem \
--cert-file=/etc/kubernetes/pki/etcd/etcd-server.pem \
--key-file=/etc/kubernetes/pki/etcd/etcd-server-key.pem \
--peer-client-cert-auth \
--peer-trusted-ca-file=/etc/kubernetes/pki/etcd/etcd-ca.pem \
--peer-cert-file=/etc/kubernetes/pki/etcd/etcd-server.pem \
--peer-key-file=/etc/kubernetes/pki/etcd/etcd-server-key.pem
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target

Configuration on node-b (10.10.10.124):

[Unit]
Description=etcd key-value store
Documentation=https://etcd.io
After=network.target

[Service]
User=etcd
ExecStart=/usr/local/bin/etcd \
--name node-b \
--data-dir /var/lib/etcd \
--listen-client-urls https://10.10.10.124:2379 \
--advertise-client-urls https://10.10.10.124:2379 \
--listen-peer-urls https://10.10.10.124:2380 \
--initial-advertise-peer-urls https://10.10.10.124:2380 \
--initial-cluster master=https://10.10.10.120:2380,node-a=https://10.10.10.122:2380,node-b=https://10.10.10.124:2380 \
--initial-cluster-token my-etcd-cluster \
--initial-cluster-state new \
--client-cert-auth \
--trusted-ca-file=/etc/kubernetes/pki/etcd/etcd-ca.pem \
--cert-file=/etc/kubernetes/pki/etcd/etcd-server.pem \
--key-file=/etc/kubernetes/pki/etcd/etcd-server-key.pem \
--peer-client-cert-auth \
--peer-trusted-ca-file=/etc/kubernetes/pki/etcd/etcd-ca.pem \
--peer-cert-file=/etc/kubernetes/pki/etcd/etcd-server.pem \
--peer-key-file=/etc/kubernetes/pki/etcd/etcd-server-key.pem
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target

Start the etcd service. On every server, run:

sudo systemctl daemon-reload
sudo systemctl enable etcd
sudo systemctl start etcd
sudo systemctl status etcd
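
Once the certificates from the next section are in place, cluster membership can be checked with etcdctl (a sketch; the flags mirror the health check used later):

sudo ETCDCTL_API=3 etcdctl \
--endpoints=https://10.10.10.120:2379 \
--cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem \
--cert=/etc/kubernetes/pki/etcd/etcd-server.pem \
--key=/etc/kubernetes/pki/etcd/etcd-server-key.pem \
--write-out=table member list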

Self-signed certificates

curl -L -o cfssl https://github.com/cloudflare/cfssl/releases/download/v1.6.5/cfssl_1.6.5_linux_amd64
curl -L -o cfssljson https://github.com/cloudflare/cfssl/releases/download/v1.6.5/cfssljson_1.6.5_linux_amd64
sudo mv cfssl /usr/local/bin/cfssl
sudo mv cfssljson /usr/local/bin/cfssljson
sudo chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson

Create the CA config file ca-config.json:

{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "etcd": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "87600h"
      }
    }
  }
}

Create the CA certificate request file etcd-ca-csr.json:

{
  "CN": "etcd-ca",
  "hosts": [
    "10.10.10.120",
    "10.10.10.122",
    "10.10.10.124"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "CA"
    }
  ]
}

Create the etcd server certificate request file etcd-server-csr.json:

{
  "CN": "etcd-server",
  "hosts": [
    "10.10.10.120",
    "10.10.10.122",
    "10.10.10.124"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "CA"
    }
  ]
}

Generate the certificates:

# Outputs: ca.pem (CA certificate), ca-key.pem (CA private key),
# etcd-server.pem (server certificate), etcd-server-key.pem (server private key)

cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare ca

cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=etcd \
etcd-server-csr.json | cfssljson -bare etcd-server

File names and paths

sudo mkdir -p /etc/kubernetes/pki/etcd
sudo mv ca.pem /etc/kubernetes/pki/etcd/etcd-ca.pem
sudo mv etcd-server.pem /etc/kubernetes/pki/etcd/etcd-server.pem
sudo mv etcd-server-key.pem /etc/kubernetes/pki/etcd/etcd-server-key.pem

Set permissions

sudo chmod 600 /etc/kubernetes/pki/etcd/etcd-server-key.pem
sudo chmod 644 /etc/kubernetes/pki/etcd/etcd-ca.pem /etc/kubernetes/pki/etcd/etcd-server.pem
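
Optionally, confirm all three node IPs appear in the certificate's Subject Alternative Name field:

openssl x509 -in /etc/kubernetes/pki/etcd/etcd-server.pem -noout -text | grep -A1 'Subject Alternative Name'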

Verify that the certificates work

sudo ETCDCTL_API=3 etcdctl \
--endpoints=https://10.10.10.120:2379,https://10.10.10.122:2379,https://10.10.10.124:2379 \
--cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem \
--cert=/etc/kubernetes/pki/etcd/etcd-server.pem \
--key=/etc/kubernetes/pki/etcd/etcd-server-key.pem \
--write-out=table endpoint health

Expected output:

+---------------------------+--------+------------+-------+
|         ENDPOINT          | HEALTH |    TOOK    | ERROR |
+---------------------------+--------+------------+-------+
| https://10.10.10.120:2379 |   true |  4.72759ms |       |
| https://10.10.10.122:2379 |   true | 6.575434ms |       |
| https://10.10.10.124:2379 |   true | 7.625506ms |       |
+---------------------------+--------+------------+-------+

Install Docker CE

Remove any old Docker packages

sudo apt-get remove docker docker-engine docker.io
sudo apt-get install apt-transport-https ca-certificates curl gnupg2 software-properties-common

Download the repository signing key

sudo curl -fsSL https://mirrors.huaweicloud.com/docker-ce/linux/ubuntu/gpg -o /etc/apt/trusted.gpg.d/docker-ce-keyring.asc

Add the package repository

sudo add-apt-repository "deb [signed-by=/etc/apt/trusted.gpg.d/docker-ce-keyring.asc] https://mirrors.huaweicloud.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"

Rename the source list

sudo mv /etc/apt/sources.list.d/archive_uri-https_mirrors_huaweicloud_com_docker-ce_linux_ubuntu-noble.list  /etc/apt/sources.list.d/docker-ce.list

Update the package index

sudo apt-get update

Install

sudo apt-get install docker-ce

Configure registry mirrors

sudo vim /etc/docker/daemon.json

{
  "registry-mirrors": [
    "https://hbr.hckz.top/docker",
    "https://hbr.hckz.top/docker-io",
    "https://hbr.hckz.top/github",
    "https://docker.1ms.run",
    "https://registry.dockermirror.com",
    "https://docker.m.daocloud.io",
    "https://docker.kubesre.xyz"
  ],
  "insecure-registries": [
    "https://hbr.hckz.top"
  ],
  "experimental": false,
  "live-restore": true,
  "ipv6": false
}

Restart Docker, enable it at boot, and check its status

sudo systemctl restart docker.service && sudo systemctl enable docker.service && sudo systemctl status docker.service
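
To confirm the mirrors were picked up:

sudo docker info | grep -A8 'Registry Mirrors'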

Install cri-dockerd_0.3.15.3-0.ubuntu-bionic_amd64.deb

Download it from GitHub first.

sudo dpkg -i cri-dockerd_0.3.15.3-0.ubuntu-bionic_amd64.deb

Configure cri-dockerd

sudo vim /usr/lib/systemd/system/cri-docker.service
# append to the ExecStart line:
    --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.10

Reload, restart, and enable at boot

sudo systemctl daemon-reload && \
sudo systemctl restart cri-docker.service && \
sudo systemctl enable cri-docker.service && \
sudo systemctl status cri-docker.service

Add the Kubernetes repository

sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates curl gpg

Download the signing key:

sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

Add the repository (upstream, or the Aliyun mirror of it; pick one):

echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.31/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list

echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.31/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list

Update the index: sudo apt-get update

Install kubelet, kubeadm, and kubectl (version 1.31.3-1.1)

sudo apt list -a kubelet   # list available versions
sudo apt-get install -y kubelet=1.31.3-1.1 kubeadm=1.31.3-1.1 kubectl=1.31.3-1.1
sudo systemctl enable kubelet.service

Pin the versions

sudo apt-mark hold kubelet kubeadm kubectl

Unpin the versions

sudo apt-mark unhold kubelet kubeadm kubectl

K8s initialization: embedded (stacked) etcd

sudo kubeadm init \
--apiserver-advertise-address=10.10.10.120 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.31.3 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16 \
--cri-socket=unix:///var/run/cri-dockerd.sock

K8s initialization: external etcd

sudo vim kubeadm-config.yaml

apiVersion: kubeadm.k8s.io/v1beta4  # API version v1beta4
kind: InitConfiguration             # initialization configuration object
bootstrapTokens:                    # bootstrap token settings
- groups:
  - system:bootstrappers:kubeadm:default-node-token  # group the token belongs to
  token: abcdef.0123456789abcdef    # bootstrap token string
  ttl: 24h0m0s                      # token lifetime: 24 hours
  usages:
  - signing                         # token usage: signing
  - authentication                  # token usage: authentication
localAPIEndpoint:                   # local API server endpoint
  advertiseAddress: 10.10.10.120    # address the API server advertises; matches the master IP
  bindPort: 6443                    # port the Kubernetes API server listens on
nodeRegistration:                   # node registration settings
  criSocket: unix:///var/run/cri-dockerd.sock  # CRI socket path; uses cri-dockerd
  imagePullPolicy: IfNotPresent     # do not pull images that already exist locally
  imagePullSerial: true             # pull images serially
  name: master                      # node name
  taints: null                      # no taints added
timeouts:                           # timeout settings
  controlPlaneComponentHealthCheck: 4m0s  # control-plane component health-check timeout
  discovery: 5m0s                   # node discovery timeout
  etcdAPICall: 2m0s                 # etcd API call timeout
  kubeletHealthCheck: 4m0s          # kubelet health-check timeout
  kubernetesAPICall: 1m0s           # Kubernetes API call timeout
  tlsBootstrap: 5m0s                # TLS bootstrap timeout
  upgradeManifests: 5m0s            # upgrade manifests timeout
---
apiVersion: kubeadm.k8s.io/v1beta4  # API version v1beta4
kind: ClusterConfiguration          # cluster configuration object
apiServer: {}                       # empty API server configuration
caCertificateValidityPeriod: 87600h0m0s  # CA certificate valid for 10 years (87600 hours)
certificateValidityPeriod: 87600h0m0s    # leaf certificates valid for 10 years (87600 hours)
certificatesDir: /etc/kubernetes/pki     # directory for storing certificates
clusterName: kubernetes             # cluster name
controllerManager: {}               # empty controller-manager configuration
dns: {}                             # empty DNS configuration
encryptionAlgorithm: RSA-2048       # encryption algorithm
etcd:                               # etcd cluster settings
  external:                         # use an external etcd cluster
    endpoints:
    - https://10.10.10.120:2379     # etcd on the master node
    - https://10.10.10.122:2379     # etcd on node-a
    - https://10.10.10.124:2379     # etcd on node-b
    caFile: /etc/kubernetes/pki/etcd/etcd-ca.pem          # etcd CA file
    certFile: /etc/kubernetes/pki/etcd/etcd-server.pem    # etcd certificate file
    keyFile: /etc/kubernetes/pki/etcd/etcd-server-key.pem # etcd key file
imageRepository: registry.aliyuncs.com/google_containers  # image repository
kubernetesVersion: v1.31.3          # Kubernetes version
networking:                         # networking settings
  dnsDomain: cluster.local          # cluster DNS domain
  serviceSubnet: 10.96.0.0/12       # service network CIDR
  podSubnet: 10.244.0.0/16          # pod network CIDR
proxy: {}                           # empty proxy settings
scheduler: {}                       # empty scheduler settings

sudo kubeadm init --config kubeadm-config.yaml

You can pre-pull the images beforehand with kubeadm config images pull; note that when using Docker via cri-dockerd, the CRI socket must be passed explicitly, as in the sketch below.
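
A sketch of the pre-pull, matching the settings used above:

sudo kubeadm config images pull \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.31.3 \
--cri-socket unix:///var/run/cri-dockerd.sock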

Set up the kubeconfig directory

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Join the nodes to the master:

sudo kubeadm join 10.10.10.120:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:a5e00000000000000000000000000000000000071011bff31fc2 \
    --cri-socket=unix:///var/run/cri-dockerd.sock
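
If the token has expired or the hash was lost, print a fresh join command on the master (remember to append the --cri-socket flag when running it on a node):

sudo kubeadm token create --print-join-command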

Install the network plugin (Calico)

curl -L -o tigera-operator.yaml https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/tigera-operator.yaml
kubectl create -f tigera-operator.yaml 
curl -L -o custom-resources.yaml https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/custom-resources.yaml

kubectl create -f custom-resources.yaml
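
Watch the rollout until all Calico pods are Running (namespace names per the Tigera operator defaults):

kubectl get pods -n tigera-operator
kubectl get pods -n calico-system -w
kubectl get nodes   # nodes become Ready once the CNI is up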

If Docker Hub is reachable, pre-pull the Calico images:

sudo docker pull calico/node:v3.29.1
sudo docker pull calico/typha:v3.29.1
sudo docker pull calico/cni:v3.29.1
sudo docker pull calico/kube-controllers:v3.29.1
sudo docker pull calico/pod2daemon-flexvol:v3.29.1
sudo docker pull calico/node-driver-registrar:v3.29.1
sudo docker pull calico/apiserver:v3.29.1
sudo docker pull calico/csi:v3.29.1

If Docker Hub is unreachable, pull the Calico images through a mirror on another machine:

sudo docker pull hbr.hckz.top/docker-io/calico/node:v3.29.1
sudo docker pull hbr.hckz.top/docker-io/calico/typha:v3.29.1
sudo docker pull hbr.hckz.top/docker-io/calico/cni:v3.29.1
sudo docker pull hbr.hckz.top/docker-io/calico/kube-controllers:v3.29.1
sudo docker pull hbr.hckz.top/docker-io/calico/pod2daemon-flexvol:v3.29.1
sudo docker pull hbr.hckz.top/docker-io/calico/node-driver-registrar:v3.29.1
sudo docker pull hbr.hckz.top/docker-io/calico/apiserver:v3.29.1
sudo docker pull hbr.hckz.top/docker-io/calico/csi:v3.29.1

Save the images to a file (if they were pulled through the mirror, docker tag them back to their calico/* names first)

docker save -o images.tar calico/csi:v3.29.1 calico/apiserver:v3.29.1 calico/node-driver-registrar:v3.29.1 calico/pod2daemon-flexvol:v3.29.1 calico/kube-controllers:v3.29.1 calico/cni:v3.29.1 calico/typha:v3.29.1 calico/node:v3.29.1

Copy the file to the K8s master host

scp ./images.tar 10.10.10.x:~

On the K8s host, run

sudo docker load -i images.tar

Distribute the K8s config to the worker nodes

scp -r ./.kube/ 10.10.10.122:~/
scp -r ./.kube/ 10.10.10.124:~/

Run on node-a and node-b

If everything works, kubectl can now be used on the nodes to inspect the cluster, as shown after the commands below.

sudo chown $(id -u):$(id -g) ~/.kube/config
chmod 600 ~/.kube/config
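
A quick check from either node:

kubectl get nodes -o wide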