Deploying a Kubernetes 1.28 Cluster with the containerd Container Runtime
1. Host Preparation
1.1 Host Operating System
No. | OS and Version | Notes |
---|---|---|
1 | CentOS 7u9 | |
1.2 Host Hardware Configuration
CPU | Memory | Disk | Role | Hostname |
---|---|---|---|---|
8C | 8GB | 1024GB | master | k8s-master01 |
8C | 16GB | 1024GB | worker (node) | k8s-worker01 |
8C | 16GB | 1024GB | worker (node) | k8s-worker02 |
1.3 Host Configuration
1.3.1 Hostname Configuration
This deployment uses three hosts: one master node, named k8s-master01, and two worker nodes, named k8s-worker01 and k8s-worker02.
Master node:
# hostnamectl set-hostname k8s-master01
worker01 node:
# hostnamectl set-hostname k8s-worker01
worker02 node:
# hostnamectl set-hostname k8s-worker02
1.3.2 Host IP Address Configuration
The k8s-master01 node IP address is 192.168.10.140/24.
# vim /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="none"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
DEVICE="ens33"
ONBOOT="yes"
IPADDR="192.168.10.140"
PREFIX="24"
GATEWAY="192.168.10.2"
DNS1="119.29.29.29"
The k8s-worker01 node IP address is 192.168.10.141/24.
# vim /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="none"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
DEVICE="ens33"
ONBOOT="yes"
IPADDR="192.168.10.141"
PREFIX="24"
GATEWAY="192.168.10.2"
DNS1="119.29.29.29"
The k8s-worker02 node IP address is 192.168.10.142/24.
# vim /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="none"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
DEVICE="ens33"
ONBOOT="yes"
IPADDR="192.168.10.142"
PREFIX="24"
GATEWAY="192.168.10.2"
DNS1="119.29.29.29"
1.3.3 Hostname and IP Address Resolution
All cluster hosts must be configured.
# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.10.140 k8s-master01
192.168.10.141 k8s-worker01
192.168.10.142 k8s-worker02
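To avoid editing each host by hand, the three entries can be appended in one step (a sketch; skip any entry that already exists in /etc/hosts):
# cat >> /etc/hosts << EOF
192.168.10.140 k8s-master01
192.168.10.141 k8s-worker01
192.168.10.142 k8s-worker02
EOF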
1.3.4 Firewall Configuration
Perform this on all hosts.
Disable the existing firewalld firewall:
# systemctl disable firewalld
# systemctl stop firewalld
# firewall-cmd --state
not running
1.3.5 SELinux Configuration
Perform this on all hosts. Changing the SELinux configuration requires rebooting the operating system.
# sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# sestatus
1.3.6 Time Synchronization Configuration
Perform this on all hosts. On a minimal installation, the ntpdate package must be installed first.
# crontab -l
0 */1 * * * /usr/sbin/ntpdate time1.aliyun.com
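A minimal sketch of installing ntpdate and performing a first manual sync before relying on the cron job (time1.aliyun.com is the server already used above):
# yum -y install ntpdate
# ntpdate time1.aliyun.com
# crontab -e    # add the entry shown above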
1.3.7 Upgrade the Operating System Kernel
Perform this on all hosts.
Import the ELRepo GPG key:
# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
Install the ELRepo YUM repository:
# yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
Install the kernel-lt package (ml is the latest mainline release, lt is the long-term maintenance release):
# yum --enablerepo="elrepo-kernel" -y install kernel-lt.x86_64
Set the default GRUB2 boot entry to 0:
# grub2-set-default 0
Regenerate the GRUB2 configuration file:
# grub2-mkconfig -o /boot/grub2/grub.cfg
After the update, reboot so the upgraded kernel takes effect.
# reboot
After rebooting, verify that the running kernel is the upgraded version:
# uname -r
1.3.8 Configure Kernel Forwarding and Bridge Filtering
Perform this on all hosts.
Add the bridge filtering and kernel forwarding configuration file:
# cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
Load the br_netfilter module:
# modprobe br_netfilter
Check whether it is loaded:
# lsmod | grep br_netfilter
br_netfilter 22256 0
bridge 151336 1 br_netfilter
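The settings in /etc/sysctl.d/k8s.conf only take effect once they are loaded. A sketch of applying them immediately and making the br_netfilter module persistent across reboots (the modules-load.d file name is an arbitrary choice):
# cat > /etc/modules-load.d/br_netfilter.conf << EOF
br_netfilter
EOF
# sysctl -p /etc/sysctl.d/k8s.conf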
1.3.9 Install ipset and ipvsadm
Perform this on all hosts.
Install ipset and ipvsadm:
# yum -y install ipset ipvsadm
Configure how the IPVS modules are loaded.
Add the modules that need to be loaded:
# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
Set permissions, run the script, and check whether the modules are loaded:
# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
1.3.10 Disable the Swap Partition
After making the change, reboot the operating system; if a reboot is not possible right away, swap can be disabled temporarily with swapoff -a.
Permanently disabling the swap partition requires rebooting the operating system.
# cat /etc/fstab
......
# /dev/mapper/centos-swap swap swap defaults 0 0
Comment out the swap line by adding a # at the beginning, as shown above.
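An equivalent non-interactive way to turn swap off right away, comment out the swap line, and verify (a sketch):
# swapoff -a
# sed -ri '/^[^#].*\sswap\s/s/^/#/' /etc/fstab
# free -m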
2. Container Runtime: Preparing Containerd
2.1 Containerd Preparation
2.1.1 Obtain the Containerd Distribution Archive
# wget https://github.com/containerd/containerd/releases/download/v1.7.3/cri-containerd-1.7.3-linux-amd64.tar.gz
# tar xf cri-containerd-1.7.3-linux-amd64.tar.gz -C /
2.1.2 Generate and Modify the Containerd Configuration File
# mkdir /etc/containerd
# containerd config default > /etc/containerd/config.toml
# vim /etc/containerd/config.toml
sandbox_image = "registry.k8s.io/pause:3.9"    # changed from 3.8 to 3.9
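If a non-interactive edit is preferred, the same change can be made with sed (a sketch, assuming the generated default still references pause:3.8):
# sed -i 's#registry.k8s.io/pause:3.8#registry.k8s.io/pause:3.9#' /etc/containerd/config.toml
# grep sandbox_image /etc/containerd/config.toml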
2.1.3 Start Containerd and Enable It at Boot
# systemctl enable --now containerd
Verify the version:
# containerd --version
2.2 runc Preparation
2.2.1 libseccomp Preparation
# wget https://github.com/opencontainers/runc/releases/download/v1.1.5/libseccomp-2.5.4.tar.gz
# tar xf libseccomp-2.5.4.tar.gz
# cd libseccomp-2.5.4/
# yum install gperf -y
# ./configure
# make && make install
# find / -name "libseccomp.so"
2.2.2 Install runc
# wget https://github.com/opencontainers/runc/releases/download/v1.1.9/runc.amd64
# chmod +x runc.amd64
Locate the runc binary that was installed with containerd, then replace it:
# which runc
Replace the runc installed by containerd:
# mv runc.amd64 /usr/local/sbin/runc
Run the runc command; if the help output is shown, it is working:
# runc
If running runc reports "runc: error while loading shared libraries: libseccomp.so.2: cannot open shared object file: No such file or directory", runc cannot find libseccomp; check that libseccomp is installed. With the installation above it is found by default.
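If the error does appear, a common cause is that libseccomp was installed under the default /usr/local prefix, which may not be on the linker search path. A sketch of a fix under that assumption:
# echo "/usr/local/lib" > /etc/ld.so.conf.d/local.conf
# ldconfig
# runc --version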
3. Kubernetes Cluster Deployment
3.1 Prepare the Kubernetes YUM Repository
3.1.1 YUM Repository Provided by Google
# cat > /etc/yum.repos.d/k8s.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
       https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
3.1.2 YUM Repository Provided by Alibaba Cloud
# cat > /etc/yum.repos.d/k8s.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
3.2 Install the Kubernetes Cluster Software
3.2.1 Install the Cluster Packages
Install on all cluster nodes.
Default installation (latest available version):
# yum -y install kubeadm kubelet kubectl
List the available versions:
# yum list kubeadm.x86_64 --showduplicates | sort -r
# yum list kubelet.x86_64 --showduplicates | sort -r
# yum list kubectl.x86_64 --showduplicates | sort -r
Install a specific version:
# yum -y install kubeadm-1.28.X-0 kubelet-1.28.X-0 kubectl-1.28.X-0
3.2.2 Configure kubelet
To keep the cgroup driver used by the container runtime consistent with the one used by kubelet, it is recommended to modify the following file:
# vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
Only enable kubelet to start at boot; since its configuration file has not been generated yet, it will start automatically after cluster initialization.
# systemctl enable kubelet
3.3 Kubernetes Cluster Initialization
3.3.1 Replace kubeadm with a 100-Year-Certificate Build
# which kubeadm
/usr/bin/kubeadm
3.3.1.1 Obtain the Kubernetes Source Code
[root@k8s-master01 ~]# wget https://github.com/kubernetes/kubernetes/archive/refs/tags/v1.28.0.tar.gz
[root@k8s-master01 ~]# ls
v1.28.0.tar.gz
[root@k8s-master01 ~]# tar xf v1.28.0.tar.gz
[root@k8s-master01 ~]# ls
kubernetes-1.28.0
3.3.1.2 Modify the Kubernetes Source Code
Change the CA certificate validity to 100 years (the default is 10 years):
[root@k8s-master01 ~]# vim kubernetes-1.28.0/staging/src/k8s.io/client-go/util/cert/cert.go
72     tmpl := x509.Certificate{
73         SerialNumber: serial,
74         Subject: pkix.Name{
75             CommonName:   cfg.CommonName,
76             Organization: cfg.Organization,
77         },
78         DNSNames:  []string{cfg.CommonName},
79         NotBefore: notBefore,
80         NotAfter:  now.Add(duration365d * 100).UTC(),
81         KeyUsage:  x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
82         BasicConstraintsValid: true,
83         IsCA:                  true,
84     }
Note:
On line 80 of the file, change 10 to 100.
Change the kubeadm-signed certificate validity to 100 years (the default is 1 year):
[root@k8s-master01 ~]# vim kubernetes-1.28.0/cmd/kubeadm/app/constants/constants.go
......
37 const (
38     // KubernetesDir is the directory Kubernetes owns for storing various configuration files
39     KubernetesDir = "/etc/kubernetes"
40     // ManifestsSubDirName defines directory name to store manifests
41     ManifestsSubDirName = "manifests"
42     // TempDirForKubeadm defines temporary directory for kubeadm
43     // should be joined with KubernetesDir.
44     TempDirForKubeadm = "tmp"
45
46     // CertificateBackdate defines the offset applied to notBefore for CA certificates generated by kubeadm
47     CertificateBackdate = time.Minute * 5
48     // CertificateValidity defines the validity for all the signed certificates generated by kubeadm
49     CertificateValidity = time.Hour * 24 * 365 * 100
Note:
Change CertificateValidity = time.Hour * 24 * 365 to CertificateValidity = time.Hour * 24 * 365 * 100.
3.3.1.3 Install Go
[root@k8s-master01 ~]# wget https://go.dev/dl/go1.21.0.linux-amd64.tar.gz
[root@k8s-master01 ~]# tar xf go1.21.0.linux-amd64.tar.gz
[root@k8s-master01 ~]# mv go /usr/local/go
[root@k8s-master01 ~]# vim /etc/profile
[root@k8s-master01 ~]# cat /etc/profile
......
export PATH=$PATH:/usr/local/go/bin
[root@k8s-master01 ~]# source /etc/profile
[root@k8s-master01 ~]# go version
go version go1.21.0 linux/amd64
3.3.1.4 Build kubeadm from the Kubernetes Source
[root@k8s-master01 ~]# cd kubernetes-1.28.0/
[root@k8s-master01 kubernetes-1.28.0]# make all WHAT=cmd/kubeadm GOFLAGS=-v
[root@k8s-master01 kubernetes-1.28.0]# ls
_output
[root@k8s-master01 kubernetes-1.28.0]# ls _output/
bin local
[root@k8s-master01 kubernetes-1.28.0]# ls _output/bin/
kubeadm ncpu
3.3.1.5 Replace kubeadm on All Cluster Hosts
[root@k8s-master01 kubernetes-1.28.0]# which kubeadm
/usr/bin/kubeadm
[root@k8s-master01 kubernetes-1.28.0]# rm -rf `which kubeadm`
[root@k8s-master01 kubernetes-1.28.0]# cp _output/bin/kubeadm /usr/bin/kubeadm
[root@k8s-master01 kubernetes-1.28.0]# which kubeadm
/usr/bin/kubeadm
[root@k8s-worker01 ~]# rm -rf `which kubeadm`
[root@k8s-worker02 ~]# rm -rf `which kubeadm`
[root@k8s-master01 kubernetes-1.28.0]# scp _output/bin/kubeadm 192.168.10.141:/usr/bin/kubeadm
[root@k8s-master01 kubernetes-1.28.0]# scp _output/bin/kubeadm 192.168.10.142:/usr/bin/kubeadm
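Before moving on, a quick check on each host that the rebuilt binary is in place and still runs can save trouble later (a sketch; repeat on every node):
# ls -l /usr/bin/kubeadm
# kubeadm version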
3.3.2 Obtain the Kubernetes 1.28 Component Container Images
[root@k8s-master01 ~]# kubeadm config images list
registry.k8s.io/kube-apiserver:v1.28.0
registry.k8s.io/kube-controller-manager:v1.28.0
registry.k8s.io/kube-scheduler:v1.28.0
registry.k8s.io/kube-proxy:v1.28.0
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.9-0
registry.k8s.io/coredns/coredns:v1.10.1
[root@k8s-master01 ~]# kubeadm config images pull
3.3.3 Initialize the Kubernetes 1.28 Cluster
[root@k8s-master01 ~]# kubeadm init --kubernetes-version=v1.28.0 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.10.140 --cri-socket unix:///var/run/containerd/containerd.sock
[init] Using Kubernetes version: v1.28.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.10.140]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.10.140 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.10.140 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 20.502191 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: hd74hg.r8l1pe4tivwyjz73
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.10.140:6443 --token hd74hg.r8l1pe4tivwyjz73 \
--discovery-token-ca-cert-hash sha256:29a00daed8d96dfa8e913ab4c0a8c4037f1c253a20742ca8913932dd7c8b3bd1
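Before using kubectl on the master, set up the admin kubeconfig exactly as instructed in the output above (here as root, so sudo is omitted):
[root@k8s-master01 ~]# mkdir -p $HOME/.kube
[root@k8s-master01 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master01 ~]# chown $(id -u):$(id -g) $HOME/.kube/config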
3.4 Join the Worker Nodes to the Cluster
[root@k8s-worker01 ~]# kubeadm join 192.168.10.140:6443 --token hd74hg.r8l1pe4tivwyjz73 \
> --discovery-token-ca-cert-hash sha256:29a00daed8d96dfa8e913ab4c0a8c4037f1c253a20742ca8913932dd7c8b3bd1 --cri-socket unix:///var/run/containerd/containerd.sock
[root@k8s-worker02 ~]# kubeadm join 192.168.10.140:6443 --token hd74hg.r8l1pe4tivwyjz73 \
> --discovery-token-ca-cert-hash sha256:29a00daed8d96dfa8e913ab4c0a8c4037f1c253a20742ca8913932dd7c8b3bd1 --cri-socket unix:///var/run/containerd/containerd.sock
3.5 Verify That the Cluster Nodes Are Available
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready control-plane 15m v1.28.0
k8s-worker01 Ready <none> 13m v1.28.0
k8s-worker02 Ready <none> 13m v1.28.0
3.6 Verify Certificate Validity
[root@k8s-master01 ~]# openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text | grep ' Not '
Not Before: Aug 21 10:24:00 2023 GMT
Not After : Jul 28 10:29:00 2123 GMT
[root@k8s-master01 ~]# kubeadm certs check-expiration
[check-expiration] Reading configuration from the cluster...
[check-expiration] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
CERTIFICATE EXPIRES RESIDUAL TIME CERTIFICATE AUTHORITY EXTERNALLY MANAGED
admin.conf Jul 28, 2123 10:29 UTC 99y ca no
apiserver Jul 28, 2123 10:29 UTC 99y ca no
apiserver-etcd-client Jul 28, 2123 10:29 UTC 99y etcd-ca no
apiserver-kubelet-client Jul 28, 2123 10:29 UTC 99y ca no
controller-manager.conf Jul 28, 2123 10:29 UTC 99y ca no
etcd-healthcheck-client Jul 28, 2123 10:29 UTC 99y etcd-ca no
etcd-peer Jul 28, 2123 10:29 UTC 99y etcd-ca no
etcd-server Jul 28, 2123 10:29 UTC 99y etcd-ca no
front-proxy-client Jul 28, 2123 10:29 UTC 99y front-proxy-ca no
scheduler.conf Jul 28, 2123 10:29 UTC 99y ca no
CERTIFICATE AUTHORITY EXPIRES RESIDUAL TIME EXTERNALLY MANAGED
ca Aug 18, 2033 10:29 UTC 99y no
etcd-ca Aug 18, 2033 10:29 UTC 99y no
front-proxy-ca Aug 18, 2033 10:29 UTC 99y no
4. Deploying the Calico Network Plugin
Calico documentation: https://projectcalico.docs.tigera.io/about/about-calico
# kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/tigera-operator.yaml
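Before applying the custom resources, it can help to confirm that the operator pod is running (a sketch):
# kubectl get pods -n tigera-operator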
# wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/custom-resources.yaml
# vim custom-resources.yaml
# cat custom-resources.yaml
# This section includes base Calico installation configuration.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
    - blockSize: 26
      cidr: 10.244.0.0/16    # change this to the pod network CIDR defined at cluster initialization
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
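As an alternative to editing with vim, the CIDR can be changed with sed (a sketch; 192.168.0.0/16 is assumed to be the default value shipped in the manifest):
# sed -i 's#cidr: 192.168.0.0/16#cidr: 10.244.0.0/16#' custom-resources.yaml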
# kubectl create -f custom-resources.yaml
installation.operator.tigera.io/default created
apiserver.operator.tigera.io/default created
[root@k8s-master01 ~]# kubectl get pods -n calico-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-6bb86c78b4-cnr9l 1/1 Running 0 2m26s
calico-node-86cs9 1/1 Running 0 2m26s
calico-node-gjgcc 1/1 Running 0 2m26s
calico-node-hlr69 1/1 Running 0 2m26s
calico-typha-6f877c9d8f-8f5fb 1/1 Running 0 2m25s
calico-typha-6f877c9d8f-spxqf 1/1 Running 0 2m26s
csi-node-driver-9b8nd 2/2 Running 0 2m26s
csi-node-driver-rg6dc 2/2 Running 0 2m26s
csi-node-driver-tf82w 2/2 Running 0 2m26s