Installing a Highly Available Kubernetes Cluster with kubeadm

2023-05-16

Kubernetes resource manifest generator:
https://k8syaml.com/

Nginx proxy configuration

Proxy IP: 192.168.41.10

vim /etc/nginx/nginx.conf

stream {
    upstream kube-apiserver {
        server 192.168.41.30:6443     max_fails=3 fail_timeout=30s;
        server 192.168.41.31:6443     max_fails=3 fail_timeout=30s;
        server 192.168.41.32:6443     max_fails=3 fail_timeout=30s;
    }
    server {
        listen 6443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}
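
A quick way to confirm the proxy is live after editing (the stream block must sit at the top level of nginx.conf, outside any http block):

nginx -t && systemctl reload nginx
ss -lntp | grep 6443   # the proxy should now be listening on 6443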

hosts file

192.168.41.30 k8s-master-1
192.168.41.31 k8s-master-2
192.168.41.32 k8s-master-3
192.168.41.36 k8s-node-1
192.168.41.37 k8s-node-2
192.168.41.38 k8s-node-3

Disable swap on all Kubernetes nodes

The detailed steps are omitted here; a minimal sketch follows.
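
Assuming swap is configured through /etc/fstab, something like:

swapoff -a
# comment out any active swap entries so the change survives a reboot
sed -i '/^[^#].*\sswap\s/s/^/#/' /etc/fstab
free -h   # the Swap line should now read 0B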

Install Docker (for clusters using dockershim)

yum install -y yum-utils device-mapper-persistent-data lvm2 wget
wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
# list the available docker-ce versions
yum list docker-ce --showduplicates
yum install docker-ce-x.x.x -y

mkdir /etc/docker


vim /etc/docker/daemon.json
{
  "storage-driver": "overlay2",
  "storage-opts": ["overlay2.override_kernel_check=true"],
  "log-driver": "json-file",
  "log-opts": {"max-size":"100m", "max-file":"1"},
  "insecure-registries": ["registry.access.redhat.com"],
  "registry-mirrors": ["https://gqk8w9va.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "max-concurrent-downloads": 10,
  "hosts": ["tcp://0.0.0.0:2375", "unix:///var/run/docker.sock"]
}



systemctl start docker
systemctl enable docker
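
It is worth confirming that the systemd cgroup driver from daemon.json took effect, since a cgroupfs/systemd mismatch with the kubelet is a common cause of init failures:

docker info | grep -i 'cgroup driver'   # expect: Cgroup Driver: systemd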

containerd (for clusters using containerd as the runtime)

yum -y install containerd.io
containerd config default >/etc/containerd/config.toml

vim /etc/containerd/config.toml

# change the sandbox (pause) image
sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.5"
...
SystemdCgroup = true
...
# registry mirror
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://p4oudlho.mirror.aliyuncs.com"]

# private registry credentials (harbor.com here is an example)
      [plugins."io.containerd.grpc.v1.cri".registry.configs]
        [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.com".tls]
          insecure_skip_verify = true
        [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.com".auth]
          username = "admin"
          password = "xxx"
systemctl start containerd
systemctl enable containerd
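
A quick check that the edits to config.toml actually took effect:

containerd config dump | grep -E 'SystemdCgroup|sandbox_image'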

File descriptor limits

/etc/security/limits.conf

...
* soft nofile 65535
* hard nofile 65535
* soft nproc 65535
* hard nproc 65535

Set kernel parameters

vim /etc/sysctl.conf

# sysctl settings are defined through files in
# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.
#
# Vendors settings live in /usr/lib/sysctl.d/.
# To override a whole file, create a new file with the same name in
# /etc/sysctl.d/ and put new settings there. To override
# only specific settings, add a file with a lexically later
# name in /etc/sysctl.d/ and put new settings there.
#
# For more information, see sysctl.conf(5) and sysctl.d(5).
kernel.pid_max=573440
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
vm.swappiness = 0
net.ipv4.ip_forward = 1
# https://help.aliyun.com/knowledge_detail/39428.html
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.arp_announce=2
net.ipv4.conf.lo.arp_announce=2
net.ipv4.conf.all.arp_announce=2
# https://help.aliyun.com/knowledge_detail/41334.html
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
kernel.sysrq = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 30
# bridge netfilter for iptables; enable these only after Docker is installed (br_netfilter loaded), otherwise sysctl reports an error
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
# ulimit
fs.inotify.max_user_watches = 65536
fs.inotify.max_user_instances = 8192
# BBR congestion control; optional on kernels newer than 4.9
#net.core.default_qdisc=fq
#net.ipv4.tcp_congestion_control=bbr
sysctl -p  /etc/sysctl.conf

Configure the IPVS modules

yum install ipvsadm ipset sysstat conntrack libseccomp -y

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
modprobe -- ip_tables
modprobe -- ip_set
modprobe -- xt_set
modprobe -- ipt_set
modprobe -- ipt_rpfilter
modprobe -- ipt_REJECT
modprobe -- ipip
EOF

chmod +x /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack

Load kernel modules automatically at boot

cat > /etc/modules-load.d/k8s.conf <<EOF
br_netfilter
ip6_udp_tunnel
ip_set
ip_set_hash_ip
ip_set_hash_net
iptable_filter
iptable_nat
iptable_mangle
iptable_raw
nf_conntrack_netlink
nf_conntrack
nf_conntrack_ipv4
nf_defrag_ipv4
nf_nat
nf_nat_ipv4
nf_nat_masquerade_ipv4
nfnetlink
udp_tunnel
veth
vxlan
x_tables
xt_addrtype
xt_conntrack
xt_comment
xt_mark
xt_multiport
xt_nat
xt_recent
xt_set
xt_statistic
xt_tcpudp
EOF
cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
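
One caveat: on kernels 4.19 and newer, nf_conntrack_ipv4 (and the other *_ipv4 conntrack/nat modules) were merged into their generic counterparts, so listing them here makes systemd-modules-load complain. A small guard, sketched with GNU sed:

# nf_conntrack_ipv4 no longer exists on kernel >= 4.19; fall back to nf_conntrack
if ! modinfo nf_conntrack_ipv4 &>/dev/null; then
  sed -i 's/nf_conntrack_ipv4/nf_conntrack/' /etc/modules-load.d/ipvs.conf /etc/modules-load.d/k8s.conf
fi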
sysctl --system

Install kubeadm, kubelet, and kubectl

cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=kubernetes repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
enabled=1
EOF

# pick the version that matches your target cluster, for example:
yum install -y kubelet-1.18.3 kubeadm-1.18.3 kubectl-1.18.3
# or
yum install -y kubelet-1.23.5 kubeadm-1.23.5 kubectl-1.23.5
systemctl enable kubelet

Configure kubelet to use containerd

crictl config runtime-endpoint /run/containerd/containerd.sock
vim /etc/sysconfig/kubelet
KUBELET_KUBEADM_ARGS="--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
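
Sanity check that crictl (and therefore the kubelet) can reach containerd's CRI socket:

crictl info >/dev/null && echo 'CRI endpoint OK'
crictl images   # may be empty on a fresh node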

Recompile kubeadm before initializing the cluster, so certificates are issued with a 100-year validity

Check the Go version kubeadm is built with

kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.9", GitCommit:"b631974d68ac5045e076c86a5c66fba6f128dc72", GitTreeState:"clean", BuildDate:"2022-01-19T17:50:04Z", GoVersion:"go1.16.12", Compiler:"gc", Platform:"linux/amd64"}

Install Go and configure GOPATH

# download and install Go
wget https://studygolang.com/dl/golang/go1.16.12.linux-amd64.tar.gz
tar xf go1.16.12.linux-amd64.tar.gz -C /usr/local/

# set up the Go environment variables
vim /etc/profile.d/go.sh
export GOROOT=/usr/local/go
export PATH=$PATH:/usr/local/go/bin
export GOPATH=/go

# reload the profile
. /etc/profile

Download the Kubernetes source, change the certificate lifetimes, and build

mkdir /go/src/k8s.io -p
cd /go/src/k8s.io
wget https://github.com/kubernetes/kubernetes/archive/v1.18.3.tar.gz
tar xf v1.18.3.tar.gz
mv kubernetes-1.18.3 kubernetes
cd kubernetes

# Change the CA validity to 100 years (the default is 10)

vim staging/src/k8s.io/client-go/util/cert/cert.go
...
                //NotAfter:              now.Add(duration365d * 10).UTC(),
                NotAfter:              now.Add(duration365d * 100).UTC(),
...



# Change the certificate validity to 100 years (the default is 1 year)

vim cmd/kubeadm/app/constants/constants.go
...
        CertificateValidity = time.Hour * 24 * 365 * 100
...



# build
make all WHAT=cmd/kubeadm GOFLAGS=-v
#make all WHAT=cmd/kubelet GOFLAGS=-v
#make all WHAT=cmd/kubectl GOFLAGS=-v

After the build finishes, the binaries are under _output/bin/.
Replace kubeadm in /usr/bin with the rebuilt binary, then initialize the cluster.
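
For example:

cp /usr/bin/kubeadm /usr/bin/kubeadm.bak   # keep the stock binary around
cp _output/bin/kubeadm /usr/bin/kubeadm
kubeadm version   # confirm the rebuilt binary is the one on PATH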

Initialize the master nodes

Cluster init configuration

Generate the kubeadm config file

kubeadm config print init-defaults > kubeadm-config.yaml

kubeadm-config.yaml

apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
  extraArgs:
    authorization-mode: Node,RBAC
    service-node-port-range: 1-65535
    feature-gates: RemoveSelfLink=false
  timeoutForControlPlane: 4m0s
controllerManager:
  extraArgs:
    port: "10252"
scheduler:
  extraArgs:
    port: "10251"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.41.10:6443
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
    extraArgs:
      listen-metrics-urls: http://0.0.0.0:2381
imageRepository: registry.aliyuncs.com/google_containers
kubernetesVersion: v1.18.3
networking:
  dnsDomain: cluster.local
  podSubnet: 10.154.0.0/16
  serviceSubnet: 10.97.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
metricsBindAddress: 0.0.0.0:10249
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
maxPods: 250

containerd-based

apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
  extraArgs:
    authorization-mode: Node,RBAC
    service-node-port-range: 1-65535
    feature-gates: RemoveSelfLink=false
  timeoutForControlPlane: 4m0s
controllerManager:
  extraArgs:
    port: "10252"
scheduler:
  extraArgs:
    port: "10251"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.41.10:6443
dns:
  type: CoreDNS
#etcd:
#  external:
#    caFile: /etc/kubernetes/pki/etcd/ca.crt
#    certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt
#    endpoints:
#    - https://192.168.192.101:2379
#    - https://192.168.192.102:2379
#    - https://192.168.192.103:2379
#    keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key
etcd:
  local:
    dataDir: /var/lib/etcd
    extraArgs:
      listen-metrics-urls: http://0.0.0.0:2381
imageRepository: registry.aliyuncs.com/google_containers
# Version
kubernetesVersion: v1.20.7
networking:
  dnsDomain: cluster.local
  podSubnet: 10.2.0.0/16
  serviceSubnet: 10.1.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
metricsBindAddress: 0.0.0.0:10249
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
maxPods: 250
containerLogMaxFiles: 3
containerLogMaxSize: 50Mi

v1.21.9

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.23.113
  bindPort: 6443
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
  extraArgs:
    authorization-mode: Node,RBAC
    service-node-port-range: 1-65535
    feature-gates: RemoveSelfLink=false
  timeoutForControlPlane: 4m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.41.10:6443
controllerManager:
  extraArgs:
    port: "10252"
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kubernetesVersion: v1.21.9
networking:
  dnsDomain: cluster.local
  podSubnet: 172.20.0.0/16
  serviceSubnet: 10.98.0.0/16
scheduler:
  extraArgs:
    port: "10251"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
metricsBindAddress: 0.0.0.0:10249
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
maxPods: 250
containerLogMaxFiles: 3
containerLogMaxSize: 50Mi

v1.23.5

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.10.96.11
  bindPort: 6443
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  extraArgs:
    authorization-mode: Node,RBAC
    service-node-port-range: 1-65535
    feature-gates: RemoveSelfLink=false
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.41.10:6443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.23.5
networking:
  dnsDomain: cluster.local
  podSubnet: 172.99.0.0/16
  serviceSubnet: 10.249.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
metricsBindAddress: 0.0.0.0:10249
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
maxPods: 250
containerLogMaxFiles: 3
containerLogMaxSize: 50Mi

Initialize the first master node

kubeadm config images list --config kubeadm-config.yaml
kubeadm config images pull --config kubeadm-config.yaml
kubeadm init --config kubeadm-config.yaml --upload-certs --node-name 192.168.41.30

...
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.41.10:6443 --token 3oh6is.lpapalm6lir1ncch \
    --discovery-token-ca-cert-hash sha256:b3b6b09bfa2fac882e04611f1b8f44f568cac4cc1cbf82650d31e69a9a510ab8 \
    --control-plane --certificate-key db61b0fd57236db0a58f297ece6795443836adc9872629cd3e2f87850ac59713

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.41.10:6443 --token 3oh6is.lpapalm6lir1ncch \
    --discovery-token-ca-cert-hash sha256:b3b6b09bfa2fac882e04611f1b8f44f568cac4cc1cbf82650d31e69a9a510ab8
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Set up kubectl command completion

echo "source <(kubectl completion bash)" >> /etc/profile

Install the Flannel network plugin

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Edit kube-flannel.yml:
  net-conf.json: |
    {
      "Network": "10.244.0.0/16", 修改为 10.153.0.0/16  与podSubnet一致
      "Backend": {
        "Type": "vxlan"
      }
    }

Other backend configurations

{
    "Network": "10.244.0.0/16",
    "SubnetLen": 24,
    "Backend": {
        "Type": "VxLAN",
        "Port": 8472
    }
}
{
	"Network": "10.0.0.0/8",
	"SubnetLen": 20,
	"SubnetMin": "10.10.0.0",
	"SubnetMax": "10.99.0.0",
	"Backend": {
		"Type": "udp",
		"Port": 7890
	}
}

kube-flannel.yml

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.153.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.14.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.14.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
kubectl apply -f kube-flannel.yml
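
Then confirm the DaemonSet rolls out and the nodes go Ready:

kubectl -n kube-system get pods -l app=flannel -o wide
kubectl get nodes   # nodes switch to Ready once the CNI is up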

Join the remaining master nodes

kubeadm join 192.168.41.10:6443 --node-name 192.168.41.32 --token 3oh6is.lpapalm6lir1ncch \
    --discovery-token-ca-cert-hash sha256:b3b6b09bfa2fac882e04611f1b8f44f568cac4cc1cbf82650d31e69a9a510ab8 \
    --control-plane --certificate-key db61b0fd57236db0a58f297ece6795443836adc9872629cd3e2f87850ac59713
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Join the worker nodes

kubeadm join 192.168.41.10:6443 --node-name 192.168.41.38  --token 3oh6is.lpapalm6lir1ncch \
    --discovery-token-ca-cert-hash sha256:b3b6b09bfa2fac882e04611f1b8f44f568cac4cc1cbf82650d31e69a9a510ab8
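
After the joins, verify from a master:

kubectl get nodes -o wide
kubectl -n kube-system get pods -o wide   # expect one kube-proxy and one flannel pod per node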

Verify CoreDNS

[root@k8s-master-1 ~]# kubectl -n kube-system get service
NAME       TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)                  AGE
kube-dns   ClusterIP   10.249.0.10   <none>        53/UDP,53/TCP,9153/TCP   19d
[root@k8s-master-1 ~]# dig -t -a www.baidu.com @10.249.0.10
;; Warning, ignoring invalid type -a

; <<>> DiG 9.11.26-RedHat-9.11.26-6.el8 <<>> -t -a www.baidu.com @10.249.0.10
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 58547
;; flags: qr rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 4, ADDITIONAL: 4

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
; COOKIE: c874a4f24784e1c8 (echoed)
;; QUESTION SECTION:
;www.baidu.com.			IN	A

;; ANSWER SECTION:
www.baidu.com.		30	IN	CNAME	www.a.shifen.com.
www.a.shifen.com.	30	IN	A	180.101.49.11
www.a.shifen.com.	30	IN	A	180.101.49.12

;; AUTHORITY SECTION:
shifen.com.		30	IN	NS	dns.baidu.com.
shifen.com.		30	IN	NS	ns2.baidu.com.
shifen.com.		30	IN	NS	ns4.baidu.com.
shifen.com.		30	IN	NS	ns3.baidu.com.

;; ADDITIONAL SECTION:
ns4.baidu.com.		30	IN	A	14.215.178.80
ns3.baidu.com.		30	IN	A	112.80.248.64
ns2.baidu.com.		30	IN	A	220.181.33.31
[root@master1 yaml]# kubectl -n kube-system get service
NAME       TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)                  AGE
kube-dns   ClusterIP   10.122.0.10   <none>        53/UDP,53/TCP,9153/TCP   8d
[root@master1 yaml]# kubectl -n test get service
NAME    TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
nginx   ClusterIP   10.122.177.108   <none>        80/TCP    19s
[root@master1 yaml]# nslookup nginx.test.svc.cluster.local 10.122.0.10
Server:		10.122.0.10
Address:	10.122.0.10#53

Name:	nginx.test.svc.cluster.local
Address: 10.122.177.108

Deploy Nginx as a test
vim deploy_service_nginx.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-test
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-test
  template:
    metadata:
      labels:
        app: nginx-test
    spec:
      containers:
        - name: nginx-test
          image: nginx:alpine
          ports:
            - containerPort: 80
---
apiVersion: v1   
kind: Service
metadata:
  name: nginx-test
spec:
  selector:
    app: nginx-test
  type: NodePort
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 80
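
Note that nodePort: 80 is only accepted because the apiserver above was started with service-node-port-range: 1-65535; with the default range (30000-32767) this manifest would be rejected. Apply and smoke-test it (any node IP works for a NodePort service):

kubectl apply -f deploy_service_nginx.yml
kubectl get svc nginx-test
curl -I http://192.168.41.36/   # 192.168.41.36 is k8s-node-1 from the hosts table above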

Create Kubernetes access credentials

# create the service account
kubectl create serviceaccount dashboard-admin -n kube-system

# bind the cluster-admin role
kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin

# look up the token
kubectl -n kube-system get secrets | grep dashboard-admin
kubectl -n kube-system get secrets dashboard-admin-token-bqnz5




# set the cluster entry
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.crt --server=https://10.10.95.18:6443 --embed-certs=true --kubeconfig=/root/dashboard-admin.conf


# set the user entry
DEF_NS_ADMIN_TOKEN=$(kubectl -n kube-system get secrets dashboard-admin-token-bqnz5 -o jsonpath={.data.token} |base64 -d)
kubectl config set-credentials dashboard-admin --token=$DEF_NS_ADMIN_TOKEN --kubeconfig=/root/dashboard-admin.conf

# set the context
kubectl config set-context dashboard-admin@kubernetes --cluster=kubernetes --user=dashboard-admin --kubeconfig=/root/dashboard-admin.conf

# use the context
kubectl config use-context dashboard-admin@kubernetes --kubeconfig=/root/dashboard-admin.conf
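
Then verify the generated kubeconfig works end to end:

kubectl --kubeconfig=/root/dashboard-admin.conf get nodes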

Deploy ingress-nginx


# download the YAML manifest
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.2/deploy/static/provider/cloud/deploy.yaml
# switch the images to their Aliyun mirrors and pre-pull them
ctr -n k8s.io i pull -k registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:v1.1.2
ctr -n k8s.io i pull -k registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v1.1.1

# change the Deployment to a DaemonSet
# set hostNetwork: true in the DaemonSet and add hostPorts 80 and 443
# in the Service, change type: LoadBalancer to type: NodePort
# the modified file follows
#GENERATED FOR K8S 1.20
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  name: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx
  namespace: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - configmaps
  - pods
  - secrets
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resourceNames:
  - ingress-controller-leader
  resources:
  - configmaps
  verbs:
  - get
  - update
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx-admission
  namespace: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
  - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - nodes
  - pods
  - secrets
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx-admission
rules:
- apiGroups:
  - admissionregistration.k8s.io
  resources:
  - validatingwebhookconfigurations
  verbs:
  - get
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx-admission
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx-admission
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: v1
data:
  allow-snippet-annotations: "true"
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx-controller
  namespace: ingress-nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  externalTrafficPolicy: Local
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - appProtocol: http
    name: http
    port: 80
    protocol: TCP
    targetPort: http
  - appProtocol: https
    name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: NodePort
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx-controller-admission
  namespace: ingress-nginx
spec:
  ports:
  - appProtocol: https
    name: https-webhook
    port: 443
    targetPort: webhook
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: ClusterIP
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  minReadySeconds: 0
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/component: controller
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/name: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/component: controller
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
    spec:
      hostNetwork: true
      containers:
      - args:
        - /nginx-ingress-controller
        - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
        - --election-id=ingress-controller-leader
        - --controller-class=k8s.io/ingress-nginx
        - --ingress-class=nginx
        - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
        - --validating-webhook=:8443
        - --validating-webhook-certificate=/usr/local/certificates/cert
        - --validating-webhook-key=/usr/local/certificates/key
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: LD_PRELOAD
          value: /usr/local/lib/libmimalloc.so
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:v1.1.2
        imagePullPolicy: IfNotPresent
        lifecycle:
          preStop:
            exec:
              command:
              - /wait-shutdown
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        name: controller
        ports:
        - containerPort: 80
          hostPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          hostPort: 443
          name: https
          protocol: TCP
        - containerPort: 8443
          hostPort: 8443
          name: webhook
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          requests:
            cpu: 100m
            memory: 90Mi
        securityContext:
          allowPrivilegeEscalation: true
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - ALL
          runAsUser: 101
        volumeMounts:
        - mountPath: /usr/local/certificates/
          name: webhook-cert
          readOnly: true
      dnsPolicy: ClusterFirst
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
      volumes:
      - name: webhook-cert
        secret:
          secretName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
  annotations:
    helm.sh/hook: pre-install,pre-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx-admission-create
  namespace: ingress-nginx
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/component: admission-webhook
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.1.2
        helm.sh/chart: ingress-nginx-4.0.18
      name: ingress-nginx-admission-create
    spec:
      containers:
      - args:
        - create
        - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
        - --namespace=$(POD_NAMESPACE)
        - --secret-name=ingress-nginx-admission
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v1.1.1 
        imagePullPolicy: IfNotPresent
        name: create
        securityContext:
          allowPrivilegeEscalation: false
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: OnFailure
      securityContext:
        fsGroup: 2000
        runAsNonRoot: true
        runAsUser: 2000
      serviceAccountName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
  annotations:
    helm.sh/hook: post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx-admission-patch
  namespace: ingress-nginx
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/component: admission-webhook
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.1.2
        helm.sh/chart: ingress-nginx-4.0.18
      name: ingress-nginx-admission-patch
    spec:
      containers:
      - args:
        - patch
        - --webhook-name=ingress-nginx-admission
        - --namespace=$(POD_NAMESPACE)
        - --patch-mutating=false
        - --secret-name=ingress-nginx-admission
        - --patch-failure-policy=Fail
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v1.1.1
        imagePullPolicy: IfNotPresent
        name: patch
        securityContext:
          allowPrivilegeEscalation: false
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: OnFailure
      securityContext:
        fsGroup: 2000
        runAsNonRoot: true
        runAsUser: 2000
      serviceAccountName: ingress-nginx-admission
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: nginx
spec:
  controller: k8s.io/ingress-nginx
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.1.2
    helm.sh/chart: ingress-nginx-4.0.18
  name: ingress-nginx-admission
webhooks:
- admissionReviewVersions:
  - v1
  clientConfig:
    service:
      name: ingress-nginx-controller-admission
      namespace: ingress-nginx
      path: /networking/v1/ingresses
  failurePolicy: Fail
  matchPolicy: Equivalent
  name: validate.nginx.ingress.kubernetes.io
  rules:
  - apiGroups:
    - networking.k8s.io
    apiVersions:
    - v1
    operations:
    - CREATE
    - UPDATE
    resources:
    - ingresses
  sideEffects: None
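
Apply the manifest and check that a controller pod lands on every node (DaemonSet with hostNetwork):

kubectl apply -f deploy.yaml
kubectl -n ingress-nginx get pods -o wide
kubectl get ingressclass   # should show the nginx class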

Deploy a test application

vim nginx_deployment_service_ingress.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-test
  template:
    metadata:
      labels:
        app: nginx-test
    spec:
      containers:
        - name: nginx-test
          image: nginx:alpine
          ports:
            - containerPort: 80
---
apiVersion: v1   
kind: Service
metadata:
  name: nginx-test
spec:
  selector:
    app: nginx-test
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
---
apiVersion: networking.k8s.io/v1 
kind: Ingress
metadata:
  name: nginx-test
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
    - host: test.com.cn
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: nginx-test
                port:
                  number: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-xia
  namespace: xia
spec:
  replicas: 2
  selector:
    matchLabels:
      appname: nginx-xia
  template:
    metadata:
      annotations:
        prometheus.io/path: "/actuator/prometheus"
        prometheus.io/port: "80"
        prometheus.io/scrape: "true"
      labels:
        appname: nginx-xia
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: appname
                  operator: In
                  values:
                  - nginx-xia
              topologyKey: kubernetes.io/hostname
            weight: 50
      containers:
      - name: nginx-xia
        image: nginx:alpine
        ports:
        - containerPort: 80
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /
            port: 80
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          requests:
            cpu: 200m
            memory: 512Mi
          limits:
            cpu: 200m
            memory: 512Mi
---
apiVersion: v1   
kind: Service
metadata:
  name: nginx-xia
  namespace: xia
spec:
  selector:
    appname: nginx-xia
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
---
apiVersion: networking.k8s.io/v1 
kind: Ingress
metadata:
  name: nginx-xia
  namespace: xia
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/proxy-body-size: "1000m"
spec:
  rules:
    - host: xia.wuxingge.com
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: nginx-xia
                port:
                  number: 80
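
To test through the ingress, point the hostname at any node (a hosts-file entry is assumed here for the lab) and curl port 80 on the controller:

echo '192.168.41.36 xia.wuxingge.com' >> /etc/hosts
curl -I http://xia.wuxingge.com/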

Change the maximum number of pods per node

vim /var/lib/kubelet/config.yaml

apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: cgroupfs
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
maxPods: 250
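
The kubelet only reads this file at startup, so restart it and confirm the new capacity (<node-name> is a placeholder):

systemctl restart kubelet
kubectl describe node <node-name> | grep -i pods   # Capacity/Allocatable should show 250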


Create a join token

kubeadm token create --print-join-command

Drain pods from a node

# mark the node unschedulable
kubectl cordon k8s-node-uat-01
 
# make the node schedulable again
kubectl uncordon k8s-node-uat-01
 
# evict the pods on the node
kubectl drain --ignore-daemonsets k8s-node-uat-01
kubectl drain k8s-node-uat-01 --ignore-daemonsets --delete-local-data --force
# note: --delete-local-data was renamed --delete-emptydir-data in newer releases


# delete the node
kubectl delete node k8s-node-uat-01

Reset the cluster with kubeadm

kubeadm reset

# With 1.20 + containerd, kubeadm reset does not clean up old containers automatically; remove them by hand: delete the tasks first, then the containers
ctr -n k8s.io t rm -f `ctr -n k8s.io t ls -q`
ctr -n k8s.io c rm `ctr -n k8s.io c ls -q`
# tried it; it did not work, the containers could not be removed

systemctl stop kubelet docker containerd

rm -rf \
  /var/lib/{cni,kubelet,etcd,calico,dockershim} \
  /etc/{kubernetes,cni} \
  /run/{flannel,calico} \
  /var/run/kubernetes \
  $HOME/.kube/*

ip link set cni0 down
ip link set flannel.1 down
ip link set docker0 down
ip link set kube-ipvs0 down
ip link delete cni0
ip link delete flannel.1
ip link delete kube-ipvs0
iptables -F
iptables -X
iptables -t nat -F
iptables -t nat -X
ipvsadm -C

systemctl start docker containerd

Check certificate expiration dates

kubeadm alpha certs check-expiration   # on 1.20+ this is: kubeadm certs check-expiration
# run from /etc/kubernetes/pki
for i in $(ls *.crt); do echo $i; openssl x509 -enddate -noout -in $i; done

Inspect a certificate

cfssl-certinfo -cert apiserver.crt

Inspect the certificate served for a domain

cfssl-certinfo -domain www.baidu.com

Docker cleanup

docker image prune
docker system prune --volumes
docker system prune -a

Container log cleanup

for i in $(find /var/lib/docker/containers/ -name '*-json.log'); do cat /dev/null > $i; done