• k8s集群搭建


    注意

    k8s有硬件要求:CPU必须为2核及以上,内存为2G及以上

    前提条件

    docker:k8s依赖的容器运行时,用来运行容器

    kubeadm :k8s集群搭建

    kubectl: 操作k8s集群客户端

    kubelet:运行每个节点容器

    步骤

    1-8 (除了4) 在所有节点执行

    1.关闭防火墙,配置免密登录,这点基本所有教程都有

    systemctl stop firewalld #防止端口不开放,k8s集群无法启动
    

    2.关闭selinux

    setenforce 0  # 临时关闭;永久关闭需将 /etc/selinux/config 中的 SELINUX 设为 disabled
    

    3.关闭swap

    1. swapoff -a #临时关闭
    2. free #可以通过这个命令查看swap是否关闭了
    3. vim /etc/fstab #永久关闭 注释swap那一行(访问内存分区,k8s无法启动)

    4.添加主机名与IP对应的关系,免密(这一步可以只在master执行),这一步我为后面传输网络做准备

    1. vim /etc/hosts
    2. 192.168.44.6 tony06
    3. 192.168.44.4 tony02
    4. ssh-keygen
    5. cat .ssh/id_rsa.pub >> .ssh/authorized_keys
    6. chmod 600 .ssh/authorized_keys
    7. # 可以在master生成,然后拷贝到node节点(免密登录,主机之间互相传文件)
    8. scp -r .ssh root@192.168.44.4:/root

    5.将桥接的IPV4流量传递到iptables 的链

    1. vi /etc/sysctl.d/k8s.conf
    2. net.bridge.bridge-nf-call-ip6tables = 1
    3. net.bridge.bridge-nf-call-iptables = 1
    4. sysctl --system # 使上述配置立即生效

    6.安装Docker及同步时间

    1. wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
    2. yum -y install docker-ce
    3. systemctl start docker
    4. systemctl enable docker
    5. # 同步时间(这一步必须做,否则后面安装flannel可能会有证书错误)
    6. yum install ntpdate -y
    7. ntpdate cn.pool.ntp.org

    7.添加阿里云YUM软件源

    1. vi /etc/yum.repos.d/kubernetes.repo
    2. [kubernetes]
    3. name=Kubernetes
    4. baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    5. enabled=1
    6. gpgcheck=1
    7. repo_gpgcheck=1
    8. gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
    9. https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

    8.安装kubeadm,kubelet和kubectl

    1. yum makecache fast
    2. yum install -y kubectl-1.18.0 kubeadm-1.18.0 kubelet-1.18.0 --nogpgcheck

    9. 部署Kubernetes Master

    初始化master(在master执行)

    1. # 第一次初始化比较慢,需要拉取镜像
    2. # 注意:--apiserver-advertise-address 换成自己master的IP;续行符 \ 之后不能再跟注释,否则命令会被截断
    3. kubeadm init \
    4. --apiserver-advertise-address=192.168.44.4 \
    4. --image-repository registry.aliyuncs.com/google_containers \
    5. --kubernetes-version v1.18.0 \
    6. --service-cidr=10.1.0.0/16 \
    7. --pod-network-cidr=10.244.0.0/16 # 使用flannel网络必须设置成这个cidr
    8. kubeadm init \
    9. --apiserver-advertise-address=192.168.44.6 \
    10. --image-repository registry.aliyuncs.com/google_containers \
    11. --kubernetes-version v1.18.0 \
    12. --service-cidr=10.1.0.0/16 \
    13. --pod-network-cidr=10.244.0.0/16
    14. 接下来,将初始化结果中的命令复制出来执行:
    15. mkdir -p $HOME/.kube
    16. sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    17. sudo chown $(id -u):$(id -g) $HOME/.kube/config

    验证状态,发现前两个是pending,get pods 发现是not ready

    1. kubectl get pods --all-namespaces
    2. NAMESPACE NAME READY STATUS RESTARTS AGE
    3. kube-system coredns-9d85f5447-fhdmx 0/1 Pending 0 100d
    4. kube-system coredns-9d85f5447-x5wfq 0/1 Pending 0 100d
    5. kube-system etcd-local1 1/1 Running 0 100d
    6. kube-system kube-apiserver-local1 1/1 Running 0 100d
    7. kube-system kube-controller-manager-local1 1/1 Running 0 100d
    8. kube-system kube-proxy-2trv9 1/1 Running 0 100d
    9. kube-system kube-scheduler-local1 1/1 Running 0 100d

    需要安装flannel

    1. # 安装flannel(在master执行)/
    2. // 1、在线安装
    3. kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    4. // 2、离线安装
    5. 如果kube-flannel.yml无法下载
    6. 手动配置网路地址
    7. mkdir /run/flannel/
    8. cat <<EOF> /run/flannel/subnet.env
    9. FLANNEL_NETWORK=10.244.0.0/16
    10. FLANNEL_SUBNET=10.244.1.0/24
    11. FLANNEL_MTU=1450
    12. FLANNEL_IPMASQ=true
    13. EOF
    14. # 安装完flannel,将配置拷到node节点,否则添加节点之后状态不对
    15. scp -r /etc/cni root@192.168.44.4:/etc
    16. # 这一步也要拷贝,否则节点看着正常,但是pod由于网络原因无法创建
    17. scp -r /run/flannel/ root@192.168.44.4:/run

    再次初始化

    1. # 执行第9步的命令
    2. kubeadm init ...
    3. 参数
    4. --kubernetes-version 指定Kubernetes版本
    5. --apiserver-advertise-address 指定apiserver的监听地址
    6. --pod-network-cidr 10.244.0.0/16 指定使用flanneld网络
    7. --apiserver-bind-port api-server 6443的端口
    8. --ignore-preflight-errors all 跳过之前已安装部分(出问题时,问题解决后加上继续运行)

    查看集群状态,master正常

    1. [root@local1 ~]# kubectl get cs
    2. NAME STATUS MESSAGE ERROR
    3. scheduler Healthy ok
    4. controller-manager Healthy ok
    5. etcd-0 Healthy {"health":"true"}
    6. [root@local1 ~]# kubectl get nodes
    7. NAME STATUS ROLES AGE VERSION
    8. local1 Ready master 2m16s v1.18.0
    9. [root@local1 ~]# kubectl get pods --all-namespaces
    10. NAMESPACE NAME READY STATUS RESTARTS AGE
    11. kube-system coredns-9d85f5447-9s4mc 1/1 Running 0 16m
    12. kube-system coredns-9d85f5447-gt2nf 1/1 Running 0 16m
    13. kube-system etcd-local1 1/1 Running 0 16m
    14. kube-system kube-apiserver-local1 1/1 Running 0 16m
    15. kube-system kube-controller-manager-local1 1/1 Running 0 16m
    16. kube-system kube-proxy-sdbl9 1/1 Running 0 15m
    17. kube-system kube-proxy-v4vxg 1/1 Running 0 16m
    18. kube-system kube-scheduler-local1 1/1 Running 0

    10、node工作节点加载

    node节点执行1-8,如果第五步不执行,会添加失败

    在node节点执行上面初始化时生成的join命令

    1. kubeadm join 192.168.235.145:6443 --token w5rify.gulw6l1yb63zsqsa \
    2. --discovery-token-ca-cert-hash sha256:4e7f3a03392a7f9277d9f0ea2210f77d6e67ce0367e824ed891f6fefc7dae3c8
    4. # 输出
    5. This node has joined the cluster:
    6. * Certificate signing request was sent to apiserver and a response was received.
    7. * The Kubelet was informed of the new secure connection details.
    8. Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

    在master查看

    1. [root@local1 ~]# kubectl get nodes
    2. NAME STATUS ROLES AGE VERSION
    3. local1 Ready master 4m58s v1.18.3
    4. local2 Ready <none> 3m36s v1.18.3

    在node节点查看

    1. [root@local3 ~]# kubectl get nodes
    2. Unable to connect to the server: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "kubernetes")
    3. # 如果报错,需要将master的admin.conf拷贝过来
    4. # master执行
    5. scp /etc/kubernetes/admin.conf root@local3:/etc/kubernetes/
    6. # 然后在node执行下面三步
    7. mkdir -p $HOME/.kube
    8. sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    9. sudo chown $(id -u):$(id -g) $HOME/.kube/config
    10. 再次在node查看
    11. [root@local3 ~]# kubectl get nodes
    12. NAME STATUS ROLES AGE VERSION
    13. local1 Ready master 6m36s v1.18.0
    14. local2 Ready <none> 31s v1.18.0
    15. local3 Ready <none> 5m43s v1.18.0

    11、如果节点出错,可以移除节点

    1. #重置节点
    2. kubeadm reset
    3. #删除节点,删除后 数据就从etcd中清除了(可运行kubectl的任一节点中执行)
    4. kubectl delete node node-1

    12、如果加入节点时,token过期,可以重新生成

    1. 查看token
    2. kubeadm token list
    3. 默认生成的token有效期是一天,生成永不过期的token
    4. [root@k8s-master ~]# kubeadm token create --ttl 0
    5. W0501 09:14:13.887115 38074 validation.go:28] Cannot validate kube-proxy config - no validator is available
    6. W0501 09:14:13.887344 38074 validation.go:28] Cannot validate kubelet config - no validator is available
    7. 获取 --discovery-token-ca-cert-hash 所需的sha256哈希值(注意:这不是token,是CA证书公钥的摘要)
    8. [root@k8s-master ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
    9. # 输出的sha256哈希值
    10. 4dc852fb46813f5b1840f06578ba01283c1a12748419ba8f25ce2788419ab1c2
    11. 在worker节点执行join
    12. kubeadm join 192.168.0.104:6443 --token vahjcu.rhm7864v6l400188 --discovery-token-ca-cert-hash sha256:4dc852fb46813f5b1840f06578ba01283c1a12748419ba8f25ce2788419ab1c2
  • 相关阅读:
    如何使用virtualenv的虚拟环境
    1.12 进程注入ShellCode套接字
    Redis布隆过滤器和布谷鸟过滤器
    js基础笔记学习28-逻辑与和或
    http请求头中的Content-Type到底有什么用?
    机器学习小白理解之一元线性回归
    转换流和(字符输入流和字节输入流)
    java:jvm参数设置
    重学SpringBoot3-日志Logging
    Java语言中的数据流的概念
  • 原文地址:https://blog.csdn.net/greenwaves3000/article/details/127487025