• Deploying k8s with kubeadm


    I. Deployment planning

    When deploying Kubernetes with kubeadm there is no hard requirement on how many master or slave nodes you must have; at minimum a single master node is enough. To save resources, this article deploys only the master node.

    Hostname      OS         Role     Components
    k8s-master    CentOS 7   master   etcd, kube-apiserver, kube-controller-manager, kubectl, kubeadm, kubelet, kube-proxy, flannel
    k8s-slave1    CentOS 7   slave    kubectl, kubelet, kube-proxy, flannel
    k8s-slave2    CentOS 7   slave    kubectl, kubelet, kube-proxy, flannel

    Open ports

    Master node: TCP 6443, 2379, 2380, 60080, 60081; UDP: all ports open
    Slave nodes: UDP: all ports open

    II. Basic environment setup (master + slave)

    1. Set the hostname

    [root@localhost ~]# hostnamectl set-hostname k8s-master
    [root@localhost ~]# bash
    [root@k8s-master ~]#

    2. Edit /etc/hosts

    [root@k8s-master ~]# echo "192.168.18.7 k8s-master" >>/etc/hosts

    3. Set iptables

    iptables -P FORWARD ACCEPT

    4. Disable swap

    sed -ri 's/.*swap.*/#&/' /etc/fstab
    swapoff -a && sysctl -w vm.swappiness=0

    5. Disable the firewall and SELinux

    systemctl stop firewalld.service; systemctl disable firewalld.service; sed -i -r 's#(^SELIN.*=)enforcing#\1disabled#g' /etc/selinux/config; setenforce 0

    6. Tune kernel parameters

    [root@k8s-master ~]# cat >>/etc/sysctl.d/k8s.conf<<EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    vm.max_map_count=262144
    EOF
    [root@k8s-master ~]# modprobe br_netfilter
    [root@k8s-master ~]# sysctl -p /etc/sysctl.d/k8s.conf

    7. Configure yum repositories

    [root@k8s-master ~]# wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    [root@k8s-master ~]# mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo_bak
    [root@k8s-master ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
    [root@k8s-master ~]# wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
    [root@k8s-master ~]# cat >>/etc/yum.repos.d/kubernetes.repo<<EOF
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=0
    EOF
    [root@k8s-master ~]# yum clean all && yum makecache

    III. Deploy Docker (master + slave)

    [root@k8s-master ~]# yum install -y docker-ce
    [root@k8s-master ~]# mkdir /etc/docker/
    [root@k8s-master ~]# cat >>/etc/docker/daemon.json<<EOF
    {
      "graph": "/data/docker",
      "storage-driver": "overlay2",
      "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.od.com","harbor.od.com:180"],
      "registry-mirrors": ["https://6kx4zyno.mirror.aliyuncs.com"],
      "exec-opts": ["native.cgroupdriver=systemd"],
      "live-restore": true
    }
    EOF
    [root@k8s-master ~]# mkdir -p /data/docker
    [root@k8s-master ~]# systemctl start docker ; systemctl enable docker
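
    The daemon.json above switches Docker to the systemd cgroup driver and the overlay2 storage driver; a quick sanity check that the settings took effect (a sketch, the exact output wording can differ between Docker versions):

    [root@k8s-master ~]# docker info | grep -Ei 'cgroup driver|storage driver'
    # expected: "Cgroup Driver: systemd" and "Storage Driver: overlay2"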

    IV. Deploy Kubernetes v1.16.2

    The same procedure has also been verified to work for v1.21.1.

    1. Install kubeadm (master + slave)

    [root@k8s-master ~]# yum install -y kubelet-1.16.2 kubeadm-1.16.2 kubectl-1.16.2 --disableexcludes=kubernetes
    [root@k8s-master ~]# kubeadm version
    kubeadm version: &version.Info{Major:"1", Minor:"16", GitVersion:"v1.16.2", GitCommit:"c97fe5036ef3df2967d086711e6c0c405941e14b", GitTreeState:"clean", BuildDate:"2019-10-15T19:15:39Z", GoVersion:"go1.12.10", Compiler:"gc", Platform:"linux/amd64"}
    [root@k8s-master ~]# systemctl enable kubelet

    2. Deploy k8s using a kubeadm config template (master)

    [root@k8s-master ~]# mkdir /opt/k8s-install/;cd /opt/k8s-install/
    [root@k8s-master k8s-install]# kubeadm config print init-defaults > kubeadm.yaml
    [root@k8s-master k8s-install]# vi kubeadm.yaml
    apiVersion: kubeadm.k8s.io/v1beta2
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 192.168.18.7   # IP address of the apiserver
      bindPort: 6443                   # port of the apiserver
    nodeRegistration:
      criSocket: /var/run/dockershim.sock
      name: k8s-master                 # hostname of the apiserver (master) node; this is the NAME shown by kubectl get node
      taints:                          # taint for this node: this file initializes the master, so the master gets a taint by default (taints: null is also allowed)
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
    ---
    apiServer:
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: registry.aliyuncs.com/google_containers   # changed to the Aliyun mirror
    kind: ClusterConfiguration
    kubernetesVersion: v1.16.2
    networking:
      dnsDomain: cluster.local         # no change needed
      podSubnet: 172.7.0.0/16          # added field: pod subnet needed by the flannel plugin; once defined it takes priority for pod IPs, otherwise the bip in docker's daemon.json is used
      serviceSubnet: 10.96.0.0/12      # IP range for Service resources; can be left unchanged
    scheduler: {}
    [root@k8s-master k8s-install]#
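
    Optionally, list the exact images this configuration resolves to before pulling them; this is a quick way to confirm the Aliyun imageRepository is being used:

    [root@k8s-master k8s-install]# kubeadm config images list --config kubeadm.yaml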

    Pull the images in advance so that image download problems do not cause the deployment to fail (master + slave)

    [root@k8s-master k8s-install]# kubeadm config images pull --config kubeadm.yaml
    [config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.16.2
    [config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.16.2
    [config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.16.2
    [config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.16.2
    [config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.1
    [config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.3.15-0
    [config/images] Pulled registry.aliyuncs.com/google_containers/coredns:1.6.2
    [root@k8s-master k8s-install]# docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.16.2
    [root@k8s-master k8s-install]# docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.16.2
    [root@k8s-master k8s-install]# docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.16.2
    [root@k8s-master k8s-install]# docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.16.2
    [root@k8s-master k8s-install]# docker pull registry.aliyuncs.com/google_containers/pause:3.1
    [root@k8s-master k8s-install]# docker pull registry.aliyuncs.com/google_containers/etcd:3.3.15-0
    [root@k8s-master k8s-install]# docker pull registry.aliyuncs.com/google_containers/coredns:1.6.2

    3. Initialize the master node (master)

    [root@k8s-master k8s-install]# kubeadm init --config kubeadm.yaml    # the message "Your Kubernetes control-plane has initialized successfully!" indicates success
    

    The output tells you to run:

    mkdir -p $HOME/.kube
    cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    chown $(id -u):$(id -g) $HOME/.kube/config

    [root@k8s-master .kube]# kubectl get node
    NAME         STATUS     ROLES    AGE     VERSION
    k8s-master   NotReady   master   6m14s   v1.16.2

    4. Join slave nodes to the cluster (all slaves)

    Run the following command on every slave node; it is printed in the output of a successful kubeadm init.

    kubeadm join 192.168.18.7:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:7d9e753992ae96e0d5bd34e20129a5b9e5f65b355f69e8d59f89f578c1558a0d

    5. Adding slave nodes later

    The token generated by kubeadm init (used in kubeadm join 192.168.18.7:6443) is valid for 24 hours; after that a new one must be requested. Running an expired kubeadm join command to add a node fails with: error execution phase preflight: couldn't validate the identity of the API Server: abort connecting to API servers after timeout of 5m0s. In other words the node cannot be registered and the connection attempt is abandoned after five minutes.

    There are many possible causes for this error, but the two main ones are:

    1. The token has expired

    In this case regenerate the token with kubeadm:

    # Fix: regenerate the token on the master
    [root@master ~]# kubeadm token generate          # generate a new token
    55rc0f.kdsdbg7vpymrkhr1                          # this value is used in the next command
    [root@master ~]# kubeadm token create 55rc0f.kdsdbg7vpymrkhr1 --print-join-command --ttl=0    # print the join command for this token
    kubeadm join 192.168.206.10:6443 --token 55rc0f.kdsdbg7vpymrkhr1 --discovery-token-ca-cert-hash sha256:73bb4deef64b7ceb47710d049e2f0d89f439f833baf3812d7868cf07da515fae

    Then run the kubeadm join command printed above on the node you want to add.

    2. The k8s API server is unreachable
    In this case, check and disable firewalld and SELinux on all servers:

    [root@master ~]# setenforce 0
    [root@master ~]# sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
    [root@master ~]# systemctl disable firewalld --now

    V. Deploy the flannel plugin (one master node)

    The node STATUS shows NotReady because the flannel plugin has not been deployed yet. flannel is the component that lets pods on different nodes reach each other and reach each node's host, so it is indispensable, and without it the node stays NotReady. Even if you currently only have the single master node, you still need to deploy it: Kubernetes checks whether a CNI network plugin is present.
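
    A quick way to see that the missing CNI plugin is what keeps the node NotReady (a sketch; the exact wording of the condition message varies by version):

    [root@k8s-master ~]# kubectl get nodes                   # STATUS stays NotReady until a CNI plugin is installed
    [root@k8s-master ~]# kubectl describe node k8s-master    # the Ready condition typically mentions "cni config uninitialized"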

    1. Download flannel (one master node)

    https://github.com/flannel-io/flannel/tree/v0.12.0

    or

    echo "199.232.68.133 raw.githubusercontent.com" >> /etc/hosts
    wget https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml

    2. Deploy flannel (one master node)

    Understanding flannel: how does flannel make pod-to-pod communication work? A pod is the smallest unit in Kubernetes, a container is the smallest unit in Docker, and a pod contains one or more containers, so what Kubernetes ultimately manages is containers. How do containers talk to each other? On the same machine they communicate through docker0, which acts as a switch and gateway: one container reaches another via that gateway. But how does a container reach a container on another physical machine? docker0 alone cannot find it, so it hands the traffic up to the host's physical NIC. How does the physical NIC know where the other machine's containers live? That is where flannel comes in. Suppose there are two physical machines, A and B, and A talks to B through ens33 with IP 10.4.7.21/24. flannel is bound to ens33, and A's daemon.json says the pods started on this host use the 172.7.21.0/24 subnet (every pod on A gets an IP from that range), while B's pod subnet is 172.7.22.0/24. flannel then creates a route on machine A, 172.7.22.0/24 via 10.4.7.22 dev ens33, meaning traffic for 172.7.22.0/24 leaves through ens33 towards 10.4.7.22. Concretely: a pod on A sends a packet to an address in 172.7.22.0/24, docker0 has no matching address and passes it up to ens33, the routing table sends packets for 172.7.22.0/24 straight to 10.4.7.22 over ens33, and when machine B receives the packet it sees the destination is in its own pod subnet, hands it to docker0, and docker0 delivers it to the pod.
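
    The routing rule described above can be sketched with plain ip route commands. This corresponds to flannel's host-gw behaviour; the manifest used below configures the vxlan backend, which reaches the same goal through a flannel.1 overlay device instead of direct routes. The IPs and interface name are the example values from the paragraph:

    # On machine A (ens33 = 10.4.7.21/24, local pod subnet 172.7.21.0/24):
    # send traffic destined for B's pod subnet to B's physical IP
    ip route add 172.7.22.0/24 via 10.4.7.22 dev ens33
    # On machine B the mirror rule points A's pod subnet at A's physical IP
    ip route add 172.7.21.0/24 via 10.4.7.21 dev ens33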

    Given the above, first find out the name of the physical NIC.
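
    For example (any tool that lists interfaces works):

    [root@k8s-master ~]# ip addr    # pick the interface that carries the node IP (ens33 in this article)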

    Open \flannel-0.12.0\Documentation\kube-flannel.yml and add --iface=<NIC name>; if this flag is not set, flannel picks the first physical NIC by default.

    ---
    kind: Namespace
    apiVersion: v1
    metadata:
      name: kube-flannel
      labels:
        pod-security.kubernetes.io/enforce: privileged
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: flannel
    rules:
    - apiGroups:
      - ""
      resources:
      - pods
      verbs:
      - get
    - apiGroups:
      - ""
      resources:
      - nodes
      verbs:
      - get
      - list
      - watch
    - apiGroups:
      - ""
      resources:
      - nodes/status
      verbs:
      - patch
    - apiGroups:
      - "networking.k8s.io"
      resources:
      - clustercidrs
      verbs:
      - list
      - watch
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: flannel
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: flannel
    subjects:
    - kind: ServiceAccount
      name: flannel
      namespace: kube-flannel
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: flannel
      namespace: kube-flannel
    ---
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: kube-flannel-cfg
      namespace: kube-flannel
      labels:
        tier: node
        app: flannel
    data:
      cni-conf.json: |
        {
          "name": "cbr0",
          "cniVersion": "0.3.1",
          "plugins": [
            {
              "type": "flannel",
              "delegate": {
                "hairpinMode": true,
                "isDefaultGateway": true
              }
            },
            {
              "type": "portmap",
              "capabilities": {
                "portMappings": true
              }
            }
          ]
        }
      net-conf.json: |
        {
          "Network": "172.7.0.0/16",
          "Backend": {
            "Type": "vxlan"
          }
        }
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: kube-flannel-ds
      namespace: kube-flannel
      labels:
        tier: node
        app: flannel
    spec:
      selector:
        matchLabels:
          app: flannel
      template:
        metadata:
          labels:
            tier: node
            app: flannel
        spec:
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                    - linux
          hostNetwork: true
          priorityClassName: system-node-critical
          tolerations:
          - operator: Exists
            effect: NoSchedule
          serviceAccountName: flannel
          initContainers:
          - name: install-cni-plugin
            image: docker.io/flannel/flannel-cni-plugin:v1.1.2
            #image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.2
            command:
            - cp
            args:
            - -f
            - /flannel
            - /opt/cni/bin/flannel
            volumeMounts:
            - name: cni-plugin
              mountPath: /opt/cni/bin
          - name: install-cni
            image: docker.io/flannel/flannel:v0.20.2
            #image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
            command:
            - cp
            args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
            volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          containers:
          - name: kube-flannel
            image: docker.io/flannel/flannel:v0.20.2
            #image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
            command:
            - /opt/bin/flanneld
            args:
            - --ip-masq
            - --kube-subnet-mgr
            - --iface=ens33
            - --public-ip=$(PUBLIC_IP)
            resources:
              requests:
                cpu: "100m"
                memory: "50Mi"
            securityContext:
              privileged: false
              capabilities:
                add: ["NET_ADMIN", "NET_RAW"]
            env:
            - name: PUBLIC_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: EVENT_QUEUE_DEPTH
              value: "5000"
            volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
            - name: xtables-lock
              mountPath: /run/xtables.lock
          volumes:
          - name: run
            hostPath:
              path: /run/flannel
          - name: cni-plugin
            hostPath:
              path: /opt/cni/bin
          - name: cni
            hostPath:
              path: /etc/cni/net.d
          - name: flannel-cfg
            configMap:
              name: kube-flannel-cfg
          - name: xtables-lock
            hostPath:
              path: /run/xtables.lock
              type: FileOrCreate

    Pull the image in advance. If you change the image references, change them everywhere they appear, and make sure the image you pull matches the images referenced in the kube-flannel.yml you actually apply.

    docker pull quay.io/coreos/flannel:v0.12.0-amd64

    Apply flannel

    [root@k8s-master k8s-install]# kubectl create -f kube-flannel.yml
    

    Check the node status
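
    Once the flannel pod is running, the node should switch to Ready; a sketch of the check (the manifest above deploys into the kube-flannel namespace, while the older v0.12.0 manifest uses kube-system):

    [root@k8s-master k8s-install]# kubectl get pod -n kube-flannel    # wait until the flannel pod is Running
    [root@k8s-master k8s-install]# kubectl get node                   # k8s-master should now show STATUS Ready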

    Test and verify

    [root@k8s-master k8s-install]# kubectl run test-nginx --image=nginx:alpine
    [root@k8s-master k8s-install]# kubectl get pod
    NAME                          READY   STATUS    RESTARTS   AGE
    test-nginx-5bd8859b98-9bs2p   0/1     Pending   0          59s

    VI. Make the master node schedulable

    By default, on a kubeadm-deployed cluster the master node cannot be scheduled, i.e. it will not run business pods, because kubeadm places a taint on the master. The taint can be removed:

    [root@k8s-master nginx]# kubectl taint node k8s-master node-role.kubernetes.io/master:NoSchedule-

    Check whether anything is still marked unschedulable, for example:
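
    For example, confirm the taint is gone and that previously Pending pods get scheduled (a sketch):

    [root@k8s-master nginx]# kubectl describe node k8s-master | grep -i taints    # should show Taints: <none>
    [root@k8s-master nginx]# kubectl get pod                                      # the earlier test-nginx pod should leave Pending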

    VII. Deploy the ingress-controller

    7.1 Download ingress-nginx

    Locate \ingress-nginx-nginx-0.30.0\deploy\static\mandatory.yaml

    7.2 Deploy ingress-nginx

    [root@k8s-master opt]# cd k8s-install/
    [root@k8s-master k8s-install]# rz mandatory.yaml
    [root@k8s-master k8s-install]# mv mandatory.yaml ingress-nginx.yaml
    [root@k8s-master k8s-install]# vi ingress-nginx.yaml

    Two changes are made to mandatory.yaml. First, enable hostNetwork so the controller shares the host's network namespace: traffic hitting port 80 or 443 on the host goes straight to the ingress-nginx container, and the ingress-controller (essentially an nginx proxy) forwards it according to the Ingress resources. Second, add ingress=true to the nodeSelector. kubernetes.io/os: linux is a label every node carries, and because the controller is a Deployment with replicas: 1, every cluster restart could reschedule it onto a different node, so the traffic entry point would keep moving; the extra label pins the controller to a fixed node.
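
    A minimal sketch of the affected fragment of the Deployment in ingress-nginx.yaml after the two edits (field names follow the ingress-nginx 0.30.0 mandatory.yaml; surrounding fields are omitted):

    spec:
      template:
        spec:
          hostNetwork: true            # added: share the host network namespace, so host ports 80/443 reach the controller directly
          nodeSelector:
            kubernetes.io/os: linux
            ingress: "true"            # added: only schedule onto nodes labelled ingress=true
          containers:
          - name: nginx-ingress-controller
            # ... image, args and the remaining fields stay as in mandatory.yaml ...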

    On the node where the controller will run, check that ports 80 and 443 are not already in use. Since only the master node is deployed here, label the master with ingress=true:

    [root@k8s-master k8s-install]# kubectl label node k8s-master ingress=true

    Pull the image in advance

    [root@k8s-master k8s-install]# docker pull quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0

    Apply the manifest

    [root@k8s-master k8s-install]# kubectl create -f ingress-nginx.yaml
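
    A quick check that the controller is running on the labelled node and, thanks to hostNetwork, listening on the host's ports (a sketch; the ingress-nginx namespace comes from the 0.30.0 mandatory.yaml):

    [root@k8s-master k8s-install]# kubectl get pod -n ingress-nginx -o wide    # should be Running on k8s-master
    [root@k8s-master k8s-install]# ss -lntp | grep -E ':(80|443) '             # nginx should be listening on the host's 80/443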

    7.3 Verify

    [root@k8s-master nginx]# vi nginx.yaml

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-dp
      labels:
        app: nginx-dp
    spec:
      selector:
        matchLabels:
          app: nginx-dp
      template:
        metadata:
          labels:
            app: nginx-dp
        spec:
          containers:
          - name: nginx-dp
            image: nginx:latest
            ports:
            - containerPort: 80
    ---
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: nginx-dp
      name: nginx-dp
      namespace: default
    spec:
      ports:
      - port: 9000
        protocol: TCP
        targetPort: 80
      selector:
        app: nginx-dp
      type: ClusterIP
    ---
    apiVersion: extensions/v1beta1
    kind: Ingress
    metadata:
      name: nginx
      namespace: default
    spec:
      rules:
      - host: nginx.default.com
        http:
          paths:
          - path: /
            backend:
              serviceName: nginx-dp
              servicePort: 9000
    status:
      loadBalancer: {}

    [root@k8s-master nginx]# kubectl apply -f nginx.yaml
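
    To verify the whole chain end to end, point the test hostname at the node running the ingress controller and request it over port 80 (a sketch; 192.168.18.7 is the master IP used throughout this article):

    [root@k8s-master nginx]# echo "192.168.18.7 nginx.default.com" >> /etc/hosts
    [root@k8s-master nginx]# curl -I http://nginx.default.com/    # should return HTTP/1.1 200 OK served by the nginx-dp pod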

    VIII. Clean up and reset the environment

    If you run into other problems during deployment, you can reset with the following commands:

    kubeadm reset
    ifconfig cni0 down && ip link delete cni0
    ifconfig flannel.1 down && ip link delete flannel.1
    rm -rf /var/lib/cni/
  • Original article: https://blog.csdn.net/Jerry00713/article/details/126440061