A ConfigMap stores configuration data as key-value pairs.
The ConfigMap resource provides a way to inject configuration data into Pods, decoupling images from configuration files so that images remain portable and reusable.
Typical use cases: populating environment variables, setting command-line arguments inside containers, and populating configuration files in a volume.
1. There are four ways to create a ConfigMap: from literal values, from a file, from a directory, or from a hand-written ConfigMap YAML file.
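(Tip, not part of the original session: on this kubectl version any of the imperative commands below can emit the manifest instead of creating the object, e.g. kubectl create configmap my-config --from-literal=key1=config1 --dry-run=client -o yaml)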
- ## Create from literal values (key-value pairs):
- [root@node22 ~]# mkdir configmap
- [root@node22 ~]# cd configmap/
- [root@node22 configmap]# ls
- [root@node22 configmap]# kubectl create configmap my-config --from-literal=key1=config1 --from-literal=key2=config2
- configmap/my-config created
- [root@node22 configmap]# kubectl get cm
- NAME DATA AGE
- kube-root-ca.crt 1 3d5h
- my-config 2 8s
- [root@node22 configmap]# kubectl describe cm my-config
- Name: my-config
- Namespace: default
- Labels: <none>
- Annotations: <none>
-
- Data
- ====
- key2:
- ----
- config2
- key1:
- ----
- config1
-
- BinaryData
- ====
-
- Events: <none>
-
- ## Create from a file: the file name becomes the key, the file content the value
- [root@node22 configmap]# kubectl create configmap my-config-2 --from-file=/etc/resolv.conf
- configmap/my-config-2 created
- [root@node22 configmap]# kubectl describe cm my-config-2
- Name: my-config-2
- Namespace: default
- Labels: <none>
- Annotations: <none>
-
- Data
- ====
- resolv.conf:
- ----
- # Generated by NetworkManager
- nameserver 114.114.114.114
-
-
- BinaryData
- ====
-
- Events: <none>
-
- ## Create from a directory: each file name becomes a key, its content the value
- [root@node22 configmap]# kubectl create configmap my-config-3 --from-file=test
- configmap/my-config-3 created
- [root@node22 configmap]# kubectl describe cm my-config-3
- Name: my-config-3
- Namespace: default
- Labels: <none>
- Annotations: <none>
-
- Data
- ====
- passwd:
- ----
- root:x:0:0:root:/root:/bin/bash
- bin:x:1:1:bin:/bin:/sbin/nologin
- daemon:x:2:2:daemon:/sbin:/sbin/nologin
- adm:x:3:4:adm:/var/adm:/sbin/nologin
- lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin
- sync:x:5:0:sync:/sbin:/bin/sync
- shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown
- fstab:
- ----
-
- #
- # /etc/fstab
- # Created by anaconda on Fri Aug 5 17:48:43 2022
- #
- # Accessible filesystems, by reference, are maintained under '/dev/disk'
- # See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
- #
- /dev/mapper/rhel-root / xfs defaults 0 0
- UUID=d319ed7a-9c18-4cda-b34a-9e2399f1f1fc /boot xfs defaults 0 0
- #/dev/mapper/rhel-swap swap swap defaults 0 0
-
-
- BinaryData
- ====
-
- Events: <none>
-
- ## Create from a hand-written ConfigMap YAML file
- [root@node22 configmap]# vim cm1.yaml
- apiVersion: v1
- kind: ConfigMap
- metadata:
- name: cm1-config
- data:
- db_host: "192.168.0.1"
- db_port: "3306"
- [root@node22 configmap]# kubectl apply -f cm1.yaml
- configmap/cm1-config created
- [root@node22 configmap]# kubectl get cm
- NAME DATA AGE
- cm1-config 2 11s
- kube-root-ca.crt 1 3d5h
- my-config 2 9m45s
- my-config-2 1 5m54s
- my-config-3 2 3m23s
- [root@node22 configmap]# kubectl describe cm cm1-config
- Name: cm1-config
- Namespace: default
- Labels: <none>
- Annotations: <none>
-
- Data
- ====
- db_port:
- ----
- 3306
- db_host:
- ----
- 192.168.0.1
-
- BinaryData
- ====
-
- Events: <none>
2. How to consume a ConfigMap:
- 1). Pass values to the Pod directly as environment variables
- [root@node22 configmap]# vim pod.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: pod1
- spec:
- containers:
- - name: pod1
- image: busybox
- command: ["/bin/sh", "-c", "env"]
- env:
- - name: key1
- valueFrom:
- configMapKeyRef:
- name: cm1-config
- key: db_host
- - name: key2
- valueFrom:
- configMapKeyRef:
- name: cm1-config
- key: db_port
- restartPolicy: Never
- [root@node22 configmap]# kubectl apply -f pod.yaml
- pod/pod1 created
- [root@node22 configmap]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- demo 1/1 Running 1 (32m ago) 32m
- myapp-1-6666f57846-n8zgn 1/1 Running 0 87m
- pod1 0/1 Completed 0 101s
- [root@node22 configmap]# kubectl delete pod pod1
- pod "pod1" deleted
- [root@node22 configmap]# kubectl delete pod demo --force
- [root@node22 configmap]# kubectl delete deployments.apps myapp-1
- [root@node22 configmap]# kubectl -n test delete pod demo --force
-
- 2). Use the values on the container's command line
- ## Use the ConfigMap to set command-line arguments
- [root@node22 configmap]# vim pod2.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: pod1
- spec:
- containers:
- - name: pod1
- image: busybox
- command: ["/bin/sh", "-c", "echo $(db_host) $(db_port)"]
- envFrom:
- - configMapRef:
- name: cm1-config
- restartPolicy: Never
- [root@node22 configmap]# kubectl apply -f pod2.yaml
- pod/pod1 created
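- (Expected output, not captured in the original session: given cm1-config, kubectl logs pod1 prints `192.168.0.1 3306`; the kubelet expands the $(db_host) and $(db_port) references from the container's environment before the shell runs.)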
-
- 3). Mount the ConfigMap into the Pod as a volume
- ## Consume the ConfigMap through a data volume
- [root@node22 configmap]# vim pod3.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: pod2
- spec:
- containers:
- - name: pod2
- image: busybox
- command: ["/bin/sh", "-c", "cat /config/db_host"]
- volumeMounts:
- - name: config-volume
- mountPath: /config
- volumes:
- - name: config-volume
- configMap:
- name: cm1-config
- restartPolicy: Never
- [root@node22 configmap]# kubectl apply -f pod3.yaml
- pod/pod2 created
- [root@node22 configmap]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- pod2 0/1 Completed 0 57s
- [root@node22 configmap]# kubectl logs pod2
- 192.168.0.1
Example: ConfigMap hot update
[root@node22 configmap]# vim nginx.conf
server {
listen 8000;
server_name _;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
}
[root@node22 configmap]# kubectl create configmap nginxconf --from-file=nginx.conf
configmap/nginxconf created
[root@node22 configmap]# kubectl get cm
NAME DATA AGE
cm1-config 2 28m
kube-root-ca.crt 1 3d6h
my-config 2 37m
my-config-2 1 33m
my-config-3 2 31m
nginxconf 1 15s
[root@node22 configmap]# kubectl describe cm nginxconf
Name: nginxconf
Namespace: default
Labels: <none>
Annotations: <none>
Data
====
nginx.conf:
----
server {
listen 8000;
server_name _;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
}
BinaryData
====
Events: <none>
[root@node22 configmap]# vim nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: my-nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx
volumeMounts:
- name: config-volume
mountPath: /etc/nginx/conf.d
volumes:
- name: config-volume
configMap:
name: nginxconf
[root@node22 configmap]# kubectl apply -f nginx.yaml
deployment.apps/my-nginx created
[root@node22 ~]# cd calico/
[root@node22 calico]# kubectl delete -f networkpolicy.yaml
networkpolicy.networking.k8s.io "test-network-policy" deleted
[root@node22 calico]# cd
[root@node22 ~]# cd configmap/
[root@node22 configmap]# curl 10.244.144.76
curl: (7) Failed connect to 10.244.144.76:80; Connection refused
[root@node22 configmap]# kubectl edit cm nginxconf
## Edit the nginxconf ConfigMap, changing the listen port to 8080
[root@node22 configmap]# kubectl exec my-nginx-7b84dc948c-f6vlb -- cat /etc/nginx/conf.d/nginx.conf
server {
listen 8080;
server_name _;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
}
[root@node22 configmap]# curl 10.244.144.76:8080
curl: (7) Failed connect to 10.244.144.76:8080; Connection refused
## Although the file inside the container is updated (after a short delay), the new setting does not take effect by itself; a rolling update of the Pods must be triggered manually so that nginx reloads nginx.conf
[root@node22 configmap]# kubectl delete pod my-nginx-7b84dc948c-qmq9h
pod "my-nginx-7b84dc948c-qmq9h" deleted
[root@node22 configmap]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
my-nginx-7b84dc948c-qxgtb 1/1 Running 0 5s 10.244.144.80 node33 <none> <none>
[root@node22 configmap]# curl 10.244.144.80:8080
Welcome to nginx!
If you see this page, the nginx web server is successfully installed and working. Further configuration is required.
(default welcome page; HTML tags stripped in this capture)
Change the port back to 8000:
[root@node22 configmap]# kubectl edit cm nginxconf
[root@node22 configmap]# kubectl patch deployments.apps my-nginx --patch '{"spec": {"template": {"metadata": {"annotations": {"version/config": "20220827"}}}}}'
deployment.apps/my-nginx patched
[root@node22 configmap]# kubectl get all
NAME READY STATUS RESTARTS AGE
pod/my-nginx-645f5bbfd6-lrmtp 1/1 Running 0 10s
NAME                  TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
service/kubernetes    ClusterIP   10.96.0.1        <none>        443/TCP   3d6h
service/my-svc        ClusterIP   10.108.185.37    <none>        80/TCP    9h
service/web-service   ClusterIP   10.109.238.119   <none>        80/TCP    38h
NAME                       READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/my-nginx 1/1 1 1 30m
NAME DESIRED CURRENT READY AGE
replicaset.apps/my-nginx-645f5bbfd6 1 1 1 10s
replicaset.apps/my-nginx-7b84dc948c 0 0 0 30m
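(Note, not part of the original session: on this kubectl version the annotation patch above is equivalent to kubectl rollout restart deployment/my-nginx, which itself bumps a pod-template annotation to trigger the same rolling update.)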
The Secret object type is used to hold sensitive information such as passwords, OAuth tokens, and SSH keys; putting such data in a Secret is safer and more flexible than placing it in a Pod definition or a container image.
A Pod can use a Secret in two ways: as files in a volume mounted into one or more of its containers, or by the kubelet when pulling images for the Pod.
Secret types:
Service Account: Kubernetes automatically creates Secrets containing credentials for accessing the API, and automatically modifies Pods to use them.
Opaque: stores data base64-encoded; the original value can be recovered with base64 --decode, so the protection is weak. The values of an Opaque Secret are simply base64-encoded strings.
kubernetes.io/dockerconfigjson: stores authentication information for a docker registry.
- 1. ## When a ServiceAccount is created, Kubernetes creates a corresponding secret by default, and that secret is automatically mounted into Pods at /var/run/secrets/kubernetes.io/serviceaccount
- [root@node22 ~]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- my-nginx-645f5bbfd6-lrmtp 1/1 Running 1 (49m ago) 13h
- [root@node22 ~]# kubectl describe pod
- Mounts:
- /etc/nginx/conf.d from config-volume (rw)
- /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-r9k7t (ro)
- [root@node22 ~]# kubectl exec my-nginx-645f5bbfd6-lrmtp -- ls /var/run/secrets/kubernetes.io/serviceaccount
- ca.crt
- namespace
- token
-
- ## Every namespace has a default ServiceAccount object named default
- [root@node22 ~]# kubectl get sa
- NAME SECRETS AGE
- default 1 3d20h
- [root@node22 ~]# kubectl get sa -n test
- NAME SECRETS AGE
- default 1 16h
-
- 2. ## A ServiceAccount holds a token secret that can be mounted into a Pod like a volume; when the Pod starts, this secret is mounted automatically at the designated path and is used to authenticate processes in the Pod to the API server
- [root@node22 ~]# kubectl get pod my-nginx-645f5bbfd6-lrmtp -o yaml
- spec:
- containers:
- - image: nginx
- imagePullPolicy: Always
- name: nginx
- resources: {}
- terminationMessagePath: /dev/termination-log
- terminationMessagePolicy: File
- volumeMounts:
- - mountPath: /etc/nginx/conf.d
- name: config-volume
- - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
- name: kube-api-access-r9k7t
- readOnly: true
-
- 3. ## Create a secret from files
- [root@node22 ~]# cd secret/
- [root@node22 secret]# echo -n 'admin' > ./username.txt
- [root@node22 secret]# echo -n 'westos' > ./password.txt
- [root@node22 secret]# kubectl create secret generic db-user-pass --from-file=./username.txt --from-file=./password.txt
- secret/db-user-pass created
- [root@node22 secret]# kubectl get secrets
- NAME TYPE DATA AGE
- basic-auth Opaque 1 23h
- db-user-pass Opaque 2 28s
- default-token-pf6bb kubernetes.io/service-account-token 3 3d20h
- tls-secret kubernetes.io/tls 2 23h
- [root@node22 secret]# kubectl describe secrets db-user-pass
- Name: db-user-pass
- Namespace: default
- Labels: <none>
- Annotations: <none>
-
- Type: Opaque
-
- Data
- ====
- password.txt: 6 bytes
- username.txt: 5 bytes
- By default, kubectl get and kubectl describe hide the secret values for safety; they can be inspected as shown below.
- If a password contains special characters, escape them with \, e.g.:
- kubectl create secret generic dev-db-secret --from-literal=username=devuser --from-literal=password=S\!B\\*d\$zDs
-
- [root@node22 secret]# kubectl get secrets db-user-pass -o yaml
- apiVersion: v1
- data:
- password.txt: d2VzdG9z
- username.txt: YWRtaW4=
- kind: Secret
- metadata:
- creationTimestamp: "2022-08-28T09:59:53Z"
- name: db-user-pass
- namespace: default
- resourceVersion: "187151"
- uid: 2630512d-c437-4d41-bdc8-a4748df06835
- type: Opaque
- The values can be decoded as follows:
- [root@node22 secret]# echo d2VzdG9z | base64 -d
- westos
- [root@node22 secret]# echo YWRtaW4= | base64 -d
- admin
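- (A one-liner that avoids copying the encoded value by hand, not from the original session; note the escaped dot in the key name:
- kubectl get secrets db-user-pass -o jsonpath='{.data.password\.txt}' | base64 -d)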
-
- 4. ## Create a secret from a YAML file
- [root@node22 secret]# vim secret.yaml
- apiVersion: v1
- kind: Secret
- metadata:
- name: mysecret
- type: Opaque
- data:
- username: YWRtaW4=
- password: d2VzdG9z
- [root@node22 secret]# kubectl apply -f secret.yaml
- secret/mysecret created
- [root@node22 secret]# kubectl get secrets mysecret -o yaml
- apiVersion: v1
- data:
- password: d2VzdG9z
- username: YWRtaW4=
- kind: Secret
- metadata:
- annotations:
- kubectl.kubernetes.io/last-applied-configuration: |
- {"apiVersion":"v1","data":{"password":"d2VzdG9z","username":"YWRtaW4="},"kind":"Secret","metadata":{"annotations":{},"name":"mysecret","namespace":"default"},"type":"Opaque"}
- creationTimestamp: "2022-08-28T10:08:23Z"
- name: mysecret
- namespace: default
- resourceVersion: "188013"
- uid: cbba6019-bad6-4e95-8acb-b53b9d022bea
- type: Opaque
-
- ## Mount the secret into a Pod through a volume
- [root@node22 secret]# vim pod.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: mysecret
- spec:
- containers:
- - name: nginx
- image: nginx
- volumeMounts:
- - name: secrets
- mountPath: "/secret"
- readOnly: true
- volumes:
- - name: secrets
- secret:
- secretName: mysecret
- [root@node22 secret]# kubectl apply -f pod.yaml
- pod/mysecret created
- [root@node22 secret]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- my-nginx-645f5bbfd6-lrmtp 1/1 Running 1 (75m ago) 14h
- mysecret 1/1 Running 0 10s
- [root@node22 secret]# kubectl exec mysecret -- ls /secret
- password
- username
-
- 5. ## Map secret keys to specific paths
- [root@node22 secret]# vim pod2.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: mysecret
- spec:
- containers:
- - name: nginx
- image: nginx
- volumeMounts:
- - name: secrets
- mountPath: "/secret"
- readOnly: true
- volumes:
- - name: secrets
- secret:
- secretName: mysecret
- items:
- - key: username
- path: my-group/my-username
- [root@node22 secret]# kubectl apply -f pod2.yaml
- pod/mysecret created
- [root@node22 secret]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- my-nginx-645f5bbfd6-lrmtp 1/1 Running 1 (79m ago) 14h
- mysecret 1/1 Running 0 6s
- [root@node22 secret]# kubectl exec mysecret -- ls /secret
- my-group
- [root@node22 secret]# kubectl exec mysecret -- ls /secret/my-group
- my-username
- [root@node22 secret]# kubectl exec mysecret -- cat /secret/my-group/my-username
- admin
-
- ## kubernetes.io/dockerconfigjson stores authentication information for a docker registry
- [root@node22 secret]# vim docker.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: mypod
- spec:
- containers:
- - name: game2048
- image: reg.westos.org/westos/game2048
- #imagePullSecrets:      ## commented out: no registry authentication yet
- # - name: myregistrykey
- [root@node22 secret]# kubectl apply -f docker.yaml
- pod/mypod created
- [root@node22 secret]# kubectl get pod      ## the image pull fails
- NAME READY STATUS RESTARTS AGE
- mypod 0/1 ImagePullBackOff 0 14s
- [root@node22 secret]# kubectl create secret docker-registry myregistrykey --docker-server=reg.westos.org --docker-username=admin --docker-password=westos --docker-email=zcx0216@westos.org
- secret/myregistrykey created      ## registry credentials created
- [root@node22 secret]# kubectl get secrets
- NAME TYPE DATA AGE
- basic-auth Opaque 1 23h
- db-user-pass Opaque 2 28m
- default-token-pf6bb kubernetes.io/service-account-token 3 3d21h
- myregistrykey kubernetes.io/dockerconfigjson 1 40s      ## the registry credentials
- mysecret Opaque 2 20m
- tls-secret kubernetes.io/tls 2 23h
- [root@node22 secret]# vim docker.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: mypod
- spec:
- containers:
- - name: game2048
- image: reg.westos.org/westos/game2048
- imagePullSecrets:
- - name: myregistrykey
- [root@node22 secret]# kubectl apply -f docker.yaml
- pod/mypod created
- [root@node22 secret]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- mypod 1/1 Running 0 6s
- [root@node22 secret]# kubectl delete -f docker.yaml
- pod "mypod" deleted
Files in a container are stored on disk only temporarily, which creates problems for certain applications: first, when a container crashes, the kubelet restarts it and the files in the container are lost, because the container is rebuilt in a clean state; second, when several containers run in one Pod, they often need to share files. Kubernetes abstracts the Volume object to solve both problems.
A Kubernetes volume has an explicit lifetime, the same as the Pod that encloses it; a volume therefore outlives any container running in the Pod, and data is preserved across container restarts. When a Pod ceases to exist, the volume ceases to exist with it. Perhaps more importantly, Kubernetes supports many types of volumes, and a Pod can use any number of them simultaneously.
Volumes cannot be mounted inside other volumes, nor hard-linked to other volumes; each container in the Pod must independently specify where each volume is mounted.
Kubernetes supports the following volume types:
awsElasticBlockStore, azureDisk, azureFile, cephfs, cinder, configMap, csi, downwardAPI, emptyDir, fc (fibre channel), flexVolume, flocker, gcePersistentDisk, gitRepo (deprecated), glusterfs, hostPath, iscsi, local, nfs, persistentVolumeClaim, projected, portworxVolume, quobyte, rbd, scaleIO, secret, storageos, vsphereVolume
1). emptyDir volumes:
An emptyDir volume is first created when a Pod is assigned to a node, and it exists as long as the Pod runs on that node; as the name says, the volume is initially empty. The containers in the Pod may mount the emptyDir volume at the same or different paths, but they all read and write the same files in it. When the Pod is removed from the node for any reason, the data in the emptyDir is deleted permanently.
Use cases for emptyDir: scratch space, e.g. for a disk-based merge sort; checkpointing a long computation so it can recover from a crash; holding files that a content-manager container fetches while a web-server container serves them.
By default, emptyDir volumes are stored on whatever medium backs the node, which may be disk, SSD, or network storage, depending on your environment. You can, however, set emptyDir.medium to "Memory" to tell Kubernetes to mount a tmpfs (RAM-backed filesystem) instead. tmpfs is very fast, but unlike disk it is cleared on node reboot, and everything you write counts against the container's memory consumption and is subject to its memory limit.
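(A quick check once the vol1 pod below is running, not part of the original session: kubectl exec vol1 -c vm1 -- df -h /cache should report a tmpfs filesystem when medium: Memory is set; the dd test below probes the sizeLimit.)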
- [root@node22 secret]# cd
- [root@node22 ~]# mkdir volumes
- [root@node22 ~]# cd volumes/
- [root@node22 volumes]# vim vol1.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: vol1
- spec:
- containers:
- - image: busyboxplus
- name: vm1
- command: ["sleep", "300"]
- volumeMounts:
- - mountPath: /cache
- name: cache-volume
- - name: vm2
- image: nginx
- volumeMounts:
- - mountPath: /usr/share/nginx/html
- name: cache-volume
- volumes:
- - name: cache-volume
- emptyDir:
- medium: Memory
- sizeLimit: 100Mi
- [root@node22 volumes]# kubectl apply -f vol1.yaml
- pod/vol1 created
- [root@node22 volumes]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- vol1 2/2 Running 0 4s
- [root@node22 volumes]# kubectl exec -it vol1 -c vm1 -- sh
- / # ls
- bin cache dev etc home lib lib64 linuxrc media mnt opt proc root run sbin sys tmp usr var
- / # cd cache/
- /cache # ls
- /cache # echo www.westos.org > index.html
- /cache # curl localhost
- www.westos.org
- /cache # dd if=/dev/zero of=bigfile bs=1M count=100
- 100+0 records in
- 99+1 records out
- /cache # dd if=/dev/zero of=bigfile bs=1M count=101
- dd: writing 'bigfile': No space left on device
- 101+0 records in
- 99+1 records out
-
2). hostPath volumes
A hostPath volume mounts a file or directory from the host node's filesystem into the Pod. This is not something most Pods need, but it offers a powerful escape hatch for some applications.
Some uses of hostPath: running a container that needs access to Docker internals, mounting /var/lib/docker; running cAdvisor in a container, mounting /sys via hostPath; letting a Pod specify whether a given hostPath should exist before the Pod runs, whether it should be created, and in what form.
Besides the required path property, a type can optionally be specified for a hostPath volume (the possible values are listed after this section).
Drawbacks of hostPath: Pods with identical configuration (e.g. created from a podTemplate) may behave differently on different nodes because the files on each node differ; when Kubernetes adds resource-aware scheduling as planned, such scheduling cannot account for resources used by hostPath; files or directories created on the underlying host are writable only by root, so you must either run the process as root in a privileged container or change the permissions on the host so the container can write to the hostPath volume.
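(For reference, the type values defined for hostPath are: "" (no check), DirectoryOrCreate, Directory, FileOrCreate, File, Socket, CharDevice, BlockDevice; the example below uses DirectoryOrCreate.)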
- [root@node22 volumes]# kubectl delete -f vol1.yaml
- pod "vol1" deleted
- [root@node22 volumes]# vim hostpath.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: test-pd
- spec:
- containers:
- - image: nginx
- name: test-container
- volumeMounts:
- - mountPath: /usr/share/nginx/html
- name: test-volume
- volumes:
- - name: test-volume
- hostPath:
- path: /data
- type: DirectoryOrCreate
- [root@node22 volumes]# kubectl apply -f hostpath.yaml      ## apply the manifest
- pod/test-pd created
- [root@node22 volumes]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- test-pd 1/1 Running 0 10s
- [root@node22 volumes]# kubectl get pod -o wide      ## scheduled to node33
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- test-pd 1/1 Running 0 62s 10.244.144.90 node33 <none> <none>
- [root@node22 volumes]# curl 10.244.144.90      ## 403 at first; creating an index page on node33 fixes it
- <html>
- <head><title>403 Forbidden</title></head>
- <body>
- <center><h1>403 Forbidden</h1></center>
- <hr><center>nginx/1.21.5</center>
- </body>
- </html>
-
- [root@node33 ~]# cd /data
- [root@node33 data]# ls
- [root@node33 data]# pwd
- /data
- [root@node33 data]# echo www.westos.org > index.html
- Now the page is served correctly:
- [root@node22 volumes]# curl 10.244.144.90
- www.westos.org
-
- If the pod is moved to node44:
- [root@node44 ~]# mkdir /data
- [root@node44 ~]# cd /data/
- [root@node44 data]# ls
- [root@node44 data]# echo bbs.westos.org > index.html
-
- [root@node22 volumes]# kubectl delete -f hostpath.yaml
- pod "test-pd" deleted
- [root@node22 volumes]# vim hostpath.yaml      ## pin the pod to node44
- apiVersion: v1
- kind: Pod
- metadata:
- name: test-pd
- spec:
- containers:
- - image: nginx
- name: test-container
- volumeMounts:
- - mountPath: /usr/share/nginx/html
- name: test-volume
- volumes:
- - name: test-volume
- hostPath:
- path: /data
- type: DirectoryOrCreate
- nodeName: node44
- [root@node22 volumes]# kubectl apply -f hostpath.yaml
- pod/test-pd created
- [root@node22 volumes]# kubectl get pod -o wide      ## now running on node44
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- test-pd 1/1 Running 0 38s 10.244.214.130 node44 <none> <none>
- [root@node22 volumes]# curl 10.244.214.130      ## now serves node44's page
- bbs.westos.org
- [root@node22 volumes]# kubectl delete -f hostpath.yaml
- pod "test-pd" deleted
3). NFS volumes
Install nfs-utils on every node:
- [root@node22 volumes]# yum install -y nfs-utils
- [root@node33 data]# yum install -y nfs-utils
- [root@node44 data]# yum install -y nfs-utils
- [root@node11 harbor]# yum install -y nfs-utils      ## also on the registry host, which acts as the NFS server
- [root@node11 harbor]# cat /etc/exports
- /nfsdata *(rw,no_root_squash)
- [root@node11 harbor]# mkdir /nfsdata
- [root@node11 harbor]# cd /nfsdata/
- [root@node11 nfsdata]# chmod 777 .
- [root@node11 nfsdata]# ll -d .
- drwxrwxrwx 2 root root 6 Aug 28 20:40 .
- [root@node11 nfsdata]# systemctl start nfs
- [root@node11 nfsdata]# echo www.westos.org > index.html
-
-
- [root@node22 volumes]# showmount -e 192.168.0.11
- Export list for 192.168.0.11:
- /nfsdata *
- [root@node22 volumes]# vim nfs.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: test-pd
- spec:
- containers:
- - image: nginx
- name: test-container
- volumeMounts:
- - mountPath: /usr/share/nginx/html
- name: test-volume
- volumes:
- - name: test-volume
- nfs:
- server: 192.168.0.11
- path: /nfsdata
- [root@node22 volumes]# kubectl apply -f nfs.yaml
- pod/test-pd created
- [root@node22 volumes]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- test-pd 1/1 Running 0 20s 10.244.144.91 node33 <none> <none>
- [root@node22 volumes]# curl 10.244.144.91
- www.westos.org
- [root@node22 volumes]# kubectl delete -f nfs.yaml      ## delete, then move the pod to another node
- pod "test-pd" deleted
- [root@node22 volumes]# vim nfs.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: test-pd
- spec:
- containers:
- - image: nginx
- name: test-container
- volumeMounts:
- - mountPath: /usr/share/nginx/html
- name: test-volume
- volumes:
- - name: test-volume
- nfs:
- server: 192.168.0.11
- path: /nfsdata
- nodeName: node44      ## pin to node44
- [root@node22 volumes]# kubectl apply -f nfs.yaml
- pod/test-pd created
- [root@node22 volumes]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- test-pd 1/1 Running 0 39s
- [root@node22 volumes]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- test-pd 1/1 Running 0 2m17s 10.244.214.131 node44 <none> <none>
- [root@node22 volumes]# curl 10.244.214.131
- www.westos.org
4). PersistentVolumes (PV) and PersistentVolumeClaims (PVC)
A PersistentVolume is a piece of networked storage in the cluster provisioned by an administrator. Like a node, a PV is a cluster resource; like a Volume, it is a volume plugin, but its lifecycle is independent of any Pod that uses it. The PV API object captures the implementation details of NFS, iSCSI, or cloud storage systems.
A PersistentVolumeClaim is a user's request for storage. It is analogous to a Pod: a Pod consumes node resources, a PVC consumes PV resources. A Pod can request specific resources (CPU and memory); a PVC can request a specific size and access modes (e.g. mounted once read-write, or many times read-only).
There are two ways PVs are provisioned: statically and dynamically.
Static PVs: the cluster administrator creates a number of PVs carrying the details of the real storage; they are available to cluster users and live in the Kubernetes API, ready for consumption.
Dynamic PVs: when none of the administrator's static PVs match a user's PVC, the cluster may try to provision a volume specifically for that PVC; this provisioning is based on a StorageClass.
A PVC binds to a PV one-to-one; if no matching PV is found, the PVC remains unbound indefinitely.
Using a PV:
A Pod uses a PVC like any volume: the cluster inspects the PVC, finds the bound PV, and maps it into the Pod. For PVs that support multiple access modes, the user specifies the desired mode. Once a user has a bound PVC, the PV belongs to that user for as long as needed; the user schedules Pods and accesses the PV by referencing the PVC in the Pod's volumes block.
Releasing a PV:
When users are done with a PV, they delete the PVC object through the API. Once the PVC is deleted, the PV is considered "released", but it is not yet available to another PVC: the previous claimant's data is still on the PV and must be handled according to the reclaim policy.
Reclaiming a PV:
The reclaim policy tells the cluster what to do with a PV after it is released. Currently a PV can be Retained, Recycled, or Deleted. Retain allows the resource to be reclaimed manually; for PVs that support it, Delete removes both the PV object from Kubernetes and the associated external storage (AWS EBS, GCE PD, Azure Disk, or a Cinder volume). Dynamically provisioned volumes are always deleted.
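(A common manual step under the Retain policy, not used in this session: once a PV is Released, clearing its claim reference makes it Available again, e.g. kubectl patch pv <pv-name> -p '{"spec":{"claimRef":null}}'; the data on the backing storage is left untouched.)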
## Create PVs with different capacities and access modes
Access modes: ReadWriteOnce (RWO) == the volume can be mounted read-write by a single node; ReadOnlyMany (ROX) == the volume can be mounted read-only by many nodes; ReadWriteMany (RWX) == the volume can be mounted read-write by many nodes
Reclaim policies: Retain == keep, reclaim manually; Recycle == scrub, the data in the volume is deleted automatically; Delete == the associated storage asset (AWS EBS, GCE PD, Azure Disk, or OpenStack Cinder volume) is deleted
Currently only NFS and HostPath volumes support recycling; AWS EBS, GCE PD, Azure Disk, and OpenStack Cinder volumes support deletion
States: Available == free, not bound to any PVC; Bound == bound to a PVC; Released == the PVC was deleted but the PV has not yet been reclaimed by the cluster; Failed == automatic reclamation failed
- [root@node11 nfsdata]# mkdir pv1
- [root@node11 nfsdata]# mkdir pv2
- [root@node11 nfsdata]# mkdir pv3
-
- [root@node22 volumes]# vim pv.yaml
- apiVersion: v1
- kind: PersistentVolume
- metadata:
- name: pv0003
- spec:
- capacity:
- storage: 5Gi
- volumeMode: Filesystem
- accessModes:
- - ReadWriteOnce
- persistentVolumeReclaimPolicy: Recycle
- storageClassName: nfs
- nfs:
- path: /nfsdata/pv1
- server: 192.168.0.11
- [root@node22 volumes]# kubectl apply -f pv.yaml
- persistentvolume/pv0003 created
- [root@node22 volumes]# kubectl get pv
- NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
- pv0003 5Gi RWO Recycle Available nfs 7s
-
-
- ## Create a PVC; it is matched and bound to a suitable PV by the storage class, capacity request, and access modes defined in the file
- [root@node22 volumes]# vim pvc.yaml
- apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: pvc1
- spec:
- storageClassName: nfs
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 1Gi
- [root@node22 volumes]# kubectl apply -f pvc.yaml
- persistentvolumeclaim/pvc1 created
- [root@node22 volumes]# kubectl get pvc
- NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
- pvc1 Bound pv0003 5Gi RWO nfs 25s
- [root@node22 volumes]# kubectl get pv
- NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
- pv0003 5Gi RWO Recycle Bound default/pvc1 nfs 3m41s
-
- ## Mount the PV into a pod (through the PVC)
- [root@node22 volumes]# vim pod.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: test-pd
- spec:
- containers:
- - image: nginx
- name: nginx
- volumeMounts:
- - mountPath: /usr/share/nginx/html
- name: vol1
- volumes:
- - name: vol1
- persistentVolumeClaim:
- claimName: pvc1
- [root@node22 volumes]# kubectl delete pod test-pd
- pod "test-pd" deleted
- [root@node22 volumes]# kubectl apply -f pod.yaml
- pod/test-pd created
-
- [root@node11 nfsdata]# cd pv1
- [root@node11 pv1]# echo pv1 > index.html
-
- [root@node22 volumes]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- test-pd 1/1 Running 0 2m37s 10.244.144.92 node33 <none> <none>
- [root@node22 volumes]# curl 10.244.144.92
- pv1
-
- Create pv2 and pv3:
- [root@node22 volumes]# vim pv.yaml
- apiVersion: v1
- kind: PersistentVolume
- metadata:
- name: pv0003
- spec:
- capacity:
- storage: 5Gi
- volumeMode: Filesystem
- accessModes:
- - ReadWriteOnce
- persistentVolumeReclaimPolicy: Recycle
- storageClassName: nfs
- nfs:
- path: /nfsdata/pv1
- server: 192.168.0.11
-
- ---
- apiVersion: v1
- kind: PersistentVolume
- metadata:
- name: pv2
- spec:
- capacity:
- storage: 10Gi
- volumeMode: Filesystem
- accessModes:
- - ReadWriteMany
- persistentVolumeReclaimPolicy: Recycle
- storageClassName: nfs
- nfs:
- path: /nfsdata/pv2
- server: 192.168.0.11
-
- ---
- apiVersion: v1
- kind: PersistentVolume
- metadata:
- name: pv3
- spec:
- capacity:
- storage: 20Gi
- volumeMode: Filesystem
- accessModes:
- - ReadOnlyMany
- persistentVolumeReclaimPolicy: Recycle
- storageClassName: nfs
- nfs:
- path: /nfsdata/pv3
- server: 192.168.0.11
- [root@node22 volumes]# kubectl apply -f pv.yaml
- persistentvolume/pv0003 configured
- persistentvolume/pv2 created
- persistentvolume/pv3 created
- [root@node22 volumes]# kubectl get pv
- NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
- pv0003 5Gi RWO Recycle Bound default/pvc1 nfs 15m
- pv2 10Gi RWX Recycle Available nfs 12s
- pv3 20Gi ROX Recycle Available nfs 12s
-
- Create pvc2 and pvc3:
- [root@node22 volumes]# vim pvc.yaml
- apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: pvc1
- spec:
- storageClassName: nfs
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 1Gi
-
- ---
- apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: pvc2
- spec:
- storageClassName: nfs
- accessModes:
- - ReadWriteMany
- resources:
- requests:
- storage: 10Gi
-
- ---
- apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: pvc3
- spec:
- storageClassName: nfs
- accessModes:
- - ReadOnlyMany
- resources:
- requests:
- storage: 20Gi
- [root@node22 volumes]# kubectl apply -f pvc.yaml
- persistentvolumeclaim/pvc1 unchanged
- persistentvolumeclaim/pvc2 created
- persistentvolumeclaim/pvc3 created
- [root@node22 volumes]# kubectl get pvc
- NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
- pvc1 Bound pv0003 5Gi RWO nfs 14m
- pvc2 Bound pv2 10Gi RWX nfs 6s
- pvc3 Bound pv3 20Gi ROX nfs 6s
- [root@node22 volumes]# kubectl get pv
- NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
- pv0003 5Gi RWO Recycle Bound default/pvc1 nfs 18m
- pv2 10Gi RWX Recycle Bound default/pvc2 nfs 3m7s
- pv3 20Gi ROX Recycle Bound default/pvc3 nfs 3m7s
-
- Create test-pd-2:
- [root@node22 volumes]# cp pod.yaml pod2.yaml
- [root@node22 volumes]# vim pod2.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: test-pd-2
- spec:
- containers:
- - image: nginx
- name: nginx
- volumeMounts:
- - mountPath: /usr/share/nginx/html
- name: vol2
- volumes:
- - name: vol2
- persistentVolumeClaim:
- claimName: pvc2
- [root@node22 volumes]# kubectl apply -f pod2.yaml
- pod/test-pd-2 created
- [root@node22 volumes]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- test-pd 1/1 Running 0 11m
- test-pd-2 1/1 Running 0 15s
- [root@node22 volumes]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- test-pd 1/1 Running 0 12m 10.244.144.92 node33 <none> <none>
- test-pd-2 1/1 Running 0 93s 10.244.144.93 node33 <none> <none>
- [root@node11 pv1]# cd ..
- [root@node11 nfsdata]# cd pv2
- [root@node11 pv2]# echo pv2 > index.html
- [root@node22 volumes]# curl 10.244.144.93
- pv2
- [root@node22 volumes]# curl 10.244.144.92
- pv1
-
- Cleanup (when a pod is moved to another node, it simply re-attaches its PVC):
- [root@node22 volumes]# kubectl delete -f pod2.yaml
- pod "test-pd-2" deleted
- [root@node22 volumes]# kubectl delete -f pod.yaml
- pod "test-pd" deleted
- [root@node22 volumes]# kubectl get pvc
- NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
- pvc1 Bound pv0003 5Gi RWO nfs 25m
- pvc2 Bound pv2 10Gi RWX nfs 11m
- pvc3 Bound pv3 20Gi ROX nfs 11m
-
- ## Delete the PVCs: each previously bound PV goes Bound -> Released -> Available, and the data in the volume is scrubbed (the Recycle reclaim policy)
- [root@node22 volumes]# kubectl delete -f pvc.yaml
- persistentvolumeclaim "pvc1" deleted
- persistentvolumeclaim "pvc2" deleted
- persistentvolumeclaim "pvc3" deleted
- [root@node22 volumes]# kubectl get pv
- NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
- pv0003 5Gi RWO Recycle Available nfs 31m
- pv2 10Gi RWX Recycle Available nfs 16m
- pv3 20Gi ROX Recycle Available nfs 16m
- [root@node22 volumes]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- recycler-for-pv0003 0/1 Completed 0 89s
- [root@node22 volumes]# kubectl delete pod recycler-for-pv0003
- pod "recycler-for-pv0003" deleted
- [root@node22 volumes]# kubectl get pv
- NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
- pv0003 5Gi RWO Recycle Available nfs 33m
- pv2 10Gi RWX Recycle Available nfs 18m
- pv3 20Gi ROX Recycle Available nfs 18m
- [root@node22 volumes]# kubectl delete -f pv.yaml
- persistentvolume "pv0003" deleted
- persistentvolume "pv2" deleted
- persistentvolume "pv3" deleted
- [root@node22 volumes]# kubectl get pv
- No resources found
5). A StorageClass provides a way to describe classes of storage; different classes may map to different quality-of-service levels, backup policies, or other policies. Every StorageClass contains the fields provisioner, parameters, and reclaimPolicy, which are used when the StorageClass needs to dynamically provision a PersistentVolume.
StorageClass properties:
Provisioner: decides which volume plugin provisions PVs. The field is required; it may name an internal provisioner or an external one. External provisioner code lives at kubernetes-incubator/external-storage and includes NFS, Ceph, and others.
Reclaim Policy: the reclaimPolicy field sets the reclaim policy of the PersistentVolumes that get created, either Delete or Retain; the default is Delete.
More properties: https://kubernetes.io/zh/docs/concepts/storage/storage-classes/
NFS Client Provisioner is an automatic provisioner that uses NFS as its storage and creates PVs automatically to satisfy PVCs. It does not provide NFS storage itself; an external NFS server must already exist.
PVs are provisioned (on the NFS server) under the name ${namespace}-${pvcName}-${pvName}; when a PV is reclaimed, the directory is renamed archived-${namespace}-${pvcName}-${pvName}. Source: external-storage/nfs-client at master · kubernetes-retired/external-storage · GitHub
- ## Configure RBAC: write the YAML files, apply them, and create the namespace they reference
- [root@node22 ~]# mkdir nfs
- [root@node22 ~]# cd nfs
- [root@node22 nfs]# vim deployment.yaml
- [root@node22 nfs]# kubectl create namespace nfs-client-provisioner
- namespace/nfs-client-provisioner created
- [root@node22 nfs]# vim deployment.yaml
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: nfs-client-provisioner
- labels:
- app: nfs-client-provisioner
- namespace: nfs-client-provisioner
- spec:
- replicas: 1
- strategy:
- type: Recreate
- selector:
- matchLabels:
- app: nfs-client-provisioner
- template:
- metadata:
- labels:
- app: nfs-client-provisioner
- spec:
- serviceAccountName: nfs-client-provisioner
- containers:
- - name: nfs-client-provisioner
- image: sig-storage/nfs-subdir-external-provisioner:v4.0.2
- volumeMounts:
- - name: nfs-client-root
- mountPath: /persistentvolumes
- env:
- - name: PROVISIONER_NAME
- value: k8s-sigs.io/nfs-subdir-external-provisioner
- - name: NFS_SERVER
- value: 192.168.0.11
- - name: NFS_PATH
- value: /nfsdata
- volumes:
- - name: nfs-client-root
- nfs:
- server: 192.168.0.11
- path: /nfsdata
- [root@node22 nfs]# vim rbac.yaml
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: nfs-client-provisioner
- # replace with namespace where provisioner is deployed
- namespace: nfs-client-provisioner
- ---
- kind: ClusterRole
- apiVersion: rbac.authorization.k8s.io/v1
- metadata:
- name: nfs-client-provisioner-runner
- rules:
- - apiGroups: [""]
- resources: ["nodes"]
- verbs: ["get", "list", "watch"]
- - apiGroups: [""]
- resources: ["persistentvolumes"]
- verbs: ["get", "list", "watch", "create", "delete"]
- - apiGroups: [""]
- resources: ["persistentvolumeclaims"]
- verbs: ["get", "list", "watch", "update"]
- - apiGroups: ["storage.k8s.io"]
- resources: ["storageclasses"]
- verbs: ["get", "list", "watch"]
- - apiGroups: [""]
- resources: ["events"]
- verbs: ["create", "update", "patch"]
- ---
- kind: ClusterRoleBinding
- apiVersion: rbac.authorization.k8s.io/v1
- metadata:
- name: run-nfs-client-provisioner
- subjects:
- - kind: ServiceAccount
- name: nfs-client-provisioner
- # replace with namespace where provisioner is deployed
- namespace: nfs-client-provisioner
- roleRef:
- kind: ClusterRole
- name: nfs-client-provisioner-runner
- apiGroup: rbac.authorization.k8s.io
- ---
- kind: Role
- apiVersion: rbac.authorization.k8s.io/v1
- metadata:
- name: leader-locking-nfs-client-provisioner
- # replace with namespace where provisioner is deployed
- namespace: nfs-client-provisioner
- rules:
- - apiGroups: [""]
- resources: ["endpoints"]
- verbs: ["get", "list", "watch", "create", "update", "patch"]
- ---
- kind: RoleBinding
- apiVersion: rbac.authorization.k8s.io/v1
- metadata:
- name: leader-locking-nfs-client-provisioner
- # replace with namespace where provisioner is deployed
- namespace: nfs-client-provisioner
- subjects:
- - kind: ServiceAccount
- name: nfs-client-provisioner
- # replace with namespace where provisioner is deployed
- namespace: nfs-client-provisioner
- roleRef:
- kind: Role
- name: leader-locking-nfs-client-provisioner
- apiGroup: rbac.authorization.k8s.io
- [root@node22 nfs]# kubectl apply -f rbac.yaml
- serviceaccount/nfs-client-provisioner created
- clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
- clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
- role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
- rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
- [root@node22 nfs]# kubectl apply -f deployment.yaml
- deployment.apps/nfs-client-provisioner created
- [root@node22 nfs]# kubectl -n nfs-client-provisioner get all
- NAME READY STATUS RESTARTS AGE
- pod/nfs-client-provisioner-784f85c9-mz5nx 1/1 Running 0 23s
-
- NAME READY UP-TO-DATE AVAILABLE AGE
- deployment.apps/nfs-client-provisioner 1/1 1 1 23s
-
- NAME DESIRED CURRENT READY AGE
- replicaset.apps/nfs-client-provisioner-784f85c9 1 1 1 23s
-
- ## Create the StorageClass for the provisioner
- [root@node22 nfs]# vim class.yaml
- apiVersion: storage.k8s.io/v1
- kind: StorageClass
- metadata:
- name: managed-nfs-storage
- provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
- parameters:
- archiveOnDelete: "false"
- [root@node22 nfs]# kubectl apply -f class.yaml
- storageclass.storage.k8s.io/managed-nfs-storage created
- [root@node22 nfs]# kubectl get storageclasses.storage.k8s.io
- NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
- managed-nfs-storage k8s-sigs.io/nfs-subdir-external-provisioner Delete Immediate false 21s
-
- ## Create a PVC against the new StorageClass
- [root@node22 nfs]# vim pvc.yaml
- kind: PersistentVolumeClaim
- apiVersion: v1
- metadata:
- name: test-claim
- spec:
- storageClassName: managed-nfs-storage
- accessModes:
- - ReadWriteMany
- resources:
- requests:
- storage: 1Gi
- [root@node22 nfs]# kubectl apply -f pvc.yaml
- persistentvolumeclaim/test-claim created
- [root@node22 nfs]# kubectl get pvc
- NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
- test-claim Bound pvc-9506279b-4c3c-43eb-bebe-7cd89e6062d3 1Gi RWX managed-nfs-storage 3s
- [root@node22 nfs]# kubectl get pv
- NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
- pvc-9506279b-4c3c-43eb-bebe-7cd89e6062d3 1Gi RWX Delete Bound default/test-claim managed-nfs-storage 42s
- [root@node22 nfs]# vim pod.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: test-pd
- spec:
- containers:
- - image: nginx
- name: nginx
- volumeMounts:
- - mountPath: /usr/share/nginx/html
- name: nfs-pvc
- volumes:
- - name: nfs-pvc
- persistentVolumeClaim:
- claimName: test-claim
- [root@node22 nfs]# kubectl apply -f pod.yaml
- pod/test-pd created
- [root@node22 nfs]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- test-pd 1/1 Running 0 8s
- [root@node22 nfs]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- test-pd 1/1 Running 0 30s 10.244.144.99 node33 <none> <none>
- [root@node22 nfs]# curl 10.244.144.99
- <html>
- <head><title>403 Forbidden</title></head>
- <body>
- <center><h1>403 Forbidden</h1></center>
- <hr><center>nginx/1.21.5</center>
- </body>
- </html>
-
- [root@node11 nfsdata]# cd default-test-claim-pvc-9506279b-4c3c-43eb-bebe-7cd89e6062d3
- [root@node11 default-test-claim-pvc-9506279b-4c3c-43eb-bebe-7cd89e6062d3]# ls
- [root@node11 default-test-claim-pvc-9506279b-4c3c-43eb-bebe-7cd89e6062d3]# echo test-claim > index.html
-
- [root@node22 nfs]# curl 10.244.144.99
- test-claim
- [root@node22 nfs]# kubectl delete -f pod.yaml
- pod "test-pd" deleted
- [root@node22 nfs]# kubectl delete -f pvc.yaml
- persistentvolumeclaim "test-claim" deleted
- [root@node22 nfs]# kubectl get pv
- No resources found
- ## When the PVC above is deleted, the automatically provisioned PV directory under the share is deleted too, because class.yaml sets archiveOnDelete: "false" (with "true" it would instead be renamed with an archived- prefix)
6). The StatefulSet controller
A StatefulSet is the workload API object used to manage stateful applications.
A StatefulSet manages the deployment and scaling of a set of Pods, and provides ordering and uniqueness guarantees for those Pods.
Like a Deployment, a StatefulSet manages a group of Pods based on the same container definition; unlike a Deployment, a StatefulSet maintains a sticky identity for each of its Pods. The Pods are created from the same spec but are not interchangeable: each keeps a permanent identifier no matter how it is scheduled.
A StatefulSet follows the same controller pattern as everything else: you define the desired state in a StatefulSet object, and the StatefulSet controller performs whatever updates are needed to reach that state.
StatefulSets are valuable for applications that need one or more of the following:
stable, unique network identifiers
stable, persistent storage
ordered, graceful deployment and scaling
ordered, automated rolling updates
Above, "stable" means persistence across Pod scheduling and rescheduling. If an application needs no stable identifiers and no ordered deployment, deletion, or scaling, it should be deployed with a workload that provides a set of stateless replicas; a Deployment or ReplicaSet may be better suited to such stateless needs.
- How a StatefulSet maintains Pod topology state through a headless Service:
- Create the headless Service:
- [root@node22 statefulset]# vim svc.yaml
- apiVersion: v1
- kind: Service
- metadata:
- name: nginx-svc
- labels:
- app: nginx
- spec:
- ports:
- - port: 80
- name: web
- clusterIP: None
- selector:
- app: nginx
- [root@node22 statefulset]# kubectl apply -f svc.yaml      ## create the Service from the file
- service/nginx-svc created
- [root@node22 statefulset]# kubectl get svc
- NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
- kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 4d19h
- my-svc ClusterIP 10.108.185.37 <none> 80/TCP 45h
- nginx-svc ClusterIP None <none> 80/TCP 5s
- web-service ClusterIP 10.109.238.119 <none> 80/TCP 3d2h
- [root@node22 statefulset]# kubectl delete svc my-svc      ## remove Services no longer needed
- service "my-svc" deleted
- [root@node22 statefulset]# kubectl delete svc web-service
- service "web-service" deleted
- [root@node22 statefulset]# kubectl get svc
- NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
- kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 4d19h
- nginx-svc ClusterIP None <none> 80/TCP 33s
-
- Create the StatefulSet controller:
- [root@node22 statefulset]# vim statefulset.yaml
- apiVersion: apps/v1
- kind: StatefulSet
- metadata:
- name: web
- spec:
- serviceName: "nginx-svc"
- replicas: 2
- selector:
- matchLabels:
- app: nginx
- template:
- metadata:
- labels:
- app: nginx
- spec:
- containers:
- - name: nginx
- image: nginx
- [root@node22 statefulset]# kubectl apply -f statefulset.yaml
- statefulset.apps/web created
- [root@node22 statefulset]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- web-0 1/1 Running 0 2m20s
- web-1 1/1 Running 0 49s
- [root@node22 statefulset]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- web-0 1/1 Running 0 2m49s 10.244.144.100 node33 <none> <none>
- web-1 1/1 Running 0 78s 10.244.214.132 node44 <none> <none>
## Ordered creation and teardown of pods under the StatefulSet controller:
A StatefulSet abstracts application state into two kinds:
Topology state: instances must start in a particular order, and a recreated Pod must keep the same network identity as the original.
Storage state: multiple instances of the application are each bound to different storage.
A StatefulSet numbers all of its Pods as $(statefulset name)-$(ordinal), starting from 0.
When a Pod is deleted and rebuilt, its network identity does not change; the topology state is pinned down as "name + ordinal", and each Pod gets a fixed, unique access point: its DNS record, of the form $(podname).$(service).$(namespace).svc.cluster.local, as the dig queries below show.
Creation (starts from ordinal 0; if the first Pod cannot start, the rest are never created):
- [root@node22 statefulset]# dig -t A nginx-svc.default.svc.cluster.local. @10.96.0.10
- ; <<>> DiG 9.9.4-RedHat-9.9.4-72.el7 <<>> -t A nginx-svc.default.svc.cluster.local. @10.96.0.10
- ;; global options: +cmd
- ;; Got answer:
- ;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 29984
- ;; flags: qr aa rd; QUERY: 1, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 1
- ;; WARNING: recursion requested but not available
-
- ;; OPT PSEUDOSECTION:
- ; EDNS: version: 0, flags:; udp: 4096
- ;; QUESTION SECTION:
- ;nginx-svc.default.svc.cluster.local. IN A
-
- ;; ANSWER SECTION:
- nginx-svc.default.svc.cluster.local. 30 IN A 10.244.144.100
- nginx-svc.default.svc.cluster.local. 30 IN A 10.244.214.132
-
- ;; Query time: 362 msec
- ;; SERVER: 10.96.0.10#53(10.96.0.10)
- ;; WHEN: Mon Aug 29 16:49:23 CST 2022
- ;; MSG SIZE rcvd: 166
- [root@node22 statefulset]# dig -t A web-0.nginx-svc.default.svc.cluster.local. @10.96.0.10
- ;; ANSWER SECTION:
- web-0.nginx-svc.default.svc.cluster.local. 30 IN A 10.244.144.100
- [root@node22 statefulset]# dig -t A web-1.nginx-svc.default.svc.cluster.local. @10.96.0.10
- ;; ANSWER SECTION:
- web-1.nginx-svc.default.svc.cluster.local. 30 IN A 10.244.214.132
- Teardown (proceeds from the highest ordinal down):
- [root@node22 statefulset]# vim statefulset.yaml
- apiVersion: apps/v1
- kind: StatefulSet
- metadata:
- name: web
- spec:
- serviceName: "nginx-svc"
- replicas: 0      ## setting this to 0 scales the Pods away; it is not a delete
- selector:
- matchLabels:
- app: nginx
- template:
- metadata:
- labels:
- app: nginx
- spec:
- containers:
- - name: nginx
- image: nginx
- [root@node22 statefulset]# kubectl apply -f statefulset.yaml
- statefulset.apps/web configured
- [root@node22 statefulset]# kubectl get pod
- No resources found in default namespace.
- [root@node22 statefulset]# kubectl delete -f statefulset.yaml
- statefulset.apps "web" deleted
## The PV/PVC design is what makes storage-state management by a StatefulSet possible:
- [root@node22 statefulset]# vim statefulset.yaml
- apiVersion: apps/v1
- kind: StatefulSet
- metadata:
- name: web
- spec:
- serviceName: "nginx-svc"
- replicas: 3
- selector:
- matchLabels:
- app: nginx
- template:
- metadata:
- labels:
- app: nginx
- spec:
- containers:
- - name: nginx
- image: nginx
- volumeMounts:
- - name: www
- mountPath: /usr/share/nginx/html
- volumeClaimTemplates:
- - metadata:
- name: www
- spec:
- storageClassName: nfs-client
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 1Gi
- [root@node22 statefulset]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- web-0 0/1 Pending 0 16s
- (web-0 is Pending because storageClassName: nfs-client matched no existing StorageClass; change class.yaml in the nfs directory to the following and re-apply:
- apiVersion: storage.k8s.io/v1
- kind: StorageClass
- metadata:
- name: nfs-client
- provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
- parameters:
- archiveOnDelete: "false"
- )
- [root@node22 statefulset]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- web-0 1/1 Running 0 3m10s
- web-1 1/1 Running 0 3m8s
- web-2 1/1 Running 0 3m3s
- [root@node22 statefulset]# kubectl get pvc
- NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
- www-web-0 Bound pvc-1cbb7bd9-1fe0-472d-8068-6a7a81fc3322 1Gi RWO nfs-client 9m30s
- www-web-1 Bound pvc-7be7b00f-8a40-447f-92f1-4064f8466629 1Gi RWO nfs-client 3m28s
- www-web-2 Bound pvc-3186a28f-c878-437b-8a03-1e4166ac0cbf 1Gi RWO nfs-client 3m23s
- [root@node22 statefulset]# kubectl get pv
- NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
- pvc-1cbb7bd9-1fe0-472d-8068-6a7a81fc3322 1Gi RWO Delete Bound default/www-web-0 nfs-client 3m44s
- pvc-3186a28f-c878-437b-8a03-1e4166ac0cbf 1Gi RWO Delete Bound default/www-web-2 nfs-client 3m29s
- pvc-7be7b00f-8a40-447f-92f1-4064f8466629 1Gi RWO Delete Bound default/www-web-1 nfs-client 3m34s
Pod creation strictly follows ordinal order: for example, web-1 stays Pending until web-0 is Running and its Conditions report Ready.
The StatefulSet also allocates and creates a PVC with the matching ordinal for each Pod. Kubernetes can then bind a PV to that PVC through the PersistentVolume mechanism, guaranteeing that every Pod owns an independent volume.
Test:
- [root@node11 nfsdata]# ll
- total 4
- drwxrwxrwx 2 root root 6 Aug 29 17:09 default-www-web-0-pvc-1cbb7bd9-1fe0-472d-8068-6a7a81fc3322
- drwxrwxrwx 2 root root 6 Aug 29 17:09 default-www-web-1-pvc-7be7b00f-8a40-447f-92f1-4064f8466629
- drwxrwxrwx 2 root root 6 Aug 29 17:09 default-www-web-2-pvc-3186a28f-c878-437b-8a03-1e4166ac0cbf
- -rw-r--r-- 1 root root 15 Aug 28 20:48 index.html
- drwxr-xr-x 2 root root 6 Aug 28 21:50 pv1
- drwxr-xr-x 2 root root 6 Aug 28 21:50 pv2
- drwxr-xr-x 2 root root 6 Aug 28 21:18 pv3
- [root@node11 nfsdata]# cd default-www-web-0-pvc-1cbb7bd9-1fe0-472d-8068-6a7a81fc3322
- [root@node11 default-www-web-0-pvc-1cbb7bd9-1fe0-472d-8068-6a7a81fc3322]# echo web-0 > index.html
- [root@node11 default-www-web-0-pvc-1cbb7bd9-1fe0-472d-8068-6a7a81fc3322]# cd ..
- [root@node11 nfsdata]# cd default-www-web-1-pvc-7be7b00f-8a40-447f-92f1-4064f8466629
- [root@node11 default-www-web-1-pvc-7be7b00f-8a40-447f-92f1-4064f8466629]# echo web-1 > index.html
- [root@node11 default-www-web-1-pvc-7be7b00f-8a40-447f-92f1-4064f8466629]# cd ..
- [root@node11 nfsdata]# cd default-www-web-2-pvc-3186a28f-c878-437b-8a03-1e4166ac0cbf
- [root@node11 default-www-web-2-pvc-3186a28f-c878-437b-8a03-1e4166ac0cbf]# echo web-2 > index.html
- [root@node11 default-www-web-2-pvc-3186a28f-c878-437b-8a03-1e4166ac0cbf]# cd ..
- [root@node22 statefulset]# kubectl run demo --image=busyboxplus -it --rm      ## access from inside a container
- If you don't see a command prompt, try pressing enter.
- / # curl web-0.nginx-svc
- web-0
- / # curl web-1.nginx-svc
- web-1
- / # curl web-2.nginx-svc
- web-2
- ## After the Pods are torn down and recreated, each Pod serves the content of its original volume again; see the sketch below
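- (Sketch, not captured in the original session: with the PVCs left in place,
- kubectl scale statefulset web --replicas=0
- kubectl scale statefulset web --replicas=3
- recreates web-0..web-2, and curl web-0.nginx-svc returns web-0 again.)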
- [root@node22 statefulset]# kubectl delete -f statefulset.yaml
- statefulset.apps "web" deleted
- [root@node22 statefulset]# kubectl delete pvc --all
- persistentvolumeclaim "www-web-0" deleted
- persistentvolumeclaim "www-web-1" deleted
- persistentvolumeclaim "www-web-2" deleted
- [root@node22 statefulset]# kubectl get pv
- No resources found
- [root@node22 statefulset]# kubectl get pod
- No resources found in default namespace.
7). Deploying a MySQL primary/replica cluster with a StatefulSet
- [root@node22 statefulset]# mkdir mysql
- [root@node22 statefulset]# cd mysql/
- [root@node22 mysql]# vim config.yaml
- apiVersion: v1
- kind: ConfigMap
- metadata:
- name: mysql
- labels:
- app: mysql
- app.kubernetes.io/name: mysql
- data:
- primary.cnf: |
- # Apply this config only on the primary
- [mysqld]
- log-bin
- replica.cnf: |
- # Apply this config only on replicas
- [mysqld]
- super-read-only
- [root@node22 mysql]# kubectl apply -f config.yaml
- configmap/mysql created
- [root@node22 mysql]# kubectl get cm
- NAME DATA AGE
- cm1-config 2 38h
- kube-root-ca.crt 1 4d20h
- my-config 2 38h
- my-config-2 1 38h
- my-config-3 2 38h
- mysql 2 14s
- nginxconf 1 38h
- [root@node22 mysql]# kubectl delete cm my-config
- configmap "my-config" deleted
- [root@node22 mysql]# kubectl delete cm my-config-2
- configmap "my-config-2" deleted
- [root@node22 mysql]# kubectl delete cm my-config-3
- configmap "my-config-3" deleted
- [root@node22 mysql]# kubectl get cm
- NAME DATA AGE
- cm1-config 2 38h
- kube-root-ca.crt 1 4d20h
- mysql 2 41s
- nginxconf 1 38h
-
- Create the two Services:
- [root@node22 mysql]# vim svc.yaml
- apiVersion: v1
- kind: Service
- metadata:
- name: mysql
- labels:
- app: mysql
- app.kubernetes.io/name: mysql
- spec:
- ports:
- - name: mysql
- port: 3306
- clusterIP: None
- selector:
- app: mysql
- ---
- # Client Service for connecting to any MySQL instance for reads
- # For writes, you must connect to the primary: mysql-0.mysql
- apiVersion: v1
- kind: Service
- metadata:
- name: mysql-read
- labels:
- app: mysql
- app.kubernetes.io/name: mysql
- readonly: "true"
- spec:
- ports:
- - name: mysql
- port: 3306
- selector:
- app: mysql
- [root@node22 mysql]# kubectl apply -f svc.yaml
- service/mysql created
- service/mysql-read created
- [root@node22 mysql]# kubectl delete svc nginx-svc
- service "nginx-svc" deleted
- [root@node22 mysql]# kubectl get svc
- NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
- kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 4d20h
- mysql ClusterIP None <none> 3306/TCP 17s
- mysql-read ClusterIP 10.106.229.89 <none> 3306/TCP 17s
-
- Create the StatefulSet controller (mysql.yaml is not reproduced here; it appears to follow the official Kubernetes replicated-MySQL StatefulSet example, using the mysql:5.7 and xtrabackup:1.0 images pushed below):
- [root@node22 mysql]# vim mysql.yaml
- [root@node22 mysql]# kubectl apply -f mysql.yaml
- statefulset.apps/mysql created
- [root@node22 mysql]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- mysql-0 1/1 Running 1 (64s ago) 9m56s
- [root@node22 mysql]# kubectl logs mysql-0 init-mysql
- ++ hostname
- + [[ mysql-0 =~ -([0-9]+)$ ]]
- + ordinal=0
- + echo '[mysqld]'
- + echo server-id=100
- + [[ 0 -eq 0 ]]
- + cp /mnt/config-map/primary.cnf /mnt/conf.d/
-
- Push the images to the private registry:
- [root@node11 ~]# docker load -i mysql-xtrabackup.tar
- [root@node11 ~]# docker push reg.westos.org/library/mysql:5.7
- [root@node11 ~]# docker push reg.westos.org/library/xtrabackup:1.0
-
- [root@node22 mysql]# kubectl run demo --image=mysql:5.7 -it bash
- If you don't see a command prompt, try pressing enter.
- root@demo:/# mysql -h mysql-0.mysql
• The scheduler uses the Kubernetes watch mechanism to discover newly created Pods that have not yet been assigned to a node, and schedules every unscheduled Pod it finds onto a suitable node.
• kube-scheduler is the default scheduler of a Kubernetes cluster and is part of the control plane. If you really want or need to, kube-scheduler is designed so that you can write your own scheduling component and replace it.
• Factors considered in scheduling decisions include: individual and collective resource requests, hardware/software/policy constraints, affinity and anti-affinity requirements, data locality, inter-workload interference, and more.
• Default policies: https://kubernetes.io/zh/docs/concepts/scheduling/kube-scheduler/
• Scheduling framework: https://kubernetes.io/zh/docs/concepts/configuration/scheduling-framework/
• nodeName is the simplest node selection constraint, but it is generally not recommended. If nodeName is set in the PodSpec, it takes precedence over all other node selection methods.
Some limitations of selecting nodes with nodeName:
• If the named node does not exist, the Pod will not run.
• If the named node lacks the resources to accommodate the Pod, the Pod fails.
• Node names in cloud environments are not always predictable or stable.
Example:
- [root@node22 ~]# cd yaml/
- [root@node22 yaml]# vim pod.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: demo
- namespace: default
- labels:
- app: nginx
- spec:
- containers:
- - name: nginx
- image: nginx
- nodeName: node44
- [root@node22 yaml]# kubectl apply -f pod.yaml
- pod/demo created
- [root@node22 yaml]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- demo 0/1 ContainerCreating 0 15s <none> node44 <none> <none>
- This method is simple to use, but node instability limits its usefulness.
- [root@node22 yaml]# kubectl delete -f pod.yaml
- pod "demo" deleted
-
- • nodeSelector is the simplest recommended form of node selection constraint.
- • Label the chosen node:
- • kubectl label nodes server2 disktype=ssd
- • Add a nodeSelector field to the pod spec:
- [root@node22 yaml]# vim pod.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: demo
- namespace: default
- labels:
- app: nginx
- spec:
- containers:
- - name: nginx
- image: nginx
- #nodeName: no
- nodeSelector:
- disktype: ssd
- [root@node22 yaml]# kubectl apply -f pod.yaml
- pod/demo created
- [root@node22 yaml]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- demo 1/1 Running 0 9s 10.244.144.110 node33 <none> <none>
- [root@node22 yaml]# kubectl delete -f pod.yaml
- pod "demo" deleted
Affinity and anti-affinity:
• nodeSelector provides a very simple way to constrain pods to nodes with particular labels. The affinity/anti-affinity feature greatly expands the types of constraints you can express.
• Rules can be "soft"/"preferred" rather than hard requirements, so if the scheduler cannot satisfy them, the pod is still scheduled.
• You can constrain against labels on the pods already running on a node rather than against the node's own labels, which lets you control which pods may or may not be placed together.
• Node affinity:
• requiredDuringSchedulingIgnoredDuringExecution — must be satisfied
• preferredDuringSchedulingIgnoredDuringExecution — preferred
• IgnoredDuringExecution means that if a node's labels change while a Pod is running such that the affinity rule is no longer satisfied, the Pod keeps running.
• Reference: https://kubernetes.io/zh/docs/concepts/configuration/assign-pod-node/
Node affinity Pod example:
- [root@node22 ~]# mkdir node
- [root@node22 ~]# cd node/
- [root@node22 node]# vim pod.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: node-affinity
- spec:
- containers:
- - name: nginx
- image: nginx
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: disktype
- operator: In
- values:
- - ssd
- - sata
- [root@node22 node]# kubectl apply -f pod.yaml
- pod/node-affinity created
- [root@node22 node]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- node-affinity 1/1 Running 0 14s 10.244.144.111 node33 <none> <none>
- [root@node22 node]# kubectl get node --show-labels
- NAME STATUS ROLES AGE VERSION LABELS
- node22 Ready control-plane,master 9d v1.23.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node22,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=
- node33 Ready <none> 9d v1.23.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=ssd,kubernetes.io/arch=amd64,kubernetes.io/hostname=node33,kubernetes.io/os=linux
- node44 Ready <none> 9d v1.23.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=ssd,ingress=nginx,kubernetes.io/arch=amd64,kubernetes.io/hostname=node44,kubernetes.io/os=linux
- [root@node22 node]# kubectl delete -f pod.yaml
- pod "node-affinity" deleted
-
- With a preference added, the scheduler favors nodes that match it; if no node satisfies the preference, scheduling is unaffected as long as the required terms are met:
- [root@node22 node]# vim pod.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: node-affinity
- spec:
- containers:
- - name: nginx
- image: nginx
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: disktype
- operator: In
- values:
- - ssd
- - sata
- - fc
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 1
- preference:
- matchExpressions:
- - key: ingress
- operator: In
- values:
- - nginx
- [root@node22 node]# kubectl apply -f pod.yaml
- pod/node-affinity created
- [root@node22 node]# kubectl get pod -o wide      ## scheduled to node44, which matches the preference
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- node-affinity 1/1 Running 0 21s 10.244.214.140 node44 <none> <none>
1). nodeAffinity supports several operators in its matching rules (a sketch combining them follows this list):
In: the label's value is in the list
NotIn: the label's value is not in the list
Gt: the label's value is greater than the given value (not supported for pod affinity)
Lt: the label's value is less than the given value (not supported for pod affinity)
Exists: the label exists
DoesNotExist: the label does not exist
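(Sketch, not from the original session; cpu-count is a hypothetical node label, and Gt/Lt take a single integer encoded as a string:
- matchExpressions:
  - key: disktype
    operator: Exists
  - key: cpu-count
    operator: Gt
    values:
    - "8"
)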
2). Pod affinity and anti-affinity
podAffinity decides which Pods a Pod may be placed with in the same topology domain (a topology domain is defined by node labels and can be a single host or a group of hosts such as a cluster or zone).
podAntiAffinity decides which Pods a Pod must not share a topology domain with; both deal with relationships between Pods inside the Kubernetes cluster.
Inter-pod affinity and anti-affinity may be even more useful when combined with higher-level collections such as ReplicaSets, StatefulSets, and Deployments, making it easy to configure a set of workloads that should live in the same defined topology (e.g. the same node).
Pod affinity:
- [root@node22 node]# vim pod2.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: myapp
- labels:
- app: myapp
- spec:
- containers:
- - name: myapp
- image: myapp:v1
- hostNetwork: true
- nodeName: node33
- [root@node22 node]# kubectl apply -f pod2.yaml
- pod/myapp created
- [root@node22 node]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- myapp 0/1 Error 1 (8s ago) 11s
- nginx 1/1 Running 0 3m25s
- [root@node22 node]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- myapp 1/1 Running 3 (31s ago) 53s 192.168.0.33 node33 <none> <none>
- nginx 1/1 Running 0 4m7s 192.168.0.33 node33 <none> <none>
-
- [root@node22 node]# vim pod4.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: mysql
- labels:
- app: mysql
- spec:
- containers:
- - name: mysql
- image: mysql:5.7
- env:
- - name: "MYSQL_ROOT_PASSWORD"
- value: "westos"
- affinity:
- podAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: app
- operator: In
- values:
- - nginx
- topologyKey: kubernetes.io/hostname
- [root@node22 node]# kubectl apply -f pod4.yaml
- pod/mysql created
- [root@node22 node]# kubectl get pod -o wide
Pod anti-affinity:
- [root@node22 node]# vim pod3.yaml
- apiVersion: v1
- kind: Pod
- metadata:
- name: myapp
- labels:
- app: myapp
- spec:
- containers:
- - name: myapp
- image: myapp:v1
- hostNetwork: true
- affinity:
- podAntiAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: app
- operator: In
- values:
- - nginx
- topologyKey: "kubernetes.io/hostname"
- [root@node22 node]# kubectl delete -f pod3.yaml
- pod "myapp" deleted
- [root@node22 node]# kubectl apply -f pod3.yaml
- pod/myapp created
- [root@node22 node]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- myapp 1/1 Running 0 12s 192.168.0.44 node44 <none> <none>
- nginx 1/1 Running 0 7m57s 192.168.0.33 node33 <none> <none>
3). Taints
https://kubernetes.io/zh/docs/concepts/scheduling-eviction/taint-and-toleration/
NodeAffinity is a property defined on a Pod that attracts it to a set of nodes. Taints are the opposite: they are a node property that lets a node repel, or even evict, Pods.
After a taint is set on a node, Kubernetes will not schedule Pods there unless the Pod declares a matching Toleration. A toleration lets Kubernetes ignore the node's taint, so the Pod can (but is not required to) be scheduled onto that node.
Use kubectl taint to add a taint to a node:
kubectl taint nodes node1 key=value:NoSchedule    // create
kubectl describe nodes node1 | grep Taints        // query
kubectl taint nodes node1 key:NoSchedule-         // delete
The [effect] can be one of: [ NoSchedule | PreferNoSchedule | NoExecute ]
• NoSchedule: Pods will not be scheduled onto the tainted node.
• PreferNoSchedule: a soft (preferred) version of NoSchedule.
• NoExecute: once the taint takes effect, Pods already running on the node that have no matching toleration are evicted.
## The master node does not participate in scheduling because it carries a taint; the worker nodes do not have it
- [root@node22 node]# kubectl describe nodes node22 | grep Tain
- Taints: node-role.kubernetes.io/master:NoSchedule
- [root@node22 node]# kubectl describe nodes node33 | grep Tain
- Taints: <none>
- [root@node22 node]# kubectl describe nodes node44 | grep Tain
- Taints: <none>
- [root@node22 node]# kubectl create deployment demo --image=myapp:v1 --replicas=3
- [root@node22 node]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- demo-7c4d6f8c46-856fc 1/1 Running 0 19s 10.244.144.113 node33 <none> <none>
- demo-7c4d6f8c46-ss2nw 1/1 Running 0 19s 10.244.144.112 node33 <none> <none>
- demo-7c4d6f8c46-wljn8 1/1 Running 0 19s 10.244.214.143 node44 <none> <none>
Add a NoSchedule taint to node33:
## After node33 is tainted, subsequently created pods are all scheduled onto the untainted node44
- [root@node22 node]# kubectl delete pod --all    # delete the pods that are no longer needed
- pod "mysql" deleted
- pod "nginx" deleted
- [root@node22 node]# kubectl taint node node33 k1=v1:NoSchedule
- node/node33 tainted
- [root@node22 node]# kubectl describe nodes node33 | grep Tain
- Taints: k1=v1:NoSchedule
- [root@node22 node]# kubectl scale deployment demo --replicas=6    # scale up to 6 replicas
- deployment.apps/demo scaled
- [root@node22 node]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- demo-7c4d6f8c46-856fc 1/1 Running 0 4m50s 10.244.144.113 node33 <none> <none>
- demo-7c4d6f8c46-8qsng 1/1 Running 0 44s 10.244.214.145 node44 <none> <none>
- demo-7c4d6f8c46-9jmkc 1/1 Running 0 44s 10.244.214.144 node44 <none> <none>
- demo-7c4d6f8c46-ss2nw 1/1 Running 0 4m50s 10.244.144.112 node33 <none> <none>
- demo-7c4d6f8c46-vlfws 1/1 Running 0 44s 10.244.214.146 node44 <none> <none>
- demo-7c4d6f8c46-wljn8 1/1 Running 0 4m50s 10.244.214.143 node44 <none> <none>
Add a NoExecute taint to node44:
## After node44 is also tainted, pods already running on it are evicted
- [root@node22 node]# kubectl taint node node44 k1=v1:NoExecute
- node/node44 tainted
- [root@node22 node]# kubectl describe nodes node44 | grep Tain
- Taints: k1=v1:NoExecute
- [root@node22 node]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- demo-7c4d6f8c46-5gw59 0/1 Pending 0 22s <none> <none> <none> <none>
- demo-7c4d6f8c46-5ww9h 0/1 Pending 0 20s <none> <none> <none> <none>
- demo-7c4d6f8c46-7xmsw 0/1 Pending 0 20s <none> <none> <none> <none>
- demo-7c4d6f8c46-856fc 1/1 Running 0 7m28s 10.244.144.113 node33 <none> <none>
- demo-7c4d6f8c46-rthws 0/1 Pending 0 20s <none> <none> <none> <none>
- demo-7c4d6f8c46-ss2nw 1/1 Running 0 7m28s 10.244.144.112 node33 <none> <none>
## After a toleration is configured in the pod manifest, node44 can run pods again
[root@node22 node]# vim myapp.yaml    # tolerate the NoSchedule taint (sketched below)
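The manifest itself is not shown in the capture; a minimal sketch of what it would contain, assuming the existing Deployment demo (image myapp:v1, 6 replicas, label app=demo):
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo
spec:
  replicas: 6
  selector:
    matchLabels:
      app: demo
  template:
    metadata:
      labels:
        app: demo
    spec:
      containers:
      - name: myapp
        image: myapp:v1
      tolerations:
      - key: "k1"              # must match the taint key set on node33
        operator: "Equal"
        value: "v1"            # must match the taint value
        effect: "NoSchedule"   # tolerates NoSchedule only; node44's NoExecute is still repelled
Note that in the listing below all pods still land on node33: the NoSchedule taint on node33 is tolerated, while the NoExecute taint on node44 is not.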
[root@node22 node]# kubectl apply -f myapp.yaml
Warning: resource deployments/demo is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
deployment.apps/demo configured
[root@node22 node]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
demo-7df9764968-2hw59 1/1 Running 0 7s 10.244.144.118 node33
demo-7df9764968-8cfdf 1/1 Running 0 23s 10.244.144.116 node33
demo-7df9764968-8rbg8 1/1 Running 0 7s 10.244.144.117 node33
demo-7df9764968-hlxcj 1/1 Running 0 23s 10.244.144.114 node33
demo-7df9764968-jthwv 1/1 Running 0 6s 10.244.144.119 node33
demo-7df9764968-mzqn9 1/1 Running 0 23s 10.244.144.115 node33
[root@node22 node]# vim myapp.yaml    # tolerate all taints (sketched below)
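Again the manifest is not captured; assuming the same Deployment, its tolerations block would be reduced to a match-everything rule (an empty key with Exists matches all keys and values, and an omitted effect matches all effects):
      tolerations:
      - operator: "Exists"   # tolerates every taint, including the master's NoSchedule
This is consistent with the listing below, where pods are now scheduled onto node22 (the master), node33, and node44 alike.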
[root@node22 node]# kubectl apply -f myapp.yaml
deployment.apps/demo created
[root@node22 node]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
demo-5f7ffd8d99-hsvt5 1/1 Running 0 7s 10.244.214.147 node44
demo-5f7ffd8d99-jbnpt 1/1 Running 0 7s 10.244.214.148 node44
demo-5f7ffd8d99-mzbhh 1/1 Running 0 7s 10.244.144.121 node33
demo-5f7ffd8d99-nv746 0/1 ContainerCreating 0 7s <none> node22
demo-5f7ffd8d99-wq2pb 0/1 ContainerCreating 0 7s <none> node22
demo-5f7ffd8d99-zhggq 1/1 Running 0 7s 10.244.144.120 node33
The key, value, and effect defined in tolerations must be consistent with the taint set on the node:
• If operator is Exists, value may be omitted.
• If operator is Equal, key and value must match exactly.
• If operator is not specified, it defaults to Equal.
There are also two special cases:
• An empty key combined with Exists matches every key and value, i.e., tolerates all taints.
• An empty effect matches every effect.
- [root@node22 node]# kubectl delete -f myapp.yaml
- deployment.apps "demo" deleted
- [root@node22 node]# kubectl taint node node33 k1-
- node/node33 untainted
- [root@node22 node]# kubectl taint node node44 k1-
- node/node44 untainted
- [root@node22 node]# kubectl describe nodes | grep Tain
- Taints: node-role.kubernetes.io/master:NoSchedule
- Taints: <none>
- Taints: <none>
4). Other commands that affect Pod scheduling: cordon, drain, and delete. After any of them, newly created pods are no longer scheduled onto the node, but the commands differ in how disruptive they are.
cordon - stop scheduling:
• The gentlest option: it only marks the node SchedulingDisabled. New pods are not scheduled to it, while its existing pods are unaffected and keep serving traffic.
- [root@node22 node]# kubectl get node
- NAME STATUS ROLES AGE VERSION
- node22 Ready control-plane,master 9d v1.23.10
- node33 Ready <none> 9d v1.23.10
- node44 Ready <none> 9d v1.23.10
- [root@node22 node]# kubectl cordon node33
- node/node33 cordoned
- [root@node22 node]# kubectl get node
- NAME STATUS ROLES AGE VERSION
- node22 Ready control-plane,master 9d v1.23.10
- node33 Ready,SchedulingDisabled <none> 9d v1.23.10
- node44 Ready <none> 9d v1.23.10
- [root@node22 node]# kubectl uncordon node33    # re-enable scheduling
- node/node33 uncordoned
5). drain - evict the node:
• First evicts the pods on the node (they are recreated on other nodes), then marks the node SchedulingDisabled.
- [root@node22 node]# kubectl create deployment demo --image=nginx --replicas=3
- deployment.apps/demo created
- [root@node22 node]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- demo-6c54f77c95-78mhw 1/1 Running 0 7s 10.244.214.151 node44 <none> <none>
- demo-6c54f77c95-c9jp7 1/1 Running 0 7s 10.244.144.123 node33 <none> <none>
- demo-6c54f77c95-j8rtj 1/1 Running 0 7s 10.244.144.122 node33 <none> <none>
- [root@node22 node]# kubectl drain node44 --ignore-daemonsets
- node/node44 already cordoned
- WARNING: ignoring DaemonSet-managed Pods: kube-system/calico-node-2tgjc, kube-system/kube-proxy-zh89l, metallb-system/speaker-4hb2q
- evicting pod metallb-system/controller-5c97f5f498-zkkww
- evicting pod default/demo-6c54f77c95-78mhw
- evicting pod ingress-nginx/ingress-nginx-controller-5bbfbbb9c7-d82hw
- pod/controller-5c97f5f498-zkkww evicted
- pod/demo-6c54f77c95-78mhw evicted
- pod/ingress-nginx-controller-5bbfbbb9c7-d82hw evicted
- node/node44 drained
- [root@node22 node]# kubectl get pod -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- demo-6c54f77c95-bl47m 1/1 Running 0 17s 10.244.144.124 node33 <none> <none>
- demo-6c54f77c95-c9jp7 1/1 Running 0 79s 10.244.144.123 node33 <none> <none>
- demo-6c54f77c95-j8rtj 1/1 Running 0 79s 10.244.144.122 node33 <none> <none>
- [root@node22 node]# kubectl get node
- NAME STATUS ROLES AGE VERSION
- node22 Ready control-plane,master 9d v1.23.10
- node33 Ready <none> 9d v1.23.10
- node44 Ready,SchedulingDisabled <none> 9d v1.23.10
6). delete - remove the node
• The most disruptive option: pods are first evicted and recreated on other nodes, then the node is deleted from the master, which loses all control over it. To restore the node, log into it and restart the kubelet service.
- [root@node22 node]# kubectl delete nodes node44
- node "node44" deleted
- [root@node22 node]# kubectl get node
- NAME STATUS ROLES AGE VERSION
- node22 Ready control-plane,master 9d v1.23.10
- node33 Ready <none> 9d v1.23.10
- [root@node44 ~]# systemctl restart kubelet    # restarting kubelet uses the node's self-registration to bring it back into the cluster
- [root@node22 node]# kubectl get node
- NAME STATUS ROLES AGE VERSION
- node22 Ready control-plane,master 9d v1.23.10
- node33 Ready <none> 9d v1.23.10
- node44 Ready <none> 31s v1.23.10
1. Kubernetes API access control
Authentication
• There are currently eight authentication methods; one or more can be enabled, and as soon as any one of them succeeds, the remaining methods are skipped. Typically X509 Client Certs and Service Account Tokens are enabled.
• Kubernetes clusters have two kinds of users: Service Accounts, managed by Kubernetes itself, and ordinary User Accounts. In k8s an account is not an account in the usual sense; it does not really exist as a stored object and is only a formal identity.
Authorization
• A request reaches authorization only after passing authentication. The request's resource attributes are matched against all authorization policies to decide whether it is allowed or denied. There are currently six authorization modes: AlwaysDeny, AlwaysAllow, ABAC, RBAC, Webhook, and Node. RBAC is enforced by default.
Admission Control
• A mechanism for intercepting requests. It runs after authentication and authorization, as the last link in the chain, and can mutate and validate the requested API objects.
Clients that access the k8s API Server fall into two main categories:
• kubectl: .kube/config in the user's home directory stores the client credentials for the API Server. When kubectl is used, it automatically reads this file, authenticates against the API Server, and then performs the requested operation.
• pod: processes inside a Pod may also need the API Server. Access by a human, or by a hand-written script, uses a UserAccount; when the Pod itself connects to the API Server, it uses a ServiceAccount. In production the latter is far more common.
The commands kubectl sends to the apiserver travel over HTTP; effectively they are CRUD operations against URLs:
• kubectl proxy --port=8888 &
• curl http://localhost:8888/api/v1/namespaces/default
• curl http://localhost:8888/apis/apps/v1/namespaces/default/deployments
The difference between the two API paths:
• /api is a special prefix used only by objects in the core v1 group.
• /apis is the fixed entry-point format for all other API groups.
UserAccount vs. ServiceAccount:
• User accounts are for humans; service accounts are for processes running in pods.
• User accounts are global: their names are unique across all namespaces of the cluster, and future user resources will not be namespaced. Service accounts are namespaced.
• Typically, user accounts are synced from a corporate database; creating one requires special privileges and involves complex business processes. Service accounts are deliberately lightweight, allowing cluster users to create them for specific tasks (the principle of least privilege).
Create a service account (ServiceAccount):
- [root@node22 ~]# kubectl delete pvc --all    # delete leftover PVCs
- persistentvolumeclaim "data-mysql-0" deleted
- [root@node22 ~]# kubectl create sa admin    # create the admin service account
- serviceaccount/admin created
- [root@node22 ~]# kubectl get sa    # list service accounts
- NAME SECRETS AGE
- admin 1 52s
- default 1 9d
- [root@node22 ~]# kubectl describe sa admin    # k8s auto-generates credentials for the account, but grants no authorization
- Name: admin
- Namespace: default
- Labels: <none>
- Annotations: <none>
- Image pull secrets: <none>
- Mountable secrets: admin-token-jbwqn
- Tokens: admin-token-jbwqn
- Events: <none>
- [root@node22 ~]# kubectl run demo --image=nginx    # create a pod
- pod/demo created
- [root@node22 ~]# kubectl get pod demo -o yaml | grep default    # the default service account is used by default
- namespace: default
- schedulerName: default-scheduler
- serviceAccount: default
- serviceAccountName: default
- defaultMode: 420
- [root@node22 ~]# kubectl delete pod demo
- pod "demo" deleted
Add secrets to the ServiceAccount:
- [root@node22 sa]# kubectl patch serviceaccount admin -p '{"imagePullSecrets": [{"name": "myregistrykey"}]}'
- serviceaccount/admin patched
- [root@node22 sa]# kubectl describe sa admin
- Name: admin
- Namespace: default
- Labels: <none>
- Annotations: <none>
- Image pull secrets: myregistrykey
- Mountable secrets: admin-token-jbwqn
- Tokens: admin-token-jbwqn
- Events: <none>
-
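The same thing can be expressed declaratively; a minimal sketch of the ServiceAccount manifest, assuming the secret myregistrykey already exists in the namespace:
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin
  namespace: default
imagePullSecrets:
- name: myregistrykey   # docker-registry type secret, assumed to have been created earlier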
Bind the ServiceAccount to a pod:
- [root@node22 ~]# mkdir sa
- [root@node22 ~]# cd sa
- [root@node22 sa]# vim pod.yaml    # use the admin service account
- apiVersion: v1
- kind: Pod
- metadata:
- name: myapp
- labels:
- app: myapp
- spec:
- containers:
- - name: myapp
- image: reg.westos.org/westos/game2048
- ports:
- - name: http
- containerPort: 80
- serviceAccountName: admin
- [root@node22 sa]# kubectl apply -f pod.yaml
- pod/myapp created
- [root@node22 sa]# kubectl get pod    # pulling an image from a private registry requires authentication
- NAME READY STATUS RESTARTS AGE
- demo-6c54f77c95-bl47m 1/1 Running 0 101m
- demo-6c54f77c95-c9jp7 1/1 Running 0 102m
- demo-6c54f77c95-j8rtj 1/1 Running 0 102m
- myapp 0/1 ImagePullBackOff 0 18s
- [root@node22 sa]# kubectl delete deployments.apps demo
- deployment.apps "demo" deleted
- [root@node22 sa]# kubectl delete pod myapp
- pod "myapp" deleted
After the secret has been added to the ServiceAccount as above, the image can be pulled:
- [root@node22 sa]# kubectl apply -f pod.yaml
- pod/myapp created
- [root@node22 sa]# kubectl get pod
- NAME READY STATUS RESTARTS AGE
- myapp 1/1 Running 0 18s
- [root@node22 sa]# kubectl delete pod myapp
- pod "myapp" deleted
Create a user account (UserAccount):
- [root@node22 sa]# cd /etc/kubernetes/pki/
- [root@node22 pki]# openssl genrsa -out test.key 2048    # generate a test key
- [root@node22 pki]# openssl req -new -key test.key -out test.csr -subj "/CN=test"    # generate a certificate signing request from the private key
- [root@node22 pki]# openssl x509 -req -in test.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out test.crt -days 365    # issue the certificate from the CSR
- Signature ok
- subject=/CN=test
- Getting CA Private Key
- [root@node22 pki]# kubectl config set-credentials test --client-certificate=/etc/kubernetes/pki/test.crt --client-key=/etc/kubernetes/pki/test.key --embed-certs=true
- User "test" set.
- [root@node22 pki]# kubectl config set-context test@kubernetes --cluster=kubernetes --user=test    # create a context for the user
- Context "test@kubernetes" created.
- [root@node22 pki]# kubectl config use-context test@kubernetes    # switch to the test user
- Switched to context "test@kubernetes".
- [root@node22 pki]# kubectl config view    # inspect the config
- apiVersion: v1
- clusters:
- - cluster:
- certificate-authority-data: DATA+OMITTED
- server: https://192.168.0.22:6443
- name: kubernetes
- contexts:
- - context:
- cluster: kubernetes
- user: kubernetes-admin
- name: kubernetes-admin@kubernetes
- - context:
- cluster: kubernetes
- user: test
- name: test@kubernetes
- current-context: test@kubernetes
- kind: Config
- preferences: {}
- users:
- - name: kubernetes-admin
- user:
- client-certificate-data: REDACTED
- client-key-data: REDACTED
- - name: test
- user:
- client-certificate-data: REDACTED
- client-key-data: REDACTED
- [root@node22 pki]# kubectl get pod    # insufficient permissions
- Error from server (Forbidden): pods is forbidden: User "test" cannot list resource "pods" in API group "" in the namespace "default"
- At this point the user authenticates successfully but has no permission to operate on cluster resources; authorization still needs to be granted.
RBAC (Role Based Access Control): role-based authorization
It lets administrators configure authorization policies dynamically through the Kubernetes API; in RBAC, users are associated with permissions via roles. RBAC only grants access, never denies it, so you only need to define what a user is allowed to do. RBAC has four object types: Role, ClusterRole, RoleBinding, and ClusterRoleBinding.
The three basic RBAC concepts:
• Subject: the entity being granted access; k8s has three kinds: user, group, and serviceAccount.
• Role: a set of rules that defines permissions on Kubernetes API objects.
• RoleBinding: the binding between a subject and a role.
Role and ClusterRole
• A Role is a collection of permissions; it can only grant access to resources within a single namespace.
• A ClusterRole is similar to a Role, but applies cluster-wide.
Create a namespaced role:
- [root@node22 sa]# vim role.yaml
- kind: Role
- apiVersion: rbac.authorization.k8s.io/v1
- metadata:
- namespace: default
- name: myrole
- rules:
- - apiGroups: [""]
- resources: ["pods"]
- verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
- [root@node22 sa]# kubectl config use-context kubernetes-admin@kubernetes    # switch back to admin
- Switched to context "kubernetes-admin@kubernetes".
- [root@node22 sa]# kubectl apply -f role.yaml    # create the role
- role.rbac.authorization.k8s.io/myrole created
- [root@node22 sa]# kubectl get role    # list roles
- NAME CREATED AT
- myrole 2022-09-03T14:04:53Z
RoleBinding and ClusterRoleBinding:
• A RoleBinding grants the permissions defined in a Role to a user or group. It contains a subjects list (users, groups, service accounts) and a reference to the Role.
• A RoleBinding grants access within a single namespace; a ClusterRoleBinding applies cluster-wide.
Bind the role:
- [root@node22 sa]# vim bind.yaml
- kind: RoleBinding
- apiVersion: rbac.authorization.k8s.io/v1
- metadata:
- name: test-read-pods
- namespace: default
- subjects:
- - kind: User
- name: test
- apiGroup: rbac.authorization.k8s.io
- roleRef:
- kind: Role
- name: myrole
- apiGroup: rbac.authorization.k8s.io
- [root@node22 sa]# kubectl apply -f bind.yaml
- rolebinding.rbac.authorization.k8s.io/test-read-pods created
- [root@node22 sa]# kubectl get rolebindings.rbac.authorization.k8s.io
- NAME ROLE AGE
- test-read-pods Role/myrole 14s
- [root@node22 sa]# kubectl config use-context test@kubernetes    # switch to the test user
- Switched to context "test@kubernetes".
- [root@node22 sa]# kubectl get pod
- No resources found in default namespace.
- [root@node22 sa]# kubectl run demo --image=nginx    # pods can now be managed
- pod/demo created
-
Create a cluster role:
- [root@node22 sa]# vim role.yaml
- kind: Role
- apiVersion: rbac.authorization.k8s.io/v1
- metadata:
- namespace: default
- name: myrole
- rules:
- - apiGroups: [""]
- resources: ["pods"]
- verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
-
- ---
- kind: ClusterRole
- apiVersion: rbac.authorization.k8s.io/v1
- metadata:
- name: myclusterrole
- rules:
- - apiGroups: [""]
- resources: ["pods"]
- verbs: ["get", "watch", "list", "delete", "create", "update"]
- - apiGroups: ["extensions", "apps"]
- resources: ["deployments"]
- verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- [root@node22 sa]# kubectl apply -f role.yaml
- role.rbac.authorization.k8s.io/myrole unchanged
- clusterrole.rbac.authorization.k8s.io/myclusterrole created
Bind the cluster role within a namespace (a RoleBinding referencing a ClusterRole):
- [root@node22 sa]# vim bind.yaml
- kind: RoleBinding
- apiVersion: rbac.authorization.k8s.io/v1
- metadata:
- name: test-read-pods
- namespace: default
- subjects:
- - kind: User
- name: test
- apiGroup: rbac.authorization.k8s.io
- roleRef:
- kind: Role
- name: myrole
- apiGroup: rbac.authorization.k8s.io
-
- ---
- apiVersion: rbac.authorization.k8s.io/v1
- kind: RoleBinding
- metadata:
- name: rolebind-myclusterrole
- namespace: default
- roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: myclusterrole
- subjects:
- - apiGroup: rbac.authorization.k8s.io
- kind: User
- name: test
- [root@node22 sa]# kubectl apply -f bind.yaml
- rolebinding.rbac.authorization.k8s.io/test-read-pods unchanged
- rolebinding.rbac.authorization.k8s.io/rolebind-myclusterrole created
- [root@node22 sa]# kubectl config use-context test@kubernetes    # switch to the test user
- Switched to context "test@kubernetes".
- [root@node22 sa]# kubectl get pod
- No resources found in default namespace.
- [root@node22 sa]# kubectl get deployments.apps
- No resources found in default namespace.
- [root@node22 sa]# kubectl create deployments demo --images=nginx
- error: unknown flag: --images
- See 'kubectl create --help' for usage.
- [root@node22 sa]# kubectl create deployments demo --image=nginx
- error: unknown flag: --image
- See 'kubectl create --help' for usage.
- [root@node22 sa]# kubectl create deployment demo --image=nginx
- deployment.apps/demo created
- [root@node22 sa]# kubectl delete deployments.apps demo
- deployment.apps "demo" deleted
- [root@node22 sa]# kubectl get deployments.apps -n kube-system    # still no access to other namespaces
- Error from server (Forbidden): deployments.apps is forbidden: User "test" cannot list resource "deployments" in API group "apps" in the namespace "kube-system"
- [root@node22 sa]# kubectl config use-context kubernetes-admin@kubernetes    # switch back to admin
- Switched to context "kubernetes-admin@kubernetes".
Create a ClusterRoleBinding:
- [root@node22 sa]# vim bind.yaml
- kind: RoleBinding
- apiVersion: rbac.authorization.k8s.io/v1
- metadata:
- name: test-read-pods
- namespace: default
- subjects:
- - kind: User
- name: test
- apiGroup: rbac.authorization.k8s.io
- roleRef:
- kind: Role
- name: myrole
- apiGroup: rbac.authorization.k8s.io
-
- ---
- apiVersion: rbac.authorization.k8s.io/v1
- kind: RoleBinding
- metadata:
- name: rolebind-myclusterrole
- namespace: default
- roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: myclusterrole
- subjects:
- - apiGroup: rbac.authorization.k8s.io
- kind: User
- name: test
-
- ---
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRoleBinding
- metadata:
- name: clusterrolebinding-myclusterrole
- roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: myclusterrole
- subjects:
- - apiGroup: rbac.authorization.k8s.io
- kind: User
- name: test
- [root@node22 sa]# kubectl apply -f bind.yaml    # the user can now operate across all namespaces in the cluster
- rolebinding.rbac.authorization.k8s.io/test-read-pods unchanged
- rolebinding.rbac.authorization.k8s.io/rolebind-myclusterrole unchanged
- clusterrolebinding.rbac.authorization.k8s.io/clusterrolebinding-myclusterrole created
Automation around service accounts
Service account admission controller (its net effect on a pod spec is sketched after this list):
• If the pod has no ServiceAccount set, it is set to default.
• Ensures the ServiceAccount referenced by the pod exists; otherwise the pod is rejected.
• If the pod carries no ImagePullSecrets, the ImagePullSecrets of its ServiceAccount are added to the pod.
• Adds a volume to the pod containing a token for API access.
• Adds a volumeSource mounted at /var/run/secrets/kubernetes.io/serviceaccount to every container in the pod.
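The result looks roughly like the following pod-spec fragment (a hedged sketch; in recent versions, including v1.23, this is a projected volume whose real name carries a random suffix):
volumes:
- name: kube-api-access       # actual name: kube-api-access-<random>
  projected:
    sources:
    - serviceAccountToken:
        path: token
    - configMap:
        name: kube-root-ca.crt
        items:
        - key: ca.crt
          path: ca.crt
containers:
- name: app
  volumeMounts:
  - name: kube-api-access
    mountPath: /var/run/secrets/kubernetes.io/serviceaccount
    readOnly: true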
Token controller
• Watches for service account creation and creates a corresponding Secret to support API access.
• Watches for service account deletion and deletes all corresponding token Secrets.
• Watches for Secret additions, ensures the referenced service account exists, and adds a token to the Secret if needed.
• Watches for Secret deletions and, if needed, removes the reference from the corresponding service account.
Service account controller
• Manages the service accounts in each namespace and ensures that every active namespace contains a service account named "default".
Kubernetes also has the concept of groups (Group):
• The built-in "user" name corresponding to a ServiceAccount is:
• system:serviceaccount:
• while the corresponding built-in group name is:
• system:serviceaccounts:
Example 1: all ServiceAccounts in mynamespace
subjects:
- kind: Group
name: system:serviceaccounts:mynamespace
apiGroup: rbac.authorization.k8s.io
Example 2: all ServiceAccounts in the entire system
subjects:
- kind: Group
name: system:serviceaccounts
apiGroup: rbac.authorization.k8s.io
Kubernetes also ships four predefined ClusterRoles for direct use:
• cluster-admin - full cluster management
• admin - administrative access
• edit - read/write
• view - read-only
Example (best practice):
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: readonly-default
subjects:
- kind: ServiceAccount
name: default
namespace: default
roleRef:
kind: ClusterRole
name: view
apiGroup: rbac.authorization.k8s.io