# View the help
kubectl explain pods.spec.volumes
awsElasticBlockStore    <Object>    # configuration for an AWS Elastic Block Store (EBS) volume
azureDisk               <Object>    # configuration for an Azure Disk volume
azureFile               <Object>    # configuration for an Azure File volume
cephfs                  <Object>    # configuration for a Ceph filesystem volume
cinder                  <Object>    # configuration for a Cinder volume
configMap               <Object>    # configuration for a ConfigMap volume
csi                     <Object>    # configuration for a Container Storage Interface (CSI) volume
downwardAPI             <Object>    # configuration for a Downward API volume
emptyDir                <Object>    # configuration for an emptyDir volume, a temporary volume
ephemeral               <Object>    # configuration for an ephemeral volume whose lifecycle matches the Pod's
fc                      <Object>    # configuration for a Fibre Channel (FC) volume
flexVolume              <Object>    # configuration for a FlexVolume volume
flocker                 <Object>    # configuration for a Flocker volume
gcePersistentDisk       <Object>    # configuration for a Google Compute Engine (GCE) persistent disk volume
gitRepo                 <Object>    # configuration for a Git repository volume
glusterfs               <Object>    # configuration for a GlusterFS volume
hostPath                <Object>    # configuration for a hostPath volume, which uses a file or directory on the host as the volume
iscsi                   <Object>    # configuration for an iSCSI volume
name                    <string> -required-    # name of the volume (required)
nfs                     <Object>    # configuration for an NFS volume
persistentVolumeClaim   <Object>    # configuration for a PersistentVolumeClaim (PVC)
photonPersistentDisk    <Object>    # configuration for a Photon persistent disk volume
portworxVolume          <Object>    # configuration for a Portworx volume
projected               <Object>    # configuration for a projected volume, which can project several volume types into a single volume
quobyte                 <Object>    # configuration for a Quobyte volume
rbd                     <Object>    # configuration for a Rados Block Device (RBD) volume
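Any of the <Object> fields above can be expanded one level further. As an example, drilling into emptyDir (output abbreviated and paraphrased; the exact formatting varies by kubectl version):

kubectl explain pods.spec.volumes.emptyDir
# FIELDS:
#   medium      <string>    # storage medium backing the volume: "" for the node's default disk, "Memory" for tmpfs
#   sizeLimit   <string>    # maximum size of the volume, e.g. "500Mi"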
emptyDir (temporary directory)
This directory is shared by all containers in the Pod, and each container can read and write files in it. The lifecycle of an emptyDir volume matches that of the Pod: the data survives container restarts, but when the Pod is deleted, the data in the emptyDir is removed with it.
# Create a volume
# Create the YAML
cat > linshi-dir.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
  name: stor
spec:
  containers:
  - name: test1
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - mountPath: /cache    # mount into the container at the temporary directory /cache
      name: linshi
  volumes:
  - name: linshi
    emptyDir: {}           # create a temporary directory as the container's volume
EOF
kubectl apply -f linshi-dir.yaml
kubectl get po -owide
NAME   READY   STATUS    RESTARTS   AGE     IP             NODE           NOMINATED NODE   READINESS GATES
stor   1/1     Running   0          3m28s   10.10.234.66   ws-k8s-node2   <none>           <none>

# Inspect and test: find the temporary directory on the node
kubectl get pods stor -o yaml | grep uid
  uid: 35339f94-e827-4227-be53-9b0ac7116ec5

# On the node
ls /var/lib/kubelet/pods -l
total 0
drwxr-x--- 5 root root 71 Jan 19 08:11 35339f94-e827-4227-be53-9b0ac7116ec5
drwxr-x--- 5 root root 71 Jan 19 08:11 cc056149-ee92-4080-a8d5-15de19f4dee5
drwxr-x--- 5 root root 71 Jan  6 18:36 e6696d51-c037-49a8-bfeb-c0c452b0558b
drwxr-x--- 5 root root 71 Jan 19 08:11 eaec4ad0-b509-472d-9c8f-7271b6379482
cd 35339f94-e827-4227-be53-9b0ac7116ec5
cd volumes/kubernetes.io~empty-dir

# Enter the container
kubectl exec -it stor -- /bin/bash
touch /cache/1.txt

# Check on the node
ls linshi/
1.txt

# Delete the Pod
kubectl delete -f linshi-dir.yaml
# The directory has disappeared along with the Pod
ls linshi/
ls: cannot access linshi/: No such file or directory
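A variation worth knowing: emptyDir can be backed by RAM (tmpfs) instead of node disk. A minimal sketch of the volumes section only; note that a memory-backed volume counts against the container's memory limit:

  volumes:
  - name: linshi
    emptyDir:
      medium: Memory    # back the volume with tmpfs (RAM) instead of node disk
      sizeLimit: 100Mi  # the Pod is evicted if the volume grows beyond this size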
hostPath
hostPath allows a directory on the node to be mounted into a container. It does not work across nodes, so you must make sure the Pod is always scheduled to the same node if it needs to see the same data.
It provides persistent storage, similar to a container bind mount; this carries security risks, so mount it read-only whenever possible, as sketched below.
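Both caveats can be expressed directly in the Pod spec. A minimal sketch (the Pod name stor-readonly is illustrative; the node name ws-k8s-node1 is taken from the cluster outputs below):

apiVersion: v1
kind: Pod
metadata:
  name: stor-readonly
spec:
  nodeName: ws-k8s-node1   # pin the Pod to one node so it always sees the same host directory
  containers:
  - name: test1
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - mountPath: /cache
      name: hostpath
      readOnly: true       # mount read-only to limit what a compromised container can do to the host
  volumes:
  - name: hostpath
    hostPath:
      path: /stor-test
      type: Directory      # fail to start instead of creating the directory if it does not exist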
# Create a directory on the node for the Pod to mount.
# This step can be skipped, because the type field below can create it.
#mkdir /stor-test

cat > hostpath-stor.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
  name: stor
spec:
  containers:
  - name: test1
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - mountPath: /cache      # mount into the container at /cache
      name: hostpath
  - name: test2
    image: docker.io/library/tomcat
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - mountPath: /cache
      name: hostpath
  volumes:
  - name: hostpath
    hostPath:
      path: /stor-test
      type: DirectoryOrCreate   # if nothing exists at the given path, an empty directory is created as needed
EOF
kubectl apply -f hostpath-stor.yaml
kubectl get pods -owide
NAME   READY   STATUS    RESTARTS     AGE   IP            NODE           NOMINATED NODE   READINESS GATES
stor   2/2     Running   1 (2s ago)   5s    10.10.179.2   ws-k8s-node1   <none>           <none>

# Test
kubectl exec -it stor -c test1 -- /bin/bash
touch /cache/1.txt
# Enter test2 and check
kubectl exec -it stor -c test2 -- /bin/bash
root@stor:/usr/local/tomcat# ls /cache/
1.txt
# test1 and test2 now share storage between the containers
kubectl delete -f hostpath-stor.yaml
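For reference, DirectoryOrCreate is only one of the check modes that hostPath.type supports; the commonly used values are:

# ""                 empty string (default): no check is performed before mounting
# DirectoryOrCreate  create an empty directory if nothing exists at the path
# Directory          a directory must already exist at the path
# FileOrCreate       create an empty file if nothing exists at the path
# File               a file must already exist at the path
# Socket             a UNIX socket must exist at the path
# CharDevice         a character device must exist at the path
# BlockDevice        a block device must exist at the path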
NFS persistent storage
NFS (Network File System) is a protocol and filesystem for sharing files across a computer network. It lets different machines access and share files over the network as if those files lived on the local filesystem.
NFS is a distributed filesystem: client machines mount and access filesystems on a remote server over the network. It follows a client-server model in which the server maintains the files stored in shared directories and grants clients access to them. Because any node can mount the same share, it makes up for hostPath's node-locality drawback.
# Install NFS on master2 and the node machines
yum -y install nfs-utils
systemctl enable nfs --now

# On the master
mkdir /dirfornfs
# Export /dirfornfs to all networks, read-write, with root access preserved (no_root_squash)
# Do not configure it like this in production
echo "/dirfornfs *(rw,no_root_squash)" >> /etc/exports
exportfs -arv

# Test from a node
mkdir test
mount 192.168.8.159:/dirfornfs test
df -Th | grep test
192.168.8.159:/dirfornfs nfs4   50G  7.6G   43G  16% /root/test

# On the master, create the YAML
cat > cunchu-nfs.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-nfs
spec:
  replicas: 2
  minReadySeconds: 10
  selector:
    matchLabels:
      cunchu: nfs
  template:
    metadata:
      name: nfs-pod
      labels:
        cunchu: nfs
    spec:
      containers:
      - name: test-pod
        image: docker.io/library/nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        volumeMounts:
        - name: nfs
          mountPath: /usr/share/nginx/html   # nginx default web root
      volumes:
      - name: nfs
        nfs:                     # IP and directory of the server to mount
          path: /dirfornfs
          server: 192.168.8.159
EOF
kubectl apply -f cunchu-nfs.yaml
kubectl get pods -owide
NAME                        READY   STATUS    RESTARTS   AGE   IP             NODE           NOMINATED NODE   READINESS GATES
test-nfs-5559d84cd6-sb25b   1/1     Running   0          62s   10.10.179.4    ws-k8s-node1   <none>           <none>
test-nfs-5559d84cd6-w77mf   1/1     Running   0          62s   10.10.234.67   ws-k8s-node2   <none>           <none>

# Create the home page on the NFS server
cd /dirfornfs/
echo '123' > index.html

# Access both Pods; the page content is already in sync
curl 10.10.179.4:80
123
curl 10.10.234.67:80
123
kubectl delete -f cunchu-nfs.yaml
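Since the Pods in this example only read the shared page, the mount can also be declared read-only on the Kubernetes side. A sketch of the volumes fragment only:

      volumes:
      - name: nfs
        nfs:
          path: /dirfornfs
          server: 192.168.8.159
          readOnly: true   # the export stays rw on the server, but this Pod mounts the share read-only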