Environment

Name                       Version
OS                         CentOS 7.6
Cluster deployment method  kubeadm
Kubernetes version         v1.18.3
Ceph version               v14.2.10

Using RBD as a persistent data volume in Kubernetes

Mounting RBD the traditional way with PV & PVC

1. Create a pool on Ceph and enable the RBD application

# Create the pool and enable the rbd application on it
[root@ceph-node01 ceph]# ceph osd pool create rbd 8 8
pool 'rbd' created
[root@ceph-node01 ceph]# ceph osd pool ls
rbd
[root@ceph-node01 ceph]# ceph osd pool application enable rbd rbd
enabled application 'rbd' on pool 'rbd'

# Create an image for the PV/PVC to use later
[root@ceph-node01 ceph]# rbd create --size 10240 image
[root@ceph-node01 ceph]# rbd ls
image
[root@ceph-node01 ceph]# rbd feature disable image object-map fast-diff deep-flatten
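These three features are disabled because the kernel RBD client that kubelet uses on CentOS 7's stock kernel typically cannot map images with object-map, fast-diff or deep-flatten enabled; layering is enough here. A quick sanity check (output varies with your Ceph version):

[root@ceph-node01 ceph]# rbd info image
# the "features:" line should no longer list object-map, fast-diff or deep-flatten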

2. Copy the Ceph cluster's ceph.conf and the admin user's keyring to every Kubernetes node (masters and workers)

[root@ceph-node01 ceph]# scp /etc/ceph/{ceph.conf,ceph.client.admin.keyring} k8s-master.nnv5.cn:/etc/ceph/
[root@ceph-node01 ceph]# scp /etc/ceph/{ceph.conf,ceph.client.admin.keyring} k8s-node01.nnv5.cn:/etc/ceph/
[root@ceph-node01 ceph]# scp /etc/ceph/{ceph.conf,ceph.client.admin.keyring} k8s-node02.nnv5.cn:/etc/ceph/

3. Install ceph-common on every Kubernetes node

# Install the EPEL repository
yum -y install epel-release

# Configure the Ceph yum repository (contents of /etc/ceph.repo shown below; note this baseurl points at the
# Luminous packages, while rpm-nautilus/el7 would match the v14.2.10 cluster, though a Luminous client also works here)
cat /etc/yum.repos.d/ceph.repo
[ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/$basearch
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc

[ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS
enabled=0
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc

# Install ceph-common
yum -y install ceph-common
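With ceph-common installed and the ceph.conf plus admin keyring copied over in step 2, each Kubernetes node should be able to talk to the cluster directly; a quick sanity check from any node:

[root@k8s-node01 ~]# ceph -s
# should print the cluster status; if it hangs or errors, fix /etc/ceph/ceph.conf
# or the keyring before moving on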

4. Create a Kubernetes Secret from the Ceph admin key

# Get the Ceph admin key for Kubernetes to use
[root@ceph-node01 ceph]# ceph auth get-key client.admin | base64
QVFDYkx5RmZlaEp1R1JBQXVTSS9HTi9IdFMwSXkyeGwvV2R0TVE9PQ==

# Create the Secret
[root@k8s-master ~]# mkdir ceph-pv-pvc
[root@k8s-master ~]# cd ceph-pv-pvc/
[root@k8s-master ceph-pv-pvc]# vim ceph-secret.yml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret
data:
  # Note: when consuming Ceph directly through a PV and PVC the key must be base64-encoded
  # (the StorageClass approach later uses the raw key instead).
  # The key below is the admin key retrieved from the Ceph cluster above.
  key: QVFDYkx5RmZlaEp1R1JBQXVTSS9HTi9IdFMwSXkyeGwvV2R0TVE9PQ==

[root@k8s-master ceph-pv-pvc]# kubectl apply -f ceph-secret.yml
secret/ceph-secret created
[root@k8s-master ceph-pv-pvc]# kubectl get secrets
NAME TYPE DATA AGE
ceph-secret Opaque 1 3s
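If you want to confirm the Secret holds the right key, decode it back and compare with the raw admin key (a quick check):

[root@k8s-master ceph-pv-pvc]# kubectl get secret ceph-secret -o jsonpath='{.data.key}' | base64 -d
# should print AQCbLyFfehJuGRAAuSI/GN/HtS0Iy2xl/WdtMQ== (with no trailing newline)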

5. Create a PV

[root@k8s-master ceph-pv-pvc]# vim test-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ceph-test-pv
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  rbd:
    # Ceph monitor nodes
    monitors:
      - 192.168.1.71:6789
      - 192.168.1.72:6789
      - 192.168.1.73:6789
    # Name of the Ceph pool
    pool: rbd
    # Name of the image created in that pool
    image: image
    # Ceph user ID
    user: admin
    # Name of the Secret created earlier from the Ceph admin key
    secretRef:
      name: ceph-secret
    fsType: xfs
    readOnly: false
  persistentVolumeReclaimPolicy: Recycle


# Apply the manifest and check the resulting resource
[root@k8s-master ceph-pv-pvc]# kubectl apply -f test-pv.yaml
persistentvolume/ceph-test-pv created
[root@k8s-master ceph-pv-pvc]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
ceph-test-pv 2Gi RWO Recycle Available

6. Create a PVC to test binding

[root@k8s-master ceph-pv-pvc]# vim test-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi


# Apply the manifest and verify
[root@k8s-master ceph-pv-pvc]# kubectl apply -f test-pvc.yaml
persistentvolumeclaim/test-pvc created
[root@k8s-master ceph-pv-pvc]# kubectl get pv,pvc
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
persistentvolume/ceph-test-pv 2Gi RWO Recycle Bound default/test-pvc

NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/test-pvc Bound ceph-test-pv 2Gi RWO 21s

7. Deploy a Pod that mounts the PVC and test data persistence

[root@k8s-master ceph-pv-pvc]# vim pv-pvc-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: test-nginx
  name: test-nginx
spec:
  containers:
  - image: putianhui/myapp:v2
    name: test-nginx
    volumeMounts:
    - name: wwwroot
      mountPath: /usr/share/nginx/html
  restartPolicy: Always
  volumes:
  - name: wwwroot
    persistentVolumeClaim:
      claimName: test-pvc


[root@k8s-master ceph-pv-pvc]# kubectl apply -f pv-pvc-pod.yaml
pod/test-nginx created
[root@k8s-master ceph-pv-pvc]# kubectl get pod -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
test-nginx 1/1 Running 0 86s 10.244.2.16 k8s-node02.nnv5.cn <none> <none>


# Access the Pod IP to verify. The PVC is mounted over nginx's document root, which is empty, so nginx returns 403
[root@k8s-master ceph-pv-pvc]# curl 10.244.2.16
<html>
<head><title>403 Forbidden</title></head>
<body bgcolor="white">
<center><h1>403 Forbidden</h1></center>
<hr><center>nginx/1.12.2</center>
</body>
</html>


# Create an html file, copy it into the Pod's nginx document root, and access the Pod IP again to verify
[root@k8s-master ceph-pv-pvc]# echo 'ceph PV&PVC mount pod test' > index.html
[root@k8s-master ceph-pv-pvc]# kubectl cp ./index.html test-nginx:/usr/share/nginx/html
[root@k8s-master ceph-pv-pvc]# curl 10.244.2.16
ceph PV&PVC mount pod test


# Delete the Pod, recreate it from the yaml file, and verify the data persisted
[root@k8s-master ceph-pv-pvc]# kubectl delete -f pv-pvc-pod.yaml
pod "test-nginx" deleted
[root@k8s-master ceph-pv-pvc]# kubectl apply -f pv-pvc-pod.yaml
pod/test-nginx created
[root@k8s-master ceph-pv-pvc]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
test-nginx 1/1 Running 0 12s 10.244.2.17 k8s-node02.nnv5.cn <none> <none>

# Access the new Pod: the previous data is still there
[root@k8s-master ceph-pv-pvc]# curl 10.244.2.17
ceph PV&PVC mount pod test
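If you are curious what is happening underneath, the image is mapped as a kernel RBD device on whichever node runs the Pod; a quick look from that node (device and mount names will differ):

[root@k8s-node02 ~]# rbd showmapped
[root@k8s-node02 ~]# mount | grep rbd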

Dynamically provisioning RBD with a StorageClass

Note: dynamically provisioning PVs and PVCs with a StorageClass requires deploying rbd-provisioner. The official GitHub location is:
https://github.com/kubernetes-incubator/external-storage/tree/master/ceph/rbd/deploy/rbac

1. Deploy rbd-provisioner

# Download the rbd-provisioner deployment manifests into a local directory
[root@k8s-master ~]# mkdir sc-rbd && cd sc-rbd
[root@k8s-master sc-rbd]# cat >> list.txt <<EOF
clusterrole.yaml
clusterrolebinding.yaml
deployment.yaml
role.yaml
rolebinding.yaml
serviceaccount.yaml
EOF

# Fetch each manifest (same pattern as the cephfs-provisioner section later)
[root@k8s-master sc-rbd]# for i in `cat ./list.txt`;do wget https://raw.githubusercontent.com/kubernetes-incubator/external-storage/master/ceph/rbd/deploy/rbac/$i;done


[root@k8s-master sc-rbd]# ll
total 28
-rw-r--r-- 1 root root 275 Aug 1 11:54 clusterrolebinding.yaml
-rw-r--r-- 1 root root 743 Aug 1 11:54 clusterrole.yaml
-rw-r--r-- 1 root root 484 Aug 1 11:54 deployment.yaml
-rw-r--r-- 1 root root 106 Aug 1 11:54 list.txt
-rw-r--r-- 1 root root 255 Aug 1 11:54 rolebinding.yaml
-rw-r--r-- 1 root root 260 Aug 1 11:54 role.yaml
-rw-r--r-- 1 root root 70 Aug 1 11:54 serviceaccount.yaml


# Change the value of the PROVISIONER_NAME variable in deployment.yaml; the StorageClass created later must use the same value
# Note: the downloaded manifests all default to the default namespace. In this lab everything is moved to the kube-system namespace; only this one yaml is edited here, but for your own setup change the namespace in all of the yaml files
[root@k8s-master sc-rbd]# vim deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rbd-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: "quay.io/external_storage/rbd-provisioner:latest"
        env:
        - name: PROVISIONER_NAME
          value: kubernetes.io/rbd # note this value; the StorageClass created later must reference the same provisioner
      serviceAccount: rbd-provisioner


# Deploy
[root@k8s-master sc-rbd]# kubectl apply -f .
clusterrole.rbac.authorization.k8s.io/rbd-provisioner created
clusterrolebinding.rbac.authorization.k8s.io/rbd-provisioner created
deployment.apps/rbd-provisioner created
role.rbac.authorization.k8s.io/rbd-provisioner created
rolebinding.rbac.authorization.k8s.io/rbd-provisioner created
serviceaccount/rbd-provisioner created


[root@k8s-master sc-rbd]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
rbd-provisioner-796c77ddc9-x2p6f 1/1 Running 0 1m
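If the provisioner Pod is not Running, or a PVC created later stays Pending, its logs are the first place to look (a quick check):

[root@k8s-master sc-rbd]# kubectl -n kube-system logs deploy/rbd-provisioner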

2. Create the secret

Because the StorageClass requires the Ceph secret's type to be kubernetes.io/rbd, the secret created in the PV & PVC section above cannot be reused; create new ones as follows:

# Get the admin key from the Ceph cluster (note: no base64 encoding this time)
[root@k8s-master sc-rbd]# ceph auth get-key client.admin
AQCbLyFfehJuGRAAuSI/GN/HtS0Iy2xl/WdtMQ==

[root@k8s-master sc-rbd]# kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" --from-literal=key='AQCbLyFfehJuGRAAuSI/GN/HtS0Iy2xl/WdtMQ==' --namespace=kube-system
secret/ceph-secret created

[root@k8s-master sc-rbd]# kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" --from-literal=key='AQCbLyFfehJuGRAAuSI/GN/HtS0Iy2xl/WdtMQ==' --namespace=default
secret/ceph-secret created

3. Create the StorageClass

[root@k8s-master sc-rbd]# vim rbd-sc-test.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-storageclass
provisioner: kubernetes.io/rbd
parameters:
  # Your Ceph cluster's monitor IPs
  monitors: 192.168.1.71:6789,192.168.1.72:6789,192.168.1.73:6789
  # Ceph admin user ID, plus the name and namespace of the Kubernetes secret created from its key
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  # Ceph pool used by Kubernetes
  pool: rbd
  # Here the user ID and secret are the same as the admin ones
  userId: admin
  userSecretName: ceph-secret
  userSecretNamespace: default
  fsType: xfs
  imageFormat: "2"
  imageFeatures: "layering"


[root@k8s-master sc-rbd]# kubectl apply -f rbd-sc-test.yaml
storageclass.storage.k8s.io/ceph-storageclass created
[root@k8s-master sc-rbd]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
ceph-storageclass kubernetes.io/rbd Delete Immediate false 5s

4. Create a PVC that uses the ceph-storageclass created above

[root@k8s-master sc-rbd]# vim pvc-rbd-sc-test.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-sc-pvc
  annotations:
    volume.beta.kubernetes.io/storage-class: ceph-storageclass
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
  storageClassName: ceph-storageclass


[root@k8s-master sc-rbd]# kubectl apply -f pvc-rbd-sc-test.yaml
persistentvolumeclaim/ceph-sc-pvc created
[root@k8s-master sc-rbd]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
ceph-sc-pvc Bound pvc-2fea213d-892f-4659-84c7-7c55b593ab92 2Gi RWO ceph-storageclass 3s
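On the Ceph side, the provisioner should have created a new image in the rbd pool for this claim (its name starts with kubernetes-dynamic-pvc-, followed by a UUID that will differ in your cluster):

[root@ceph-node01 ceph]# rbd ls rbd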

Mount verification works exactly as in the PV & PVC section above, so it is not repeated here.
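For completeness, a minimal test Pod would look just like the earlier one, only pointing claimName at the dynamically provisioned claim (a sketch reusing the same putianhui/myapp:v2 image; the Pod name test-sc-nginx is just an example):

apiVersion: v1
kind: Pod
metadata:
  name: test-sc-nginx
spec:
  containers:
  - name: test-sc-nginx
    image: putianhui/myapp:v2
    volumeMounts:
    - name: wwwroot
      mountPath: /usr/share/nginx/html
  volumes:
  - name: wwwroot
    persistentVolumeClaim:
      claimName: ceph-sc-pvc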

Using CephFS as a persistent data volume for Pods

CephFS supports all three Kubernetes PV access modes: ReadWriteOnce, ReadOnlyMany and ReadWriteMany.

The sections above covered mounting Ceph RBD block devices in Kubernetes; this part briefly covers mounting a CephFS file system.

The secret created earlier can be reused directly, so no new secret is needed; and with the direct-mount approach shown at the end, no PV or PVC is needed either.

Creating the CephFS pools on the Ceph side

1. Run the following on a Ceph mon or admin node.
CephFS needs two pools, one for data and one for metadata:

[root@ceph-node01 ceph]# ceph osd pool create fs_data 128
[root@ceph-node01 ceph]# ceph osd pool create fs_metadata 128
[root@ceph-node01 ceph]# ceph osd lspools

2. Create the CephFS file system

[root@ceph-node01 ceph]# ceph fs new cephfs fs_metadata fs_data
[root@ceph-node01 ceph]# ceph fs ls
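Note that a CephFS is only mountable once an MDS daemon is serving it; this assumes an MDS was already deployed when the Ceph cluster was set up. A quick check:

[root@ceph-node01 ceph]# ceph mds stat
# expect the file system to show an MDS in up:active state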

Dynamically provisioning CephFS with a StorageClass

Note: the cephfs-provisioner project lives at: https://github.com/kubernetes-incubator/external-storage/tree/master/ceph/cephfs/deploy/rbac

1. Download the cephfs-provisioner deployment manifests

[root@k8s-master sc-cephfs]# cat >> list.txt <<EOF
clusterrole.yaml
clusterrolebinding.yaml
deployment.yaml
role.yaml
rolebinding.yaml
serviceaccount.yaml
EOF

[root@k8s-master sc-cephfs]# for i in `cat ./list.txt`;do wget https://raw.githubusercontent.com/kubernetes-incubator/external-storage/master/ceph/cephfs/deploy/rbac/$i;done

[root@k8s-master sc-cephfs]# ll
total 28
-rw-r--r-- 1 root root 288 Aug 1 13:33 clusterrolebinding.yaml
-rw-r--r-- 1 root root 657 Aug 1 13:32 clusterrole.yaml
-rw-r--r-- 1 root root 718 Aug 1 13:33 deployment.yaml
-rw-r--r-- 1 root root 104 Aug 1 13:31 list.txt
-rw-r--r-- 1 root root 268 Aug 1 13:34 rolebinding.yaml
-rw-r--r-- 1 root root 321 Aug 1 13:34 role.yaml
-rw-r--r-- 1 root root 98 Aug 1 13:34 serviceaccount.yaml

2. Edit the deployment manifests (remember to change the namespace)

Here the namespace in all of the manifests above is changed to kube-system (the default is default); the namespace edits themselves are not shown.

[root@k8s-master sc-cephfs]# vim deployment.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cephfs-provisioner
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cephfs-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: cephfs-provisioner
    spec:
      containers:
      - name: cephfs-provisioner
        image: "quay.io/external_storage/cephfs-provisioner:latest"
        env:
        - name: PROVISIONER_NAME
          # Note the value of PROVISIONER_NAME; if you change it here, the
          # StorageClass created later must use the same value
          value: ceph.com/cephfs
        - name: PROVISIONER_SECRET_NAMESPACE
          value: kube-system
        command:
        - "/usr/local/bin/cephfs-provisioner"
        args:
        - "-id=cephfs-provisioner-1"
      serviceAccount: cephfs-provisioner

# Deploy
[root@k8s-master sc-cephfs]# kubectl apply -f .
clusterrole.rbac.authorization.k8s.io/cephfs-provisioner created
clusterrolebinding.rbac.authorization.k8s.io/cephfs-provisioner created
deployment.apps/cephfs-provisioner created
role.rbac.authorization.k8s.io/cephfs-provisioner created
rolebinding.rbac.authorization.k8s.io/cephfs-provisioner created
serviceaccount/cephfs-provisioner created

[root@k8s-master sc-cephfs]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
cephfs-provisioner-6c4dc5f646-fwmqk 1/1 Running 0 31s

The secret is not recreated here; the one from the RBD section above is reused.
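If you skipped the RBD section, create it first the same way (same raw admin key, type kubernetes.io/rbd, in the kube-system namespace so the provisioner and StorageClass can find it):

[root@k8s-master sc-cephfs]# kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" --from-literal=key='AQCbLyFfehJuGRAAuSI/GN/HtS0Iy2xl/WdtMQ==' --namespace=kube-system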

3. Configure the StorageClass

[root@k8s-master storageclass]# vim sc-cephfs-test.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: cephfs
# Note: the provisioner value must match the PROVISIONER_NAME used when deploying cephfs-provisioner above
provisioner: ceph.com/cephfs
parameters:
  # Ceph cluster monitor IPs
  monitors: 192.168.1.71:6789,192.168.1.72:6789,192.168.1.73:6789
  adminId: admin
  # Reuses the secret created in the RBD section
  adminSecretName: ceph-secret
  adminSecretNamespace: "kube-system"
  claimRoot: /volumes/kubernetes


[root@k8s-master storageclass]# kubectl apply -f sc-cephfs-test.yaml
storageclass.storage.k8s.io/cephfs created

[root@k8s-master storageclass]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
cephfs ceph.com/cephfs Delete Immediate false 3s

4. Create a PVC

[root@k8s-master storageclass]# vim pvc-cephfs-test.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: cephfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: cephfs
  resources:
    requests:
      storage: 2Gi

[root@k8s-master storageclass]# kubectl apply -f pvc-cephfs-test.yaml
persistentvolumeclaim/cephfs-pvc created

[root@k8s-master storageclass]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
cephfs-pvc Bound pvc-84945dd9-f38f-4e8a-a479-76de12f9681a 2Gi RWX cephfs 3s
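To see what was provisioned on the Ceph side, you can mount the file system root on any node that has ceph-common and look under the claimRoot (a sketch; /mnt/cephfs is an arbitrary mount point, and the exact directory layout under /volumes depends on the cephfs-provisioner version):

[root@ceph-node01 ceph]# mkdir -p /mnt/cephfs
[root@ceph-node01 ceph]# mount -t ceph 192.168.1.71:6789:/ /mnt/cephfs -o name=admin,secret='AQCbLyFfehJuGRAAuSI/GN/HtS0Iy2xl/WdtMQ=='
[root@ceph-node01 ceph]# ls -R /mnt/cephfs/volumes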

5. Create a Pod that uses cephfs-pvc and verify persistence

[root@k8s-master ceph-pv-pvc]# vim pv-pvc-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: test-nginx
  name: test-nginx
spec:
  containers:
  - image: putianhui/myapp:v2
    name: test-nginx
    volumeMounts:
    - name: wwwroot
      mountPath: /usr/share/nginx/html
  restartPolicy: Always
  volumes:
  - name: wwwroot
    persistentVolumeClaim:
      claimName: cephfs-pvc


[root@k8s-master ceph-pv-pvc]# kubectl apply -f pv-pvc-pod.yaml
pod/test-nginx created
[root@k8s-master ceph-pv-pvc]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
test-nginx 1/1 Running 0 17s 10.244.2.22 k8s-node02.nnv5.cn <none> <none>


# Create a test page and copy it into the Pod
[root@k8s-master ceph-pv-pvc]# echo 'cephfs pod test' > index.html
[root@k8s-master ceph-pv-pvc]# kubectl cp ./index.html test-nginx:/usr/share/nginx/html
[root@k8s-master ceph-pv-pvc]# curl 10.244.2.22
cephfs pod test


# Delete and recreate the Pod to verify the data persisted
[root@k8s-master ceph-pv-pvc]# kubectl delete -f pv-pvc-pod.yaml
pod "test-nginx" deleted
[root@k8s-master ceph-pv-pvc]# kubectl apply -f pv-pvc-pod.yaml
pod/test-nginx created
[root@k8s-master ceph-pv-pvc]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
test-nginx 1/1 Running 0 21s 10.244.2.23 k8s-node02.nnv5.cn <none> <none>
[root@k8s-master ceph-pv-pvc]# curl 10.244.2.23
cephfs pod test

Mounting CephFS directly the traditional way

Earlier sections showed how to mount Ceph RBD block devices in Kubernetes; this section briefly shows mounting the Ceph file system directly.

The secret can be reused directly from above, so there is no need to create a new one, and no PV or PVC is needed either: the volume is mounted directly in the Deployment.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test
  template:
    metadata:
      labels:
        app: test
    spec:
      containers:
      - name: test
        image: putianhui/myapp:v2
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: data
      volumes:
      - name: data
        cephfs:
          monitors:
          - 192.168.1.71:6789
          - 192.168.1.72:6789
          - 192.168.1.73:6789
          user: admin
          secretRef:
            name: ceph-secret
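Without a path, the Pod gets the root of the CephFS file system. The inline cephfs volume also accepts a path field if you only want to expose a subdirectory, plus an optional readOnly flag; a sketch (/html is a hypothetical directory that must already exist on the file system):

      volumes:
      - name: data
        cephfs:
          monitors:
          - 192.168.1.71:6789
          - 192.168.1.72:6789
          - 192.168.1.73:6789
          path: /html          # hypothetical subdirectory of the CephFS root
          readOnly: false
          user: admin
          secretRef:
            name: ceph-secret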