cephs working! no idea what changed :p
@@ -1,17 +0,0 @@
-apiVersion: ceph.rook.io/v1
-kind: CephFilesystem
-metadata:
-  name: newfs
-  namespace: rook-ceph
-spec:
-  metadataPool:
-    replicated:
-      size: 3
-  dataPools:
-    - name: replicated
-      replicated:
-        size: 3
-  preserveFilesystemOnDelete: false
-  metadataServer:
-    activeCount: 1
-    activeStandby: true
@@ -107,7 +107,7 @@ spec:
       # Whether to compress the data in transit across the wire. The default is false.
       # See the kernel requirements above for encryption.
       compression:
-        enabled: true
+        enabled: false
       # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
       # and clients will be required to connect to the Ceph cluster with the v2 port (3300).
       # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
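Note: on-the-wire compression is switched off here. A quick way to check what the cluster actually ended up with — a sketch assuming the Rook toolbox (deployment name rook-ceph-tools, not part of this diff) is deployed:

    kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph config dump | grep -i compress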
@@ -262,13 +262,13 @@ spec:
     #crashcollector: rook-ceph-crashcollector-priority-class
   storage: # cluster level storage configuration and selection
     useAllNodes: true
-    useAllDevices: false
-    deviceFilter: vdb
+    useAllDevices: true
+    #deviceFilter:
     config:
       # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
       # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
       # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
-      osdsPerDevice: "1" # this value can be overridden at the node or device level
+      # osdsPerDevice: "1" # this value can be overridden at the node or device level
       # encryptedDevice: "true" # the default value for this option is "false"
       # deviceClass: "myclass" # specify a device class for OSDs in the cluster
     allowDeviceClassUpdate: false # whether to allow changing the device class of an OSD after it is created
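Note: with useAllDevices: true and the vdb device filter dropped, the operator should now consume every empty device it finds on every node. A hedged way to confirm OSDs actually came up (again assuming the rook-ceph-tools toolbox deployment):

    kubectl -n rook-ceph get pods -l app=rook-ceph-osd -o wide
    kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph osd tree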
@@ -353,4 +353,4 @@ spec:
       mgr:
         disabled: false
       osd:
         disabled: false
@@ -1326,4 +1326,4 @@ metadata:
   name: rook-csi-rbd-provisioner-sa
   namespace: rook-ceph # namespace:operator
 # imagePullSecrets:
 # - name: my-registry-secret
@@ -574,7 +574,6 @@ spec:
                 type: string
               targetSizeRatio:
                 description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
-                minimum: 0
                 type: number
             required:
             - size
@@ -1497,11 +1496,6 @@ spec:
                     - quick
                     type: string
                 type: object
-              wipeDevicesFromOtherClusters:
-                description: |-
-                  WipeDevicesFromOtherClusters wipes the OSD disks belonging to other clusters. This is useful in scenarios where ceph cluster
-                  was reinstalled but OSD disk still contains the metadata from previous ceph cluster.
-                type: boolean
             type: object
           continueUpgradeAfterChecksEvenIfNotHealthy:
             description: ContinueUpgradeAfterChecksEvenIfNotHealthy defines if an upgrade should continue even if PGs are not clean
@@ -7368,7 +7362,6 @@ spec:
|
|||||||
type: string
|
type: string
|
||||||
targetSizeRatio:
|
targetSizeRatio:
|
||||||
description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
|
description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
|
||||||
minimum: 0
|
|
||||||
type: number
|
type: number
|
||||||
required:
|
required:
|
||||||
- size
|
- size
|
||||||
@@ -7557,7 +7550,6 @@ spec:
|
|||||||
type: string
|
type: string
|
||||||
targetSizeRatio:
|
targetSizeRatio:
|
||||||
description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
|
description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
|
||||||
minimum: 0
|
|
||||||
type: number
|
type: number
|
||||||
required:
|
required:
|
||||||
- size
|
- size
|
||||||
@@ -11218,7 +11210,6 @@ spec:
|
|||||||
type: string
|
type: string
|
||||||
targetSizeRatio:
|
targetSizeRatio:
|
||||||
description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
|
description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
|
||||||
minimum: 0
|
|
||||||
type: number
|
type: number
|
||||||
required:
|
required:
|
||||||
- size
|
- size
|
||||||
@@ -12819,7 +12810,6 @@ spec:
|
|||||||
type: string
|
type: string
|
||||||
targetSizeRatio:
|
targetSizeRatio:
|
||||||
description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
|
description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
|
||||||
minimum: 0
|
|
||||||
type: number
|
type: number
|
||||||
required:
|
required:
|
||||||
- size
|
- size
|
||||||
@@ -13751,7 +13741,6 @@ spec:
|
|||||||
type: string
|
type: string
|
||||||
targetSizeRatio:
|
targetSizeRatio:
|
||||||
description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
|
description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
|
||||||
minimum: 0
|
|
||||||
type: number
|
type: number
|
||||||
required:
|
required:
|
||||||
- size
|
- size
|
||||||
@@ -13935,7 +13924,6 @@ spec:
|
|||||||
type: string
|
type: string
|
||||||
targetSizeRatio:
|
targetSizeRatio:
|
||||||
description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
|
description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity
|
||||||
minimum: 0
|
|
||||||
type: number
|
type: number
|
||||||
required:
|
required:
|
||||||
- size
|
- size
|
||||||
@@ -14906,4 +14894,4 @@ spec:
|
|||||||
type: object
|
type: object
|
||||||
x-kubernetes-preserve-unknown-fields: true
|
x-kubernetes-preserve-unknown-fields: true
|
||||||
subresources:
|
subresources:
|
||||||
status: {}
|
status: {}
|
||||||
|
|||||||
manifest/04-ceph/example.yaml (new file, 67 lines)
@@ -0,0 +1,67 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: cephfs-pvc
+  namespace: kube-system
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: rook-cephfs
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kube-registry
+  namespace: kube-system
+  labels:
+    k8s-app: kube-registry
+    kubernetes.io/cluster-service: "true"
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      k8s-app: kube-registry
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-registry
+        kubernetes.io/cluster-service: "true"
+    spec:
+      containers:
+        - name: registry
+          image: registry:2
+          imagePullPolicy: Always
+          resources:
+            limits:
+              memory: 100Mi
+          env:
+            # Configuration reference: https://docs.docker.com/registry/configuration/
+            - name: REGISTRY_HTTP_ADDR
+              value: :5000
+            - name: REGISTRY_HTTP_SECRET
+              value: "Ple4seCh4ngeThisN0tAVerySecretV4lue"
+            - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
+              value: /var/lib/registry
+          volumeMounts:
+            - name: image-store
+              mountPath: /var/lib/registry
+          ports:
+            - containerPort: 5000
+              name: registry
+              protocol: TCP
+          livenessProbe:
+            httpGet:
+              path: /
+              port: registry
+          readinessProbe:
+            httpGet:
+              path: /
+              port: registry
+      volumes:
+        - name: image-store
+          persistentVolumeClaim:
+            claimName: cephfs-pvc
+            readOnly: false
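Note: once applied, the registry replicas should all bind the same CephFS-backed claim (ReadWriteMany). A hedged smoke test:

    kubectl -n kube-system get pvc cephfs-pvc
    kubectl -n kube-system rollout status deploy/kube-registry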
manifest/04-ceph/filesystem.yaml (new file, 135 lines)
@@ -0,0 +1,135 @@
+#################################################################################################################
+# Create a filesystem with settings with replication enabled for a production environment.
+# A minimum of 3 OSDs on different nodes are required in this example.
+# If one mds daemon per node is too restrictive, see the podAntiAffinity below.
+#  kubectl create -f filesystem.yaml
+#################################################################################################################
+
+apiVersion: ceph.rook.io/v1
+kind: CephFilesystem
+metadata:
+  name: myfs
+  namespace: rook-ceph # namespace:cluster
+spec:
+  # The metadata pool spec. Must use replication.
+  metadataPool:
+    replicated:
+      size: 3
+      requireSafeReplicaSize: true
+    parameters:
+      # Inline compression mode for the data pool
+      # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
+      compression_mode:
+        none
+        # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
+        # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
+        #target_size_ratio: ".5"
+  # The list of data pool specs. Can use replication or erasure coding.
+  dataPools:
+    - name: replicated
+      failureDomain: host
+      replicated:
+        size: 3
+        # Disallow setting pool with replica 1, this could lead to data loss without recovery.
+        # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
+        requireSafeReplicaSize: true
+      parameters:
+        # Inline compression mode for the data pool
+        # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
+        compression_mode:
+          none
+          # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
+          # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
+          #target_size_ratio: ".5"
+  # Whether to preserve filesystem after CephFilesystem CRD deletion
+  preserveFilesystemOnDelete: true
+  # The metadata service (mds) configuration
+  metadataServer:
+    # The number of active MDS instances
+    activeCount: 1
+    # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover.
+    # If false, standbys will be available, but will not have a warm cache.
+    activeStandby: true
+    # The affinity rules to apply to the mds deployment
+    placement:
+      #  nodeAffinity:
+      #    requiredDuringSchedulingIgnoredDuringExecution:
+      #      nodeSelectorTerms:
+      #      - matchExpressions:
+      #        - key: role
+      #          operator: In
+      #          values:
+      #          - mds-node
+      #  topologySpreadConstraints:
+      #  tolerations:
+      #  - key: mds-node
+      #    operator: Exists
+      #  podAffinity:
+      podAntiAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+                - key: app
+                  operator: In
+                  values:
+                    - rook-ceph-mds
+            ## Add this if you want to allow mds daemons for different filesystems to run on one
+            ## node. The value in "values" must match .metadata.name.
+            # - key: rook_file_system
+            #   operator: In
+            #   values:
+            #     - myfs
+            # topologyKey: kubernetes.io/hostname will place MDS across different hosts
+            topologyKey: kubernetes.io/hostname
+        preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                  - key: app
+                    operator: In
+                    values:
+                      - rook-ceph-mds
+              # topologyKey: */zone can be used to spread MDS across different AZ
+              topologyKey: topology.kubernetes.io/zone
+    # A key/value list of annotations
+    # annotations:
+    #  key: value
+    # A key/value list of labels
+    # labels:
+    #  key: value
+    # resources:
+    # The requests and limits set here, allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory
+    #  limits:
+    #    memory: "1024Mi"
+    #  requests:
+    #    cpu: "500m"
+    #    memory: "1024Mi"
+    priorityClassName: system-cluster-critical
+    livenessProbe:
+      disabled: false
+    startupProbe:
+      disabled: false
+  # Filesystem mirroring settings
+  # mirroring:
+    # enabled: true
+    # # list of Kubernetes Secrets containing the peer token
+    # # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers
+    # # Add the secret name if it already exists else specify the empty list here.
+    # peers:
+      # secretNames:
+        # - secondary-cluster-peer
+    # # specify the schedule(s) on which snapshots should be taken
+    # # see the official syntax here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules
+    # snapshotSchedules:
+      # - path: /
+      #   interval: 24h # daily snapshots
+        # # The startTime should be mentioned in the format YYYY-MM-DDTHH:MM:SS
+        # # If startTime is not specified, then by default the start time is considered as midnight UTC.
+        # # see usage here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#usage
+        # # startTime: 2022-07-15T11:55:00
+    # # manage retention policies
+    # # see syntax duration here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies
+    # snapshotRetention:
+      # - path: /
+      #   duration: "h 24"
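Note: after kubectl create -f filesystem.yaml, the filesystem and its MDS daemons can be checked like this — a hedged sketch, again assuming the rook-ceph-tools toolbox deployment:

    kubectl -n rook-ceph get cephfilesystem myfs
    kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph fs status myfs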
@@ -11,25 +11,19 @@ spec:
     name: letsencrypt-prod
   kind: ClusterIssuer
 ---
-apiVersion: networking.k8s.io/v1
-kind: Ingress
+apiVersion: traefik.io/v1alpha1
+kind: IngressRoute
 metadata:
   name: rook-ceph-dashboard-ingress
   namespace: rook-ceph
-  annotations:
-    traefik.ingress.kubernetes.io/router.middlewares: default-allow-local-only@kubernetescrd, default-redirect-https@kubernetescrd
 spec:
-  ingressClassName: traefik
-  rules:
-    - host: rook-ceph.skrd.fun
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: rook-ceph-mgr-dashboard
-                port:
-                  name: http-dashboard
+  entryPoints:
+    - websecure
+  routes:
+    - match: "Host(`rook-ceph.skrd.fun`)"
+      kind: Rule
+      services:
+        - name: rook-ceph-mgr-dashboard
+          port: http-dashboard
   tls:
-    - secretName: rook-ceph-skrd-fun-tls
+    secretName: rook-ceph-skrd-fun-tls
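Note: the dashboard moves from a plain Ingress to Traefik's own IngressRoute CRD on the websecure entrypoint; the allow-local-only/redirect-https middleware annotation is dropped in the process. A hedged check that the route object was accepted (requires the Traefik CRDs to be installed):

    kubectl -n rook-ceph get ingressroute rook-ceph-dashboard-ingress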
@@ -695,4 +695,4 @@ spec:
           emptyDir: {}
         - name: default-config-dir
           emptyDir: {}
 # OLM: END OPERATOR DEPLOYMENT
@@ -1,67 +0,0 @@
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: cephfs-pvc
-  namespace: kube-system
-spec:
-  accessModes:
-    - ReadWriteMany
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: rook-cephfs
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: kube-registry
-  namespace: kube-system
-  labels:
-    k8s-app: kube-registry
-    kubernetes.io/cluster-service: "true"
-spec:
-  replicas: 3
-  selector:
-    matchLabels:
-      k8s-app: kube-registry
-  template:
-    metadata:
-      labels:
-        k8s-app: kube-registry
-        kubernetes.io/cluster-service: "true"
-    spec:
-      containers:
-        - name: registry
-          image: registry:2
-          imagePullPolicy: Always
-          resources:
-            limits:
-              memory: 100Mi
-          env:
-            # Configuration reference: https://docs.docker.com/registry/configuration/
-            - name: REGISTRY_HTTP_ADDR
-              value: :5000
-            - name: REGISTRY_HTTP_SECRET
-              value: "Ple4seCh4ngeThisN0tAVerySecretV4lue"
-            - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
-              value: /var/lib/registry
-          volumeMounts:
-            - name: image-store
-              mountPath: /var/lib/registry
-          ports:
-            - containerPort: 5000
-              name: registry
-              protocol: TCP
-          livenessProbe:
-            httpGet:
-              path: /
-              port: registry
-          readinessProbe:
-            httpGet:
-              path: /
-              port: registry
-      volumes:
-        - name: image-store
-          persistentVolumeClaim:
-            claimName: cephfs-pvc
-            readOnly: false
manifest/04-ceph/storageclass.yaml (new file, 44 lines)
@@ -0,0 +1,44 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: rook-cephfs
+provisioner: rook-ceph.cephfs.csi.ceph.com # csi-provisioner-name
+parameters:
+  # clusterID is the namespace where the rook cluster is running
+  # If you change this namespace, also change the namespace below where the secret namespaces are defined
+  clusterID: rook-ceph # namespace:cluster
+
+  # CephFS filesystem name into which the volume shall be created
+  fsName: myfs
+
+  # Ceph pool into which the volume shall be created
+  # Required for provisionVolume: "true"
+  pool: myfs-replicated
+
+  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
+  # in the same namespace as the cluster.
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster
+
+  # (optional) Set it to true to encrypt each volume with encryption keys
+  # from a key management system (KMS)
+  # encrypted: "true"
+
+  # (optional) Use external key management system (KMS) for encryption key by
+  # specifying a unique ID matching a KMS ConfigMap. The ID is only used for
+  # correlation to configmap entry.
+  # encryptionKMSID: <kms-config-id>
+
+  # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel)
+  # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse
+  # or by setting the default mounter explicitly via --volumemounter command-line argument.
+  # mounter: kernel
+reclaimPolicy: Delete
+allowVolumeExpansion: true
+mountOptions:
+  # uncomment the following line for debugging
+  #- debug
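Note: the pool parameter follows Rook's <fsName>-<dataPool name> convention, so myfs-replicated matches the dataPools entry named replicated in filesystem.yaml (the old class pointed at the now-deleted newfs pools). A hedged way to verify the pool exists before provisioning PVCs, toolbox assumed:

    kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph osd pool ls | grep myfs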
@@ -1,28 +0,0 @@
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: rook-cephfs
-# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
-provisioner: rook-ceph.cephfs.csi.ceph.com
-parameters:
-  # clusterID is the namespace where the rook cluster is running
-  # If you change this namespace, also change the namespace below where the secret namespaces are defined
-  clusterID: rook-ceph
-
-  # CephFS filesystem name into which the volume shall be created
-  fsName: newfs
-
-  # Ceph pool into which the volume shall be created
-  # Required for provisionVolume: "true"
-  pool: newfs-replicated
-
-  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
-  # in the same namespace as the cluster.
-  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
-  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
-  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
-  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
-  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
-  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
-
-reclaimPolicy: Delete
@@ -1,38 +0,0 @@
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: cephfs-pvc
-  namespace: default
-spec:
-  accessModes:
-    - ReadWriteMany
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: rook-cephfs
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: cephfs-nginx
-  namespace: default
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: cephfs-nginx
-  template:
-    metadata:
-      labels:
-        app: cephfs-nginx
-    spec:
-      containers:
-        - name: nginx
-          image: nginx:latest
-          volumeMounts:
-            - name: cephfs-volume
-              mountPath: /usr/share/nginx/html
-      volumes:
-        - name: cephfs-volume
-          persistentVolumeClaim:
-            claimName: cephfs-pvc