# k8s-cluster-config/badhouseplants/values/values.rook-ceph-cluster.yaml
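# Values for the rook-ceph-cluster Helm chart. The "{{ .Release.Namespace }}"
# placeholders below are rendered by the chart's templates at install time,
# so this file is meant to be passed to Helm (or a wrapper such as Helmfile)
# rather than applied directly with kubectl.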

cephFileSystems:
  - name: ceph-filesystem
    spec:
      metadataPool:
        replicated:
          size: 3
      dataPools:
        - failureDomain: host
          replicated:
            size: 3
          name: data0
      metadataServer:
        activeCount: 1
        activeStandby: true
        resources:
          limits:
            cpu: "200m"
            memory: "256Mi"
          requests:
            cpu: "50m"
            memory: "128Mi"
        priorityClassName: system-cluster-critical
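    # StorageClass exposed for this filesystem. The CSI secret parameters
    # reference the provisioner/node secrets that the Rook operator creates
    # in the release namespace.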
    storageClass:
      enabled: true
      isDefault: false
      name: ceph-filesystem
      pool: data0
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      parameters:
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/fstype: ext4
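
# For reference, a PVC consuming the "ceph-filesystem" class above could look
# like this (a sketch; the claim name and size are placeholders):
#
#   apiVersion: v1
#   kind: PersistentVolumeClaim
#   metadata:
#     name: shared-data
#   spec:
#     accessModes:
#       - ReadWriteMany   # CephFS volumes support multi-node (RWX) access
#     resources:
#       requests:
#         storage: 5Gi
#     storageClassName: ceph-filesystem
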
cephObjectStores: []
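# The commented example below shows roughly what enabling an RGW object store
# would involve: a 3x replicated metadata pool, a 2+1 erasure-coded data pool,
# a single gateway instance on port 80, and a "ceph-bucket" StorageClass for
# object bucket claims.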
# - name: ceph-objectstore
#   spec:
#     metadataPool:
#       failureDomain: host
#       replicated:
#         size: 3
#     dataPool:
#       failureDomain: host
#       erasureCoded:
#         dataChunks: 2
#         codingChunks: 1
#     preservePoolsOnDelete: true
#     gateway:
#       port: 80
#       resources:
#         limits:
#           cpu: "150m"
#           memory: "256Mi"
#         requests:
#           cpu: "50m"
#           memory: "128Mi"
#       instances: 1
#       priorityClassName: system-cluster-critical
#   storageClass:
#     enabled: true
#     name: ceph-bucket
#     reclaimPolicy: Delete
#     volumeBindingMode: "Immediate"
#     parameters:
#       region: us-east-1
#   ingress:
#     enabled: false
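
# Requests and limits for the core Ceph daemons. OSDs get the largest memory
# ceiling (2Gi), while mon and mgr are capped at 512Mi each.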
cephClusterSpec:
  resources:
    mgr:
      limits:
        cpu: "200m"
        memory: "512Mi"
      requests:
        cpu: "100m"
        memory: "128Mi"
    mon:
      limits:
        cpu: "200m"
        memory: "512Mi"
      requests:
        cpu: "100m"
        memory: "128Mi"
    osd:
      limits:
        cpu: "200m"
        memory: "2Gi"
      requests:
        cpu: "100m"
        memory: "256Mi"
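
# After deployment, cluster health can be checked through the CephCluster CR,
# e.g. `kubectl -n rook-ceph get cephcluster` (assuming the chart is released
# into the usual rook-ceph namespace).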