Some changes for ARM

Nikolai Rodionov 2023-11-05 16:16:24 +01:00
parent 70101a1c3f
commit 3463f91c77
Signed by: allanger
GPG Key ID: 19DB54039EBF8F10
5 changed files with 147 additions and 7 deletions

View File

@@ -13,7 +13,7 @@ releases:
     createNamespace: true
   - <<: *longhorn
-    installed: true
+    installed: false
     namespace: longhorn-system
     createNamespace: true
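
For context, this is the usual helmfile release-template pattern the hunk above is toggling; a minimal sketch, assuming the `*longhorn` anchor points at a shared release template defined elsewhere in the repository (the template body and chart reference below are illustrative, not taken from this repo). In helmfile, a release with `installed: false` is skipped and, if already deployed, uninstalled on the next `helmfile apply`:

    # sketch only: the real anchor definition lives elsewhere in the repository
    templates:
      longhorn: &longhorn
        name: longhorn
        chart: longhorn/longhorn     # assumed chart reference

    releases:
      - <<: *longhorn
        installed: false             # Longhorn disabled by this commit
        namespace: longhorn-system
        createNamespace: true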

View File

@@ -21,10 +21,6 @@ service:
       port: 1194
       protocol: TCP
       targetPort: 1194
-    - name: tcp
-      port: 25
-      protocol: TCP
-      targetPort: 25
 # -----------
 # -- Email
 # -----------
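
The four removed lines drop the SMTP entry (TCP port 25) from the chart's extra TCP port list, leaving only the OpenVPN-style port. Assuming these values are rendered into an ordinary Kubernetes Service (a sketch; the Service and port names are assumptions, only port 1194 and its protocol come from the values above), the result would look roughly like:

    apiVersion: v1
    kind: Service
    metadata:
      name: vpn                      # hypothetical Service name
    spec:
      type: ClusterIP
      ports:
        - name: openvpn              # assumed port name
          port: 1194
          protocol: TCP
          targetPort: 1194
        # the tcp/25 (SMTP) entry removed by this commit no longer appears here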

View File

@@ -17,7 +17,7 @@ istio:
 image:
   tag: v2.6.5-xor-4.0.0beta08
 storage:
-  class: longhorn
+  class: default
   size: 512Mi
 openvpn:
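
Switching `storage.class` from `longhorn` to `default` means the chart will now request its volume from the cluster's `default` StorageClass instead of Longhorn. Assuming the values map one-to-one onto a PersistentVolumeClaim (a sketch; the claim name and access mode are assumptions, only the class and size come from the values above):

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: data                     # hypothetical claim name
    spec:
      accessModes:
        - ReadWriteOnce              # assumed
      storageClassName: default      # was "longhorn" before this commit
      resources:
        requests:
          storage: 512Mi             # size from the values above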

View File

@@ -0,0 +1,144 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- Namespace of the main rook operator
operatorNamespace: rook-ceph
# -- The metadata.name of the CephCluster CR
# @default -- The same as the namespace
clusterName:
# -- Optional override of the target kubernetes version
kubeVersion:
# -- Cluster ceph.conf override
configOverride:
# configOverride: |
# [global]
# mon_allow_pool_delete = true
# osd_pool_default_size = 3
# osd_pool_default_min_size = 2
# Installs a debugging toolbox deployment
toolbox:
# -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
enabled: false
# -- Toolbox image, defaults to the image used by the Ceph cluster
image: #quay.io/ceph/ceph:v17.2.6
# -- Toolbox tolerations
tolerations: []
# -- Toolbox affinity
affinity: {}
# -- Toolbox container security context
containerSecurityContext:
runAsNonRoot: true
runAsUser: 2016
runAsGroup: 2016
capabilities:
drop: ["ALL"]
# -- Toolbox resources
resources:
limits:
cpu: "500m"
memory: "1Gi"
requests:
cpu: "100m"
memory: "128Mi"
# -- Set the priority class for the toolbox if desired
priorityClassName:
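# A hypothetical usage note (an assumption, not part of the chart's values): once
# `toolbox.enabled` is set to true, the toolbox is normally reached through its
# deployment, e.g. `kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status`.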
monitoring:
# -- Enable Prometheus integration; this also creates the RBAC rules the Operator needs to create ServiceMonitors.
# Monitoring requires Prometheus to be pre-installed
enabled: false
# -- Whether to create the Prometheus rules for Ceph alerts
createPrometheusRules: false
# -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
# If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
# deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
rulesNamespaceOverride:
# Monitoring settings for external clusters:
# externalMgrEndpoints: <list of endpoints>
# externalMgrPrometheusPort: <port>
# allow adding custom labels and annotations to the prometheus rule
prometheusRule:
# -- Labels applied to PrometheusRule
labels: {}
# -- Annotations applied to PrometheusRule
annotations: {}
# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
pspEnable: false
# The imagePullSecrets option allows pulling Docker images from a private registry. It will be passed to all service accounts.
# imagePullSecrets:
# - name: my-registry-secret
# All values below are taken from the CephCluster CRD
# -- Cluster configuration.
# @default -- See [below](#ceph-cluster-spec)
cephClusterSpec:
resources:
mgr:
limits:
memory: "1Gi"
requests:
cpu: "200m"
memory: "512Mi"
mon:
limits:
memory: "2Gi"
requests:
cpu: "250m"
memory: "1Gi"
osd:
requests:
cpu: "200m"
memory: "4Gi"
prepareosd:
# limits: It is not recommended to set limits on the OSD prepare job
# since it's a one-time burst for memory that must be allowed to
# complete without an OOM kill. Note however that if a k8s
# limitRange guardrail is defined external to Rook, the lack of
# a limit here may result in a sync failure, in which case a
# limit should be added. 1200Mi may suffice for up to 15Ti
# OSDs; for larger devices, 2Gi may be required.
# cf. https://github.com/rook/rook/pull/11103
requests:
cpu: "500m"
memory: "50Mi"
mgr-sidecar:
limits:
cpu: "500m"
memory: "100Mi"
requests:
cpu: "100m"
memory: "40Mi"
crashcollector:
limits:
cpu: "500m"
memory: "60Mi"
requests:
cpu: "100m"
memory: "60Mi"
logcollector:
limits:
cpu: "500m"
memory: "1Gi"
requests:
cpu: "100m"
memory: "100Mi"
cleanup:
limits:
cpu: "500m"
memory: "1Gi"
requests:
cpu: "500m"
memory: "100Mi"
exporter:
limits:
cpu: "250m"
memory: "128Mi"
requests:
cpu: "50m"
memory: "50Mi"

View File

@@ -7,7 +7,7 @@ bases:
 releases:
   - <<: *metrics-server
-    installed: true
+    installed: false
     namespace: kube-system
     createNamespace: true