longhorn/chart/values.yaml

# Default values for longhorn.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
  cattle:
    # -- System default registry
    systemDefaultRegistry: ""
    windowsCluster:
      # -- Enable this to allow Longhorn to run on a Rancher-deployed Windows cluster
      enabled: false
      # -- Tolerations that allow Longhorn user-deployed components to run on the Linux nodes
      tolerations:
        - key: "cattle.io/os"
          value: "linux"
          effect: "NoSchedule"
          operator: "Equal"
      # -- Select Linux nodes to run Longhorn user-deployed components
      nodeSelector:
        kubernetes.io/os: "linux"
      defaultSetting:
        # -- Toleration for Longhorn system managed components
        taintToleration: cattle.io/os=linux:NoSchedule
        # -- Node selector for Longhorn system managed components
        systemManagedComponentsNodeSelector: kubernetes.io/os:linux

networkPolicies:
  # -- Enable NetworkPolicies to limit access to the Longhorn pods
  enabled: false
  # -- Create the policy based on your distribution to allow access for the ingress. Options: `k3s`, `rke2`, `rke1`
  type: "k3s"

image:
  longhorn:
    engine:
      # -- Specify Longhorn engine image repository
      repository: longhornio/longhorn-engine
      # -- Specify Longhorn engine image tag
      tag: master-head
    manager:
      # -- Specify Longhorn manager image repository
      repository: longhornio/longhorn-manager
      # -- Specify Longhorn manager image tag
      tag: master-head
    ui:
      # -- Specify Longhorn UI image repository
      repository: longhornio/longhorn-ui
      # -- Specify Longhorn UI image tag
      tag: master-head
    instanceManager:
      # -- Specify Longhorn instance manager image repository
      repository: longhornio/longhorn-instance-manager
      # -- Specify Longhorn instance manager image tag
      tag: master-head
    shareManager:
      # -- Specify Longhorn share manager image repository
      repository: longhornio/longhorn-share-manager
      # -- Specify Longhorn share manager image tag
      tag: master-head
    backingImageManager:
      # -- Specify Longhorn backing image manager image repository
      repository: longhornio/backing-image-manager
      # -- Specify Longhorn backing image manager image tag
      tag: master-head
    supportBundleKit:
      # -- Specify Longhorn support bundle manager image repository
      repository: longhornio/support-bundle-kit
      # -- Specify Longhorn support bundle manager image tag
      tag: v0.0.27
  csi:
    attacher:
      # -- Specify CSI attacher image repository. Leave blank to autodetect
      repository: longhornio/csi-attacher
      # -- Specify CSI attacher image tag. Leave blank to autodetect
      tag: v4.2.0
    provisioner:
      # -- Specify CSI provisioner image repository. Leave blank to autodetect
      repository: longhornio/csi-provisioner
      # -- Specify CSI provisioner image tag. Leave blank to autodetect
      tag: v3.4.1
    nodeDriverRegistrar:
      # -- Specify CSI node driver registrar image repository. Leave blank to autodetect
      repository: longhornio/csi-node-driver-registrar
      # -- Specify CSI node driver registrar image tag. Leave blank to autodetect
      tag: v2.7.0
    resizer:
      # -- Specify CSI driver resizer image repository. Leave blank to autodetect
      repository: longhornio/csi-resizer
      # -- Specify CSI driver resizer image tag. Leave blank to autodetect
      tag: v1.7.0
    snapshotter:
      # -- Specify CSI driver snapshotter image repository. Leave blank to autodetect
      repository: longhornio/csi-snapshotter
      # -- Specify CSI driver snapshotter image tag. Leave blank to autodetect
      tag: v6.2.1
    livenessProbe:
      # -- Specify CSI liveness probe image repository. Leave blank to autodetect
      repository: longhornio/livenessprobe
      # -- Specify CSI liveness probe image tag. Leave blank to autodetect
      tag: v2.9.0
  openshift:
    oauthProxy:
      # -- For OpenShift users. Specify the OAuth proxy image repository
      repository: quay.io/openshift/origin-oauth-proxy
      # -- For OpenShift users. Specify the OAuth proxy image tag. Note: Use your OCP/OKD 4.x version; the current stable is 4.14
      tag: 4.14
  # -- Image pull policy that applies to all user-deployed Longhorn components, e.g. Longhorn manager, Longhorn driver, Longhorn UI
  pullPolicy: IfNotPresent

service:
  ui:
    # -- Define Longhorn UI service type. Options: `ClusterIP`, `NodePort`, `LoadBalancer`, `Rancher-Proxy`
    type: ClusterIP
    # -- NodePort port number (to set explicitly, choose port between 30000-32767)
    nodePort: null
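    ## A minimal sketch (hedged): expose the UI via a NodePort instead; the port number
    ## below is an example only, any free port in 30000-32767 works.
    # type: NodePort
    # nodePort: 30080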
  manager:
    # -- Define Longhorn manager service type.
    type: ClusterIP
    # -- NodePort port number (to set explicitly, choose port between 30000-32767)
    nodePort: ""

persistence:
  # -- Set Longhorn StorageClass as default
  defaultClass: true
  # -- Set filesystem type for Longhorn StorageClass
  defaultFsType: ext4
  # -- Set mkfs options for Longhorn StorageClass
  defaultMkfsParams: ""
  # -- Set replica count for Longhorn StorageClass
  defaultClassReplicaCount: 3
  # -- Set data locality for Longhorn StorageClass. Options: `disabled`, `best-effort`
  defaultDataLocality: disabled
  # -- Define reclaim policy. Options: `Retain`, `Delete`
  reclaimPolicy: Delete
  # -- Set volume migratable for Longhorn StorageClass
  migratable: false
  recurringJobSelector:
    # -- Enable recurring job selector for Longhorn StorageClass
    enable: false
    # -- Recurring job selector list for Longhorn StorageClass. Be careful with the quoting of the input. e.g., `[{"name":"backup", "isGroup":true}]`
    jobList: []
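    ## A minimal sketch (hedged), mirroring the example above: select the recurring jobs
    ## in the `backup` group for volumes created from this StorageClass:
    # enable: true
    # jobList: [{"name":"backup", "isGroup":true}]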
  backingImage:
    # -- Set backing image for Longhorn StorageClass
    enable: false
    # -- Specify a backing image that will be used by Longhorn volumes in Longhorn StorageClass. If it does not exist, the backing image data source type and backing image data source parameters should be specified so that Longhorn can create the backing image before using it
    name: ~
    # -- Specify the data source type for the backing image used in Longhorn StorageClass.
    # If the backing image does not exist, Longhorn will use this field to create a backing image. Otherwise, Longhorn will use it to verify the selected backing image.
    dataSourceType: ~
    # -- Specify the data source parameters for the backing image used in Longhorn StorageClass. This option accepts a json string of a map. e.g., `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`.
    dataSourceParameters: ~
    # -- Specify the expected SHA512 checksum of the selected backing image in Longhorn StorageClass
    expectedChecksum: ~
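    ## A minimal sketch (hedged): create the backing image from a download URL if it does
    ## not exist yet; the name and URL below are examples only:
    # enable: true
    # name: "example-backing-image"
    # dataSourceType: "download"
    # dataSourceParameters: '{"url":"https://backing-image-example.s3-region.amazonaws.com/test-backing-image"}'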
  defaultNodeSelector:
    # -- Enable node selector for Longhorn StorageClass
    enable: false
    # -- Only nodes that carry these tags can be used for volumes of this StorageClass. e.g. `"storage,fast"`
    selector: ""
  # -- Allow automatically removing snapshots during filesystem trim for Longhorn StorageClass. Options: `ignored`, `enabled`, `disabled`
  removeSnapshotsDuringFilesystemTrim: ignored

helmPreUpgradeCheckerJob:
  enabled: true

csi:
  # -- Specify kubelet root-dir. Leave blank to autodetect
  kubeletRootDir: ~
  # -- Specify replica count of CSI Attacher. Leave blank to use default count: 3
  attacherReplicaCount: ~
  # -- Specify replica count of CSI Provisioner. Leave blank to use default count: 3
  provisionerReplicaCount: ~
  # -- Specify replica count of CSI Resizer. Leave blank to use default count: 3
  resizerReplicaCount: ~
  # -- Specify replica count of CSI Snapshotter. Leave blank to use default count: 3
  snapshotterReplicaCount: ~

defaultSettings:
  # -- The endpoint used to access the backupstore. Available: NFS, CIFS, AWS, GCP, AZURE.
  backupTarget: ~
  # -- The name of the Kubernetes secret associated with the backup target.
  backupTargetCredentialSecret: ~
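  ## Minimal sketches (hedged) of backup target URLs; the bucket, region, server, and
  ## secret names below are examples only:
  # backupTarget: "s3://backupbucket@us-east-1/"
  # backupTargetCredentialSecret: "aws-secret"
  ## or, for an NFS backupstore:
  # backupTarget: "nfs://longhorn-test-nfs-svc.default:/opt/backupstore"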
  # -- If this setting is enabled, Longhorn automatically attaches the volume and takes a snapshot/backup
  # when it is time to do a recurring snapshot/backup.
  allowRecurringJobWhileVolumeDetached: ~
  # -- Create the default disk automatically only on nodes with the label "node.longhorn.io/create-default-disk=true", if no other disks exist.
  # If disabled, the default disk will be created on all new nodes when each node is first added.
  createDefaultDiskLabeledNodes: ~
  # -- Default path to use for storing data on a host. By default "/var/lib/longhorn/"
  defaultDataPath: ~
  # -- A Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume.
  defaultDataLocality: ~
  # -- Allow scheduling on nodes with existing healthy replicas of the same volume. By default false.
  replicaSoftAntiAffinity: ~
  # -- If enabled, Longhorn automatically rebalances replicas when an available node is discovered.
  replicaAutoBalance: ~
  # -- The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity. By default 200.
  storageOverProvisioningPercentage: ~
  # -- If the actual percentage of available disk capacity drops below this minimum,
  # the disk becomes unschedulable until more space is freed up. By default 25.
  storageMinimalAvailablePercentage: ~
  # -- The reserved percentage specifies the percentage of disk space that will not be allocated to the default disk on each new Longhorn node.
  storageReservedPercentageForDefaultDisk: ~
  # -- The Upgrade Checker will check for a new Longhorn version periodically.
  # When a new version is available, a notification appears in the UI. By default true.
  upgradeChecker: ~
  # -- The default number of replicas when a volume is created from the Longhorn UI.
  # For Kubernetes configuration, update the `numberOfReplicas` in the StorageClass. By default 3.
  defaultReplicaCount: ~
  # -- The 'storageClassName' is given to PVs and PVCs that are created for an existing Longhorn volume. The StorageClass name can also be used as a label,
  # so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object.
  # By default 'longhorn-static'.
  defaultLonghornStaticStorageClass: ~
  # -- In seconds. The backupstore poll interval determines how often Longhorn checks the backupstore for new backups.
  # Set to 0 to disable the polling. By default 300.
  backupstorePollInterval: ~
  # -- In minutes. This setting determines how long Longhorn keeps a backup resource that has failed. Set to 0 to disable the auto-deletion.
  failedBackupTTL: ~
  # -- Restore recurring jobs from the backup volume on the backup target, and create recurring jobs if they do not exist, during a backup restoration.
  restoreVolumeRecurringJobs: ~
  # -- This setting specifies how many successful backup or snapshot job histories should be retained. History will not be retained if the value is 0.
  recurringSuccessfulJobsHistoryLimit: ~
  # -- This setting specifies how many failed backup or snapshot job histories should be retained. History will not be retained if the value is 0.
  recurringFailedJobsHistoryLimit: ~
  # -- This setting specifies how many failed support bundles can exist in the cluster.
  # Set this value to **0** to have Longhorn automatically purge all failed support bundles.
  supportBundleFailedHistoryLimit: ~
  # -- Taint toleration for Longhorn system managed components
  taintToleration: ~
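  ## A minimal sketch (hedged) of the expected format; the keys and values below are
  ## examples only, and multiple tolerations are separated by semicolons:
  # taintToleration: "key1=value1:NoSchedule; key2:NoExecute"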
  # -- Node selector for Longhorn system managed components
  systemManagedComponentsNodeSelector: ~
  # -- Priority class for Longhorn system managed components
  priorityClass: ~
  # -- If enabled, volumes will be automatically salvaged when all the replicas become faulty, e.g. due to a network disconnection.
  # Longhorn will try to figure out which replica(s) are usable, then use them for the volume. By default true.
  autoSalvage: ~
  # -- If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc...)
  # when the Longhorn volume is detached unexpectedly (e.g. during a Kubernetes upgrade, Docker reboot, or network disconnect).
  # By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount.
  autoDeletePodWhenVolumeDetachedUnexpectedly: ~
  # -- Prevent Longhorn manager from scheduling replicas on cordoned Kubernetes nodes. By default true.
  disableSchedulingOnCordonedNode: ~
  # -- Allow scheduling new replicas of a volume to nodes in the same zone as existing healthy replicas.
  # Nodes that don't belong to any zone will be treated as if they belong to the same zone.
  # Note that Longhorn relies on the label `topology.kubernetes.io/zone=<Zone name of the node>` in the Kubernetes node object to identify the zone.
  # By default true.
  replicaZoneSoftAntiAffinity: ~
  # -- Allow scheduling on disks with existing healthy replicas of the same volume. By default true.
  replicaDiskSoftAntiAffinity: ~
  # -- Defines the Longhorn action when a volume is stuck with a StatefulSet/Deployment pod on a node that is down.
  nodeDownPodDeletionPolicy: ~
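  ## Known options (hedged, per the Longhorn docs): `do-nothing`, `delete-statefulset-pod`,
  ## `delete-deployment-pod`, `delete-both-statefulset-and-deployment-pod`. For example:
  # nodeDownPodDeletionPolicy: delete-both-statefulset-and-deployment-pod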
  # -- Define the policy to use when a node with the last healthy replica of a volume is drained.
  nodeDrainPolicy: ~
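  ## Known options (hedged): `block-if-contains-last-replica` (default), `allow-if-replica-is-stopped`, `always-allow`. For example:
  # nodeDrainPolicy: block-if-contains-last-replica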
  # -- In seconds. The interval determines the minimum amount of time Longhorn waits to reuse the existing data on a failed replica
  # rather than directly creating a new replica for a degraded volume.
  replicaReplenishmentWaitInterval: ~
  # -- This setting controls how many replicas on a node can be rebuilt simultaneously.
  concurrentReplicaRebuildPerNodeLimit: ~
  # -- This setting controls how many volumes on a node can restore a backup concurrently. Set the value to **0** to disable backup restore.
  concurrentVolumeBackupRestorePerNodeLimit: ~
  # -- This setting is only for volumes created by the UI.
  # By default, this is false, meaning there will be a revision counter file to track every write to the volume.
  # During salvage recovery, Longhorn will pick the replica with the largest revision counter as the candidate to recover the whole volume.
  # If the revision counter is disabled, Longhorn will not track every write to the volume.
  # During salvage recovery, Longhorn will instead use the last modification time and file size of the 'volume-head-xxx.img' file
  # to pick the replica candidate to recover the whole volume.
  disableRevisionCounter: ~
  # -- This setting defines the Image Pull Policy of Longhorn system managed pods,
  # e.g. instance manager, engine image, CSI driver, etc.
  # The new Image Pull Policy will only apply after the system managed pods restart.
  systemManagedPodsImagePullPolicy: ~
  # -- This setting allows users to create and attach a volume that doesn't have all the replicas scheduled at the time of creation.
  allowVolumeCreationWithDegradedAvailability: ~
  # -- This setting enables Longhorn to automatically clean up the system generated snapshot after replica rebuild is done.
  autoCleanupSystemGeneratedSnapshot: ~
  # -- This setting controls how Longhorn automatically upgrades volumes' engines to the new default engine image after upgrading Longhorn manager.
  # The value of this setting specifies the maximum number of engines per node that are allowed to upgrade to the default engine image at the same time.
  # If the value is 0, Longhorn will not automatically upgrade volumes' engines to the default version.
  concurrentAutomaticEngineUpgradePerNodeLimit: ~
  # -- This interval in minutes determines how long Longhorn will wait before cleaning up the backing image file when no replica in the disk is using it.
  backingImageCleanupWaitInterval: ~
  # -- This interval in seconds determines how long Longhorn will wait before re-downloading the backing image file
  # when all disk files of this backing image become failed or unknown.
  backingImageRecoveryWaitInterval: ~
  # -- This integer value indicates what percentage of the total allocatable CPU on each node will be reserved for each instance manager pod.
  # You can leave it with the default value, which is 12%.
  guaranteedInstanceManagerCPU: ~
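  ## For example (hedged), to reserve 20% of each node's allocatable CPU per instance manager pod:
  # guaranteedInstanceManagerCPU: 20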
  # -- Enabling this setting notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler.
  kubernetesClusterAutoscalerEnabled: ~
  # -- This setting allows Longhorn to automatically delete orphaned resources and their corresponding data (e.g. stale replicas).
  # Orphaned resources on down or unknown nodes will not be cleaned up automatically.
  orphanAutoDeletion: ~
  # -- Longhorn uses the storage network for in-cluster data traffic. Leave this blank to use the Kubernetes cluster network.
  storageNetwork: ~
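  ## A minimal sketch (hedged): the value references a Multus NetworkAttachmentDefinition
  ## as <namespace>/<name>; the names below are examples only:
  # storageNetwork: kube-system/storage-network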
  # -- This flag is designed to prevent Longhorn from being accidentally uninstalled, which would lead to data loss.
  deletingConfirmationFlag: ~
  # -- In seconds. The setting specifies the timeout between the engine and replica(s); the value should be between 8 and 30 seconds.
  # The default value is 8 seconds.
  engineReplicaTimeout: ~
  # -- This setting allows users to enable or disable snapshot hashing and data integrity checking.
  snapshotDataIntegrity: ~
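  ## Known options (hedged): `disabled`, `enabled`, `fast-check`. For example:
  # snapshotDataIntegrity: fast-check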
  # -- Hashing snapshot disk files impacts the performance of the system.
  # The immediate snapshot hashing and checking can be disabled to minimize the impact after creating a snapshot.
  snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~
  # -- Unix-cron string format. The setting specifies when Longhorn checks the data integrity of snapshot disk files.
  snapshotDataIntegrityCronjob: ~
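  ## For example, run the integrity check at midnight every seventh day (standard cron syntax):
  # snapshotDataIntegrityCronjob: "0 0 */7 * *"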
  # -- This setting allows the Longhorn filesystem trim feature to automatically mark the latest snapshot and
  # its ancestors as removed, stopping at the snapshot containing multiple children.
  removeSnapshotsDuringFilesystemTrim: ~
  # -- This feature supports fast replica rebuilding.
  # It relies on the checksum of snapshot disk files, so setting snapshot-data-integrity to **enabled** or **fast-check** is a prerequisite.
  fastReplicaRebuildEnabled: ~
  # -- In seconds. The setting specifies the HTTP client timeout to the file sync server.
  replicaFileSyncHttpClientTimeout: ~
  # -- The log level (Panic, Fatal, Error, Warn, Info, Debug, Trace) used in the Longhorn manager. Defaults to Info.
  logLevel: ~
  # -- This setting allows users to specify the backup compression method.
  backupCompressionMethod: ~
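  ## Known options (hedged): `none`, `lz4`, `gzip`. For example:
  # backupCompressionMethod: lz4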
  # -- This setting controls how many worker threads run concurrently per backup.
  backupConcurrentLimit: ~
  # -- This setting controls how many worker threads run concurrently per restore.
  restoreConcurrentLimit: ~
  # -- This allows users to activate the v2 data engine, which is based on SPDK.
  # Currently, it is in the preview phase and should not be utilized in a production environment.
  v2DataEngine: ~
  # -- This setting allows users to enable offline replica rebuilding for volumes using the v2 data engine.
  offlineReplicaRebuilding: ~
  # -- Allow scheduling of volumes with an empty node selector to any node.
  allowEmptyNodeSelectorVolume: ~
  # -- Allow scheduling of volumes with an empty disk selector to any disk.
  allowEmptyDiskSelectorVolume: ~

privateRegistry:
  # -- Set `true` to create a new private registry secret
  createSecret: ~
  # -- URL of the private registry. Leave blank to apply the system default registry
  registryUrl: ~
  # -- User used to authenticate to the private registry
  registryUser: ~
  # -- Password used to authenticate to the private registry
  registryPasswd: ~
  # -- If `createSecret` is true, create a Kubernetes secret with this name; otherwise use the existing secret of this name. Used to pull images from your private registry
  registrySecret: ~
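  ## A minimal sketch (hedged); every value below is an example, not a real credential:
  # createSecret: true
  # registryUrl: "registry.example.com"
  # registryUser: "admin"
  # registryPasswd: "changeme"
  # registrySecret: "longhorn-registry-secret"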

longhornManager:
  log:
    # -- Options: `plain`, `json`
    format: plain
  # -- Priority class for Longhorn manager
  priorityClass: ~
  # -- Tolerations for Longhorn manager
  tolerations: []
  ## If you want to set tolerations for the Longhorn Manager DaemonSet, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  # -- Select nodes to run Longhorn manager
  nodeSelector: {}
  ## If you want to set a node selector for the Longhorn Manager DaemonSet, delete the `{}` in the line above
  ## and uncomment this example block
  # label-key1: "label-value1"
  # label-key2: "label-value2"
  # -- Annotations used in the Longhorn manager service
  serviceAnnotations: {}
  ## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
  ## and uncomment this example block
  # annotation-key1: "annotation-value1"
  # annotation-key2: "annotation-value2"

longhornDriver:
  # -- Priority class for Longhorn driver
  priorityClass: ~
  # -- Tolerations for Longhorn driver
  tolerations: []
  ## If you want to set tolerations for the Longhorn Driver Deployer Deployment, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  # -- Select nodes to run Longhorn driver
  nodeSelector: {}
  ## If you want to set a node selector for the Longhorn Driver Deployer Deployment, delete the `{}` in the line above
  ## and uncomment this example block
  # label-key1: "label-value1"
  # label-key2: "label-value2"

longhornUI:
  # -- Replica count for Longhorn UI
  replicas: 2
  # -- Priority class for Longhorn UI
  priorityClass: ~
  # -- Tolerations for Longhorn UI
  tolerations: []
  ## If you want to set tolerations for the Longhorn UI Deployment, delete the `[]` in the line above
  ## and uncomment this example block
  # - key: "key"
  #   operator: "Equal"
  #   value: "value"
  #   effect: "NoSchedule"
  # -- Select nodes to run Longhorn UI
  nodeSelector: {}
  ## If you want to set a node selector for the Longhorn UI Deployment, delete the `{}` in the line above
  ## and uncomment this example block
  # label-key1: "label-value1"
  # label-key2: "label-value2"

ingress:
  # -- Set to true to enable ingress record generation
  enabled: false
  # -- Add ingressClassName to the Ingress.
  # Can replace the kubernetes.io/ingress.class annotation on v1.18+
  ingressClassName: ~
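  ## For example (hedged), when the cluster runs the NGINX ingress controller:
  # ingressClassName: nginx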
  # -- Layer 7 load balancer hostname
  host: sslip.io
  # -- Set this to true in order to enable TLS on the ingress record
  tls: false
  # -- Enable this to connect to the backend service over port 443
  secureBackends: false
  # -- If TLS is set to true, you must declare which secret stores the key/certificate for TLS
  tlsSecret: longhorn.local-tls
  # -- If ingress is enabled, you can set the default ingress path.
  # You can then access the UI by using the following full path: {{host}}+{{path}}
  path: /
  ## If you're using kube-lego, you will want to add:
  ## kubernetes.io/tls-acme: true
  ##
  ## For a full list of possible ingress annotations, please see
  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
  ##
  ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
  # -- Ingress annotations done as key:value pairs
  annotations:
  #  kubernetes.io/ingress.class: nginx
  #  kubernetes.io/tls-acme: true
  # -- If you're providing your own certificates, please use this to add the certificates as secrets
  secrets:
  ## If you're providing your own certificates, please use this to add the certificates as secrets
  ## key and certificate should start with -----BEGIN CERTIFICATE----- or
  ## -----BEGIN RSA PRIVATE KEY-----
  ##
  ## name should line up with a tlsSecret set further up
  ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set
  ##
  ## It is also possible to create and manage the certificates outside of this helm chart
  ## Please see README.md for more information
  # - name: longhorn.local-tls
  #   key:
  #   certificate:

# -- For Kubernetes < v1.25, if your cluster enables the Pod Security Policy admission controller,
# set this to `true` to ship longhorn-psp, which allows privileged Longhorn pods to start
enablePSP: false

# -- Annotations to add to the Longhorn Manager DaemonSet pods. Optional.
annotations: {}

serviceAccount:
  # -- Annotations to add to the service account
  annotations: {}

## openshift settings
openshift:
  # -- Enable when using OpenShift
  enabled: false
  ui:
    # -- UI route in the OpenShift environment
    route: "longhorn-ui"
    # -- UI port in the OpenShift environment
    port: 443
    # -- UI proxy port in the OpenShift environment
    proxy: 8443