doc(chart): add table of helm values (#6639)
Co-authored-by: David Ko <dko@suse.com>
chart/README.md
@@ -74,5 +74,253 @@ helm uninstall longhorn -n longhorn-system
kubectl delete namespace longhorn-system
```

## Values

The `values.yaml` file contains items used to tweak a deployment of this chart.

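Values can be overridden at install or upgrade time with a custom values file. As a minimal sketch (the keys come from the tables below; the file name and chosen values are illustrative only):

```yaml
# my-values.yaml -- hypothetical override file; apply with:
#   helm install longhorn longhorn/longhorn --namespace longhorn-system -f my-values.yaml
persistence:
  defaultClassReplicaCount: 2
longhornUI:
  replicas: 1
```
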
### Cattle Settings

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| global.cattle.systemDefaultRegistry | string | `""` | System default registry |
| global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector | string | `"kubernetes.io/os:linux"` | Node selector for Longhorn system managed components |
| global.cattle.windowsCluster.defaultSetting.taintToleration | string | `"cattle.io/os=linux:NoSchedule"` | Toleration for Longhorn system managed components |
| global.cattle.windowsCluster.enabled | bool | `false` | Enable this to allow Longhorn to run on a Rancher-deployed Windows cluster |
| global.cattle.windowsCluster.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Select Linux nodes to run Longhorn user deployed components |
| global.cattle.windowsCluster.tolerations | list | `[{"effect":"NoSchedule","key":"cattle.io/os","operator":"Equal","value":"linux"}]` | Tolerations so Longhorn user deployed components can run on Linux nodes |

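For a Rancher-provisioned cluster with Windows nodes, a sketch of these values (using the defaults documented above) might look like:

```yaml
global:
  cattle:
    windowsCluster:
      enabled: true
      # Keep Longhorn user deployed components on the Linux nodes
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
        - key: cattle.io/os
          operator: Equal
          value: linux
          effect: NoSchedule
```
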
### Network Policies

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| networkPolicies.enabled | bool | `false` | Enable NetworkPolicies to limit access to the Longhorn pods |
| networkPolicies.type | string | `"k3s"` | Create the policy that matches your distribution to allow access for the ingress. Options: `k3s`, `rke2`, `rke1` |

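Enabling the policies is a two-key change; for example, on a hypothetical RKE2 cluster:

```yaml
networkPolicies:
  enabled: true
  type: rke2   # one of k3s, rke2, rke1
```
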
### Image Settings

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| image.csi.attacher.repository | string | `"longhornio/csi-attacher"` | Specify CSI attacher image repository. Leave blank to autodetect |
| image.csi.attacher.tag | string | `"v4.2.0"` | Specify CSI attacher image tag. Leave blank to autodetect |
| image.csi.livenessProbe.repository | string | `"longhornio/livenessprobe"` | Specify CSI liveness probe image repository. Leave blank to autodetect |
| image.csi.livenessProbe.tag | string | `"v2.9.0"` | Specify CSI liveness probe image tag. Leave blank to autodetect |
| image.csi.nodeDriverRegistrar.repository | string | `"longhornio/csi-node-driver-registrar"` | Specify CSI node driver registrar image repository. Leave blank to autodetect |
| image.csi.nodeDriverRegistrar.tag | string | `"v2.7.0"` | Specify CSI node driver registrar image tag. Leave blank to autodetect |
| image.csi.provisioner.repository | string | `"longhornio/csi-provisioner"` | Specify CSI provisioner image repository. Leave blank to autodetect |
| image.csi.provisioner.tag | string | `"v3.4.1"` | Specify CSI provisioner image tag. Leave blank to autodetect |
| image.csi.resizer.repository | string | `"longhornio/csi-resizer"` | Specify CSI driver resizer image repository. Leave blank to autodetect |
| image.csi.resizer.tag | string | `"v1.7.0"` | Specify CSI driver resizer image tag. Leave blank to autodetect |
| image.csi.snapshotter.repository | string | `"longhornio/csi-snapshotter"` | Specify CSI driver snapshotter image repository. Leave blank to autodetect |
| image.csi.snapshotter.tag | string | `"v6.2.1"` | Specify CSI driver snapshotter image tag. Leave blank to autodetect |
| image.longhorn.backingImageManager.repository | string | `"longhornio/backing-image-manager"` | Specify Longhorn backing image manager image repository |
| image.longhorn.backingImageManager.tag | string | `"master-head"` | Specify Longhorn backing image manager image tag |
| image.longhorn.engine.repository | string | `"longhornio/longhorn-engine"` | Specify Longhorn engine image repository |
| image.longhorn.engine.tag | string | `"master-head"` | Specify Longhorn engine image tag |
| image.longhorn.instanceManager.repository | string | `"longhornio/longhorn-instance-manager"` | Specify Longhorn instance manager image repository |
| image.longhorn.instanceManager.tag | string | `"master-head"` | Specify Longhorn instance manager image tag |
| image.longhorn.manager.repository | string | `"longhornio/longhorn-manager"` | Specify Longhorn manager image repository |
| image.longhorn.manager.tag | string | `"master-head"` | Specify Longhorn manager image tag |
| image.longhorn.shareManager.repository | string | `"longhornio/longhorn-share-manager"` | Specify Longhorn share manager image repository |
| image.longhorn.shareManager.tag | string | `"master-head"` | Specify Longhorn share manager image tag |
| image.longhorn.supportBundleKit.repository | string | `"longhornio/support-bundle-kit"` | Specify Longhorn support bundle manager image repository |
| image.longhorn.supportBundleKit.tag | string | `"v0.0.27"` | Specify Longhorn support bundle manager image tag |
| image.longhorn.ui.repository | string | `"longhornio/longhorn-ui"` | Specify Longhorn UI image repository |
| image.longhorn.ui.tag | string | `"master-head"` | Specify Longhorn UI image tag |
| image.openshift.oauthProxy.repository | string | `"quay.io/openshift/origin-oauth-proxy"` | For OpenShift users. Specify the OAuth proxy image repository |
| image.openshift.oauthProxy.tag | float | `4.13` | For OpenShift users. Specify the OAuth proxy image tag. Note: use your OCP/OKD 4.x version; the current stable is 4.13 |
| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy, which applies to all user deployed Longhorn components, e.g., Longhorn manager, Longhorn driver, Longhorn UI |

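Pinning a component to a specific image is a matter of overriding its repository/tag pair; for example (the tag shown is hypothetical, since this chart defaults to `master-head`):

```yaml
image:
  longhorn:
    manager:
      repository: longhornio/longhorn-manager
      tag: v1.5.1   # hypothetical release tag
```
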
### Service Settings

| Key | Description |
|-----|-------------|
| service.manager.nodePort | NodePort port number (to set explicitly, choose a port between 30000-32767) |
| service.manager.type | Define Longhorn manager service type. |
| service.ui.nodePort | NodePort port number (to set explicitly, choose a port between 30000-32767) |
| service.ui.type | Define Longhorn UI service type. Options: `ClusterIP`, `NodePort`, `LoadBalancer`, `Rancher-Proxy` |

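To expose the UI on a fixed NodePort, as a sketch (the port number is arbitrary within the allowed range):

```yaml
service:
  ui:
    type: NodePort
    nodePort: 30080   # must be within 30000-32767
```
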
### StorageClass Settings

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| persistence.backingImage.dataSourceParameters | string | `nil` | Specify the data source parameters for the backing image used in Longhorn StorageClass. This option accepts a JSON string of a map, e.g., `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'` |
| persistence.backingImage.dataSourceType | string | `nil` | Specify the data source type for the backing image used in Longhorn StorageClass. If the backing image does not exist, Longhorn will use this field to create a backing image. Otherwise, Longhorn will use it to verify the selected backing image |
| persistence.backingImage.enable | bool | `false` | Set backing image for Longhorn StorageClass |
| persistence.backingImage.expectedChecksum | string | `nil` | Specify the expected SHA512 checksum of the selected backing image in Longhorn StorageClass |
| persistence.backingImage.name | string | `nil` | Specify a backing image that will be used by Longhorn volumes in Longhorn StorageClass. If it does not exist, the backing image data source type and data source parameters should be specified so that Longhorn can create the backing image before using it |
| persistence.defaultClass | bool | `true` | Set Longhorn StorageClass as default |
| persistence.defaultClassReplicaCount | int | `3` | Set replica count for Longhorn StorageClass |
| persistence.defaultDataLocality | string | `"disabled"` | Set data locality for Longhorn StorageClass. Options: `disabled`, `best-effort` |
| persistence.defaultFsType | string | `"ext4"` | Set filesystem type for Longhorn StorageClass |
| persistence.defaultMkfsParams | string | `""` | Set mkfs options for Longhorn StorageClass |
| persistence.defaultNodeSelector.enable | bool | `false` | Enable node selector for Longhorn StorageClass |
| persistence.defaultNodeSelector.selector | string | `""` | This selector enables only certain nodes having these tags to be used for the volume, e.g. `"storage,fast"` |
| persistence.migratable | bool | `false` | Set volume migratable for Longhorn StorageClass |
| persistence.reclaimPolicy | string | `"Delete"` | Define reclaim policy. Options: `Retain`, `Delete` |
| persistence.recurringJobSelector.enable | bool | `false` | Enable recurring job selector for Longhorn StorageClass |
| persistence.recurringJobSelector.jobList | list | `[]` | Recurring job selector list for Longhorn StorageClass. Be careful with the quoting of the input, e.g., `[{"name":"backup", "isGroup":true}]` |
| persistence.removeSnapshotsDuringFilesystemTrim | string | `"ignored"` | Allow automatically removing snapshots during filesystem trim for Longhorn StorageClass. Options: `ignored`, `enabled`, `disabled` |

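A sketch of a tuned default StorageClass, using only keys from the table above:

```yaml
persistence:
  defaultClass: true
  defaultClassReplicaCount: 3
  defaultDataLocality: best-effort   # default is disabled
  reclaimPolicy: Retain              # default is Delete
```
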
### CSI Settings

| Key | Description |
|-----|-------------|
| csi.attacherReplicaCount | Specify replica count of CSI Attacher. Leave blank to use default count: 3 |
| csi.kubeletRootDir | Specify kubelet root-dir. Leave blank to autodetect |
| csi.provisionerReplicaCount | Specify replica count of CSI Provisioner. Leave blank to use default count: 3 |
| csi.resizerReplicaCount | Specify replica count of CSI Resizer. Leave blank to use default count: 3 |
| csi.snapshotterReplicaCount | Specify replica count of CSI Snapshotter. Leave blank to use default count: 3 |

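These usually only need overriding on non-standard kubelet installations or very small clusters; a hypothetical example:

```yaml
csi:
  kubeletRootDir: /var/lib/kubelet   # common location; leave blank to autodetect
  attacherReplicaCount: 1            # defaults to 3 when left blank
```
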
### Longhorn Manager Settings

The Longhorn system contains user deployed components (e.g., Longhorn manager, Longhorn driver, Longhorn UI) and system managed components (e.g., instance manager, engine image, CSI driver, etc.).
These settings only apply to the Longhorn manager component.

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| longhornManager.log.format | string | `"plain"` | Options: `plain`, `json` |
| longhornManager.nodeSelector | object | `{}` | Select nodes to run Longhorn manager |
| longhornManager.priorityClass | string | `nil` | Priority class for Longhorn manager |
| longhornManager.serviceAnnotations | object | `{}` | Annotations used in the Longhorn manager service |
| longhornManager.tolerations | list | `[]` | Tolerations for nodes running Longhorn manager |

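For example, switching the manager to JSON logs and tolerating a hypothetical taint:

```yaml
longhornManager:
  log:
    format: json   # default is plain
  tolerations:
    - key: example.com/dedicated   # hypothetical taint key
      operator: Exists
      effect: NoSchedule
```
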
### Longhorn Driver Settings

The Longhorn system contains user deployed components (e.g., Longhorn manager, Longhorn driver, Longhorn UI) and system managed components (e.g., instance manager, engine image, CSI driver, etc.).
These settings only apply to the Longhorn driver component.

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| longhornDriver.nodeSelector | object | `{}` | Select nodes to run Longhorn driver |
| longhornDriver.priorityClass | string | `nil` | Priority class for Longhorn driver |
| longhornDriver.tolerations | list | `[]` | Tolerations for nodes running Longhorn driver |

### Longhorn UI Settings

The Longhorn system contains user deployed components (e.g., Longhorn manager, Longhorn driver, Longhorn UI) and system managed components (e.g., instance manager, engine image, CSI driver, etc.).
These settings only apply to the Longhorn UI component.

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| longhornUI.nodeSelector | object | `{}` | Select nodes to run Longhorn UI |
| longhornUI.priorityClass | string | `nil` | Priority class for Longhorn UI |
| longhornUI.replicas | int | `2` | Replica count for Longhorn UI |
| longhornUI.tolerations | list | `[]` | Tolerations for nodes running Longhorn UI |

### Ingress Settings

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| ingress.annotations | string | `nil` | Ingress annotations as key:value pairs |
| ingress.enabled | bool | `false` | Set to true to enable ingress record generation |
| ingress.host | string | `"sslip.io"` | Layer 7 load balancer hostname |
| ingress.ingressClassName | string | `nil` | Add ingressClassName to the Ingress. Can replace the kubernetes.io/ingress.class annotation on Kubernetes v1.18+ |
| ingress.path | string | `"/"` | If ingress is enabled, you can set the default ingress path and then access the UI at the full path `{{host}}+{{path}}` |
| ingress.secrets | string | `nil` | If you're providing your own certificates, use this to add the certificates as secrets |
| ingress.secureBackends | bool | `false` | Enable this so the backend service is connected at port 443 |
| ingress.tls | bool | `false` | Set this to true to enable TLS on the ingress record |
| ingress.tlsSecret | string | `"longhorn.local-tls"` | If TLS is set to true, you must declare which secret stores the key/certificate for TLS |

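A TLS-enabled ingress might look like the following sketch (the hostname is hypothetical):

```yaml
ingress:
  enabled: true
  host: longhorn.example.com   # hypothetical hostname
  tls: true
  tlsSecret: longhorn.local-tls
```
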
### Private Registry Settings

Longhorn can be installed in an air-gapped environment with private registry settings. Please refer to **Air Gap Installation** on our official site: [link](https://longhorn.io/docs)

| Key | Description |
|-----|-------------|
| privateRegistry.createSecret | Set `true` to create a new private registry secret |
| privateRegistry.registryPasswd | Password used to authenticate to the private registry |
| privateRegistry.registrySecret | If `createSecret` is true, create a Kubernetes secret with this name; otherwise, use the existing secret of this name. Use it to pull images from your private registry |
| privateRegistry.registryUrl | URL of the private registry. Leave blank to apply the system default registry |
| privateRegistry.registryUser | User used to authenticate to the private registry |

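For an air-gapped install, a sketch of these values (the registry URL and credentials are placeholders):

```yaml
privateRegistry:
  createSecret: true
  registryUrl: registry.example.com        # hypothetical registry
  registryUser: admin                      # placeholder credentials
  registryPasswd: changeme
  registrySecret: longhorn-registry-secret # secret to create and use for pulls
```
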
### OS/Kubernetes Distro Settings

#### OpenShift Settings

Please also refer to [ocp-readme](https://github.com/longhorn/longhorn/blob/master/chart/ocp-readme.md) for more details.

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| openshift.enabled | bool | `false` | Enable when using OpenShift |
| openshift.ui.port | int | `443` | UI port in OpenShift environment |
| openshift.ui.proxy | int | `8443` | UI proxy port in OpenShift environment |
| openshift.ui.route | string | `"longhorn-ui"` | UI route in OpenShift environment |

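On OpenShift, enabling the integration with the defaults from the table above looks like:

```yaml
openshift:
  enabled: true
  ui:
    route: longhorn-ui
    port: 443
    proxy: 8443
```
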
### Other Settings

| Key | Default | Description |
|-----|---------|-------------|
| annotations | `{}` | Annotations to add to the Longhorn Manager DaemonSet pods. Optional. |
| enablePSP | `false` | For Kubernetes < v1.25, if your cluster enables the Pod Security Policy admission controller, set this to `true` to ship longhorn-psp, which allows privileged Longhorn pods to start |

### System Default Settings

System default settings can initially be left blank, in which case the default values are applied when installing Longhorn.
You can then change them through the UI after installation.
For more details such as types or options, refer to **Settings Reference** on our official site: [link](https://longhorn.io/docs)

| Key | Description |
|-----|-------------|
| defaultSettings.allowEmptyDiskSelectorVolume | Allow Scheduling Empty Disk Selector Volumes To Any Disk |
| defaultSettings.allowEmptyNodeSelectorVolume | Allow Scheduling Empty Node Selector Volumes To Any Node |
| defaultSettings.allowRecurringJobWhileVolumeDetached | If this setting is enabled, Longhorn will automatically attach the volume and take a snapshot/backup when it is time for a recurring snapshot/backup. |
| defaultSettings.allowVolumeCreationWithDegradedAvailability | This setting allows users to create and attach a volume that doesn't have all of its replicas scheduled at the time of creation. |
| defaultSettings.autoCleanupSystemGeneratedSnapshot | This setting enables Longhorn to automatically clean up the system generated snapshot after replica rebuild is done. |
| defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly | If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc.) when the Longhorn volume is detached unexpectedly (e.g. during a Kubernetes upgrade, Docker reboot, or network disconnect). By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount. |
| defaultSettings.autoSalvage | If enabled, volumes will be automatically salvaged when all replicas become faulty, e.g. due to network disconnection. Longhorn will try to figure out which replica(s) are usable, then use them for the volume. By default true. |
| defaultSettings.backingImageCleanupWaitInterval | This interval in minutes determines how long Longhorn will wait before cleaning up the backing image file when no replica in the disk is using it. |
| defaultSettings.backingImageRecoveryWaitInterval | This interval in seconds determines how long Longhorn will wait before re-downloading the backing image file when all disk files of this backing image become failed or unknown. |
| defaultSettings.backupCompressionMethod | This setting allows users to specify the backup compression method. |
| defaultSettings.backupConcurrentLimit | This setting controls how many worker threads run per backup concurrently. |
| defaultSettings.backupTarget | The endpoint used to access the backupstore. Available: NFS, CIFS, AWS, GCP, AZURE. |
| defaultSettings.backupTargetCredentialSecret | The name of the Kubernetes secret associated with the backup target. |
| defaultSettings.backupstorePollInterval | In seconds. The backupstore poll interval determines how often Longhorn checks the backupstore for new backups. Set to 0 to disable polling. By default 300. |
| defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit | This setting controls how Longhorn automatically upgrades volumes' engines to the new default engine image after upgrading Longhorn manager. The value of this setting specifies the maximum number of engines per node that are allowed to upgrade to the default engine image at the same time. If the value is 0, Longhorn will not automatically upgrade volumes' engines to the default version. |
| defaultSettings.concurrentReplicaRebuildPerNodeLimit | This setting controls how many replicas on a node can be rebuilt simultaneously. |
| defaultSettings.concurrentVolumeBackupRestorePerNodeLimit | This setting controls how many volumes on a node can restore a backup concurrently. Set the value to **0** to disable backup restore. |
| defaultSettings.createDefaultDiskLabeledNodes | Create the default disk automatically only on nodes with the label "node.longhorn.io/create-default-disk=true" if no other disks exist. If disabled, the default disk will be created on all new nodes when each node is first added. |
| defaultSettings.defaultDataLocality | A Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume. |
| defaultSettings.defaultDataPath | Default path to use for storing data on a host. By default "/var/lib/longhorn/". |
| defaultSettings.defaultLonghornStaticStorageClass | The 'storageClassName' is given to PVs and PVCs that are created for an existing Longhorn volume. The StorageClass name can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. By default 'longhorn-static'. |
| defaultSettings.defaultReplicaCount | The default number of replicas when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `numberOfReplicas` in the StorageClass. By default 3. |
| defaultSettings.deletingConfirmationFlag | This flag is designed to prevent Longhorn from being accidentally uninstalled, which would lead to data loss. |
| defaultSettings.disableRevisionCounter | This setting is only for volumes created by the UI. By default this is false, meaning there will be a revision counter file to track every write to the volume. During salvage recovery, Longhorn will pick the replica with the largest revision counter as the candidate to recover the whole volume. If the revision counter is disabled, Longhorn will not track every write to the volume. During salvage recovery, Longhorn will use the 'volume-head-xxx.img' file's last modification time and file size to pick the replica candidate to recover the whole volume. |
| defaultSettings.disableSchedulingOnCordonedNode | Prevent Longhorn manager from scheduling replicas on a Kubernetes cordoned node. By default true. |
| defaultSettings.engineReplicaTimeout | In seconds. The setting specifies the timeout between the engine and replica(s); the value should be between 8 and 30 seconds. The default value is 8 seconds. |
| defaultSettings.failedBackupTTL | In minutes. This setting determines how long Longhorn will keep a failed backup resource. Set to 0 to disable auto-deletion. |
| defaultSettings.fastReplicaRebuildEnabled | This feature supports fast replica rebuilding. It relies on the checksums of snapshot disk files, so setting the snapshot-data-integrity to **enable** or **fast-check** is a prerequisite. |
| defaultSettings.guaranteedInstanceManagerCPU | This integer value indicates what percentage of the total allocatable CPU on each node will be reserved for each instance manager pod. You can leave it at the default value, which is 12%. |
| defaultSettings.kubernetesClusterAutoscalerEnabled | Enabling this setting will notify Longhorn that the cluster is using the Kubernetes Cluster Autoscaler. |
| defaultSettings.logLevel | The log level (Panic, Fatal, Error, Warn, Info, Debug, Trace) used in Longhorn manager. Defaults to Info. |
| defaultSettings.nodeDownPodDeletionPolicy | Defines the Longhorn action when a volume is stuck with a StatefulSet/Deployment pod on a node that is down. |
| defaultSettings.nodeDrainPolicy | Define the policy to use when a node with the last healthy replica of a volume is drained. |
| defaultSettings.offlineReplicaRebuilding | This setting allows users to enable offline replica rebuilding for volumes using the v2 data engine. |
| defaultSettings.orphanAutoDeletion | This setting allows Longhorn to automatically delete orphan resources and their corresponding orphaned data, such as stale replicas. Orphan resources on down or unknown nodes will not be cleaned up automatically. |
| defaultSettings.priorityClass | priorityClass for Longhorn system components |
| defaultSettings.recurringFailedJobsHistoryLimit | This setting specifies how many failed backup or snapshot job histories should be retained. History will not be retained if the value is 0. |
| defaultSettings.recurringSuccessfulJobsHistoryLimit | This setting specifies how many successful backup or snapshot job histories should be retained. History will not be retained if the value is 0. |
| defaultSettings.removeSnapshotsDuringFilesystemTrim | This setting allows the Longhorn filesystem trim feature to automatically mark the latest snapshot and its ancestors as removed, stopping at the snapshot containing multiple children. |
| defaultSettings.replicaAutoBalance | Enable this setting to automatically rebalance replicas when an available node is discovered. |
| defaultSettings.replicaDiskSoftAntiAffinity | Allow scheduling on disks with existing healthy replicas of the same volume. By default true. |
| defaultSettings.replicaFileSyncHttpClientTimeout | In seconds. The setting specifies the HTTP client timeout to the file sync server. |
| defaultSettings.replicaReplenishmentWaitInterval | In seconds. The interval determines the minimum time Longhorn will wait in order to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume. |
| defaultSettings.replicaSoftAntiAffinity | Allow scheduling on nodes with existing healthy replicas of the same volume. By default false. |
| defaultSettings.replicaZoneSoftAntiAffinity | Allow scheduling new replicas of a volume to nodes in the same zone as existing healthy replicas. Nodes that don't belong to any zone will be treated as if they are in the same zone. Note that Longhorn relies on the label `topology.kubernetes.io/zone=<Zone name of the node>` in the Kubernetes node object to identify the zone. By default true. |
| defaultSettings.restoreConcurrentLimit | This setting controls how many worker threads run per restore concurrently. |
| defaultSettings.restoreVolumeRecurringJobs | Restore recurring jobs from the backup volume on the backup target, and create recurring jobs if they do not exist, during a backup restoration. |
| defaultSettings.snapshotDataIntegrity | This setting allows users to enable or disable snapshot hashing and data integrity checking. |
| defaultSettings.snapshotDataIntegrityCronjob | Unix-cron string format. The setting specifies when Longhorn checks the data integrity of snapshot disk files. |
| defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation | Hashing snapshot disk files impacts the performance of the system. The immediate snapshot hashing and checking can be disabled to minimize the impact after creating a snapshot. |
| defaultSettings.storageMinimalAvailablePercentage | If the minimum available disk capacity exceeds the actual percentage of available disk capacity, the disk becomes unschedulable until more space is freed up. By default 25. |
| defaultSettings.storageNetwork | Longhorn uses the storage network for in-cluster data traffic. Leave this blank to use the Kubernetes cluster network. |
| defaultSettings.storageOverProvisioningPercentage | The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity. By default 200. |
| defaultSettings.storageReservedPercentageForDefaultDisk | The reserved percentage specifies the percentage of disk space that will not be allocated to the default disk on each new Longhorn node. |
| defaultSettings.supportBundleFailedHistoryLimit | This setting specifies how many failed support bundles can exist in the cluster. Set this value to **0** to have Longhorn automatically purge all failed support bundles. |
| defaultSettings.systemManagedComponentsNodeSelector | nodeSelector for Longhorn system components |
| defaultSettings.systemManagedPodsImagePullPolicy | This setting defines the image pull policy of Longhorn system managed pods, e.g. instance manager, engine image, CSI driver, etc. The new image pull policy will only apply after the system managed pods restart. |
| defaultSettings.taintToleration | taintToleration for Longhorn system components |
| defaultSettings.upgradeChecker | The Upgrade Checker will check for a new Longhorn version periodically. When a new version is available, a notification will appear in the UI. By default true. |
| defaultSettings.v2DataEngine | This setting allows users to activate the v2 data engine based on SPDK. Currently it is in the preview phase and should not be used in a production environment. |

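As a sketch combining a few of these defaults (the backup target and secret name are placeholders, shown in the common S3-style `s3://<bucket>@<region>/` form):

```yaml
defaultSettings:
  backupTarget: s3://backup-bucket@us-east-1/   # hypothetical backupstore endpoint
  backupTargetCredentialSecret: aws-secret      # hypothetical credentials secret
  defaultReplicaCount: 3
  logLevel: Info
```
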
---
Please see [link](https://github.com/longhorn/longhorn) for more information.

chart/README.md.gotmpl
@@ -0,0 +1,253 @@
# Longhorn Chart

> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only.

> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version.

## Source Code

Longhorn is 100% open source software. Project source code is spread across a number of repos:

1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine
2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager
3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager
4. Backing Image Manager -- Backing image file lifecycle management. https://github.com/longhorn/backing-image-manager
5. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager
6. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui

## Prerequisites

1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.)
2. Kubernetes >= v1.21
3. Make sure `bash`, `curl`, `findmnt`, `grep`, `awk` and `blkid` have been installed on all nodes of the Kubernetes cluster.
4. Make sure `open-iscsi` has been installed and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, Ubuntu is recommended as the guest OS image since it already contains `open-iscsi`.

## Upgrading to Kubernetes v1.25+

Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.

As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`.

> **Note:**
> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
>
> If your charts get stuck in this state, you may have to clean up your Helm release secrets.

Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart.

As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Longhorn docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards.

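As a sketch, the pre-upgrade step amounts to overriding a single value and running `helm upgrade` with it:

```yaml
# psp-off.yaml -- hypothetical override file; apply before upgrading the cluster:
#   helm upgrade longhorn longhorn/longhorn -n longhorn-system -f psp-off.yaml
enablePSP: false
```
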
## Installation

1. Add the Longhorn chart repository.
```
helm repo add longhorn https://charts.longhorn.io
```

2. Update local Longhorn chart information from the chart repository.
```
helm repo update
```

3. Install the Longhorn chart.
- With Helm 2, the following command will create the `longhorn-system` namespace and install the Longhorn chart together.
```
helm install longhorn/longhorn --name longhorn --namespace longhorn-system
```
- With Helm 3, the following commands will create the `longhorn-system` namespace first, then install the Longhorn chart.

```
kubectl create namespace longhorn-system
helm install longhorn longhorn/longhorn --namespace longhorn-system
```

## Uninstallation

To uninstall Longhorn with Helm 2:
```
kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag
helm delete longhorn --purge
```

To uninstall Longhorn with Helm 3:
```
kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag
helm uninstall longhorn -n longhorn-system
kubectl delete namespace longhorn-system
```

## Values

The `values.yaml` file contains items used to tweak a deployment of this chart.

### Cattle Settings

| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "global" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### Network Policies

| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "networkPolicies" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### Image Settings

| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "image" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### Service Settings

| Key | Description |
|-----|-------------|
{{- range .Values }}
{{- if (and (hasPrefix "service" .Key) (not (contains "Account" .Key))) }}
| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### StorageClass Settings

| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "persistence" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### CSI Settings

| Key | Description |
|-----|-------------|
{{- range .Values }}
{{- if hasPrefix "csi" .Key }}
| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### Longhorn Manager Settings

The Longhorn system contains user deployed components (e.g., Longhorn manager, Longhorn driver, Longhorn UI) and system managed components (e.g., instance manager, engine image, CSI driver, etc.).
These settings only apply to the Longhorn manager component.

| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "longhornManager" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### Longhorn Driver Settings

The Longhorn system contains user deployed components (e.g., Longhorn manager, Longhorn driver, Longhorn UI) and system managed components (e.g., instance manager, engine image, CSI driver, etc.).
These settings only apply to the Longhorn driver component.

| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "longhornDriver" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### Longhorn UI Settings

The Longhorn system contains user deployed components (e.g., Longhorn manager, Longhorn driver, Longhorn UI) and system managed components (e.g., instance manager, engine image, CSI driver, etc.).
These settings only apply to the Longhorn UI component.

| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "longhornUI" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### Ingress Settings

| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "ingress" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### Private Registry Settings

Longhorn can be installed in an air-gapped environment with private registry settings. Please refer to **Air Gap Installation** on our official site: [link](https://longhorn.io/docs)

| Key | Description |
|-----|-------------|
{{- range .Values }}
{{- if hasPrefix "privateRegistry" .Key }}
| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### OS/Kubernetes Distro Settings

#### OpenShift Settings

Please also refer to [ocp-readme](https://github.com/longhorn/longhorn/blob/master/chart/ocp-readme.md) for more details.

| Key | Type | Default | Description |
|-----|------|---------|-------------|
{{- range .Values }}
{{- if hasPrefix "openshift" .Key }}
| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### Other Settings

| Key | Default | Description |
|-----|---------|-------------|
{{- range .Values }}
{{- if not (or (hasPrefix "defaultSettings" .Key)
(hasPrefix "networkPolicies" .Key)
(hasPrefix "image" .Key)
(hasPrefix "service" .Key)
(hasPrefix "persistence" .Key)
(hasPrefix "csi" .Key)
(hasPrefix "longhornManager" .Key)
(hasPrefix "longhornDriver" .Key)
(hasPrefix "longhornUI" .Key)
(hasPrefix "privateRegistry" .Key)
(hasPrefix "ingress" .Key)
(hasPrefix "openshift" .Key)
(hasPrefix "global" .Key)) }}
| {{ .Key }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

### System Default Settings

System default settings can initially be left blank, in which case the default values are applied when installing Longhorn.
You can then change them through the UI after installation.
For more details such as types or options, refer to **Settings Reference** on our official site: [link](https://longhorn.io/docs)

| Key | Description |
|-----|-------------|
{{- range .Values }}
{{- if hasPrefix "defaultSettings" .Key }}
| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} |
{{- end }}
{{- end }}

---
Please see [link](https://github.com/longhorn/longhorn) for more information.

@@ -244,7 +244,7 @@ questions:
group: "Longhorn CSI Driver Settings"
- variable: defaultSettings.backupTarget
label: Backup Target
-description: "The endpoint used to access the backupstore. NFS and S3 are supported."
+description: "The endpoint used to access the backupstore. Available: NFS, CIFS, AWS, GCP, AZURE"
group: "Longhorn Default Settings"
type: string
default:
@@ -256,8 +256,7 @@ questions:
default:
- variable: defaultSettings.allowRecurringJobWhileVolumeDetached
label: Allow Recurring Job While Volume Is Detached
-description: 'If this setting is enabled, Longhorn will automatically attaches the volume and takes snapshot/backup when it is the time to do recurring snapshot/backup.
-Note that the volume is not ready for workload during the period when the volume was automatically attached. Workload will have to wait until the recurring job finishes.'
+description: 'If this setting is enabled, Longhorn will automatically attaches the volume and takes snapshot/backup when it is the time to do recurring snapshot/backup.'
group: "Longhorn Default Settings"
type: boolean
default: "false"
@@ -275,11 +274,7 @@ Note that the volume is not ready for workload during the period when the volume
default: "/var/lib/longhorn/"
- variable: defaultSettings.defaultDataLocality
label: Default Data Locality
-description: 'We say a Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume.
-This setting specifies the default data locality when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `dataLocality` in the StorageClass
-The available modes are:
-- **disabled**. This is the default option. There may or may not be a replica on the same node as the attached volume (workload)
-- **best-effort**. This option instructs Longhorn to try to keep a replica on the same node as the attached volume (workload). Longhorn will not stop the volume, even if it cannot keep a replica local to the attached volume (workload) due to environment limitation, e.g. not enough disk space, incompatible disk tags, etc.'
+description: 'Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume.'
group: "Longhorn Default Settings"
type: enum
options:
@@ -294,17 +289,7 @@ The available modes are:
default: "false"
- variable: defaultSettings.replicaAutoBalance
label: Replica Auto Balance
-description: 'Enable this setting automatically rebalances replicas when discovered an available node.
-The available global options are:
-- **disabled**. This is the default option. No replica auto-balance will be done.
-- **least-effort**. This option instructs Longhorn to balance replicas for minimal redundancy.
-- **best-effort**. This option instructs Longhorn to balance replicas for even redundancy.
-Longhorn also support individual volume setting. The setting can be specified in volume.spec.replicaAutoBalance, this overrules the global setting.
-The available volume spec options are:
-- **ignored**. This is the default option that instructs Longhorn to inherit from the global setting.
-- **disabled**. This option instructs Longhorn no replica auto-balance should be done.
-- **least-effort**. This option instructs Longhorn to balance replicas for minimal redundancy.
-- **best-effort**. This option instructs Longhorn to balance replicas for even redundancy.'
+description: 'Enable this setting automatically rebalances replicas when discovered an available node.'
group: "Longhorn Default Settings"
type: enum
options:
@@ -364,22 +349,14 @@ The available volume spec options are:
default: 300
- variable: defaultSettings.failedBackupTTL
label: Failed Backup Time to Live
-description: "In minutes. This setting determines how long Longhorn will keep the backup resource that was failed. Set to 0 to disable the auto-deletion.
-Failed backups will be checked and cleaned up during backupstore polling which is controlled by **Backupstore Poll Interval** setting.
-Hence this value determines the minimal wait interval of the cleanup. And the actual cleanup interval is multiple of **Backupstore Poll Interval**.
-Disabling **Backupstore Poll Interval** also means to disable failed backup auto-deletion."
+description: "In minutes. This setting determines how long Longhorn will keep the backup resource that was failed. Set to 0 to disable the auto-deletion."
group: "Longhorn Default Settings"
type: int
min: 0
default: 1440
- variable: defaultSettings.restoreVolumeRecurringJobs
label: Restore Volume Recurring Jobs
-description: "Restore recurring jobs from the backup volume on the backup target and create recurring jobs if not exist during a backup restoration.
-Longhorn also supports individual volume setting. The setting can be specified on Backup page when making a backup restoration, this overrules the global setting.
-The available volume setting options are:
-- **ignored**. This is the default option that instructs Longhorn to inherit from the global setting.
-- **enabled**. This option instructs Longhorn to restore recurring jobs/groups from the backup target forcibly.
-- **disabled**. This option instructs Longhorn no restoring recurring jobs/groups should be done."
+description: "Restore recurring jobs from the backup volume on the backup target and create recurring jobs if not exist during a backup restoration."
group: "Longhorn Default Settings"
type: boolean
default: "false"
@@ -399,9 +376,7 @@ The available volume setting options are:
default: 1
- variable: defaultSettings.supportBundleFailedHistoryLimit
label: SupportBundle Failed History Limit
-description: "This setting specifies how many failed support bundles can exist in the cluster.
-The retained failed support bundle is for analysis purposes and needs to clean up manually.
-Set this value to **0** to have Longhorn automatically purge all failed support bundles."
+description: "This setting specifies how many failed support bundles can exist in the cluster. Set this value to **0** to have Longhorn automatically purge all failed support bundles."
group: "Longhorn Default Settings"
type: int
min: 0
@@ -414,9 +389,7 @@ Set this value to **0** to have Longhorn automatically purge all failed support
default: "true"
- variable: defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly
label: Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly
-description: 'If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc...) when Longhorn volume is detached unexpectedly (e.g. during Kubernetes upgrade, Docker reboot, or network disconnect). By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount.
-If disabled, Longhorn will not delete the workload pod that is managed by a controller. You will have to manually restart the pod to reattach and remount the volume.
-**Note:** This setting does not apply to the workload pods that do not have a controller. Longhorn never deletes them.'
+description: 'If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc...) when Longhorn volume is detached unexpectedly (e.g. during Kubernetes upgrade, Docker reboot, or network disconnect). By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount.'
group: "Longhorn Default Settings"
type: boolean
default: "true"
@@ -452,11 +425,7 @@ If disabled, Longhorn will not delete the workload pod that is managed by a cont
default: "true"
- variable: defaultSettings.nodeDownPodDeletionPolicy
label: Pod Deletion Policy When Node is Down
-description: "Defines the Longhorn action when a Volume is stuck with a StatefulSet/Deployment Pod on a node that is down.
-- **do-nothing** is the default Kubernetes behavior of never force deleting StatefulSet/Deployment terminating pods. Since the pod on the node that is down isn't removed, Longhorn volumes are stuck on nodes that are down.
-- **delete-statefulset-pod** Longhorn will force delete StatefulSet terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.
-- **delete-deployment-pod** Longhorn will force delete Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods.
-- **delete-both-statefulset-and-deployment-pod** Longhorn will force delete StatefulSet/Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods."
+description: "Defines the Longhorn action when a Volume is stuck with a StatefulSet/Deployment Pod on a node that is down."
group: "Longhorn Default Settings"
type: enum
options:
@@ -467,10 +436,7 @@ If disabled, Longhorn will not delete the workload pod that is managed by a cont
default: "do-nothing"
- variable: defaultSettings.nodeDrainPolicy
label: Node Drain Policy
-description: "Define the policy to use when a node with the last healthy replica of a volume is drained.
-- **block-if-contains-last-replica** Longhorn will block the drain when the node contains the last healthy replica of a volume.
-- **allow-if-replica-is-stopped** Longhorn will allow the drain when the node contains the last healthy replica of a volume but the replica is stopped. WARNING: possible data loss if the node is removed after draining. Select this option if you want to drain the node and do in-place upgrade/maintenance.
-- **always-allow** Longhorn will allow the drain even though the node contains the last healthy replica of a volume. WARNING: possible data loss if the node is removed after draining. Also possible data corruption if the last replica was running during the draining."
+description: "Define the policy to use when a node with the last healthy replica of a volume is drained."
group: "Longhorn Default Settings"
type: enum
options:
@@ -480,29 +446,21 @@ If disabled, Longhorn will not delete the workload pod that is managed by a cont
default: "block-if-contains-last-replica"
- variable: defaultSettings.replicaReplenishmentWaitInterval
label: Replica Replenishment Wait Interval
-description: "In seconds. The interval determines how long Longhorn will wait at least in order to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume.
-Warning: This option works only when there is a failed replica in the volume. And this option may block the rebuilding for a while in the case."
+description: "In seconds. The interval determines how long Longhorn will wait at least in order to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume."
group: "Longhorn Default Settings"
type: int
min: 0
default: 600
- variable: defaultSettings.concurrentReplicaRebuildPerNodeLimit
label: Concurrent Replica Rebuild Per Node Limit
-description: "This setting controls how many replicas on a node can be rebuilt simultaneously.
-Typically, Longhorn can block the replica starting once the current rebuilding count on a node exceeds the limit. But when the value is 0, it means disabling the replica rebuilding.
-WARNING:
-- The old setting \"Disable Replica Rebuild\" is replaced by this setting.
-- Different from relying on replica starting delay to limit the concurrent rebuilding, if the rebuilding is disabled, replica object replenishment will be directly skipped.
-- When the value is 0, the eviction and data locality feature won't work. But this shouldn't have any impact to any current replica rebuild and backup restore."
+description: "This setting controls how many replicas on a node can be rebuilt simultaneously."
group: "Longhorn Default Settings"
type: int
min: 0
default: 5
- variable: defaultSettings.concurrentVolumeBackupRestorePerNodeLimit
label: Concurrent Volume Backup Restore Per Node Limit
-description: "This setting controls how many volumes on a node can restore the backup concurrently.
-Longhorn blocks the backup restore once the restoring volume count exceeds the limit.
-Set the value to **0** to disable backup restore."
+description: "This setting controls how many volumes on a node can restore the backup concurrently. Set the value to **0** to disable backup restore."
group: "Longhorn Default Settings"
type: int
min: 0
@@ -551,27 +509,14 @@ Set the value to **0** to disable backup restore."
default: 60
- variable: defaultSettings.backingImageRecoveryWaitInterval
label: Backing Image Recovery Wait Interval
-description: "This interval in seconds determines how long Longhorn will wait before re-downloading the backing image file when all disk files of this backing image become failed or unknown.
-WARNING:
-- This recovery only works for the backing image of which the creation type is \"download\".
-- File state \"unknown\" means the related manager pods on the pod is not running or the node itself is down/disconnected."
+description: "This interval in seconds determines how long Longhorn will wait before re-downloading the backing image file when all disk files of this backing image become failed or unknown."
group: "Longhorn Default Settings"
type: int
min: 0
default: 300
- variable: defaultSettings.guaranteedInstanceManagerCPU
label: Guaranteed Instance Manager CPU
-description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each instance manager Pod. For example, 10 means 10% of the total CPU on a node will be allocated to each instance manager pod on this node. This will help maintain engine and replica stability during high node workload.
-In order to prevent unexpected volume instance (engine/replica) crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting:
-`Guaranteed Instance Manager CPU = The estimated max Longhorn volume engine and replica count on a node * 0.1 / The total allocatable CPUs on the node * 100`
-The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting.
-If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes.
-WARNING:
-- Value 0 means unsetting CPU requests for instance manager pods.
-- Considering the possible new instance manager pods in the further system upgrade, this integer value is range from 0 to 40.
-- One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then.
-- This global setting will be ignored for a node if the field \"InstanceManagerCPURequest\" on the node is set.
-- After this setting is changed, all instance manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES."
+description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each instance manager Pod. You can leave it with the default value, which is 12%."
group: "Longhorn Default Settings"
type: int
min: 0
@ -579,18 +524,13 @@ Set the value to **0** to disable backup restore."
|
||||
default: 12
|
||||
- variable: defaultSettings.logLevel
|
||||
label: Log Level
|
||||
description: "The log level Panic, Fatal, Error, Warn, Info, Debug, Trace used in longhorn manager. By default Debug."
|
||||
description: "The log level Panic, Fatal, Error, Warn, Info, Debug, Trace used in longhorn manager. Default to Info."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default: "Info"
|
||||
- variable: defaultSettings.kubernetesClusterAutoscalerEnabled
|
||||
label: Kubernetes Cluster Autoscaler Enabled (Experimental)
|
||||
description: "Enabling this setting will notify Longhorn that the cluster is using Kubernetes Cluster Autoscaler.
|
||||
Longhorn prevents data loss by only allowing the Cluster Autoscaler to scale down a node that met all conditions:
|
||||
- No volume attached to the node.
|
||||
- Is not the last node containing the replica of any volume.
|
||||
- Is not running backing image components pod.
|
||||
- Is not running share manager components pod."
|
||||
description: "Enabling this setting will notify Longhorn that the cluster is using Kubernetes Cluster Autoscaler."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: false
|
||||
@ -602,20 +542,13 @@ Set the value to **0** to disable backup restore."
|
||||
default: false
|
||||
- variable: defaultSettings.storageNetwork
|
||||
label: Storage Network
|
||||
description: "Longhorn uses the storage network for in-cluster data traffic. Leave this blank to use the Kubernetes cluster network.
|
||||
To segregate the storage network, input the pre-existing NetworkAttachmentDefinition in \"<namespace>/<name>\" format.
|
||||
WARNING:
|
||||
- The cluster must have pre-existing Multus installed, and NetworkAttachmentDefinition IPs are reachable between nodes.
|
||||
- DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will try to block this setting update when there are attached volumes.
|
||||
- When applying the setting, Longhorn will restart all manager, instance-manager, and backing-image-manager pods."
|
||||
description: "Longhorn uses the storage network for in-cluster data traffic. Leave this blank to use the Kubernetes cluster network."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default:
|
||||
- variable: defaultSettings.deletingConfirmationFlag
|
||||
label: Deleting Confirmation Flag
|
||||
description: "This flag is designed to prevent Longhorn from being accidentally uninstalled which will lead to data lost.
|
||||
Set this flag to **true** to allow Longhorn uninstallation.
|
||||
If this flag **false**, Longhorn uninstallation job will fail. "
|
||||
description: "This flag is designed to prevent Longhorn from being accidentally uninstalled which will lead to data lost."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
@ -627,11 +560,7 @@ Set the value to **0** to disable backup restore."
|
||||
default: "8"
|
||||
- variable: defaultSettings.snapshotDataIntegrity
|
||||
label: Snapshot Data Integrity
|
||||
description: "This setting allows users to enable or disable snapshot hashing and data integrity checking.
|
||||
Available options are
|
||||
- **disabled**: Disable snapshot disk file hashing and data integrity checking.
|
||||
- **enabled**: Enables periodic snapshot disk file hashing and data integrity checking. To detect the filesystem-unaware corruption caused by bit rot or other issues in snapshot disk files, Longhorn system periodically hashes files and finds corrupted ones. Hence, the system performance will be impacted during the periodical checking.
|
||||
- **fast-check**: Enable snapshot disk file hashing and fast data integrity checking. Longhorn system only hashes snapshot disk files if their are not hashed or the modification time are changed. In this mode, filesystem-unaware corruption cannot be detected, but the impact on system performance can be minimized."
|
||||
description: "This setting allows users to enable or disable snapshot hashing and data integrity checking."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default: "disabled"
|
||||
@ -643,17 +572,13 @@ Set the value to **0** to disable backup restore."
|
||||
default: "false"
|
||||
- variable: defaultSettings.snapshotDataIntegrityCronjob
|
||||
label: Snapshot Data Integrity Check CronJob
|
||||
description: "Unix-cron string format. The setting specifies when Longhorn checks the data integrity of snapshot disk files.
|
||||
Warning: Hashing snapshot disk files impacts the performance of the system. It is recommended to run data integrity checks during off-peak times and to reduce the frequency of checks."
|
||||
description: "Unix-cron string format. The setting specifies when Longhorn checks the data integrity of snapshot disk files."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default: "0 0 */7 * *"
|
||||
- variable: defaultSettings.removeSnapshotsDuringFilesystemTrim
|
||||
label: Remove Snapshots During Filesystem Trim
|
||||
description: "This setting allows Longhorn filesystem trim feature to automatically mark the latest snapshot and its ancestors as removed and stops at the snapshot containing multiple children.\n\n
|
||||
Since Longhorn filesystem trim feature can be applied to the volume head and the followed continuous removed or system snapshots only.\n\n
|
||||
Notice that trying to trim a removed files from a valid snapshot will do nothing but the filesystem will discard this kind of in-memory trimmable file info.\n\n
|
||||
Later on if you mark the snapshot as removed and want to retry the trim, you may need to unmount and remount the filesystem so that the filesystem can recollect the trimmable file info."
|
||||
description: "This setting allows Longhorn filesystem trim feature to automatically mark the latest snapshot and its ancestors as removed and stops at the snapshot containing multiple children."
|
||||
group: "Longhorn Default Settings"
|
||||
type: boolean
|
||||
default: "false"
|
||||
@ -671,11 +596,7 @@ Set the value to **0** to disable backup restore."
|
||||
default: "30"
|
||||
- variable: defaultSettings.backupCompressionMethod
|
||||
label: Backup Compression Method
|
||||
description: "This setting allows users to specify backup compression method.
|
||||
Available options are
|
||||
- **none**: Disable the compression method. Suitable for multimedia data such as encoded images and videos.
|
||||
- **lz4**: Fast compression method. Suitable for flat files.
|
||||
- **gzip**: A bit of higher compression ratio but relatively slow."
|
||||
description: "This setting allows users to specify backup compression method."
|
||||
group: "Longhorn Default Settings"
|
||||
type: string
|
||||
default: "lz4"
|
||||
@ -695,17 +616,13 @@ Set the value to **0** to disable backup restore."
|
||||
default: 2
|
||||
- variable: defaultSettings.v2DataEngine
|
||||
label: V2 Data Engine
|
||||
description: "This allows users to activate v2 data engine based on SPDK. Currently, it is in the preview phase and should not be utilized in a production environment.
|
||||
WARNING:
|
||||
- DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will block this setting update when there are attached volumes.
|
||||
- When applying the setting, Longhorn will restart all instance-manager pods.
|
||||
- When the V2 Data Engine is enabled, each instance-manager pod utilizes 1 CPU core. This high CPU usage is attributed to the spdk_tgt process running within each instance-manager pod. The spdk_tgt process is responsible for handling input/output (IO) operations and requires intensive polling. As a result, it consumes 100% of a dedicated CPU core to efficiently manage and process the IO requests, ensuring optimal performance and responsiveness for storage operations."
|
||||
description: "This allows users to activate v2 data engine based on SPDK. Currently, it is in the preview phase and should not be utilized in a production environment."
|
||||
group: "Longhorn V2 Data Engine (Preview Feature) Settings"
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: defaultSettings.offlineReplicaRebuilding
|
||||
label: Offline Replica Rebuilding
|
||||
description: ""This setting allows users to enable the offline replica rebuilding for volumes using v2 data engine."
|
||||
description: "This setting allows users to enable the offline replica rebuilding for volumes using v2 data engine."
|
||||
group: "Longhorn V2 Data Engine (Preview Feature) Settings"
|
||||
required: true
|
||||
type: enum
|
||||
@ -722,7 +639,7 @@ Set the value to **0** to disable backup restore."
|
||||
type: boolean
|
||||
- variable: persistence.reclaimPolicy
|
||||
label: Storage Class Retain Policy
|
||||
description: "Define reclaim policy (Retain or Delete)"
|
||||
description: "Define reclaim policy. Options: `Retain`, `Delete`"
|
||||
group: "Longhorn Storage Class Settings"
|
||||
required: true
|
||||
type: enum
|
||||
@ -739,7 +656,7 @@ Set the value to **0** to disable backup restore."
|
||||
max: 10
|
||||
default: 3
|
||||
- variable: persistence.defaultDataLocality
|
||||
description: "Set data locality for Longhorn StorageClass"
|
||||
description: "Set data locality for Longhorn StorageClass. Options: `disabled`, `best-effort`"
|
||||
label: Default Storage Class Data Locality
|
||||
group: "Longhorn Storage Class Settings"
|
||||
type: enum
|
||||
@ -771,7 +688,7 @@ Set the value to **0** to disable backup restore."
|
||||
subquestions:
|
||||
- variable: persistence.defaultNodeSelector.selector
|
||||
label: Storage Class Node Selector
|
||||
description: 'We use NodeSelector when we want to bind PVC via StorageClass into desired mountpoint on the nodes tagged with its value'
|
||||
description: 'This selector enables only certain nodes having these tags to be used for the volume. e.g. `"storage,fast"`'
|
||||
group: "Longhorn Storage Class Settings"
|
||||
type: string
|
||||
default:
|
||||
@ -825,7 +742,7 @@ Set the value to **0** to disable backup restore."
|
||||
type: string
|
||||
default:
|
||||
- variable: persistence.removeSnapshotsDuringFilesystemTrim
|
||||
description: "Allow automatically removing snapshots during filesystem trim for Longhorn StorageClass"
|
||||
description: "Allow automatically removing snapshots during filesystem trim for Longhorn StorageClass. Options: `ignored`, `enabled`, `disabled`"
|
||||
label: Default Storage Class Remove Snapshots During Filesystem Trim
|
||||
group: "Longhorn Storage Class Settings"
|
||||
type: enum
|
||||
@ -856,7 +773,7 @@ Set the value to **0** to disable backup restore."
|
||||
label: Ingress Path
|
||||
- variable: service.ui.type
|
||||
default: "Rancher-Proxy"
|
||||
description: "Define Longhorn UI service type"
|
||||
description: "Define Longhorn UI service type. Options: `ClusterIP`, `NodePort`, `LoadBalancer`, `Rancher-Proxy`"
|
||||
type: enum
|
||||
options:
|
||||
- "ClusterIP"
|
||||
@ -898,7 +815,7 @@ Set the value to **0** to disable backup restore."
|
||||
subquestions:
|
||||
- variable: networkPolicies.type
|
||||
label: Network Policies for Ingress
|
||||
description: "Create the policy to allow access for the ingress, select the distribution."
|
||||
description: "Create the policy based on your distribution to allow access for the ingress. Options: `k3s`, `rke2`, `rke1`"
|
||||
show_if: "networkPolicies.enabled=true&&ingress.enabled=true"
|
||||
type: enum
|
||||
default: "rke2"
|
||||
|
chart/values.yaml
@ -3,189 +3,347 @@
# Declare variables to be passed into your templates.
global:
cattle:
# -- System default registry
systemDefaultRegistry: ""
windowsCluster:
# Enable this to allow Longhorn to run on the Rancher deployed Windows cluster
# -- Enable this to allow Longhorn to run on the Rancher deployed Windows cluster
enabled: false
# Tolerate Linux node taint
# -- Tolerate Linux nodes to run Longhorn user deployed components
tolerations:
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
# Select Linux nodes
# -- Select Linux nodes to run Longhorn user deployed components
nodeSelector:
kubernetes.io/os: "linux"
# Recognize toleration and node selector for Longhorn run-time created components
defaultSetting:
# -- Toleration for Longhorn system managed components
taintToleration: cattle.io/os=linux:NoSchedule
# -- Node selector for Longhorn system managed components
systemManagedComponentsNodeSelector: kubernetes.io/os:linux

networkPolicies:
# -- Enable NetworkPolicies to limit access to the Longhorn pods
enabled: false
# Available types: k3s, rke2, rke1
# -- Create the policy based on your distribution to allow access for the ingress. Options: `k3s`, `rke2`, `rke1`
type: "k3s"

image:
longhorn:
engine:
# -- Specify Longhorn engine image repository
repository: longhornio/longhorn-engine
# -- Specify Longhorn engine image tag
tag: master-head
manager:
# -- Specify Longhorn manager image repository
repository: longhornio/longhorn-manager
# -- Specify Longhorn manager image tag
tag: master-head
ui:
# -- Specify Longhorn ui image repository
repository: longhornio/longhorn-ui
# -- Specify Longhorn ui image tag
tag: master-head
instanceManager:
# -- Specify Longhorn instance manager image repository
repository: longhornio/longhorn-instance-manager
# -- Specify Longhorn instance manager image tag
tag: master-head
shareManager:
# -- Specify Longhorn share manager image repository
repository: longhornio/longhorn-share-manager
# -- Specify Longhorn share manager image tag
tag: master-head
backingImageManager:
# -- Specify Longhorn backing image manager image repository
repository: longhornio/backing-image-manager
# -- Specify Longhorn backing image manager image tag
tag: master-head
supportBundleKit:
# -- Specify Longhorn support bundle manager image repository
repository: longhornio/support-bundle-kit
# -- Specify Longhorn support bundle manager image tag
tag: v0.0.27
csi:
attacher:
# -- Specify CSI attacher image repository. Leave blank to autodetect
repository: longhornio/csi-attacher
# -- Specify CSI attacher image tag. Leave blank to autodetect
tag: v4.2.0
provisioner:
# -- Specify CSI provisioner image repository. Leave blank to autodetect
repository: longhornio/csi-provisioner
# -- Specify CSI provisioner image tag. Leave blank to autodetect
tag: v3.4.1
nodeDriverRegistrar:
# -- Specify CSI node driver registrar image repository. Leave blank to autodetect
repository: longhornio/csi-node-driver-registrar
# -- Specify CSI node driver registrar image tag. Leave blank to autodetect
tag: v2.7.0
resizer:
# -- Specify CSI driver resizer image repository. Leave blank to autodetect
repository: longhornio/csi-resizer
# -- Specify CSI driver resizer image tag. Leave blank to autodetect
tag: v1.7.0
snapshotter:
# -- Specify CSI driver snapshotter image repository. Leave blank to autodetect
repository: longhornio/csi-snapshotter
# -- Specify CSI driver snapshotter image tag. Leave blank to autodetect.
tag: v6.2.1
livenessProbe:
# -- Specify CSI liveness probe image repository. Leave blank to autodetect
repository: longhornio/livenessprobe
# -- Specify CSI liveness probe image tag. Leave blank to autodetect
tag: v2.9.0
openshift:
oauthProxy:
# -- For openshift user. Specify oauth proxy image repository
repository: quay.io/openshift/origin-oauth-proxy
tag: 4.13 # Use Your OCP/OKD 4.X Version, Current Stable is 4.13
# -- For openshift user. Specify oauth proxy image tag. Note: Use your OCP/OKD 4.X Version, Current Stable is 4.13
tag: 4.13
# -- Image pull policy which applies to all user deployed Longhorn Components. e.g., Longhorn manager, Longhorn driver, Longhorn UI
pullPolicy: IfNotPresent

service:
ui:
# -- Define Longhorn UI service type. Options: `ClusterIP`, `NodePort`, `LoadBalancer`, `Rancher-Proxy`
type: ClusterIP
# -- NodePort port number (to set explicitly, choose port between 30000-32767)
nodePort: null
manager:
# -- Define Longhorn manager service type.
type: ClusterIP
# -- NodePort port number (to set explicitly, choose port between 30000-32767)
nodePort: ""
loadBalancerIP: ""
loadBalancerSourceRanges: ""

persistence:
# -- Set Longhorn StorageClass as default
defaultClass: true
# -- Set filesystem type for Longhorn StorageClass
defaultFsType: ext4
# -- Set mkfs options for Longhorn StorageClass
defaultMkfsParams: ""
# -- Set replica count for Longhorn StorageClass
defaultClassReplicaCount: 3
defaultDataLocality: disabled # best-effort otherwise
# -- Set data locality for Longhorn StorageClass. Options: `disabled`, `best-effort`
defaultDataLocality: disabled
# -- Define reclaim policy. Options: `Retain`, `Delete`
reclaimPolicy: Delete
# -- Set volume migratable for Longhorn StorageClass
migratable: false
recurringJobSelector:
# -- Enable recurring job selector for Longhorn StorageClass
enable: false
# -- Recurring job selector list for Longhorn StorageClass. Please be careful of quotes of input. e.g., `[{"name":"backup", "isGroup":true}]`
jobList: []
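## Illustrative only, following the quoting shown in the comment above;
## "backup" is a placeholder recurring job group name:
# jobList: '[{"name":"backup", "isGroup":true}]'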
backingImage:
# -- Set backing image for Longhorn StorageClass
enable: false
# -- Specify a backing image that will be used by Longhorn volumes in Longhorn StorageClass. If it does not exist, the backing image data source type and backing image data source parameters should be specified so that Longhorn will create the backing image before using it
name: ~
# -- Specify the data source type for the backing image used in Longhorn StorageClass.
# If the backing image does not exist, Longhorn will use this field to create a backing image. Otherwise, Longhorn will use it to verify the selected backing image.
dataSourceType: ~
# -- Specify the data source parameters for the backing image used in Longhorn StorageClass. This option accepts a json string of a map. e.g., `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`.
dataSourceParameters: ~
# -- Specify the expected SHA512 checksum of the selected backing image in Longhorn StorageClass
expectedChecksum: ~
defaultNodeSelector:
enable: false # disable by default
# -- Enable Node selector for Longhorn StorageClass
enable: false
# -- This selector enables only certain nodes having these tags to be used for the volume. e.g. `"storage,fast"`
selector: ""
removeSnapshotsDuringFilesystemTrim: ignored # "enabled" or "disabled" otherwise
# -- Allow automatically removing snapshots during filesystem trim for Longhorn StorageClass. Options: `ignored`, `enabled`, `disabled`
removeSnapshotsDuringFilesystemTrim: ignored

csi:
# -- Specify kubelet root-dir. Leave blank to autodetect
kubeletRootDir: ~
# -- Specify replica count of CSI Attacher. Leave blank to use default count: 3
attacherReplicaCount: ~
# -- Specify replica count of CSI Provisioner. Leave blank to use default count: 3
provisionerReplicaCount: ~
# -- Specify replica count of CSI Resizer. Leave blank to use default count: 3
resizerReplicaCount: ~
# -- Specify replica count of CSI Snapshotter. Leave blank to use default count: 3
snapshotterReplicaCount: ~

defaultSettings:
# -- The endpoint used to access the backupstore. Available: NFS, CIFS, AWS, GCP, AZURE.
backupTarget: ~
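## Illustrative endpoints only; the bucket, region, and server below are
## placeholders, and the URL scheme depends on the chosen backupstore type:
# backupTarget: "s3://backupbucket@us-east-1/backupstore"
# backupTarget: "nfs://nfs-server.example.com:/opt/backupstore"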
# -- The name of the Kubernetes secret associated with the backup target.
backupTargetCredentialSecret: ~
# -- If this setting is enabled, Longhorn will automatically attach the volume and take a snapshot/backup
# when it is time to do a recurring snapshot/backup.
allowRecurringJobWhileVolumeDetached: ~
# -- Create default Disk automatically only on Nodes with the label "node.longhorn.io/create-default-disk=true" if no other disks exist.
# If disabled, the default disk will be created on all new nodes when each node is first added.
createDefaultDiskLabeledNodes: ~
# -- Default path to use for storing data on a host. By default "/var/lib/longhorn/"
defaultDataPath: ~
# -- Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume.
defaultDataLocality: ~
# -- Allow scheduling on nodes with existing healthy replicas of the same volume. By default false.
replicaSoftAntiAffinity: ~
# -- Enable this setting to automatically rebalance replicas when an available node is discovered.
replicaAutoBalance: ~
# -- The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity. By default 200.
storageOverProvisioningPercentage: ~
# -- If the minimum available disk capacity exceeds the actual percentage of available disk capacity,
# the disk becomes unschedulable until more space is freed up. By default 25.
storageMinimalAvailablePercentage: ~
# -- The reserved percentage specifies the percentage of disk space that will not be allocated to the default disk on each new Longhorn node.
storageReservedPercentageForDefaultDisk: ~
# -- Upgrade Checker will check for new Longhorn version periodically.
# When there is a new version available, a notification will appear in the UI. By default true.
upgradeChecker: ~
# -- The default number of replicas when a volume is created from the Longhorn UI.
# For Kubernetes configuration, update the `numberOfReplicas` in the StorageClass. By default 3.
defaultReplicaCount: ~
# -- The 'storageClassName' is given to PVs and PVCs that are created for an existing Longhorn volume. The StorageClass name can also be used as a label,
# so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object.
# By default 'longhorn-static'.
defaultLonghornStaticStorageClass: ~
# -- In seconds. The backupstore poll interval determines how often Longhorn checks the backupstore for new backups.
# Set to 0 to disable the polling. By default 300.
backupstorePollInterval: ~
# -- In minutes. This setting determines how long Longhorn will keep the backup resource that was failed. Set to 0 to disable the auto-deletion.
failedBackupTTL: ~
# -- Restore recurring jobs from the backup volume on the backup target and create recurring jobs if they do not exist during a backup restoration.
restoreVolumeRecurringJobs: ~
# -- This setting specifies how many successful backup or snapshot job histories should be retained. History will not be retained if the value is 0.
recurringSuccessfulJobsHistoryLimit: ~
# -- This setting specifies how many failed backup or snapshot job histories should be retained. History will not be retained if the value is 0.
recurringFailedJobsHistoryLimit: ~
# -- This setting specifies how many failed support bundles can exist in the cluster.
# Set this value to **0** to have Longhorn automatically purge all failed support bundles.
supportBundleFailedHistoryLimit: ~
# -- taintToleration for longhorn system components
taintToleration: ~
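## Illustrative only, using the "key=value:effect" format shown in the
## windowsCluster defaults above; the key and value are placeholders:
# taintToleration: "storage=longhorn:NoSchedule"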
# -- nodeSelector for longhorn system components
systemManagedComponentsNodeSelector: ~
# -- priorityClass for longhorn system components
priorityClass: ~
# -- If enabled, volumes will be automatically salvaged when all the replicas become faulty e.g. due to network disconnection.
# Longhorn will try to figure out which replica(s) are usable, then use them for the volume. By default true.
autoSalvage: ~
# -- If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc...)
# when Longhorn volume is detached unexpectedly (e.g. during Kubernetes upgrade, Docker reboot, or network disconnect).
# By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount.
autoDeletePodWhenVolumeDetachedUnexpectedly: ~
# -- Disable Longhorn manager to schedule replica on Kubernetes cordoned node. By default true.
disableSchedulingOnCordonedNode: ~
# -- Allow scheduling new Replicas of Volume to the Nodes in the same Zone as existing healthy Replicas.
# Nodes that don't belong to any Zone will be treated as in the same Zone.
# Notice that Longhorn relies on label `topology.kubernetes.io/zone=<Zone name of the node>` in the Kubernetes node object to identify the zone.
# By default true.
replicaZoneSoftAntiAffinity: ~
# -- Allow scheduling on disks with existing healthy replicas of the same volume. By default true.
replicaDiskSoftAntiAffinity: ~
# -- Defines the Longhorn action when a Volume is stuck with a StatefulSet/Deployment Pod on a node that is down.
nodeDownPodDeletionPolicy: ~
# -- Define the policy to use when a node with the last healthy replica of a volume is drained.
nodeDrainPolicy: ~
# -- In seconds. The interval determines how long Longhorn will wait at least in order to reuse the existing data on a failed replica
# rather than directly creating a new replica for a degraded volume.
replicaReplenishmentWaitInterval: ~
# -- This setting controls how many replicas on a node can be rebuilt simultaneously.
concurrentReplicaRebuildPerNodeLimit: ~
# -- This setting controls how many volumes on a node can restore the backup concurrently. Set the value to **0** to disable backup restore.
concurrentVolumeBackupRestorePerNodeLimit: ~
# -- This setting is only for volumes created by UI.
# By default, this is false, meaning there will be a revision counter file to track every write to the volume.
# During salvage recovering, Longhorn will pick the replica with the largest revision counter as the candidate to recover the whole volume.
# If the revision counter is disabled, Longhorn will not track every write to the volume.
# During the salvage recovering, Longhorn will use the 'volume-head-xxx.img' file last modification time and
# file size to pick the replica candidate to recover the whole volume.
disableRevisionCounter: ~
# -- This setting defines the Image Pull Policy of Longhorn system managed pod.
# e.g. instance manager, engine image, CSI driver, etc.
# The new Image Pull Policy will only apply after the system managed pods restart.
systemManagedPodsImagePullPolicy: ~
# -- This setting allows users to create and attach a volume that doesn't have all the replicas scheduled at the time of creation.
allowVolumeCreationWithDegradedAvailability: ~
# -- This setting enables Longhorn to automatically clean up the system generated snapshot after replica rebuild is done.
autoCleanupSystemGeneratedSnapshot: ~
# -- This setting controls how Longhorn automatically upgrades volumes' engines to the new default engine image after upgrading Longhorn manager.
# The value of this setting specifies the maximum number of engines per node that are allowed to upgrade to the default engine image at the same time.
# If the value is 0, Longhorn will not automatically upgrade volumes' engines to the default version.
concurrentAutomaticEngineUpgradePerNodeLimit: ~
# -- This interval in minutes determines how long Longhorn will wait before cleaning up the backing image file when there is no replica in the disk using it.
backingImageCleanupWaitInterval: ~
# -- This interval in seconds determines how long Longhorn will wait before re-downloading the backing image file
# when all disk files of this backing image become failed or unknown.
backingImageRecoveryWaitInterval: ~
# -- This integer value indicates what percentage of the total allocatable CPU on each node will be reserved for each instance manager Pod.
# You can leave it with the default value, which is 12%.
guaranteedInstanceManagerCPU: ~
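## Worked example of the formula from the chart questions, with illustrative
## numbers: for at most 20 engine/replica instances expected on a node with
## 16 allocatable CPUs, 20 * 0.1 / 16 * 100 = 12.5, i.e. roughly the 12% default:
# guaranteedInstanceManagerCPU: 12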
# -- Enabling this setting will notify Longhorn that the cluster is using Kubernetes Cluster Autoscaler.
kubernetesClusterAutoscalerEnabled: ~
# -- This setting allows Longhorn to delete the orphan resource and its corresponding orphaned data automatically, like stale replicas.
# Orphan resources on down or unknown nodes will not be cleaned up automatically.
orphanAutoDeletion: ~
# -- Longhorn uses the storage network for in-cluster data traffic. Leave this blank to use the Kubernetes cluster network.
storageNetwork: ~
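## Illustrative only: a pre-existing Multus NetworkAttachmentDefinition in
## the "<namespace>/<name>" format; both names below are placeholders:
# storageNetwork: "kube-system/storage-network"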
# -- This flag is designed to prevent Longhorn from being accidentally uninstalled, which will lead to data loss.
deletingConfirmationFlag: ~
# -- In seconds. The setting specifies the timeout between the engine and replica(s), and the value should be between 8 and 30 seconds.
# The default value is 8 seconds.
engineReplicaTimeout: ~
# -- This setting allows users to enable or disable snapshot hashing and data integrity checking.
snapshotDataIntegrity: ~
# -- Hashing snapshot disk files impacts the performance of the system.
# The immediate snapshot hashing and checking can be disabled to minimize the impact after creating a snapshot.
snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~
# -- Unix-cron string format. The setting specifies when Longhorn checks the data integrity of snapshot disk files.
snapshotDataIntegrityCronjob: ~
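## The chart default "0 0 */7 * *" runs the check at midnight on every 7th
## day of the month (the 1st, 8th, 15th, 22nd, and 29th):
# snapshotDataIntegrityCronjob: "0 0 */7 * *"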
# -- This setting allows Longhorn filesystem trim feature to automatically mark the latest snapshot and
# its ancestors as removed and stops at the snapshot containing multiple children.
removeSnapshotsDuringFilesystemTrim: ~
# -- This feature supports the fast replica rebuilding.
# It relies on the checksum of snapshot disk files, so setting the snapshot-data-integrity to **enable** or **fast-check** is a prerequisite.
fastReplicaRebuildEnabled: ~
# -- In seconds. The setting specifies the HTTP client timeout to the file sync server.
replicaFileSyncHttpClientTimeout: ~
# -- The log level Panic, Fatal, Error, Warn, Info, Debug, Trace used in longhorn manager. Defaults to Info.
logLevel: ~
# -- This setting allows users to specify backup compression method.
backupCompressionMethod: ~
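## Options documented in the chart questions: `none` (for already-compressed
## multimedia data), `lz4` (fast, the default), `gzip` (higher ratio but slower):
# backupCompressionMethod: "lz4"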
# -- This setting controls how many worker threads per backup run concurrently.
backupConcurrentLimit: ~
# -- This setting controls how many worker threads per restore run concurrently.
restoreConcurrentLimit: ~
# -- This allows users to activate v2 data engine based on SPDK.
# Currently, it is in the preview phase and should not be utilized in a production environment.
v2DataEngine: ~
# -- This setting allows users to enable the offline replica rebuilding for volumes using v2 data engine.
offlineReplicaRebuilding: ~
# -- Allow Scheduling Empty Node Selector Volumes To Any Node
allowEmptyNodeSelectorVolume: ~
# -- Allow Scheduling Empty Disk Selector Volumes To Any Disk
allowEmptyDiskSelectorVolume: ~

privateRegistry:
# -- Set `true` to create a new private registry secret
createSecret: ~
# -- URL of private registry. Leave blank to apply system default registry
registryUrl: ~
# -- User used to authenticate to private registry
registryUser: ~
# -- Password used to authenticate to private registry
registryPasswd: ~
# -- If createSecret is true, create a Kubernetes secret with this name; else use the existing secret of this name. Use it to pull images from your private registry
registrySecret: ~
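## Illustrative only; the registry URL, credentials, and secret name below
## are placeholders:
# createSecret: true
# registryUrl: "registry.example.com"
# registryUser: "admin"
# registryPasswd: "password"
# registrySecret: "longhorn-registry-secret"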

longhornManager:
log:
## Allowed values are `plain` or `json`.
# -- Options: `plain`, `json`
format: plain
# -- Priority class for longhorn manager
priorityClass: ~
# -- Tolerate nodes to run Longhorn manager
tolerations: []
## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
## and uncomment this example block
@ -193,11 +351,13 @@ longhornManager:
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
# -- Select nodes to run Longhorn manager
nodeSelector: {}
## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
# -- Annotation used in Longhorn manager service
serviceAnnotations: {}
## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
## and uncomment this example block
@ -205,7 +365,9 @@ longhornManager:
# annotation-key2: "annotation-value2"

longhornDriver:
# -- Priority class for longhorn driver
priorityClass: ~
# -- Tolerate nodes to run Longhorn driver
tolerations: []
## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above
## and uncomment this example block
@ -213,6 +375,7 @@ longhornDriver:
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
# -- Select nodes to run Longhorn driver
nodeSelector: {}
## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above
## and uncomment this example block
@ -220,8 +383,11 @@ longhornDriver:
# label-key2: "label-value2"

longhornUI:
# -- Replica count for longhorn ui
replicas: 2
# -- Priority class for longhorn ui
priorityClass: ~
# -- Tolerate nodes to run Longhorn UI
tolerations: []
## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
## and uncomment this example block
@ -229,6 +395,7 @@ longhornUI:
# operator: "Equal"
# value: "value"
# effect: "NoSchedule"
# -- Select nodes to run Longhorn UI
nodeSelector: {}
## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above
## and uncomment this example block
@ -236,29 +403,29 @@ longhornUI:
# label-key2: "label-value2"

ingress:
## Set to true to enable ingress record generation
# -- Set to true to enable ingress record generation
enabled: false

## Add ingressClassName to the Ingress
## Can replace the kubernetes.io/ingress.class annotation on v1.18+
# -- Add ingressClassName to the Ingress
# Can replace the kubernetes.io/ingress.class annotation on v1.18+
ingressClassName: ~

# -- Layer 7 Load Balancer hostname
host: sslip.io

## Set this to true in order to enable TLS on the ingress record
# -- Set this to true in order to enable TLS on the ingress record
tls: false

## Enable this in order to enable that the backend service will be connected at port 443
# -- Enable this in order to enable that the backend service will be connected at port 443
secureBackends: false

## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
# -- If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: longhorn.local-tls

## If ingress is enabled you can set the default ingress path
## then you can access the UI by using the following full path {{host}}+{{path}}
# -- If ingress is enabled you can set the default ingress path
# then you can access the UI by using the following full path {{host}}+{{path}}
path: /

## Ingress annotations done as key:value pairs
## If you're using kube-lego, you will want to add:
## kubernetes.io/tls-acme: true
##
@ -266,10 +433,12 @@ ingress:
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
# -- Ingress annotations done as key:value pairs
annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: true

# -- If you're providing your own certificates, please use this to add the certificates as secrets
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
@ -284,25 +453,25 @@ ingress:
# key:
# certificate:

# For Kubernetes < v1.25, if your cluster enables Pod Security Policy admission controller,
# set this to `true` to ship longhorn-psp which allow privileged Longhorn pods to start
# -- For Kubernetes < v1.25, if your cluster enables Pod Security Policy admission controller,
# set this to `true` to ship longhorn-psp which allows privileged Longhorn pods to start
enablePSP: false

## Specify override namespace, specifically this is useful for using longhorn as sub-chart
## and its release namespace is not the `longhorn-system`
namespaceOverride: ""

# Annotations to add to the Longhorn Manager DaemonSet Pods. Optional.
# -- Annotations to add to the Longhorn Manager DaemonSet Pods. Optional.
annotations: {}

serviceAccount:
# Annotations to add to the service account
# -- Annotations to add to the service account
annotations: {}

## openshift settings
openshift:
enabled: false # true
# -- Enable when using openshift
enabled: false
ui:
# -- UI route in openshift environment
route: "longhorn-ui"
# -- UI port in openshift environment
port: 443
# -- UI proxy in openshift environment
proxy: 8443

15
scripts/helm-docs.sh
Executable file
@ -0,0 +1,15 @@
#!/bin/bash
## Reference: https://github.com/norwoodj/helm-docs

set -o errexit
set -o xtrace

PRJ_DIR=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/.." 2>/dev/null || realpath "$(dirname "${BASH_SOURCE[0]}")/.." 2>/dev/null)
CHART_DIR="$PRJ_DIR/chart"
echo "$CHART_DIR"

echo "Running Helm-Docs"
sudo docker run \
-v "$CHART_DIR:/helm-docs" \
-u $(id -u) \
jnorwood/helm-docs:v1.9.1