diff --git a/chart/Chart.yaml b/chart/Chart.yaml index 8e15810..50d23cc 100644 --- a/chart/Chart.yaml +++ b/chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: longhorn -version: 1.1.0 -appVersion: v1.1.0 +version: 1.1.1 +appVersion: v1.1.1 kubeVersion: ">=v1.16.0-r0" description: Longhorn is a distributed block storage system for Kubernetes. keywords: @@ -11,6 +11,7 @@ keywords: - block - device - iscsi +- nfs home: https://github.com/longhorn/longhorn sources: - https://github.com/longhorn/longhorn @@ -20,9 +21,8 @@ sources: - https://github.com/longhorn/longhorn-manager - https://github.com/longhorn/longhorn-ui - https://github.com/longhorn/longhorn-tests +- https://github.com/longhorn/backing-image-manager maintainers: - name: Longhorn maintainers email: maintainers@longhorn.io -- name: Sheng Yang - email: sheng@yasker.org -icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.svg?sanitize=true +icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.png diff --git a/chart/README.md b/chart/README.md index d2f6d05..5d693de 100644 --- a/chart/README.md +++ b/chart/README.md @@ -16,10 +16,10 @@ Longhorn is 100% open source software. Project source code is spread across a nu ## Prerequisites -1. Docker v1.13+ +1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.) 2. Kubernetes v1.16+ 3. Make sure `curl`, `findmnt`, `grep`, `awk` and `blkid` has been installed in all nodes of the Kubernetes cluster. -4. Make sure `open-iscsi` has been installed in all nodes of the Kubernetes cluster. For GKE, recommended Ubuntu as guest OS image since it contains `open-iscsi` already. +4. Make sure `open-iscsi` has been installed, and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, recommended Ubuntu as guest OS image since it contains `open-iscsi` already. ## Installation 1. 
Add Longhorn chart repository. diff --git a/chart/questions.yml b/chart/questions.yml index ef33761..7f292d0 100644 --- a/chart/questions.yml +++ b/chart/questions.yml @@ -17,7 +17,7 @@ questions: label: Longhorn Manager Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.manager.tag - default: v1.1.0 + default: v1.1.1 description: "Specify Longhorn Manager Image Tag" type: string label: Longhorn Manager Image Tag @@ -29,7 +29,7 @@ questions: label: Longhorn Engine Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.engine.tag - default: v1.1.0 + default: v1.1.1 description: "Specify Longhorn Engine Image Tag" type: string label: Longhorn Engine Image Tag @@ -41,7 +41,7 @@ questions: label: Longhorn UI Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.ui.tag - default: v1.1.0 + default: v1.1.1 description: "Specify Longhorn UI Image Tag" type: string label: Longhorn UI Image Tag @@ -65,11 +65,23 @@ questions: label: Longhorn Share Manager Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.shareManager.tag - default: v1_20201204 + default: v1_20210416 description: "Specify Longhorn Share Manager Image Tag" type: string label: Longhorn Share Manager Image Tag group: "Longhorn Images Settings" + - variable: image.longhorn.backingImageManager.repository + default: longhornio/backing-image-manager + description: "Specify Longhorn Backing Image Manager Image Repository" + type: string + label: Longhorn Backing Image Manager Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.backingImageManager.tag + default: v1_20210422 + description: "Specify Longhorn Backing Image Manager Image Tag" + type: string + label: Longhorn Backing Image Manager Image Tag + group: "Longhorn Images Settings" - variable: image.csi.attacher.repository default: longhornio/csi-attacher description: "Specify CSI attacher image repository. 
Leave blank to autodetect." @@ -279,18 +291,6 @@ The available modes are: min: 1 max: 20 default: 3 - - variable: defaultSettings.guaranteedEngineCPU - label: Guaranteed Engine CPU - description: "Allow Longhorn Instance Managers to have guaranteed CPU allocation. By default 0.25. The value is how many CPUs should be reserved for each Engine/Replica Instance Manager Pod created by Longhorn. For example, 0.1 means one-tenth of a CPU. This will help maintain engine stability during high node workload. It only applies to the Engine/Replica Instance Manager Pods created after the setting took effect. -In order to prevent unexpected volume crash, you can use the following formula to calculate an appropriate value for this setting: -'Guaranteed Engine CPU = The estimated max Longhorn volume/replica count on a node * 0.1'. -The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting. -If it's hard to estimate the volume/replica count now, you can leave it with the default value, or allocate 1/8 of total CPU of a node. Then you can tune it when there is no running workload using Longhorn volumes. -WARNING: After this setting is changed, all the instance managers on all the nodes will be automatically restarted -WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES." - group: "Longhorn Default Settings" - type: float - default: 0.25 - variable: defaultSettings.defaultLonghornStaticStorageClass label: Default Longhorn Static StorageClass Name description: "The 'storageClassName' is given to PVs and PVCs that are created for an existing Longhorn volume. The StorageClass name can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. By default 'longhorn-static'." 
@@ -304,26 +304,6 @@ WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES." type: int min: 0 default: 300 - - variable: defaultSettings.taintToleration - label: Kubernetes Taint Toleration - description: "To dedicate nodes to store Longhorn replicas and reject other general workloads, set tolerations for Longhorn and add taints for the storage nodes. -All Longhorn volumes should be detached before modifying toleration settings. -We recommend setting tolerations during Longhorn deployment because the Longhorn system cannot be operated during the update. -Multiple tolerations can be set here, and these tolerations are separated by semicolon. For example: -* `key1=value1:NoSchedule; key2:NoExecute` -* `:` this toleration tolerates everything because an empty key with operator `Exists` matches all keys, values and effects -* `key1=value1:` this toleration has empty effect. It matches all effects with key `key1` -Because `kubernetes.io` is used as the key of all Kubernetes default tolerations, it should not be used in the toleration settings. -WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES!" - group: "Longhorn Default Settings" - type: string - default: "" - - variable: defaultSettings.priorityClass - label: Priority Class - description: "The name of the Priority Class to set on the Longhorn workloads. This can help prevent Longhorn workloads from being evicted under Node Pressure. WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES." - group: "Longhorn Default Settings" - type: string - default: "" - variable: defaultSettings.autoSalvage label: Automatic salvage description: "If enabled, volumes will be automatically salvaged when all the replicas become faulty e.g. due to network disconnection. Longhorn will try to figure out which replica(s) are usable, then use them for the volume. By default true." @@ -429,6 +409,56 @@ Warning: This option works only when there is a failed replica in the volume. 
An group: "Longhorn Default Settings" type: boolean default: "true" + - variable: defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit + label: Concurrent Automatic Engine Upgrade Per Node Limit + description: "This setting controls how Longhorn automatically upgrades volumes' engines to the new default engine image after upgrading Longhorn manager. The value of this setting specifies the maximum number of engines per node that are allowed to upgrade to the default engine image at the same time. If the value is 0, Longhorn will not automatically upgrade volumes' engines to default version." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 0 + - variable: defaultSettings.backingImageCleanupWaitInterval + label: Backing Image Cleanup Wait Interval + description: "This interval in minutes determines how long Longhorn will wait before cleaning up the backing image file when there is no replica in the disk using it." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 60 + - variable: defaultSettings.guaranteedEngineManagerCPU + label: Guaranteed Engine Manager CPU + description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each engine manager Pod. For example, 10 means 10% of the total CPU on a node will be allocated to each engine manager pod on this node. This will help maintain engine stability during high node workload. + In order to prevent unexpected volume engine crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting: + Guaranteed Engine Manager CPU = The estimated max Longhorn volume engine count on a node * 0.1 / The total allocatable CPUs on the node * 100. + The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. 
To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting. + If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes. + WARNING: + - Value 0 means unsetting CPU requests for engine manager pods. + - Considering the possible new instance manager pods in the future system upgrade, this integer value ranges from 0 to 40. And the sum with setting 'Guaranteed Replica Manager CPU' should not be greater than 40. + - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then. + - This global setting will be ignored for a node if the field \"EngineManagerCPURequest\" on the node is set. + - After this setting is changed, all engine manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES." + group: "Longhorn Default Settings" + type: int + min: 0 + max: 40 + default: 12 + - variable: defaultSettings.guaranteedReplicaManagerCPU + label: Guaranteed Replica Manager CPU + description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each replica manager Pod. 10 means 10% of the total CPU on a node will be allocated to each replica manager pod on this node. This will help maintain replica stability during high node workload. 
+ In order to prevent unexpected volume replica crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting: + Guaranteed Replica Manager CPU = The estimated max Longhorn volume replica count on a node * 0.1 / The total allocatable CPUs on the node * 100. + The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting. + If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes. + WARNING: + - Value 0 means unsetting CPU requests for replica manager pods. + - Considering the possible new instance manager pods in the future system upgrade, this integer value ranges from 0 to 40. And the sum with setting 'Guaranteed Engine Manager CPU' should not be greater than 40. + - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then. + - This global setting will be ignored for a node if the field \"ReplicaManagerCPURequest\" on the node is set. + - After this setting is changed, all replica manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES." 
+ group: "Longhorn Default Settings" + type: int + min: 0 + max: 40 + default: 12 - variable: persistence.defaultClass default: "true" description: "Set as default StorageClass for Longhorn" diff --git a/chart/templates/clusterrole.yaml b/chart/templates/clusterrole.yaml index c697617..cd5aafb 100644 --- a/chart/templates/clusterrole.yaml +++ b/chart/templates/clusterrole.yaml @@ -37,7 +37,7 @@ rules: - apiGroups: ["longhorn.io"] resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status", - "sharemanagers", "sharemanagers/status"] + "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", "backingimagemanagers", "backingimagemanagers/status"] verbs: ["*"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] diff --git a/chart/templates/crds.yaml b/chart/templates/crds.yaml index 04cf9ed..aa49047 100644 --- a/chart/templates/crds.yaml +++ b/chart/templates/crds.yaml @@ -378,3 +378,97 @@ spec: - name: Age type: date jsonPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: {{- include "longhorn.labels" . 
| nindent 4 }} + longhorn-manager: BackingImage + name: backingimages.longhorn.io +spec: + group: longhorn.io + names: + kind: BackingImage + listKind: BackingImageList + plural: backingimages + shortNames: + - lhbi + singular: backingimage + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: Image + type: string + description: The backing image name + jsonPath: .spec.image + - name: Age + type: date + jsonPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: BackingImageManager + name: backingimagemanagers.longhorn.io +spec: + group: longhorn.io + names: + kind: BackingImageManager + listKind: BackingImageManagerList + plural: backingimagemanagers + shortNames: + - lhbim + singular: backingimagemanager + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: The current state of the manager + jsonPath: .status.currentState + - name: Image + type: string + description: The image the manager pod will use + jsonPath: .spec.image + - name: Node + type: string + description: The node the manager is on + jsonPath: .spec.nodeID + - name: DiskUUID + type: string + description: The disk the manager is responsible for + jsonPath: .spec.diskUUID + - name: DiskPath + type: string + description: The disk path the manager is using + jsonPath: .spec.diskPath + - name: Age + type: date + jsonPath: 
.metadata.creationTimestamp diff --git a/chart/templates/daemonset-sa.yaml b/chart/templates/daemonset-sa.yaml index e407935..636a4c0 100644 --- a/chart/templates/daemonset-sa.yaml +++ b/chart/templates/daemonset-sa.yaml @@ -13,6 +13,10 @@ spec: metadata: labels: {{- include "longhorn.labels" . | nindent 8 }} app: longhorn-manager + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} spec: containers: - name: longhorn-manager @@ -30,6 +34,8 @@ spec: - "{{ template "registry_url" . }}{{ .Values.image.longhorn.instanceManager.repository }}:{{ .Values.image.longhorn.instanceManager.tag }}" - --share-manager-image - "{{ template "registry_url" . }}{{ .Values.image.longhorn.shareManager.repository }}:{{ .Values.image.longhorn.shareManager.tag }}" + - --backing-image-manager-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.backingImageManager.repository }}:{{ .Values.image.longhorn.backingImageManager.tag }}" - --manager-image - "{{ template "registry_url" . 
}}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}" - --service-account @@ -45,9 +51,6 @@ spec: mountPath: /host/dev/ - name: proc mountPath: /host/proc/ - - name: varrun - mountPath: /var/run/ - mountPropagation: Bidirectional - name: longhorn mountPath: /var/lib/longhorn/ mountPropagation: Bidirectional @@ -75,9 +78,6 @@ spec: - name: proc hostPath: path: /proc/ - - name: varrun - hostPath: - path: /var/run/ - name: longhorn hostPath: path: /var/lib/longhorn/ @@ -88,7 +88,18 @@ spec: imagePullSecrets: - name: {{ .Values.privateRegistry.registrySecret }} {{- end }} + {{- if .Values.longhornManager.priorityClass }} + priorityClassName: {{ .Values.longhornManager.priorityClass | quote}} + {{- end }} serviceAccountName: longhorn-service-account + {{- if .Values.longhornManager.tolerations }} + tolerations: +{{ toYaml .Values.longhornManager.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornManager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }} + {{- end }} updateStrategy: rollingUpdate: maxUnavailable: "100%" diff --git a/chart/templates/default-setting.yaml b/chart/templates/default-setting.yaml index 14c264e..e1f0c7a 100644 --- a/chart/templates/default-setting.yaml +++ b/chart/templates/default-setting.yaml @@ -21,6 +21,7 @@ data: default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }} backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }} taint-toleration: {{ .Values.defaultSettings.taintToleration }} + system-managed-components-node-selector: {{ .Values.defaultSettings.systemManagedComponentsNodeSelector }} priority-class: {{ .Values.defaultSettings.priorityClass }} auto-salvage: {{ .Values.defaultSettings.autoSalvage }} auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }} @@ -36,3 +37,7 @@ data: 
system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }} allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }} auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }} + concurrent-automatic-engine-upgrade-per-node-limit: {{ .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit }} + backing-image-cleanup-wait-interval: {{ .Values.defaultSettings.backingImageCleanupWaitInterval }} + guaranteed-engine-manager-cpu: {{ .Values.defaultSettings.guaranteedEngineManagerCPU }} + guaranteed-replica-manager-cpu: {{ .Values.defaultSettings.guaranteedReplicaManagerCPU }} diff --git a/chart/templates/deployment-driver.yaml b/chart/templates/deployment-driver.yaml index c4b6e35..fb0390a 100644 --- a/chart/templates/deployment-driver.yaml +++ b/chart/templates/deployment-driver.yaml @@ -87,6 +87,17 @@ spec: {{- if .Values.privateRegistry.registrySecret }} imagePullSecrets: - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornDriver.priorityClass }} + priorityClassName: {{ .Values.longhornDriver.priorityClass | quote}} + {{- end }} + {{- if .Values.longhornDriver.tolerations }} + tolerations: +{{ toYaml .Values.longhornDriver.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornDriver.nodeSelector }} + nodeSelector: +{{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }} {{- end }} serviceAccountName: longhorn-service-account securityContext: diff --git a/chart/templates/deployment-ui.yaml b/chart/templates/deployment-ui.yaml index da7c0ea..e46a842 100644 --- a/chart/templates/deployment-ui.yaml +++ b/chart/templates/deployment-ui.yaml @@ -31,6 +31,17 @@ spec: imagePullSecrets: - name: {{ .Values.privateRegistry.registrySecret }} {{- end }} + {{- if .Values.longhornUI.priorityClass }} + priorityClassName: {{ .Values.longhornUI.priorityClass | quote}} + 
{{- end }} + {{- if .Values.longhornUI.tolerations }} + tolerations: +{{ toYaml .Values.longhornUI.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornUI.nodeSelector }} + nodeSelector: +{{ toYaml .Values.longhornUI.nodeSelector | indent 8 }} + {{- end }} --- kind: Service apiVersion: v1 diff --git a/chart/templates/ingress.yaml b/chart/templates/ingress.yaml index 5b3a405..13555f8 100644 --- a/chart/templates/ingress.yaml +++ b/chart/templates/ingress.yaml @@ -14,6 +14,9 @@ metadata: {{ $key }}: {{ $value | quote }} {{- end }} spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end }} rules: - host: {{ .Values.ingress.host }} http: diff --git a/chart/templates/postupgrade-job.yaml b/chart/templates/postupgrade-job.yaml index 6b6a385..4af75e2 100644 --- a/chart/templates/postupgrade-job.yaml +++ b/chart/templates/postupgrade-job.yaml @@ -19,6 +19,8 @@ spec: - name: longhorn-post-upgrade image: {{ template "registry_url" . 
}}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} imagePullPolicy: IfNotPresent + securityContext: + privileged: true command: - longhorn-manager - post-upgrade @@ -32,4 +34,15 @@ spec: imagePullSecrets: - name: {{ .Values.privateRegistry.registrySecret }} {{- end }} + {{- if .Values.longhornManager.priorityClass }} + priorityClassName: {{ .Values.longhornManager.priorityClass | quote}} + {{- end }} serviceAccountName: longhorn-service-account + {{- if .Values.longhornManager.tolerations }} + tolerations: +{{ toYaml .Values.longhornManager.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornManager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }} + {{- end }} diff --git a/chart/templates/uninstall-job.yaml b/chart/templates/uninstall-job.yaml index e7e9f14..5f21b10 100644 --- a/chart/templates/uninstall-job.yaml +++ b/chart/templates/uninstall-job.yaml @@ -19,6 +19,8 @@ spec: - name: longhorn-uninstall image: {{ template "registry_url" . 
}}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} imagePullPolicy: IfNotPresent + securityContext: + privileged: true command: - longhorn-manager - uninstall @@ -33,4 +35,15 @@ spec: imagePullSecrets: - name: {{ .Values.privateRegistry.registrySecret }} {{- end }} + {{- if .Values.longhornManager.priorityClass }} + priorityClassName: {{ .Values.longhornManager.priorityClass | quote}} + {{- end }} serviceAccountName: longhorn-service-account + {{- if .Values.longhornManager.tolerations }} + tolerations: +{{ toYaml .Values.longhornManager.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornManager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }} + {{- end }} diff --git a/chart/values.yaml b/chart/values.yaml index d3345d4..323d272 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -9,19 +9,22 @@ image: longhorn: engine: repository: longhornio/longhorn-engine - tag: v1.1.0 + tag: v1.1.1 manager: repository: longhornio/longhorn-manager - tag: v1.1.0 + tag: v1.1.1 ui: repository: longhornio/longhorn-ui - tag: v1.1.0 + tag: v1.1.1 instanceManager: repository: longhornio/longhorn-instance-manager tag: v1_20201216 shareManager: repository: longhornio/longhorn-share-manager - tag: v1_20201204 + tag: v1_20210416 + backingImageManager: + repository: longhornio/backing-image-manager + tag: v1_20210422 csi: attacher: repository: longhornio/csi-attacher @@ -79,6 +82,7 @@ defaultSettings: defaultLonghornStaticStorageClass: ~ backupstorePollInterval: ~ taintToleration: ~ + systemManagedComponentsNodeSelector: ~ priorityClass: ~ autoSalvage: ~ autoDeletePodWhenVolumeDetachedUnexpectedly: ~ @@ -94,13 +98,61 @@ defaultSettings: systemManagedPodsImagePullPolicy: ~ allowVolumeCreationWithDegradedAvailability: ~ autoCleanupSystemGeneratedSnapshot: ~ - + concurrentAutomaticEngineUpgradePerNodeLimit: ~ + backingImageCleanupWaitInterval: ~ + guaranteedEngineManagerCPU: ~ + 
guaranteedReplicaManagerCPU: ~ privateRegistry: registryUrl: ~ registryUser: ~ registryPasswd: ~ registrySecret: ~ +longhornManager: + priorityClass: ~ + tolerations: [] + ## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above + ## and uncomment this example block + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + nodeSelector: {} + ## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above + ## and uncomment this example block + # label-key1: "label-value1" + # label-key2: "label-value2" + +longhornDriver: + priorityClass: ~ + tolerations: [] + ## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above + ## and uncomment this example block + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + nodeSelector: {} + ## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above + ## and uncomment this example block + # label-key1: "label-value1" + # label-key2: "label-value2" + +longhornUI: + priorityClass: ~ + tolerations: [] + ## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above + ## and uncomment this example block + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + nodeSelector: {} + ## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above + ## and uncomment this example block + # label-key1: "label-value1" + # label-key2: "label-value2" + resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. 
This also increases chances charts run on environments with little @@ -118,6 +170,9 @@ ingress: ## Set to true to enable ingress record generation enabled: false + ## Add ingressClassName to the Ingress + ## Can replace the kubernetes.io/ingress.class annotation on v1.18+ + ingressClassName: ~ host: xip.io @@ -160,3 +215,6 @@ enablePSP: true ## Specify override namespace, specifically this is useful for using longhorn as sub-chart ## and its release namespace is not the `longhorn-system` namespaceOverride: "" + +# Annotations to add to the Longhorn Manager DaemonSet Pods. Optional. +annotations: {} diff --git a/deploy/backupstores/minio-backupstore.yaml b/deploy/backupstores/minio-backupstore.yaml index fe1b268..bd0b4b4 100644 --- a/deploy/backupstores/minio-backupstore.yaml +++ b/deploy/backupstores/minio-backupstore.yaml @@ -8,8 +8,8 @@ data: AWS_ACCESS_KEY_ID: bG9uZ2hvcm4tdGVzdC1hY2Nlc3Mta2V5 # longhorn-test-access-key AWS_SECRET_ACCESS_KEY: bG9uZ2hvcm4tdGVzdC1zZWNyZXQta2V5 # longhorn-test-secret-key AWS_ENDPOINTS: aHR0cHM6Ly9taW5pby1zZXJ2aWNlLmRlZmF1bHQ6OTAwMA== # https://minio-service.default:9000 - AWS_CERT: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUREekNDQWZlZ0F3SUJBZ0lSQU91d1oybTZ6SXl3c1h2a2UyNS9LYzB3RFFZSktvWklodmNOQVFFTEJRQXcKRWpFUU1BNEdBMVVFQ2hNSFFXTnRaU0JEYnpBZUZ3MHlNREEwTWpVd01qRTJNalphRncweU1UQTBNalV3TWpFMgpNalphTUJJeEVEQU9CZ05WQkFvVEIwRmpiV1VnUTI4d2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3CmdnRUtBb0lCQVFEWkpyWVUraVJhc1huSExvb1d0Tm9OQWpxN0U3YWNlQTJQZnQ1ZFM3aExzVUtCbExMOVVQMmUKZ0QrVFl3RmtCWVJNU3BsV0tNT0tuWEErNDZSVkRwSkhwSTF4YjhHNDV0L3gzVXhVaWc2WUFVbDBnTFV6N01rMQpYSUtRaWZZUllaL0FjUzJqU0VOYjRISFJ1aFZ5NzV0ZDdCaXNhd2J2TTJwTXI0dWNSR1lwZ3J6Z2V2eFBXSHZ1CnkxT29yRnIvNjFwV28wcG9aSXhtRmM2YXMzekw0NWlrRzRHN1A2ejJPamc4NGdrdnR4RFUzYVdmWXRNb3VhL3gKQVhkRlRCd2NqMkNHMHJtdmd4cE5KeEx5Kzl5NDVLVGU1SFlSd0xxUjVCeWtnVGt2RGplcWdXTnJyQWdCL3lLTApwU1ZjRmZkKzBWNjhyQmtNMEt3VlQ3bXF2WWRsZDVrTkFnTUJBQUdqWURCZU1BNEdBMVVkRHdFQi93UUVBd0lDCnBEQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQ1lHQTFVZEVRUWYKTUIyQ0ZXMXBibWx2TFhObGNuWnBZMlV1WkdWbVlYVnNkSWNFZndBQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQwpBUUVBdDBQYjM5QWliS0EyU1BPTHJHSmVRVlNaVTdZbFUyU0h0M2lhcFVBS1Rtb2o1RTQrTU8yV2NnbktoRktrCnNxeW9CYjBPYTNPMHdXUnRvVnhNUGdPQS9FaXNtZExQWmJIVTUzS2w3SDVWMm8rb0tQY0YydTk2ajdlcUZxSkUKMlltQkpBTHlUVks5LzZhS1hOSnRYZE5xRmpPMWJRcDJRTURXbjQyZGgyNjJLZmgvekM4enRyK0h4RzhuTVpQQwpsZUpxbzU3S0tkZ0YvZHVBWTdUaUI2cThTelE4RmViMklQQ2FhdVVLNzdBZ0d5b3kzK1JuWkdZV2U1MG1KVnN6CmdkQTFURmg0TVdMeUxWSFdIbnl2cEFvTjJIUjQrdzhYRkpJS2VRRFM1YklJM1pFeU5OQUZNRDg0bTVReGY4cjUKMEovQWhXTVVyMFUwcCtiRi9KM3FDQVNSK3c9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - AWS_CERT_KEY: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRRFpKcllVK2lSYXNYbkgKTG9vV3ROb05BanE3RTdhY2VBMlBmdDVkUzdoTHNVS0JsTEw5VVAyZWdEK1RZd0ZrQllSTVNwbFdLTU9LblhBKwo0NlJWRHBKSHBJMXhiOEc0NXQveDNVeFVpZzZZQVVsMGdMVXo3TWsxWElLUWlmWVJZWi9BY1MyalNFTmI0SEhSCnVoVnk3NXRkN0Jpc2F3YnZNMnBNcjR1Y1JHWXBncnpnZXZ4UFdIdnV5MU9vckZyLzYxcFdvMHBvWkl4bUZjNmEKczN6TDQ1aWtHNEc3UDZ6Mk9qZzg0Z2t2dHhEVTNhV2ZZdE1vdWEveEFYZEZUQndjajJDRzBybXZneHBOSnhMeQorOXk0NUtUZTVIWVJ3THFSNUJ5a2dUa3ZEamVxZ1dOcnJBZ0IveUtMcFNWY0ZmZCswVjY4ckJrTTBLd1ZUN21xCnZZZGxkNWtOQWdNQkFBRUNnZ0VBQUlwREc2dy9tT1ltR21PNFBqUTI4cDlWekE5UmZmUWlmSC9oUjdRZmdqaXYKcEtqZEJScEZkelowY2dabUEzeXNCcENNN3hUczM1UmlxaFZnM0VGTUJkZVg3bmRMc1EwSjg0ME1XbzE1V2RGdgpBRll0blRKeWthcG9QTG5MSGVIelJzUkJTODJyTlRoS3NDM1pUYzdnd1F3TVI2bUFlK25SMHQwQTZPT1dxWFhECm5ENmdmdk9vNXJqUjE2WFhibE9vMkIwQ2RITStIb3lXTjJhbXhVL1pUNUlsVGFjVDBHT0FaajN4QW4yclRqSTYKRXRsRGx2cUhIYy8vY3c3ck1xSHZFVEdNbnBjakpRaEZic0pmR2p2OHcxSFQ3VFd6dHphdXZoektkbHBRakc3VgpJcFlsTXBObHl1RzJVVDJqQnBEcXIyT0hqTE5CWktXWFNVL0x3VWU5WVFLQmdRRHFWVzJRcUNVNmhxalQvMHlICml6OFVOK2J3ZHdKZ2tZdXRmQWxkbG94OG1WZ1BYZHdnVkZnNUFkZktLa2hLRkhBN3Nad1dnRFA2Mlovd0IxZWEKRVNIVkZYc1V5ay9pMjQ1THVUQm5UcGRIUXNTc0F3TGl0MVFRZk16dWxiR0ZYdHlUSVkrU3FVbGhCeXY0ckg5aApRakpFYWFTcEhxZzhFeGt0VjNqUVNqRVM1UUtCZ1FEdE9wVGxQMHlkZUVkT1BZQ3ZFWllFZzNzUStiWVdLMkdwCnh1dlF2UUZTV2lVRXZpcEd5OHJGaGVRb1dyOHh3cnVFZ25MUEg3TWFwSTBYcFFpRjQvVVZjNFBweDczWFg2cmwKQkxRZUZWbnZNR1lUMElDMWJ5Ty9oUmw1ZlhGRXdOWXQzVTE4RVJteFg0N1poVUZienNYNDNPYU5hUGVha1NpRQpvQmlpa2R4RENRS0JnRU5mK3BlYjhOQktCV0tteGI4M0J4VHVHY1ZMd25BM2lMeUJyRU92VklkQ283SVBYNG9nCkZobVY4UkJjWmRwKzArSWw1K1lFU0cyNGdxYkZ4YWN6ZzlHN1VsOGc2Q1VtMFZ2dVMvOTM5a0R6N280eWMzTHkKR1FhQWkzK1JwSy9mSFhaa01ONlBNOXprN2Z5YXhDa1htbEpYT1pPeWo5WnQrMUF4RlRoMkRIUU5Bb0dCQU9JSgpXSWdheVRpZHUydU1aSW5yT3NMd09KblRKVEY0Z21VUG1lL1p0Mkd0Yk9wWSs5VmJKc2tRSzNaY0NTTXp4aEtiCmJTTjNzK05sK040WHJNaE9qVjYwSTNQa2t6bWMrU3VnVUxMOWF5VGlPOUVUY1IvdlZ1T013ZG9sc1lCdU1XV2cKSU0xZlNkamRFVEtucXIvOGhGdjh0MXowTUVEQm9SYkZxTk4ySWFacEFv
R0FKVUN5SFcyS1o4cWJlWkJleW9PWApxeDF2VFVMRWtDMkdjZCs2a3VQdGlxdEpxMWVGNmc1N1BNWktoUHpoeTVVcGlxeDZZS1hrQ0tqZDlVc3FNQ2pNCm5KU2pJY3VXOWxFTmdCRmlWYjVzVVViTDdDVlhUalJkM1hab3BvemMyZjc5a1lNazVzYVpoWDdHL2Y3aGM1WWoKNUxqbkVJTWw3WWRyeUNsOHRyZjA0em89Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K + AWS_CERT: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMRENDQWhTZ0F3SUJBZ0lSQU1kbzQycGhUZXlrMTcvYkxyWjVZRHN3RFFZSktvWklodmNOQVFFTEJRQXcKR2pFWU1CWUdBMVVFQ2hNUFRHOXVaMmh2Y200Z0xTQlVaWE4wTUNBWERUSXdNRFF5TnpJek1EQXhNVm9ZRHpJeApNakF3TkRBek1qTXdNREV4V2pBYU1SZ3dGZ1lEVlFRS0V3OU1iMjVuYUc5eWJpQXRJRlJsYzNRd2dnRWlNQTBHCkNTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEWHpVdXJnUFpEZ3pUM0RZdWFlYmdld3Fvd2RlQUQKODRWWWF6ZlN1USs3K21Oa2lpUVBvelVVMmZvUWFGL1BxekJiUW1lZ29hT3l5NVhqM1VFeG1GcmV0eDBaRjVOVgpKTi85ZWFJNWRXRk9teHhpMElPUGI2T0RpbE1qcXVEbUVPSXljdjRTaCsvSWo5Zk1nS0tXUDdJZGxDNUJPeThkCncwOVdkckxxaE9WY3BKamNxYjN6K3hISHd5Q05YeGhoRm9tb2xQVnpJbnlUUEJTZkRuSDBuS0lHUXl2bGhCMGsKVHBHSzYxc2prZnFTK3hpNTlJeHVrbHZIRXNQcjFXblRzYU9oaVh6N3lQSlorcTNBMWZoVzBVa1JaRFlnWnNFbQovZ05KM3JwOFhZdURna2kzZ0UrOElXQWRBWHExeWhqRDdSSkI4VFNJYTV0SGpKUUtqZ0NlSG5HekFnTUJBQUdqCmF6QnBNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUIKQWY4RUJUQURBUUgvTURFR0ExVWRFUVFxTUNpQ0NXeHZZMkZzYUc5emRJSVZiV2x1YVc4dGMyVnlkbWxqWlM1awpaV1poZFd4MGh3Ui9BQUFCTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDbUZMMzlNSHVZMzFhMTFEajRwMjVjCnFQRUM0RHZJUWozTk9kU0dWMmQrZjZzZ3pGejFXTDhWcnF2QjFCMVM2cjRKYjJQRXVJQkQ4NFlwVXJIT1JNU2MKd3ViTEppSEtEa0Jmb2U5QWI1cC9VakpyS0tuajM0RGx2c1cvR3AwWTZYc1BWaVdpVWorb1JLbUdWSTI0Q0JIdgpnK0JtVzNDeU5RR1RLajk0eE02czNBV2xHRW95YXFXUGU1eHllVWUzZjFBWkY5N3RDaklKUmVWbENtaENGK0JtCmFUY1RSUWN3cVdvQ3AwYmJZcHlERFlwUmxxOEdQbElFOW8yWjZBc05mTHJVcGFtZ3FYMmtYa2gxa3lzSlEralAKelFadHJSMG1tdHVyM0RuRW0yYmk0TktIQVFIcFc5TXUxNkdRakUxTmJYcVF0VEI4OGpLNzZjdEg5MzRDYWw2VgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + AWS_CERT_KEY: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRRFh6VXVyZ1BaRGd6VDMKRFl1YWViZ2V3cW93ZGVBRDg0VllhemZTdVErNyttTmtpaVFQb3pVVTJmb1FhRi9QcXpCYlFtZWdvYU95eTVYagozVUV4bUZyZXR4MFpGNU5WSk4vOWVhSTVkV0ZPbXh4aTBJT1BiNk9EaWxNanF1RG1FT0l5Y3Y0U2grL0lqOWZNCmdLS1dQN0lkbEM1Qk95OGR3MDlXZHJMcWhPVmNwSmpjcWIzeit4SEh3eUNOWHhoaEZvbW9sUFZ6SW55VFBCU2YKRG5IMG5LSUdReXZsaEIwa1RwR0s2MXNqa2ZxUyt4aTU5SXh1a2x2SEVzUHIxV25Uc2FPaGlYejd5UEpaK3EzQQoxZmhXMFVrUlpEWWdac0VtL2dOSjNycDhYWXVEZ2tpM2dFKzhJV0FkQVhxMXloakQ3UkpCOFRTSWE1dEhqSlFLCmpnQ2VIbkd6QWdNQkFBRUNnZ0VBZlVyQ1hrYTN0Q2JmZjNpcnp2cFFmZnVEbURNMzV0TmlYaDJTQVpSVW9FMFYKbSsvZ1UvdnIrN2s2eUgvdzhMOXhpZXFhQTljVkZkL0JuTlIrMzI2WGc2dEpCNko2ZGZxODJZdmZOZ0VDaUFMaQpqalNGemFlQmhnT3ZsWXZHbTR5OTU1Q0FGdjQ1cDNac1VsMTFDRXJlL1BGbGtaWHRHeGlrWFl6NC85UTgzblhZCnM2eDdPYTgyUjdwT2lraWh3Q0FvVTU3Rjc4ZWFKOG1xTmkwRlF2bHlxSk9QMTFCbVp4dm54ZU11S2poQjlPTnAKTFNwMWpzZXk5bDZNR2pVbjBGTG53RHZkVWRiK0ZlUEkxTjdWYUNBd3hJK3JHa3JTWkhnekhWWE92VUpON2t2QQpqNUZPNW9uNGgvK3hXbkYzM3lxZ0VvWWZ0MFFJL2pXS2NOV1d1a2pCd1FLQmdRRGVFNlJGRUpsT2Q1aVcxeW1qCm45RENnczVFbXFtRXN3WU95bkN3U2RhK1lNNnZVYmlac1k4WW9wMVRmVWN4cUh2NkFQWGpVd2NBUG1QVE9KRW8KMlJtS0xTYkhsTnc4bFNOMWJsWDBEL3Mzamc1R3VlVW9nbW5TVnhMa0h1OFhKR0o3VzFReEUzZG9IUHRrcTNpagpoa09QTnJpZFM0UmxqNTJwYkhscjUvQzRjUUtCZ1FENHhFYmpuck1heFV2b0xxVTRvT2xiOVc5UytSUllTc0cxCmxJUmgzNzZTV0ZuTTlSdGoyMTI0M1hkaE4zUFBtSTNNeiswYjdyMnZSUi9LMS9Cc1JUQnlrTi9kbkVuNVUxQkEKYm90cGZIS1Jvc1FUR1hIQkEvM0JrNC9qOWplU3RmVXgzZ2x3eUI0L2hORy9KM1ZVV2FXeURTRm5qZFEvcGJsRwp6VWlsSVBmK1l3S0JnUUNwMkdYYmVJMTN5TnBJQ3psS2JqRlFncEJWUWVDQ29CVHkvUHRncUtoM3BEeVBNN1kyCnZla09VMWgyQVN1UkhDWHRtQXgzRndvVXNxTFFhY1FEZEw4bXdjK1Y5eERWdU02TXdwMDBjNENVQmE1L2d5OXoKWXdLaUgzeFFRaVJrRTZ6S1laZ3JqSkxYYXNzT1BHS2cxbEFYV1NlckRaV3R3MEEyMHNLdXQ0NlEwUUtCZ0hGZQpxZHZVR0ZXcjhvTDJ0dzlPcmVyZHVJVTh4RnZVZmVFdHRRTVJ2N3pjRE5qT0gxUnJ4Wk9aUW0ySW92dkp6MTIyCnFKMWhPUXJtV3EzTHFXTCtTU3o4L3pqMG4vWERWVUIzNElzTFR2ODJDVnVXN2ZPRHlTSnVDRlpnZ0VVWkxZd3oKWDJRSm4xZGRSV1Z6S3hKczVJbDNXSERqL3dXZWxnaEJSOGtSZEZOM0Fv
R0FJNldDdjJQQ1lUS1ZZNjAwOFYwbgpyTDQ3YTlPanZ0Yy81S2ZxSjFpMkpKTUgyQi9jbU1WRSs4M2dpODFIU1FqMWErNnBjektmQVppZWcwRk9nL015ClB6VlZRYmpKTnY0QzM5KzdxSDg1WGdZTXZhcTJ0aDFEZWUvQ3NsMlM4QlV0cW5mc0VuMUYwcWhlWUJZb2RibHAKV3RUaE5oRi9oRVhzbkJROURyWkJKT1U9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K --- # same secret for longhorn-system namespace apiVersion: v1 @@ -22,7 +22,7 @@ data: AWS_ACCESS_KEY_ID: bG9uZ2hvcm4tdGVzdC1hY2Nlc3Mta2V5 # longhorn-test-access-key AWS_SECRET_ACCESS_KEY: bG9uZ2hvcm4tdGVzdC1zZWNyZXQta2V5 # longhorn-test-secret-key AWS_ENDPOINTS: aHR0cHM6Ly9taW5pby1zZXJ2aWNlLmRlZmF1bHQ6OTAwMA== # https://minio-service.default:9000 - AWS_CERT: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUREekNDQWZlZ0F3SUJBZ0lSQU91d1oybTZ6SXl3c1h2a2UyNS9LYzB3RFFZSktvWklodmNOQVFFTEJRQXcKRWpFUU1BNEdBMVVFQ2hNSFFXTnRaU0JEYnpBZUZ3MHlNREEwTWpVd01qRTJNalphRncweU1UQTBNalV3TWpFMgpNalphTUJJeEVEQU9CZ05WQkFvVEIwRmpiV1VnUTI4d2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3CmdnRUtBb0lCQVFEWkpyWVUraVJhc1huSExvb1d0Tm9OQWpxN0U3YWNlQTJQZnQ1ZFM3aExzVUtCbExMOVVQMmUKZ0QrVFl3RmtCWVJNU3BsV0tNT0tuWEErNDZSVkRwSkhwSTF4YjhHNDV0L3gzVXhVaWc2WUFVbDBnTFV6N01rMQpYSUtRaWZZUllaL0FjUzJqU0VOYjRISFJ1aFZ5NzV0ZDdCaXNhd2J2TTJwTXI0dWNSR1lwZ3J6Z2V2eFBXSHZ1CnkxT29yRnIvNjFwV28wcG9aSXhtRmM2YXMzekw0NWlrRzRHN1A2ejJPamc4NGdrdnR4RFUzYVdmWXRNb3VhL3gKQVhkRlRCd2NqMkNHMHJtdmd4cE5KeEx5Kzl5NDVLVGU1SFlSd0xxUjVCeWtnVGt2RGplcWdXTnJyQWdCL3lLTApwU1ZjRmZkKzBWNjhyQmtNMEt3VlQ3bXF2WWRsZDVrTkFnTUJBQUdqWURCZU1BNEdBMVVkRHdFQi93UUVBd0lDCnBEQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQ1lHQTFVZEVRUWYKTUIyQ0ZXMXBibWx2TFhObGNuWnBZMlV1WkdWbVlYVnNkSWNFZndBQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQwpBUUVBdDBQYjM5QWliS0EyU1BPTHJHSmVRVlNaVTdZbFUyU0h0M2lhcFVBS1Rtb2o1RTQrTU8yV2NnbktoRktrCnNxeW9CYjBPYTNPMHdXUnRvVnhNUGdPQS9FaXNtZExQWmJIVTUzS2w3SDVWMm8rb0tQY0YydTk2ajdlcUZxSkUKMlltQkpBTHlUVks5LzZhS1hOSnRYZE5xRmpPMWJRcDJRTURXbjQyZGgyNjJLZmgvekM4enRyK0h4RzhuTVpQQwpsZUpxbzU3S0tkZ0YvZHVBWTdUaUI2cThTelE4RmViMklQQ2FhdVVLNzdBZ0d5b3kzK1JuWkdZV2U1MG1KVnN6CmdkQTFURmg0TVdMeUxWSFdIbnl2c
EFvTjJIUjQrdzhYRkpJS2VRRFM1YklJM1pFeU5OQUZNRDg0bTVReGY4cjUKMEovQWhXTVVyMFUwcCtiRi9KM3FDQVNSK3c9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + AWS_CERT: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMRENDQWhTZ0F3SUJBZ0lSQU1kbzQycGhUZXlrMTcvYkxyWjVZRHN3RFFZSktvWklodmNOQVFFTEJRQXcKR2pFWU1CWUdBMVVFQ2hNUFRHOXVaMmh2Y200Z0xTQlVaWE4wTUNBWERUSXdNRFF5TnpJek1EQXhNVm9ZRHpJeApNakF3TkRBek1qTXdNREV4V2pBYU1SZ3dGZ1lEVlFRS0V3OU1iMjVuYUc5eWJpQXRJRlJsYzNRd2dnRWlNQTBHCkNTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEWHpVdXJnUFpEZ3pUM0RZdWFlYmdld3Fvd2RlQUQKODRWWWF6ZlN1USs3K21Oa2lpUVBvelVVMmZvUWFGL1BxekJiUW1lZ29hT3l5NVhqM1VFeG1GcmV0eDBaRjVOVgpKTi85ZWFJNWRXRk9teHhpMElPUGI2T0RpbE1qcXVEbUVPSXljdjRTaCsvSWo5Zk1nS0tXUDdJZGxDNUJPeThkCncwOVdkckxxaE9WY3BKamNxYjN6K3hISHd5Q05YeGhoRm9tb2xQVnpJbnlUUEJTZkRuSDBuS0lHUXl2bGhCMGsKVHBHSzYxc2prZnFTK3hpNTlJeHVrbHZIRXNQcjFXblRzYU9oaVh6N3lQSlorcTNBMWZoVzBVa1JaRFlnWnNFbQovZ05KM3JwOFhZdURna2kzZ0UrOElXQWRBWHExeWhqRDdSSkI4VFNJYTV0SGpKUUtqZ0NlSG5HekFnTUJBQUdqCmF6QnBNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUIKQWY4RUJUQURBUUgvTURFR0ExVWRFUVFxTUNpQ0NXeHZZMkZzYUc5emRJSVZiV2x1YVc4dGMyVnlkbWxqWlM1awpaV1poZFd4MGh3Ui9BQUFCTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDbUZMMzlNSHVZMzFhMTFEajRwMjVjCnFQRUM0RHZJUWozTk9kU0dWMmQrZjZzZ3pGejFXTDhWcnF2QjFCMVM2cjRKYjJQRXVJQkQ4NFlwVXJIT1JNU2MKd3ViTEppSEtEa0Jmb2U5QWI1cC9VakpyS0tuajM0RGx2c1cvR3AwWTZYc1BWaVdpVWorb1JLbUdWSTI0Q0JIdgpnK0JtVzNDeU5RR1RLajk0eE02czNBV2xHRW95YXFXUGU1eHllVWUzZjFBWkY5N3RDaklKUmVWbENtaENGK0JtCmFUY1RSUWN3cVdvQ3AwYmJZcHlERFlwUmxxOEdQbElFOW8yWjZBc05mTHJVcGFtZ3FYMmtYa2gxa3lzSlEralAKelFadHJSMG1tdHVyM0RuRW0yYmk0TktIQVFIcFc5TXUxNkdRakUxTmJYcVF0VEI4OGpLNzZjdEg5MzRDYWw2VgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== --- apiVersion: v1 kind: Pod diff --git a/deploy/backupstores/nfs-backupstore.yaml b/deploy/backupstores/nfs-backupstore.yaml index 7ef6e0e..af3aa39 100644 --- a/deploy/backupstores/nfs-backupstore.yaml +++ b/deploy/backupstores/nfs-backupstore.yaml @@ -20,6 +20,8 @@ spec: value: /opt/backupstore - 
name: PSEUDO_PATH value: /opt/backupstore + - name: NFS_DISK_IMAGE_SIZE_MB + value: "4096" command: ["bash", "-c", "chmod 700 /opt/backupstore && /opt/start_nfs.sh | tee /var/log/ganesha.log"] securityContext: privileged: true diff --git a/deploy/iscsi/longhorn-iscsi-installation.yaml b/deploy/iscsi/longhorn-iscsi-installation.yaml deleted file mode 100644 index 02d201c..0000000 --- a/deploy/iscsi/longhorn-iscsi-installation.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: longhorn-iscsi-installation - labels: - app: longhorn-iscsi-installation - annotations: - command: &cmd OS=$(grep "ID_LIKE" /etc/os-release | cut -d '=' -f 2); if [[ $OS == *"debian"* ]]; then apt-get update -qy && apt-get install -qy open-iscsi && sudo systemctl enable iscsid && sudo systemctl start iscsid; else yum install iscsi-initiator-utils -y && sudo systemctl enable iscsid && sudo systemctl start iscsid; fi && if [ $? -eq 0 ]; then echo "iscsi install successfully"; else echo "iscsi install failed error code " $?; fi -spec: - selector: - matchLabels: - app: longhorn-iscsi-installation - template: - metadata: - labels: - app: longhorn-iscsi-installation - spec: - hostNetwork: true - hostPID: true - initContainers: - - name: iscsi-installation - command: - - nsenter - - --mount=/proc/1/ns/mnt - - -- - - sh - - -c - - *cmd - image: alpine:3.7 - securityContext: - privileged: true - containers: - - name: sleep - image: k8s.gcr.io/pause:3.1 - updateStrategy: - type: RollingUpdate diff --git a/deploy/longhorn-images.txt b/deploy/longhorn-images.txt index 25bb23f..8a404d3 100644 --- a/deploy/longhorn-images.txt +++ b/deploy/longhorn-images.txt @@ -1,10 +1,11 @@ -longhornio/longhorn-engine:v1.1.0 -longhornio/longhorn-instance-manager:v1_20201216 -longhornio/longhorn-share-manager:v1_20201204 -longhornio/longhorn-manager:v1.1.0 -longhornio/longhorn-ui:v1.1.0 longhornio/csi-attacher:v2.2.1-lh1 +longhornio/csi-node-driver-registrar:v1.2.0-lh1 
longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 longhornio/csi-snapshotter:v2.1.1-lh1 -longhornio/csi-node-driver-registrar:v1.2.0-lh1 +longhornio/backing-image-manager:v1_20210422 +longhornio/longhorn-engine:v1.1.1 +longhornio/longhorn-instance-manager:v1_20201216 +longhornio/longhorn-manager:v1.1.1 +longhornio/longhorn-share-manager:v1_20210416 +longhornio/longhorn-ui:v1.1.1 diff --git a/deploy/longhorn.yaml b/deploy/longhorn.yaml index 2a112c1..359beb7 100644 --- a/deploy/longhorn.yaml +++ b/deploy/longhorn.yaml @@ -47,7 +47,8 @@ rules: - apiGroups: ["longhorn.io"] resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status", - "sharemanagers", "sharemanagers/status"] + "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", + "backingimagemanagers", "backingimagemanagers/status"] verbs: ["*"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] @@ -450,6 +451,100 @@ spec: type: date jsonPath: .metadata.creationTimestamp --- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + longhorn-manager: BackingImage + name: backingimages.longhorn.io +spec: + group: longhorn.io + names: + kind: BackingImage + listKind: BackingImageList + plural: backingimages + shortNames: + - lhbi + singular: backingimage + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: Image + type: string + description: The backing image name + jsonPath: .spec.image + - name: Age + type: date + jsonPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: 
CustomResourceDefinition +metadata: + labels: + longhorn-manager: BackingImageManager + name: backingimagemanagers.longhorn.io +spec: + group: longhorn.io + names: + kind: BackingImageManager + listKind: BackingImageManagerList + plural: backingimagemanagers + shortNames: + - lhbim + singular: backingimagemanager + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: The current state of the manager + jsonPath: .status.currentState + - name: Image + type: string + description: The image the manager pod will use + jsonPath: .spec.image + - name: Node + type: string + description: The node the manager is on + jsonPath: .spec.nodeID + - name: DiskUUID + type: string + description: The disk the manager is responsible for + jsonPath: .spec.diskUUID + - name: DiskPath + type: string + description: The disk path the manager is using + jsonPath: .spec.diskPath + - name: Age + type: date + jsonPath: .metadata.creationTimestamp +--- apiVersion: v1 kind: ConfigMap metadata: @@ -472,6 +567,7 @@ data: default-longhorn-static-storage-class: backupstore-poll-interval: taint-toleration: + system-managed-components-node-selector: priority-class: auto-salvage: auto-delete-pod-when-volume-detached-unexpectedly: @@ -487,6 +583,11 @@ data: system-managed-pods-image-pull-policy: allow-volume-creation-with-degraded-availability: auto-cleanup-system-generated-snapshot: + concurrent-automatic-engine-upgrade-per-node-limit: + backing-image-cleanup-wait-interval: + guaranteed-engine-manager-cpu: + guaranteed-replica-manager-cpu: + --- apiVersion: policy/v1beta1 kind: PodSecurityPolicy @@ -569,6 +670,8 @@ data: numberOfReplicas: "3" staleReplicaTimeout: "2880" fromBackup: "" + # backingImage: "bi-test" 
+ # backingImageURL: "https://backing-image-example.s3-region.amazonaws.com/test-backing-image" # diskSelector: "ssd,fast" # nodeSelector: "storage,fast" # recurringJobs: '[{"name":"snap", "task":"snapshot", "cron":"*/1 * * * *", "retain":1}, @@ -593,7 +696,7 @@ spec: spec: containers: - name: longhorn-manager - image: longhornio/longhorn-manager:v1.1.0 + image: longhornio/longhorn-manager:v1.1.1 imagePullPolicy: IfNotPresent securityContext: privileged: true @@ -602,13 +705,15 @@ spec: - -d - daemon - --engine-image - - longhornio/longhorn-engine:v1.1.0 + - longhornio/longhorn-engine:v1.1.1 - --instance-manager-image - longhornio/longhorn-instance-manager:v1_20201216 - --share-manager-image - - longhornio/longhorn-share-manager:v1_20201204 + - longhornio/longhorn-share-manager:v1_20210416 + - --backing-image-manager-image + - longhornio/backing-image-manager:v1_20210422 - --manager-image - - longhornio/longhorn-manager:v1.1.0 + - longhornio/longhorn-manager:v1.1.1 - --service-account - longhorn-service-account ports: @@ -658,6 +763,15 @@ spec: name: longhorn-default-setting # imagePullSecrets: # - name: "" +# priorityClassName: +# tolerations: +# - key: "key" +# operator: "Equal" +# value: "value" +# effect: "NoSchedule" +# nodeSelector: +# label-key1: "label-value1" +# label-key2: "label-value2" serviceAccountName: longhorn-service-account updateStrategy: rollingUpdate: @@ -699,7 +813,7 @@ spec: spec: containers: - name: longhorn-ui - image: longhornio/longhorn-ui:v1.1.0 + image: longhornio/longhorn-ui:v1.1.1 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -710,7 +824,16 @@ spec: - name: LONGHORN_MANAGER_IP value: "http://longhorn-backend:9500" # imagePullSecrets: -# - name: +# - name: "" +# priorityClassName: +# tolerations: +# - key: "key" +# operator: "Equal" +# value: "value" +# effect: "NoSchedule" +# nodeSelector: +# label-key1: "label-value1" +# label-key2: "label-value2" --- kind: Service apiVersion: v1 @@ -746,18 +869,18 @@ spec: spec: 
initContainers: - name: wait-longhorn-manager - image: longhornio/longhorn-manager:v1.1.0 + image: longhornio/longhorn-manager:v1.1.1 command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] containers: - name: longhorn-driver-deployer - image: longhornio/longhorn-manager:v1.1.0 + image: longhornio/longhorn-manager:v1.1.1 imagePullPolicy: IfNotPresent command: - longhorn-manager - -d - deploy-driver - --manager-image - - longhornio/longhorn-manager:v1.1.0 + - longhornio/longhorn-manager:v1.1.1 - --manager-url - http://longhorn-backend:9500/v1 env: @@ -777,7 +900,7 @@ spec: #- name: KUBELET_ROOT_DIR # value: /var/lib/rancher/k3s/agent/kubelet # For AirGap Installation - # Replace PREFIX with your private registery + # Replace PREFIX with your private registry #- name: CSI_ATTACHER_IMAGE # value: PREFIX/csi-attacher:v2.2.1-lh1 #- name: CSI_PROVISIONER_IMAGE @@ -798,8 +921,17 @@ spec: # value: "3" #- name: CSI_SNAPSHOTTER_REPLICA_COUNT # value: "3" - #imagePullSecrets: - #- name: +# imagePullSecrets: +# - name: "" +# priorityClassName: +# tolerations: +# - key: "key" +# operator: "Equal" +# value: "value" +# effect: "NoSchedule" +# nodeSelector: +# label-key1: "label-value1" +# label-key2: "label-value2" serviceAccountName: longhorn-service-account securityContext: runAsUser: 0 diff --git a/deploy/prerequisite/longhorn-iscsi-installation.yaml b/deploy/prerequisite/longhorn-iscsi-installation.yaml new file mode 100644 index 0000000..ef29f51 --- /dev/null +++ b/deploy/prerequisite/longhorn-iscsi-installation.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: longhorn-iscsi-installation + labels: + app: longhorn-iscsi-installation + annotations: + command: &cmd OS=$(grep "ID_LIKE" /etc/os-release | cut -d '=' -f 2); if [[ "${OS}" == *"debian"* ]]; then sudo apt-get update -q -y && sudo apt-get install -q -y open-iscsi && sudo systemctl -q enable 
iscsid && sudo systemctl start iscsid; elif [[ "${OS}" == *"suse"* ]]; then sudo zypper --gpg-auto-import-keys -q refresh && sudo zypper --gpg-auto-import-keys -q install -y open-iscsi && sudo systemctl -q enable iscsid && sudo systemctl start iscsid; else sudo yum makecache -q -y && sudo yum --setopt=tsflags=noscripts install -q -y iscsi-initiator-utils && echo "InitiatorName=$(/sbin/iscsi-iname)" > /etc/iscsi/initiatorname.iscsi && sudo systemctl -q enable iscsid && sudo systemctl start iscsid; fi && if [ $? -eq 0 ]; then echo "iscsi install successfully"; else echo "iscsi install failed error code $?"; fi +spec: + selector: + matchLabels: + app: longhorn-iscsi-installation + template: + metadata: + labels: + app: longhorn-iscsi-installation + spec: + hostNetwork: true + hostPID: true + initContainers: + - name: iscsi-installation + command: + - nsenter + - --mount=/proc/1/ns/mnt + - -- + - bash + - -c + - *cmd + image: alpine:3.12 + securityContext: + privileged: true + containers: + - name: sleep + image: k8s.gcr.io/pause:3.1 + updateStrategy: + type: RollingUpdate diff --git a/deploy/prerequisite/longhorn-nfs-installation.yaml b/deploy/prerequisite/longhorn-nfs-installation.yaml new file mode 100644 index 0000000..7da810a --- /dev/null +++ b/deploy/prerequisite/longhorn-nfs-installation.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: longhorn-nfs-installation + labels: + app: longhorn-nfs-installation + annotations: + command: &cmd OS=$(grep "ID_LIKE" /etc/os-release | cut -d '=' -f 2); if [[ "${OS}" == *"debian"* ]]; then sudo apt-get update -q -y && sudo apt-get install -q -y nfs-common; elif [[ "${OS}" == *"suse"* ]]; then sudo zypper --gpg-auto-import-keys -q refresh && sudo zypper --gpg-auto-import-keys -q install -y nfs-client; else sudo yum makecache -q -y && sudo yum --setopt=tsflags=noscripts install -q -y nfs-utils; fi && if [ $? 
-eq 0 ]; then echo "nfs install successfully"; else echo "nfs install failed error code $?"; fi +spec: + selector: + matchLabels: + app: longhorn-nfs-installation + template: + metadata: + labels: + app: longhorn-nfs-installation + spec: + hostNetwork: true + hostPID: true + initContainers: + - name: nfs-installation + command: + - nsenter + - --mount=/proc/1/ns/mnt + - -- + - bash + - -c + - *cmd + image: alpine:3.12 + securityContext: + privileged: true + containers: + - name: sleep + image: k8s.gcr.io/pause:3.1 + updateStrategy: + type: RollingUpdate diff --git a/deploy/release-images.txt b/deploy/release-images.txt index 25bb23f..8a404d3 100644 --- a/deploy/release-images.txt +++ b/deploy/release-images.txt @@ -1,10 +1,11 @@ -longhornio/longhorn-engine:v1.1.0 -longhornio/longhorn-instance-manager:v1_20201216 -longhornio/longhorn-share-manager:v1_20201204 -longhornio/longhorn-manager:v1.1.0 -longhornio/longhorn-ui:v1.1.0 longhornio/csi-attacher:v2.2.1-lh1 +longhornio/csi-node-driver-registrar:v1.2.0-lh1 longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 longhornio/csi-snapshotter:v2.1.1-lh1 -longhornio/csi-node-driver-registrar:v1.2.0-lh1 +longhornio/backing-image-manager:v1_20210422 +longhornio/longhorn-engine:v1.1.1 +longhornio/longhorn-instance-manager:v1_20201216 +longhornio/longhorn-manager:v1.1.1 +longhornio/longhorn-share-manager:v1_20210416 +longhornio/longhorn-ui:v1.1.1 diff --git a/examples/rwx/rwx-nginx-deployment.yaml b/examples/rwx/rwx-nginx-deployment.yaml new file mode 100644 index 0000000..109fec5 --- /dev/null +++ b/examples/rwx/rwx-nginx-deployment.yaml @@ -0,0 +1,70 @@ +apiVersion: v1 +kind: Service +metadata: + name: rwx-test + labels: + app: rwx-test +spec: + ports: + - port: 80 + selector: + app: rwx-test +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: rwx-test + namespace: default +spec: + accessModes: + - ReadWriteMany + storageClassName: longhorn + resources: + requests: + storage: 1Gi +--- 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: rwx-test + labels: + app: rwx-test + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + app: rwx-test + strategy: + type: Recreate + template: + metadata: + labels: + app: rwx-test + spec: + containers: + - image: ubuntu:xenial + imagePullPolicy: IfNotPresent + command: [ "/bin/sh", "-c" ] + args: + - sleep 10; touch /data/index.html; while true; do date >> /data/index.html; sleep 1; done; + name: rwx-test + stdin: true + tty: true + volumeMounts: + - mountPath: /data + name: rwx-test + - image: nginx:stable + imagePullPolicy: IfNotPresent + name: nginx + ports: + - containerPort: 80 + name: http + volumeMounts: + - mountPath: /usr/share/nginx/html + name: rwx-test + restartPolicy: Always + volumes: + - name: rwx-test + persistentVolumeClaim: + claimName: rwx-test diff --git a/examples/rwx/storageclass-migratable.yaml b/examples/rwx/storageclass-migratable.yaml new file mode 100644 index 0000000..271a876 --- /dev/null +++ b/examples/rwx/storageclass-migratable.yaml @@ -0,0 +1,12 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: longhorn-migratable +provisioner: driver.longhorn.io +allowVolumeExpansion: true +parameters: + numberOfReplicas: "3" + staleReplicaTimeout: "2880" # 48 hours in minutes + fromBackup: "" + migratable: "true" + share: "false" diff --git a/examples/storageclass.yaml b/examples/storageclass.yaml index 45ddc84..96c06d9 100644 --- a/examples/storageclass.yaml +++ b/examples/storageclass.yaml @@ -1,15 +1,26 @@ -kind: StorageClass -apiVersion: storage.k8s.io/v1 +apiVersion: v1 +kind: ConfigMap metadata: - name: longhorn -provisioner: driver.longhorn.io -allowVolumeExpansion: true -parameters: - numberOfReplicas: "3" - staleReplicaTimeout: "2880" # 48 hours in minutes - fromBackup: "" -# diskSelector: "ssd,fast" -# nodeSelector: "storage,fast" -# recurringJobs: '[{"name":"snap", "task":"snapshot", "cron":"*/1 * * * *", "retain":1}, -# {"name":"backup", 
"task":"backup", "cron":"*/2 * * * *", "retain":1, -# "labels": {"interval":"2m"}}]' + name: longhorn-storageclass + namespace: longhorn-system +data: + storageclass.yaml: | + kind: StorageClass + apiVersion: storage.k8s.io/v1 + metadata: + name: longhorn + provisioner: driver.longhorn.io + allowVolumeExpansion: true + reclaimPolicy: Delete + volumeBindingMode: Immediate + parameters: + numberOfReplicas: "2" + staleReplicaTimeout: "2880" + fromBackup: "" + # backingImage: "bi-test" + # backingImageURL: "https://backing-image-example.s3-region.amazonaws.com/test-backing-image" + # diskSelector: "ssd,fast" + # nodeSelector: "storage,fast" + # recurringJobs: '[{"name":"snap", "task":"snapshot", "cron":"*/1 * * * *", "retain":1}, + # {"name":"backup", "task":"backup", "cron":"*/2 * * * *", "retain":1, + # "labels": {"interval":"2m"}}]' diff --git a/uninstall/uninstall.yaml b/uninstall/uninstall.yaml index b9d1f20..d92ee58 100644 --- a/uninstall/uninstall.yaml +++ b/uninstall/uninstall.yaml @@ -1,3 +1,33 @@ +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: longhorn-uninstall-psp +spec: + privileged: true + allowPrivilegeEscalation: true + requiredDropCapabilities: + - NET_RAW + allowedCapabilities: + - SYS_ADMIN + hostNetwork: false + hostIPC: false + hostPID: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + fsGroup: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - configMap + - downwardAPI + - emptyDir + - secret + - projected + - hostPath +--- apiVersion: v1 kind: ServiceAccount metadata: @@ -34,11 +64,15 @@ rules: resources: ["csidrivers", "storageclasses"] verbs: ["*"] - apiGroups: ["longhorn.io"] - resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers", "sharemanagers"] + resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers", "sharemanagers", "backingimages", "backingimagemanagers"] verbs: ["*"] - apiGroups: 
["coordination.k8s.io"] resources: ["leases"] verbs: ["*"] + - apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: ["longhorn-uninstall-psp"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -67,8 +101,10 @@ spec: spec: containers: - name: longhorn-uninstall - image: longhornio/longhorn-manager:v1.1.0 + image: longhornio/longhorn-manager:v1.1.1 imagePullPolicy: Always + securityContext: + privileged: true command: - longhorn-manager - uninstall @@ -78,3 +114,14 @@ spec: value: longhorn-system restartPolicy: OnFailure serviceAccountName: longhorn-uninstall-service-account +# imagePullSecrets: +# - name: "" +# priorityClassName: +# tolerations: +# - key: "key" +# operator: "Equal" +# value: "value" +# effect: "NoSchedule" +# nodeSelector: +# label-key1: "label-value1" +# label-key2: "label-value2" \ No newline at end of file