From 9c3ae72954c02c8e9dc1396e1efb04d42ac1e346 Mon Sep 17 00:00:00 2001 From: Phan Le Date: Fri, 23 Oct 2020 16:39:05 -0700 Subject: [PATCH 01/33] Update Helm chart templates for Longhorn v1.1.0 Longhorn #1906 Signed-off-by: Phan Le --- chart/templates/clusterrole.yaml | 13 ++- chart/templates/crds.yaml | 128 +++++++++++++++++++++++++++ chart/templates/default-setting.yaml | 9 ++ chart/templates/storageclass.yaml | 41 +++++---- chart/values.yaml | 9 ++ 5 files changed, 182 insertions(+), 18 deletions(-) diff --git a/chart/templates/clusterrole.yaml b/chart/templates/clusterrole.yaml index 34a46d9..cb594de 100644 --- a/chart/templates/clusterrole.yaml +++ b/chart/templates/clusterrole.yaml @@ -22,16 +22,25 @@ rules: - apiGroups: ["batch"] resources: ["jobs", "cronjobs"] verbs: ["*"] +- apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["*"] - apiGroups: ["scheduling.k8s.io"] resources: ["priorityclasses"] verbs: ["watch", "list"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses", "volumeattachments", "csinodes", "csidrivers"] verbs: ["*"] -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses", "volumesnapshots", "volumesnapshotcontents", "volumesnapshotcontents/status"] verbs: ["*"] - apiGroups: ["longhorn.io"] resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status"] verbs: ["*"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["*"] +- apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list"] \ No newline at end of file diff --git a/chart/templates/crds.yaml b/chart/templates/crds.yaml index 61544aa..62949ae 100644 --- a/chart/templates/crds.yaml +++ b/chart/templates/crds.yaml @@ -17,6 +17,26 @@ spec: singular: engine scope: Namespaced 
version: v1beta1 + additionalPrinterColumns: + - name: State + type: string + description: The current state of the engine + JSONPath: .status.currentState + - name: Node + type: string + description: The node that the engine is on + JSONPath: .spec.nodeID + - name: InstanceManager + type: string + description: The instance manager of the engine + JSONPath: .status.instanceManagerName + - name: Image + type: string + description: The current image of the engine + JSONPath: .status.currentImage + - name: Age + type: date + JSONPath: .metadata.creationTimestamp subresources: status: {} --- @@ -39,6 +59,30 @@ spec: singular: replica scope: Namespaced version: v1beta1 + additionalPrinterColumns: + - name: State + type: string + description: The current state of the replica + JSONPath: .status.currentState + - name: Node + type: string + description: The node that the replica is on + JSONPath: .spec.nodeID + - name: Disk + type: string + description: The disk that the replica is on + JSONPath: .spec.diskID + - name: InstanceManager + type: string + description: The instance manager of the replica + JSONPath: .status.instanceManagerName + - name: Image + type: string + description: The current image of the replica + JSONPath: .status.currentImage + - name: Age + type: date + JSONPath: .metadata.creationTimestamp subresources: status: {} --- @@ -61,6 +105,14 @@ spec: singular: setting scope: Namespaced version: v1beta1 + additionalPrinterColumns: + - name: Value + type: string + description: The value of the setting + JSONPath: .value + - name: Age + type: date + JSONPath: .metadata.creationTimestamp --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition @@ -81,6 +133,30 @@ spec: singular: volume scope: Namespaced version: v1beta1 + additionalPrinterColumns: + - name: State + type: string + description: The state of the volume + JSONPath: .status.state + - name: Robustness + type: string + description: The robustness of the volume + JSONPath: 
.status.robustness + - name: Scheduled + type: string + description: The scheduled condition of the volume + JSONPath: .status.conditions['scheduled']['status'] + - name: Size + type: string + description: The size of the volume + JSONPath: .spec.size + - name: Node + type: string + description: The node that the volume is currently attaching to + JSONPath: .status.currentNodeID + - name: Age + type: date + JSONPath: .metadata.creationTimestamp subresources: status: {} --- @@ -103,6 +179,26 @@ spec: singular: engineimage scope: Namespaced version: v1beta1 + additionalPrinterColumns: + - name: State + type: string + description: State of the engine image + JSONPath: .status.state + - name: Image + type: string + description: The Longhorn engine image + JSONPath: .spec.image + - name: RefCount + type: integer + description: Number of volumes are using the engine image + JSONPath: .status.refCount + - name: BuildDate + type: date + description: The build date of the engine image + JSONPath: .status.buildDate + - name: Age + type: date + JSONPath: .metadata.creationTimestamp subresources: status: {} --- @@ -125,6 +221,22 @@ spec: singular: node scope: Namespaced version: v1beta1 + additionalPrinterColumns: + - name: Ready + type: string + description: Indicate whether the node is ready + JSONPath: .status.conditions['Ready']['status'] + - name: AllowScheduling + type: boolean + description: Indicate whether the user disabled/enabled replica scheduling for the node + JSONPath: .spec.allowScheduling + - name: Schedulable + type: string + description: Indicate whether Longhorn can schedule replicas on the node + JSONPath: .status.conditions['Schedulable']['status'] + - name: Age + type: date + JSONPath: .metadata.creationTimestamp subresources: status: {} --- @@ -147,5 +259,21 @@ spec: singular: instancemanager scope: Namespaced version: v1beta1 + additionalPrinterColumns: + - name: State + type: string + description: The state of the instance manager + JSONPath: 
.status.currentState + - name: Type + type: string + description: The type of the instance manager (engine or replica) + JSONPath: .spec.type + - name: Node + type: string + description: The node that the instance manager is running on + JSONPath: .spec.nodeID + - name: Age + type: date + JSONPath: .metadata.creationTimestamp subresources: status: {} diff --git a/chart/templates/default-setting.yaml b/chart/templates/default-setting.yaml index 2eb10b4..610b646 100644 --- a/chart/templates/default-setting.yaml +++ b/chart/templates/default-setting.yaml @@ -8,6 +8,7 @@ data: default-setting.yaml: |- backup-target: {{ .Values.defaultSettings.backupTarget }} backup-target-credential-secret: {{ .Values.defaultSettings.backupTargetCredentialSecret }} + allow-recurring-job-while-volume-detached: {{ .Values.defaultSettings.allowRecurringJobWhileVolumeDetached }} create-default-disk-labeled-nodes: {{ .Values.defaultSettings.createDefaultDiskLabeledNodes }} default-data-path: {{ .Values.defaultSettings.defaultDataPath }} replica-soft-anti-affinity: {{ .Values.defaultSettings.replicaSoftAntiAffinity }} @@ -22,7 +23,15 @@ data: priority-class: {{ .Values.defaultSettings.priorityClass }} registry-secret: {{ .Values.defaultSettings.registrySecret }} auto-salvage: {{ .Values.defaultSettings.autoSalvage }} + auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }} disable-scheduling-on-cordoned-node: {{ .Values.defaultSettings.disableSchedulingOnCordonedNode }} replica-zone-soft-anti-affinity: {{ .Values.defaultSettings.replicaZoneSoftAntiAffinity }} volume-attachment-recovery-policy: {{ .Values.defaultSettings.volumeAttachmentRecoveryPolicy }} + node-down-pod-deletion-policy: {{ .Values.defaultSettings.nodeDownPodDeletionPolicy }} + allow-node-drain-with-last-healthy-replica: {{ .Values.defaultSettings.allowNodeDrainWithLastHealthyReplica }} mkfs-ext4-parameters: {{ .Values.defaultSettings.mkfsExt4Parameters }} + 
disable-replica-rebuild: {{ .Values.defaultSettings.disableReplicaRebuild }} + disable-revision-counter: {{ .Values.defaultSettings.disableRevisionCounter }} + system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }} + allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }} + auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }} \ No newline at end of file diff --git a/chart/templates/storageclass.yaml b/chart/templates/storageclass.yaml index 6a80d00..72a75cb 100644 --- a/chart/templates/storageclass.yaml +++ b/chart/templates/storageclass.yaml @@ -1,18 +1,27 @@ -kind: StorageClass -apiVersion: storage.k8s.io/v1 +apiVersion: v1 +kind: ConfigMap metadata: - name: longhorn - annotations: - storageclass.kubernetes.io/is-default-class: {{ .Values.persistence.defaultClass | quote }} + name: longhorn-storageclass + namespace: {{ .Release.Namespace }} labels: {{- include "longhorn.labels" . | nindent 4 }} -provisioner: driver.longhorn.io -allowVolumeExpansion: true -reclaimPolicy: "{{ .Values.persistence.reclaimPolicy }}" -parameters: - numberOfReplicas: "{{ .Values.persistence.defaultClassReplicaCount }}" - staleReplicaTimeout: "30" - fromBackup: "" - baseImage: "" - {{- if .Values.recurringJobs.enable }} - recurringJobs: '{{ .Values.recurringJobs.jobsList | toPrettyJson | indent 2 | trim }}' - {{- end }} +data: + storageclass.yaml: | + kind: StorageClass + apiVersion: storage.k8s.io/v1 + metadata: + name: longhorn + annotations: + storageclass.kubernetes.io/is-default-class: {{ .Values.persistence.defaultClass | quote }} + labels: {{- include "longhorn.labels" . 
| nindent 4 }} + provisioner: driver.longhorn.io + allowVolumeExpansion: true + reclaimPolicy: "{{ .Values.persistence.reclaimPolicy }}" + volumeBindingMode: Immediate + parameters: + numberOfReplicas: "{{ .Values.persistence.defaultClassReplicaCount }}" + staleReplicaTimeout: "30" + fromBackup: "" + baseImage: "" + {{- if .Values.recurringJobs.enable }} + recurringJobs: '{{ .Values.recurringJobs.jobsList | toPrettyJson | indent 2 | trim }}' + {{- end }} diff --git a/chart/values.yaml b/chart/values.yaml index ea46554..1acfd43 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -68,6 +68,7 @@ csi: defaultSettings: backupTarget: ~ backupTargetCredentialSecret: ~ + allowRecurringJobWhileVolumeDetached: ~ createDefaultDiskLabeledNodes: ~ defaultDataPath: ~ replicaSoftAntiAffinity: ~ @@ -82,10 +83,18 @@ defaultSettings: priorityClass: ~ registrySecret: ~ autoSalvage: ~ + autoDeletePodWhenVolumeDetachedUnexpectedly: ~ disableSchedulingOnCordonedNode: ~ replicaZoneSoftAntiAffinity: ~ volumeAttachmentRecoveryPolicy: ~ + nodeDownPodDeletionPolicy: ~ + allowNodeDrainWithLastHealthyReplica: ~ mkfsExt4Parameters: ~ + disableReplicaRebuild: ~ + disableRevisionCounter: ~ + systemManagedPodsImagePullPolicy: ~ + allowVolumeCreationWithDegradedAvailability: ~ + autoCleanupSystemGeneratedSnapshot: ~ privateRegistry: registryUrl: ~ From b79b914967a84d66c5bb141e8056cd638d52ace2 Mon Sep 17 00:00:00 2001 From: Phan Le Date: Fri, 20 Nov 2020 22:38:04 -0800 Subject: [PATCH 02/33] =?UTF-8?q?Move=20registry=20secret=20setting=20to?= =?UTF-8?q?=20outside=20of=C2=A0=20default=20setting?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To avoid confusing user, we move the registry secret setting outside of default setting section. 
Now, user will set registry secret setting in privateRegistry.registrySecret Longhorn #1670 Signed-off-by: Phan Le --- chart/questions.yml | 12 ++++++------ chart/templates/daemonset-sa.yaml | 4 ++-- chart/templates/default-setting.yaml | 1 - chart/templates/deployment-driver.yaml | 4 ++-- chart/templates/deployment-ui.yaml | 4 ++-- chart/templates/postupgrade-job.yaml | 6 +++--- chart/templates/registry-secret.yml | 6 +++--- chart/templates/uninstall-job.yaml | 6 +++--- chart/values.yaml | 2 +- 9 files changed, 22 insertions(+), 23 deletions(-) diff --git a/chart/questions.yml b/chart/questions.yml index cf95ba5..aec4c38 100644 --- a/chart/questions.yml +++ b/chart/questions.yml @@ -130,6 +130,12 @@ questions: group: "Private Registry Settings" type: password default: "" +- variable: privateRegistry.registrySecret + label: Private registry secret name + description: "Longhorn will automatically generate a Kubernetes secret with this name and use it to pull images from your private registry." + group: "Private Registry Settings" + type: string + default: "" - variable: longhorn.default_setting default: "false" description: "Customize the default settings before installing Longhorn for the first time. This option will only work if the cluster hasn't installed Longhorn." @@ -138,12 +144,6 @@ questions: show_subquestion_if: true group: "Longhorn Default Settings" subquestions: - - variable: defaultSettings.registrySecret - label: Private registry secret - description: "The Kubernetes Secret name" - group: "Longhorn Default Settings" - type: string - default: "" - variable: csi.kubeletRootDir default: description: "Specify kubelet root-dir. Leave blank to autodetect." 
diff --git a/chart/templates/daemonset-sa.yaml b/chart/templates/daemonset-sa.yaml index 1870aa0..f6e323b 100644 --- a/chart/templates/daemonset-sa.yaml +++ b/chart/templates/daemonset-sa.yaml @@ -82,9 +82,9 @@ spec: - name: longhorn-default-setting configMap: name: longhorn-default-setting - {{- if .Values.defaultSettings.registrySecret }} + {{- if .Values.privateRegistry.registrySecret }} imagePullSecrets: - - name: {{ .Values.defaultSettings.registrySecret }} + - name: {{ .Values.privateRegistry.registrySecret }} {{- end }} serviceAccountName: longhorn-service-account updateStrategy: diff --git a/chart/templates/default-setting.yaml b/chart/templates/default-setting.yaml index 610b646..421e9bd 100644 --- a/chart/templates/default-setting.yaml +++ b/chart/templates/default-setting.yaml @@ -21,7 +21,6 @@ data: backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }} taint-toleration: {{ .Values.defaultSettings.taintToleration }} priority-class: {{ .Values.defaultSettings.priorityClass }} - registry-secret: {{ .Values.defaultSettings.registrySecret }} auto-salvage: {{ .Values.defaultSettings.autoSalvage }} auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }} disable-scheduling-on-cordoned-node: {{ .Values.defaultSettings.disableSchedulingOnCordonedNode }} diff --git a/chart/templates/deployment-driver.yaml b/chart/templates/deployment-driver.yaml index ec3a867..232358c 100644 --- a/chart/templates/deployment-driver.yaml +++ b/chart/templates/deployment-driver.yaml @@ -84,9 +84,9 @@ spec: value: {{ .Values.csi.snapshotterReplicaCount | quote }} {{- end }} - {{- if .Values.defaultSettings.registrySecret }} + {{- if .Values.privateRegistry.registrySecret }} imagePullSecrets: - - name: {{ .Values.defaultSettings.registrySecret }} + - name: {{ .Values.privateRegistry.registrySecret }} {{- end }} serviceAccountName: longhorn-service-account securityContext: diff --git 
a/chart/templates/deployment-ui.yaml b/chart/templates/deployment-ui.yaml index 1921dbd..027d54e 100644 --- a/chart/templates/deployment-ui.yaml +++ b/chart/templates/deployment-ui.yaml @@ -27,9 +27,9 @@ spec: env: - name: LONGHORN_MANAGER_IP value: "http://longhorn-backend:9500" - {{- if .Values.defaultSettings.registrySecret }} + {{- if .Values.privateRegistry.registrySecret }} imagePullSecrets: - - name: {{ .Values.defaultSettings.registrySecret }} + - name: {{ .Values.privateRegistry.registrySecret }} {{- end }} --- kind: Service diff --git a/chart/templates/postupgrade-job.yaml b/chart/templates/postupgrade-job.yaml index f493617..e3559eb 100644 --- a/chart/templates/postupgrade-job.yaml +++ b/chart/templates/postupgrade-job.yaml @@ -18,7 +18,7 @@ spec: containers: - name: longhorn-post-upgrade image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} - imagePullPolicy: Always + imagePullPolicy: IfNotPresent command: - longhorn-manager - post-upgrade @@ -28,8 +28,8 @@ spec: fieldRef: fieldPath: metadata.namespace restartPolicy: OnFailure - {{- if .Values.defaultSettings.registrySecret }} + {{- if .Values.privateRegistry.registrySecret }} imagePullSecrets: - - name: {{ .Values.defaultSettings.registrySecret }} + - name: {{ .Values.privateRegistry.registrySecret }} {{- end }} serviceAccountName: longhorn-service-account diff --git a/chart/templates/registry-secret.yml b/chart/templates/registry-secret.yml index eeb9a8f..7112c89 100644 --- a/chart/templates/registry-secret.yml +++ b/chart/templates/registry-secret.yml @@ -1,10 +1,10 @@ -{{- if .Values.defaultSettings.registrySecret }} +{{- if .Values.privateRegistry.registrySecret }} apiVersion: v1 kind: Secret metadata: - name: {{ .Values.defaultSettings.registrySecret }} + name: {{ .Values.privateRegistry.registrySecret }} labels: {{- include "longhorn.labels" . 
| nindent 4 }} type: kubernetes.io/dockerconfigjson data: .dockerconfigjson: {{ template "secret" . }} -{{- end }} +{{- end }} \ No newline at end of file diff --git a/chart/templates/uninstall-job.yaml b/chart/templates/uninstall-job.yaml index 4dde54b..6a36a16 100644 --- a/chart/templates/uninstall-job.yaml +++ b/chart/templates/uninstall-job.yaml @@ -18,7 +18,7 @@ spec: containers: - name: longhorn-uninstall image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} - imagePullPolicy: Always + imagePullPolicy: IfNotPresent command: - longhorn-manager - uninstall @@ -29,8 +29,8 @@ spec: fieldRef: fieldPath: metadata.namespace restartPolicy: OnFailure - {{- if .Values.defaultSettings.registrySecret }} + {{- if .Values.privateRegistry.registrySecret }} imagePullSecrets: - - name: {{ .Values.defaultSettings.registrySecret }} + - name: {{ .Values.privateRegistry.registrySecret }} {{- end }} serviceAccountName: longhorn-service-account diff --git a/chart/values.yaml b/chart/values.yaml index 1acfd43..0e1d685 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -81,7 +81,6 @@ defaultSettings: backupstorePollInterval: ~ taintToleration: ~ priorityClass: ~ - registrySecret: ~ autoSalvage: ~ autoDeletePodWhenVolumeDetachedUnexpectedly: ~ disableSchedulingOnCordonedNode: ~ @@ -100,6 +99,7 @@ privateRegistry: registryUrl: ~ registryUser: ~ registryPasswd: ~ + registrySecret: ~ resources: {} # We usually recommend not to specify default resources and to leave this as a conscious From d77dc2c35e6e686e41473f73dfa9c31a1a2693cb Mon Sep 17 00:00:00 2001 From: Shuo Wu Date: Wed, 11 Nov 2020 20:08:22 +0800 Subject: [PATCH 03/33] chart: Sync chart with the YAML files in longhorn-manager Longhorn #1818, #1813 Signed-off-by: Shuo Wu --- chart/questions.yml | 175 +++++++++++++++++++---- chart/templates/clusterrole.yaml | 2 +- chart/templates/crds.yaml | 202 ++++++++++++++++++--------- 
chart/templates/default-setting.yaml | 14 +- chart/templates/storageclass.yaml | 5 +- chart/values.yaml | 23 ++- 6 files changed, 303 insertions(+), 118 deletions(-) diff --git a/chart/questions.yml b/chart/questions.yml index aec4c38..2b423f7 100644 --- a/chart/questions.yml +++ b/chart/questions.yml @@ -65,7 +65,7 @@ questions: label: Longhorn CSI Attacher Image Repository group: "Longhorn CSI Driver Images" - variable: image.csi.attacher.tag - default: v2.0.0 + default: v2.2.1-lh1 description: "Specify CSI attacher image tag. Leave blank to autodetect." type: string label: Longhorn CSI Attacher Image Tag @@ -77,7 +77,7 @@ questions: label: Longhorn CSI Provisioner Image Repository group: "Longhorn CSI Driver Images" - variable: image.csi.provisioner.tag - default: v1.4.0 + default: v1.6.0-lh1 description: "Specify CSI provisioner image tag. Leave blank to autodetect." type: string label: Longhorn CSI Provisioner Image Tag @@ -89,7 +89,7 @@ questions: label: Longhorn CSI Node Driver Registrar Image Repository group: "Longhorn CSI Driver Images" - variable: image.csi.nodeDriverRegistrar.tag - default: v1.2.0 + default: v1.2.0-lh1 description: "Specify CSI Node Driver Registrar image tag. Leave blank to autodetect." type: string label: Longhorn CSI Node Driver Registrar Image Tag @@ -101,13 +101,19 @@ questions: label: Longhorn CSI Driver Resizer Image Repository group: "Longhorn CSI Driver Images" - variable: image.csi.resizer.tag - default: v0.3.0 + default: v0.5.1-lh1 description: "Specify CSI Driver Resizer image tag. Leave blank to autodetect." type: string label: Longhorn CSI Driver Resizer Image Tag group: "Longhorn CSI Driver Images" + - variable: image.csi.snapshotter.repository + default: longhornio/csi-snapshotter + description: "Specify CSI Driver Snapshotter image repository. Leave blank to autodetect." 
+ type: string + label: Longhorn CSI Driver Snapshotter Image Repository + group: "Longhorn CSI Driver Images" - variable: image.csi.snapshotter.tag - default: v2.1.1 + default: v2.1.1-lh1 description: "Specify CSI Driver Snapshotter image tag. Leave blank to autodetect." type: string label: Longhorn CSI Driver Snapshotter Image Tag @@ -182,28 +188,6 @@ questions: description: "Specify replica count of CSI Snapshotter. By default 3." label: Longhorn CSI Snapshotter replica count group: "Longhorn CSI Driver Settings" - - variable: persistence.defaultClass - default: "true" - description: "Set as default StorageClass" - group: "Longhorn CSI Driver Settings" - type: boolean - required: true - label: Default Storage Class - - variable: persistence.reclaimPolicy - default: "Delete" - description: "Define reclaim policy (Retain or Delete)" - group: "Longhorn CSI Driver Settings" - type: string - required: true - label: Storage Class Retain Policy - - variable: persistence.defaultClassReplicaCount - description: "Set replica count for default StorageClass" - group: "Longhorn CSI Driver Settings" - type: int - default: 3 - min: 1 - max: 10 - label: Default Storage Class Replica Count - variable: defaultSettings.backupTarget label: Backup Target description: "The endpoint used to access the backupstore. NFS and S3 are supported." @@ -216,6 +200,13 @@ questions: group: "Longhorn Default Settings" type: string default: + - variable: defaultSettings.allowRecurringJobWhileVolumeDetached + label: Allow Recurring Job While Volume Is Detached + description: 'If this setting is enabled, Longhorn will automatically attaches the volume and takes snapshot/backup when it is the time to do recurring snapshot/backup. +Note that the volume is not ready for workload during the period when the volume was automatically attached. Workload will have to wait until the recurring job finishes.' 
+ group: "Longhorn Default Settings" + type: boolean + default: "false" - variable: defaultSettings.createDefaultDiskLabeledNodes label: Create Default Disk on Labeled Nodes description: 'Create default Disk automatically only on Nodes with the label "node.longhorn.io/create-default-disk=true" if no other disks exist. If disabled, the default disk will be created on all new nodes when each node is first added.' @@ -228,6 +219,19 @@ questions: group: "Longhorn Default Settings" type: string default: "/var/lib/longhorn/" + - variable: defaultSettings.defaultDataLocality + label: Default Data Locality + description: 'We say a Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume. +This setting specifies the default data locality when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `dataLocality` in the StorageClass +The available modes are: +- **disabled**. This is the default option. There may or may not be a replica on the same node as the attached volume (workload) +- **best-effort**. This option instructs Longhorn to try to keep a replica on the same node as the attached volume (workload). Longhorn will not stop the volume, even if it cannot keep a replica local to the attached volume (workload) due to environment limitation, e.g. not enough disk space, incompatible disk tags, etc.' + group: "Longhorn Default Settings" + type: enum + options: + - "disabled" + - "best-effort" + default: "disabled" - variable: defaultSettings.replicaSoftAntiAffinity label: Replica Node Level Soft Anti-Affinity description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default false.' @@ -304,6 +308,14 @@ WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES.' 
group: "Longhorn Default Settings" type: boolean default: "true" + - variable: defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly + label: Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly + description: 'If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc...) when Longhorn volume is detached unexpectedly (e.g. during Kubernetes upgrade, Docker reboot, or network disconnect). By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount. +If disabled, Longhorn will not delete the workload pod that is managed by a controller. You will have to manually restart the pod to reattach and remount the volume. +**Note:** This setting does not apply to the workload pods that do not have a controller. Longhorn never deletes them.' + group: "Longhorn Default Settings" + type: boolean + default: "true" - variable: defaultSettings.disableSchedulingOnCordonedNode label: Disable Scheduling On Cordoned Node description: "Disable Longhorn manager to schedule replica on Kubernetes cordoned node. By default true." @@ -322,15 +334,118 @@ WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES.' group: "Longhorn Default Settings" type: enum options: - - "wait" - - "never" - - "immediate" + - "wait" + - "never" + - "immediate" default: "wait" + - variable: defaultSettings.nodeDownPodDeletionPolicy + label: Pod Deletion Policy When Node is Down + description: "Defines the Longhorn action when a Volume is stuck with a StatefulSet/Deployment Pod on a node that is down. +- **do-nothing** is the default Kubernetes behavior of never force deleting StatefulSet/Deployment terminating pods. Since the pod on the node that is down isn't removed, Longhorn volumes are stuck on nodes that are down. 
+- **delete-statefulset-pod** Longhorn will force delete StatefulSet terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods. +- **delete-deployment-pod** Longhorn will force delete Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods. +- **delete-both-statefulset-and-deployment-pod** Longhorn will force delete StatefulSet/Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods." + group: "Longhorn Default Settings" + type: enum + options: + - "do-nothing" + - "delete-statefulset-pod" + - "delete-deployment-pod" + - "delete-both-statefulset-and-deployment-pod" + default: "do-nothing" + - variable: defaultSettings.allowNodeDrainWithLastHealthyReplica + label: Allow Node Drain with the Last Healthy Replica + description: "By default, Longhorn will block `kubectl drain` action on a node if the node contains the last healthy replica of a volume. +If this setting is enabled, Longhorn will **not** block `kubectl drain` action on a node even if the node contains the last healthy replica of a volume." + group: "Longhorn Default Settings" + type: boolean + default: "false" - variable: defaultSettings.mkfsExt4Parameters label: Custom mkfs.ext4 parameters description: "Allows setting additional filesystem creation parameters for ext4. For older host kernels it might be necessary to disable the optional ext4 metadata_csum feature by specifying `-O ^64bit,^metadata_csum`." group: "Longhorn Default Settings" type: string + - variable: defaultSettings.disableReplicaRebuild + label: Disable Replica Rebuild + description: "This setting disable replica rebuild cross the whole cluster, eviction and data locality feature won't work if this setting is true. But doesn't have any impact to any current replica rebuild and restore disaster recovery volume." 
+ group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.replicaReplenishmentWaitInterval + label: Replica Replenishment Wait Interval + description: "In seconds. The interval determines how long Longhorn will wait at least in order to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume. +Warning: This option works only when there is a failed replica in the volume. And this option may block the rebuilding for a while in the case." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 600 + - variable: defaultSettings.disableRevisionCounter + label: Disable Revision Counter + description: "This setting is only for volumes created by UI. By default, this is false meaning there will be a reivision counter file to track every write to the volume. During salvage recovering Longhorn will pick the repica with largest reivision counter as candidate to recover the whole volume. If revision counter is disabled, Longhorn will not track every write to the volume. During the salvage recovering, Longhorn will use the 'volume-head-xxx.img' file last modification time and file size to pick the replica candidate to recover the whole volume." + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.systemManagedPodsImagePullPolicy + label: System Managed Pod Image Pull Policy + description: "This setting defines the Image Pull Policy of Longhorn system managed pods, e.g. instance manager, engine image, CSI driver, etc. The new Image Pull Policy will only apply after the system managed pods restart." 
+ group: "Longhorn Default Settings" + type: enum + options: + - "if-not-present" + - "always" + - "never" + default: "if-not-present" + - variable: defaultSettings.allowVolumeCreationWithDegradedAvailability + label: Allow Volume Creation with Degraded Availability + description: "This setting allows user to create and attach a volume that doesn't have all the replicas scheduled at the time of creation." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.autoCleanupSystemGeneratedSnapshot + label: Automatically Cleanup System Generated Snapshot + description: "This setting enables Longhorn to automatically cleanup the system generated snapshot after replica rebuild is done." + group: "Longhorn Default Settings" + type: boolean + default: "true" +- variable: persistence.defaultClass + default: "true" + description: "Set as default StorageClass for Longhorn" + label: Default Storage Class + group: "Longhorn Storage Class Settings" + required: true + type: boolean +- variable: persistence.reclaimPolicy + label: Storage Class Retain Policy + description: "Define reclaim policy (Retain or Delete)" + group: "Longhorn Storage Class Settings" + required: true + type: enum + options: + - "Delete" + - "Retain" + default: "Delete" +- variable: persistence.defaultClassReplicaCount + description: "Set replica count for Longhorn StorageClass" + label: Default Storage Class Replica Count + group: "Longhorn Storage Class Settings" + type: int + min: 1 + max: 10 + default: 3 +- variable: persistence.recurringJobs.enable + description: "Enable recurring job for Longhorn StorageClass" + group: "Longhorn Storage Class Settings" + label: Enable Storage Class Recurring Job + type: boolean + default: false + show_subquestion_if: true + subquestions: + - variable: persistence.recurringJobs.jobList + description: 'Recurring job list for Longhorn StorageClass. Please be careful of quotes of input. 
e.g., [{"name":"backup", "task":"backup", "cron":"*/2 * * * *", "retain":1,"labels": {"interval":"2m"}}]' + label: Storage Class Recurring Job List + group: "Longhorn Storage Class Settings" + type: string + default: - variable: ingress.enabled default: "false" description: "Expose app using Layer 7 Load Balancer - ingress" diff --git a/chart/templates/clusterrole.yaml b/chart/templates/clusterrole.yaml index cb594de..f6fafbe 100644 --- a/chart/templates/clusterrole.yaml +++ b/chart/templates/clusterrole.yaml @@ -43,4 +43,4 @@ rules: verbs: ["*"] - apiGroups: ["metrics.k8s.io"] resources: ["pods", "nodes"] - verbs: ["get", "list"] \ No newline at end of file + verbs: ["get", "list"] diff --git a/chart/templates/crds.yaml b/chart/templates/crds.yaml index 62949ae..9fd23c1 100644 --- a/chart/templates/crds.yaml +++ b/chart/templates/crds.yaml @@ -1,4 +1,4 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . 
| nindent 4 }} @@ -16,31 +16,42 @@ spec: - lhe singular: engine scope: Namespaced - version: v1beta1 - additionalPrinterColumns: + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: - name: State type: string description: The current state of the engine - JSONPath: .status.currentState + jsonPath: .status.currentState - name: Node type: string description: The node that the engine is on - JSONPath: .spec.nodeID + jsonPath: .spec.nodeID - name: InstanceManager type: string description: The instance manager of the engine - JSONPath: .status.instanceManagerName + jsonPath: .status.instanceManagerName - name: Image type: string description: The current image of the engine - JSONPath: .status.currentImage + jsonPath: .status.currentImage - name: Age type: date - JSONPath: .metadata.creationTimestamp - subresources: - status: {} + jsonPath: .metadata.creationTimestamp --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . 
| nindent 4 }} @@ -58,35 +69,46 @@ spec: - lhr singular: replica scope: Namespaced - version: v1beta1 - additionalPrinterColumns: + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: - name: State type: string description: The current state of the replica - JSONPath: .status.currentState + jsonPath: .status.currentState - name: Node type: string description: The node that the replica is on - JSONPath: .spec.nodeID + jsonPath: .spec.nodeID - name: Disk type: string description: The disk that the replica is on - JSONPath: .spec.diskID + jsonPath: .spec.diskID - name: InstanceManager type: string description: The instance manager of the replica - JSONPath: .status.instanceManagerName + jsonPath: .status.instanceManagerName - name: Image type: string description: The current image of the replica - JSONPath: .status.currentImage + jsonPath: .status.currentImage - name: Age type: date - JSONPath: .metadata.creationTimestamp - subresources: - status: {} + jsonPath: .metadata.creationTimestamp --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . 
| nindent 4 }} @@ -104,17 +126,23 @@ spec: - lhs singular: setting scope: Namespaced - version: v1beta1 - additionalPrinterColumns: + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + x-kubernetes-preserve-unknown-fields: true + additionalPrinterColumns: - name: Value type: string description: The value of the setting - JSONPath: .value + jsonPath: .value - name: Age type: date - JSONPath: .metadata.creationTimestamp + jsonPath: .metadata.creationTimestamp --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . | nindent 4 }} @@ -132,35 +160,46 @@ spec: - lhv singular: volume scope: Namespaced - version: v1beta1 - additionalPrinterColumns: + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: - name: State type: string description: The state of the volume - JSONPath: .status.state + jsonPath: .status.state - name: Robustness type: string description: The robustness of the volume - JSONPath: .status.robustness + jsonPath: .status.robustness - name: Scheduled type: string description: The scheduled condition of the volume - JSONPath: .status.conditions['scheduled']['status'] + jsonPath: .status.conditions['scheduled']['status'] - name: Size type: string description: The size of the volume - JSONPath: .spec.size + jsonPath: .spec.size - name: Node type: string description: The node that the volume is currently attaching to - JSONPath: .status.currentNodeID + jsonPath: .status.currentNodeID - name: Age type: date - JSONPath: .metadata.creationTimestamp - subresources: - status: {} + jsonPath: .metadata.creationTimestamp --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: 
apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . | nindent 4 }} @@ -178,31 +217,42 @@ spec: - lhei singular: engineimage scope: Namespaced - version: v1beta1 - additionalPrinterColumns: + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: - name: State type: string description: State of the engine image - JSONPath: .status.state + jsonPath: .status.state - name: Image type: string description: The Longhorn engine image - JSONPath: .spec.image + jsonPath: .spec.image - name: RefCount type: integer description: Number of volumes are using the engine image - JSONPath: .status.refCount + jsonPath: .status.refCount - name: BuildDate type: date description: The build date of the engine image - JSONPath: .status.buildDate + jsonPath: .status.buildDate - name: Age type: date - JSONPath: .metadata.creationTimestamp - subresources: - status: {} + jsonPath: .metadata.creationTimestamp --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . 
| nindent 4 }} @@ -220,27 +270,38 @@ spec: - lhn singular: node scope: Namespaced - version: v1beta1 - additionalPrinterColumns: + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: - name: Ready type: string description: Indicate whether the node is ready - JSONPath: .status.conditions['Ready']['status'] + jsonPath: .status.conditions['Ready']['status'] - name: AllowScheduling type: boolean description: Indicate whether the user disabled/enabled replica scheduling for the node - JSONPath: .spec.allowScheduling + jsonPath: .spec.allowScheduling - name: Schedulable type: string description: Indicate whether Longhorn can schedule replicas on the node - JSONPath: .status.conditions['Schedulable']['status'] + jsonPath: .status.conditions['Schedulable']['status'] - name: Age type: date - JSONPath: .metadata.creationTimestamp - subresources: - status: {} + jsonPath: .metadata.creationTimestamp --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . 
| nindent 4 }} @@ -258,22 +319,33 @@ spec: - lhim singular: instancemanager scope: Namespaced - version: v1beta1 - additionalPrinterColumns: + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: - name: State type: string description: The state of the instance manager - JSONPath: .status.currentState + jsonPath: .status.currentState - name: Type type: string description: The type of the instance manager (engine or replica) - JSONPath: .spec.type + jsonPath: .spec.type - name: Node type: string description: The node that the instance manager is running on - JSONPath: .spec.nodeID + jsonPath: .spec.nodeID - name: Age type: date - JSONPath: .metadata.creationTimestamp - subresources: - status: {} + jsonPath: .metadata.creationTimestamp diff --git a/chart/templates/default-setting.yaml b/chart/templates/default-setting.yaml index 421e9bd..48283c9 100644 --- a/chart/templates/default-setting.yaml +++ b/chart/templates/default-setting.yaml @@ -16,6 +16,7 @@ data: storage-minimal-available-percentage: {{ .Values.defaultSettings.storageMinimalAvailablePercentage }} upgrade-checker: {{ .Values.defaultSettings.upgradeChecker }} default-replica-count: {{ .Values.defaultSettings.defaultReplicaCount }} + default-data-locality: {{ .Values.defaultSettings.defaultDataLocality }} guaranteed-engine-cpu: {{ .Values.defaultSettings.guaranteedEngineCPU }} default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }} backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }} @@ -28,9 +29,10 @@ data: volume-attachment-recovery-policy: {{ .Values.defaultSettings.volumeAttachmentRecoveryPolicy }} node-down-pod-deletion-policy: {{ .Values.defaultSettings.nodeDownPodDeletionPolicy }} 
allow-node-drain-with-last-healthy-replica: {{ .Values.defaultSettings.allowNodeDrainWithLastHealthyReplica }} - mkfs-ext4-parameters: {{ .Values.defaultSettings.mkfsExt4Parameters }} - disable-replica-rebuild: {{ .Values.defaultSettings.disableReplicaRebuild }} - disable-revision-counter: {{ .Values.defaultSettings.disableRevisionCounter }} - system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }} - allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }} - auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }} \ No newline at end of file + mkfs-ext4-parameters: {{ .Values.defaultSettings.mkfsExt4Parameters }} + disable-replica-rebuild: {{ .Values.defaultSettings.disableReplicaRebuild }} + replica-replenishment-wait-interval: {{ .Values.defaultSettings.replicaReplenishmentWaitInterval }} + disable-revision-counter: {{ .Values.defaultSettings.disableRevisionCounter }} + system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }} + allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }} + auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }} diff --git a/chart/templates/storageclass.yaml b/chart/templates/storageclass.yaml index 72a75cb..6b82d41 100644 --- a/chart/templates/storageclass.yaml +++ b/chart/templates/storageclass.yaml @@ -12,7 +12,6 @@ data: name: longhorn annotations: storageclass.kubernetes.io/is-default-class: {{ .Values.persistence.defaultClass | quote }} - labels: {{- include "longhorn.labels" . 
| nindent 4 }} provisioner: driver.longhorn.io allowVolumeExpansion: true reclaimPolicy: "{{ .Values.persistence.reclaimPolicy }}" @@ -22,6 +21,6 @@ data: staleReplicaTimeout: "30" fromBackup: "" baseImage: "" - {{- if .Values.recurringJobs.enable }} - recurringJobs: '{{ .Values.recurringJobs.jobsList | toPrettyJson | indent 2 | trim }}' + {{- if .Values.persistence.recurringJobs.enable }} + recurringJobs: '{{ .Values.persistence.recurringJobs.jobList }}' {{- end }} diff --git a/chart/values.yaml b/chart/values.yaml index 0e1d685..2afe094 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -22,19 +22,19 @@ image: csi: attacher: repository: longhornio/csi-attacher - tag: v2.0.0 + tag: v2.2.1-lh1 provisioner: repository: longhornio/csi-provisioner - tag: v1.4.0 + tag: v1.6.0-lh1 nodeDriverRegistrar: repository: longhornio/csi-node-driver-registrar - tag: v1.2.0 + tag: v1.2.0-lh1 resizer: repository: longhornio/csi-resizer - tag: v0.3.0 + tag: v0.5.1-lh1 snapshotter: repository: longhornio/csi-snapshotter - tag: v2.1.1 + tag: v2.1.1-lh1 pullPolicy: IfNotPresent service: @@ -49,14 +49,9 @@ persistence: defaultClass: true defaultClassReplicaCount: 3 reclaimPolicy: Delete - -recurringJobs: - enable: false - jobsList: [] - # - name: snap - # task: snapshot - # cron: '*/1 * * * *' - # retain: 1 + recurringJobs: + enable: false + jobList: [] csi: kubeletRootDir: ~ @@ -71,6 +66,7 @@ defaultSettings: allowRecurringJobWhileVolumeDetached: ~ createDefaultDiskLabeledNodes: ~ defaultDataPath: ~ + defaultDataLocality: ~ replicaSoftAntiAffinity: ~ storageOverProvisioningPercentage: ~ storageMinimalAvailablePercentage: ~ @@ -90,6 +86,7 @@ defaultSettings: allowNodeDrainWithLastHealthyReplica: ~ mkfsExt4Parameters: ~ disableReplicaRebuild: ~ + replicaReplenishmentWaitInterval: ~ disableRevisionCounter: ~ systemManagedPodsImagePullPolicy: ~ allowVolumeCreationWithDegradedAvailability: ~ From 791d63c5e38fc75dbb5439df96a82793974d2e82 Mon Sep 17 00:00:00 2001 From: Guangbo Chen 
Date: Thu, 3 Dec 2020 23:02:28 +0800 Subject: [PATCH 04/33] Add namespace override Signed-off-by: Guangbo Chen --- chart/templates/_helpers.tpl | 11 +++++++++++ chart/templates/clusterrolebinding.yaml | 2 +- chart/templates/daemonset-sa.yaml | 4 ++-- chart/templates/default-setting.yaml | 2 +- chart/templates/deployment-driver.yaml | 2 +- chart/templates/deployment-ui.yaml | 4 ++-- chart/templates/ingress.yaml | 1 + chart/templates/postupgrade-job.yaml | 2 +- chart/templates/psp.yaml | 10 +++++----- chart/templates/registry-secret.yml | 1 + chart/templates/serviceaccount.yaml | 2 +- chart/templates/storageclass.yaml | 2 +- chart/templates/tls-secrets.yaml | 1 + chart/templates/uninstall-job.yaml | 2 +- chart/values.yaml | 4 ++++ 15 files changed, 34 insertions(+), 16 deletions(-) diff --git a/chart/templates/_helpers.tpl b/chart/templates/_helpers.tpl index 9c6c892..3fbc2ac 100644 --- a/chart/templates/_helpers.tpl +++ b/chart/templates/_helpers.tpl @@ -53,3 +53,14 @@ app.kubernetes.io/version: {{ .Chart.AppVersion }} {{ include "system_default_registry" . }} {{- end -}} {{- end -}} + +{{- /* + define the longhorn release namespace +*/ -}} +{{- define "release_namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} diff --git a/chart/templates/clusterrolebinding.yaml b/chart/templates/clusterrolebinding.yaml index 30c7fa7..66ac62f 100644 --- a/chart/templates/clusterrolebinding.yaml +++ b/chart/templates/clusterrolebinding.yaml @@ -10,4 +10,4 @@ roleRef: subjects: - kind: ServiceAccount name: longhorn-service-account - namespace: {{ .Release.Namespace }} + namespace: {{ include "release_namespace" . }} diff --git a/chart/templates/daemonset-sa.yaml b/chart/templates/daemonset-sa.yaml index f6e323b..e4c4ff1 100644 --- a/chart/templates/daemonset-sa.yaml +++ b/chart/templates/daemonset-sa.yaml @@ -4,7 +4,7 @@ metadata: labels: {{- include "longhorn.labels" . 
| nindent 4 }} app: longhorn-manager name: longhorn-manager - namespace: {{ .Release.Namespace }} + namespace: {{ include "release_namespace" . }} spec: selector: matchLabels: @@ -97,7 +97,7 @@ metadata: labels: {{- include "longhorn.labels" . | nindent 4 }} app: longhorn-manager name: longhorn-backend - namespace: {{ .Release.Namespace }} + namespace: {{ include "release_namespace" . }} spec: type: {{ .Values.service.manager.type }} sessionAffinity: ClientIP diff --git a/chart/templates/default-setting.yaml b/chart/templates/default-setting.yaml index 48283c9..14c264e 100644 --- a/chart/templates/default-setting.yaml +++ b/chart/templates/default-setting.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ConfigMap metadata: name: longhorn-default-setting - namespace: {{ .Release.Namespace }} + namespace: {{ include "release_namespace" . }} labels: {{- include "longhorn.labels" . | nindent 4 }} data: default-setting.yaml: |- diff --git a/chart/templates/deployment-driver.yaml b/chart/templates/deployment-driver.yaml index 232358c..c4b6e35 100644 --- a/chart/templates/deployment-driver.yaml +++ b/chart/templates/deployment-driver.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: longhorn-driver-deployer - namespace: {{ .Release.Namespace }} + namespace: {{ include "release_namespace" . }} labels: {{- include "longhorn.labels" . | nindent 4 }} spec: replicas: 1 diff --git a/chart/templates/deployment-ui.yaml b/chart/templates/deployment-ui.yaml index 027d54e..da7c0ea 100644 --- a/chart/templates/deployment-ui.yaml +++ b/chart/templates/deployment-ui.yaml @@ -4,7 +4,7 @@ metadata: labels: {{- include "longhorn.labels" . | nindent 4 }} app: longhorn-ui name: longhorn-ui - namespace: {{ .Release.Namespace }} + namespace: {{ include "release_namespace" . 
}} spec: replicas: 1 selector: @@ -41,7 +41,7 @@ metadata: kubernetes.io/cluster-service: "true" {{- end }} name: longhorn-frontend - namespace: {{ .Release.Namespace }} + namespace: {{ include "release_namespace" . }} spec: {{- if eq .Values.service.ui.type "Rancher-Proxy" }} type: ClusterIP diff --git a/chart/templates/ingress.yaml b/chart/templates/ingress.yaml index e3e9e3e..5b3a405 100644 --- a/chart/templates/ingress.yaml +++ b/chart/templates/ingress.yaml @@ -3,6 +3,7 @@ apiVersion: extensions/v1beta1 kind: Ingress metadata: name: longhorn-ingress + namespace: {{ include "release_namespace" . }} labels: {{- include "longhorn.labels" . | nindent 4 }} app: longhorn-ingress annotations: diff --git a/chart/templates/postupgrade-job.yaml b/chart/templates/postupgrade-job.yaml index e3559eb..6b6a385 100644 --- a/chart/templates/postupgrade-job.yaml +++ b/chart/templates/postupgrade-job.yaml @@ -5,7 +5,7 @@ metadata: "helm.sh/hook": post-upgrade "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation name: longhorn-post-upgrade - namespace: {{ .Release.Namespace }} + namespace: {{ include "release_namespace" . }} labels: {{- include "longhorn.labels" . | nindent 4 }} spec: activeDeadlineSeconds: 900 diff --git a/chart/templates/psp.yaml b/chart/templates/psp.yaml index 66479b4..a2dfc05 100644 --- a/chart/templates/psp.yaml +++ b/chart/templates/psp.yaml @@ -35,7 +35,7 @@ kind: Role metadata: name: longhorn-psp-role labels: {{- include "longhorn.labels" . | nindent 4 }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "release_namespace" . }} rules: - apiGroups: - policy @@ -51,7 +51,7 @@ kind: RoleBinding metadata: name: longhorn-psp-binding labels: {{- include "longhorn.labels" . | nindent 4 }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "release_namespace" . 
}} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -59,8 +59,8 @@ roleRef: subjects: - kind: ServiceAccount name: longhorn-service-account - namespace: {{ .Release.Namespace }} + namespace: {{ include "release_namespace" . }} - kind: ServiceAccount name: default - namespace: {{ .Release.Namespace }} -{{- end }} \ No newline at end of file + namespace: {{ include "release_namespace" . }} +{{- end }} diff --git a/chart/templates/registry-secret.yml b/chart/templates/registry-secret.yml index 7112c89..1c7565f 100644 --- a/chart/templates/registry-secret.yml +++ b/chart/templates/registry-secret.yml @@ -3,6 +3,7 @@ apiVersion: v1 kind: Secret metadata: name: {{ .Values.privateRegistry.registrySecret }} + namespace: {{ include "release_namespace" . }} labels: {{- include "longhorn.labels" . | nindent 4 }} type: kubernetes.io/dockerconfigjson data: diff --git a/chart/templates/serviceaccount.yaml b/chart/templates/serviceaccount.yaml index a2280b4..ad576c3 100644 --- a/chart/templates/serviceaccount.yaml +++ b/chart/templates/serviceaccount.yaml @@ -2,5 +2,5 @@ apiVersion: v1 kind: ServiceAccount metadata: name: longhorn-service-account - namespace: {{ .Release.Namespace }} + namespace: {{ include "release_namespace" . }} labels: {{- include "longhorn.labels" . | nindent 4 }} diff --git a/chart/templates/storageclass.yaml b/chart/templates/storageclass.yaml index 6b82d41..dea6aaf 100644 --- a/chart/templates/storageclass.yaml +++ b/chart/templates/storageclass.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ConfigMap metadata: name: longhorn-storageclass - namespace: {{ .Release.Namespace }} + namespace: {{ include "release_namespace" . }} labels: {{- include "longhorn.labels" . 
| nindent 4 }} data: storageclass.yaml: | diff --git a/chart/templates/tls-secrets.yaml b/chart/templates/tls-secrets.yaml index 7a75df0..a7ebf13 100644 --- a/chart/templates/tls-secrets.yaml +++ b/chart/templates/tls-secrets.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: Secret metadata: name: longhorn + namespace: {{ include "release_namespace" . }} labels: {{- include "longhorn.labels" . | nindent 4 }} app: longhorn type: kubernetes.io/tls diff --git a/chart/templates/uninstall-job.yaml b/chart/templates/uninstall-job.yaml index 6a36a16..e7e9f14 100644 --- a/chart/templates/uninstall-job.yaml +++ b/chart/templates/uninstall-job.yaml @@ -5,7 +5,7 @@ metadata: "helm.sh/hook": pre-delete "helm.sh/hook-delete-policy": hook-succeeded name: longhorn-uninstall - namespace: {{ .Release.Namespace }} + namespace: {{ include "release_namespace" . }} labels: {{- include "longhorn.labels" . | nindent 4 }} spec: activeDeadlineSeconds: 900 diff --git a/chart/values.yaml b/chart/values.yaml index 2afe094..b6476a3 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -153,3 +153,7 @@ ingress: # Configure a pod security policy in the Longhorn namespace to allow privileged pods enablePSP: true + +## Specify override namespace, specifically this is useful for using longhorn as sub-chart +## and its release namespace is not the `longhorn-system` +namespaceOverride: "" From 08e1522f6e671bf4d38224b0b6a6a9c0fad74a56 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Sun, 6 Dec 2020 10:27:46 -0800 Subject: [PATCH 05/33] Sync with Longhorn manager commit 53203078833122cf1544e4145bf12ecec4e7d097 Author: Sheng Yang Date: Sun Dec 6 10:25:22 2020 -0800 deploy: add iscsi deploy helper Signed-off-by: Sheng Yang Signed-off-by: Sheng Yang --- deploy/longhorn-images.txt | 18 +- deploy/longhorn.yaml | 393 +++++++++++++++--- deploy/release-images.txt | 18 +- examples/block_volume.yaml | 1 + examples/csi/example_pv.yaml | 1 + examples/deployment.yaml | 5 +- examples/pod_with_pvc.yaml | 1 + 
examples/rwx/01-security.yaml | 2 +- examples/rwx/03-rwx-test.yaml | 2 + examples/simple_pvc.yaml | 1 + examples/snapshot/existing_backup.yaml | 14 + .../snapshot/restore_existing_backup.yaml | 16 + examples/snapshot/restore_pvc_snapshot.yaml | 16 + examples/snapshot/snapshot_existing.yaml | 8 + examples/snapshot/snapshot_pvc.yaml | 8 + examples/snapshot/snapshotclass.yaml | 9 + examples/statefulset.yaml | 1 + uninstall/uninstall.yaml | 12 +- 18 files changed, 444 insertions(+), 82 deletions(-) create mode 100644 examples/snapshot/existing_backup.yaml create mode 100644 examples/snapshot/restore_existing_backup.yaml create mode 100644 examples/snapshot/restore_pvc_snapshot.yaml create mode 100644 examples/snapshot/snapshot_existing.yaml create mode 100644 examples/snapshot/snapshot_pvc.yaml create mode 100644 examples/snapshot/snapshotclass.yaml diff --git a/deploy/longhorn-images.txt b/deploy/longhorn-images.txt index 3c16649..f545f54 100644 --- a/deploy/longhorn-images.txt +++ b/deploy/longhorn-images.txt @@ -1,8 +1,10 @@ -longhornio/longhorn-engine:v1.0.2 -longhornio/longhorn-instance-manager:v1_20200514 -longhornio/longhorn-manager:v1.0.2 -longhornio/longhorn-ui:v1.0.2 -longhornio/csi-attacher:v2.0.0 -longhornio/csi-node-driver-registrar:v1.2.0 -longhornio/csi-provisioner:v1.4.0 -longhornio/csi-resizer:v0.3.0 +longhornio/longhorn-engine:master +longhornio/longhorn-instance-manager:v1_20201021 +longhornio/longhorn-share-manager:v1_20201204 +longhornio/longhorn-manager:master +longhornio/longhorn-ui:master +longhornio/csi-attacher:v2.2.1-lh1 +longhornio/csi-provisioner:v1.6.0-lh1 +longhornio/csi-resizer:v0.5.1-lh1 +longhornio/csi-snapshotter:v2.1.1-lh1 +longhornio/csi-node-driver-registrar:v1.2.0-lh1 diff --git a/deploy/longhorn.yaml b/deploy/longhorn.yaml index 787a154..5590525 100644 --- a/deploy/longhorn.yaml +++ b/deploy/longhorn.yaml @@ -32,22 +32,29 @@ rules: - apiGroups: ["batch"] resources: ["jobs", "cronjobs"] verbs: ["*"] +- apiGroups: ["policy"] + 
resources: ["poddisruptionbudgets"] + verbs: ["*"] - apiGroups: ["scheduling.k8s.io"] resources: ["priorityclasses"] verbs: ["watch", "list"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses", "volumeattachments", "csinodes", "csidrivers"] verbs: ["*"] -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses", "volumesnapshots", "volumesnapshotcontents", "volumesnapshotcontents/status"] + verbs: ["*"] - apiGroups: ["longhorn.io"] resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", - "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status"] + "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status", + "sharemanagers", "sharemanagers/status"] verbs: ["*"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["*"] +- apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -62,7 +69,7 @@ subjects: name: longhorn-service-account namespace: longhorn-system --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: @@ -78,11 +85,42 @@ spec: - lhe singular: engine scope: Namespaced - version: v1beta1 - subresources: - status: {} + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: The current state of the engine + jsonPath: .status.currentState + - name: Node + type: string + description: The 
node that the engine is on + jsonPath: .spec.nodeID + - name: InstanceManager + type: string + description: The instance manager of the engine + jsonPath: .status.instanceManagerName + - name: Image + type: string + description: The current image of the engine + jsonPath: .status.currentImage + - name: Age + type: date + jsonPath: .metadata.creationTimestamp --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: @@ -98,11 +136,46 @@ spec: - lhr singular: replica scope: Namespaced - version: v1beta1 - subresources: - status: {} + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: The current state of the replica + jsonPath: .status.currentState + - name: Node + type: string + description: The node that the replica is on + jsonPath: .spec.nodeID + - name: Disk + type: string + description: The disk that the replica is on + jsonPath: .spec.diskID + - name: InstanceManager + type: string + description: The instance manager of the replica + jsonPath: .status.instanceManagerName + - name: Image + type: string + description: The current image of the replica + jsonPath: .status.currentImage + - name: Age + type: date + jsonPath: .metadata.creationTimestamp --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: @@ -118,9 +191,23 @@ spec: - lhs singular: setting scope: Namespaced - version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + x-kubernetes-preserve-unknown-fields: true + additionalPrinterColumns: + - name: Value + type: string + description: The value of the setting + jsonPath: .value + - 
name: Age + type: date + jsonPath: .metadata.creationTimestamp --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: @@ -136,11 +223,46 @@ spec: - lhv singular: volume scope: Namespaced - version: v1beta1 - subresources: - status: {} + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: The state of the volume + jsonPath: .status.state + - name: Robustness + type: string + description: The robustness of the volume + jsonPath: .status.robustness + - name: Scheduled + type: string + description: The scheduled condition of the volume + jsonPath: .status.conditions['scheduled']['status'] + - name: Size + type: string + description: The size of the volume + jsonPath: .spec.size + - name: Node + type: string + description: The node that the volume is currently attaching to + jsonPath: .status.currentNodeID + - name: Age + type: date + jsonPath: .metadata.creationTimestamp --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: @@ -156,11 +278,42 @@ spec: - lhei singular: engineimage scope: Namespaced - version: v1beta1 - subresources: - status: {} + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: State of the engine image + jsonPath: .status.state + - name: Image + type: string + description: The Longhorn engine image + jsonPath: .spec.image + - name: RefCount + type: 
integer + description: Number of volumes are using the engine image + jsonPath: .status.refCount + - name: BuildDate + type: date + description: The build date of the engine image + jsonPath: .status.buildDate + - name: Age + type: date + jsonPath: .metadata.creationTimestamp --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: @@ -176,11 +329,38 @@ spec: - lhn singular: node scope: Namespaced - version: v1beta1 - subresources: - status: {} + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: Ready + type: string + description: Indicate whether the node is ready + jsonPath: .status.conditions['Ready']['status'] + - name: AllowScheduling + type: boolean + description: Indicate whether the user disabled/enabled replica scheduling for the node + jsonPath: .spec.allowScheduling + - name: Schedulable + type: string + description: Indicate whether Longhorn can schedule replicas on the node + jsonPath: .status.conditions['Schedulable']['status'] + - name: Age + type: date + jsonPath: .metadata.creationTimestamp --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: labels: @@ -196,9 +376,79 @@ spec: - lhim singular: instancemanager scope: Namespaced - version: v1beta1 - subresources: - status: {} + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: The state of the instance manager + jsonPath: .status.currentState + - 
name: Type + type: string + description: The type of the instance manager (engine or replica) + jsonPath: .spec.type + - name: Node + type: string + description: The node that the instance manager is running on + jsonPath: .spec.nodeID + - name: Age + type: date + jsonPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + longhorn-manager: ShareManager + name: sharemanagers.longhorn.io +spec: + group: longhorn.io + names: + kind: ShareManager + listKind: ShareManagerList + plural: sharemanagers + shortNames: + - lhsm + singular: sharemanager + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: The state of the share manager + jsonPath: .status.state + - name: Node + type: string + description: The node that the share manager is owned by + jsonPath: .status.ownerID + - name: Age + type: date + jsonPath: .metadata.creationTimestamp --- apiVersion: v1 kind: ConfigMap @@ -209,6 +459,7 @@ data: default-setting.yaml: |- backup-target: backup-target-credential-secret: + allow-recurring-job-while-volume-detached: create-default-disk-labeled-nodes: default-data-path: replica-soft-anti-affinity: @@ -216,17 +467,26 @@ data: storage-minimal-available-percentage: upgrade-checker: default-replica-count: + default-data-locality: guaranteed-engine-cpu: default-longhorn-static-storage-class: backupstore-poll-interval: taint-toleration: priority-class: - registry-secret: auto-salvage: + auto-delete-pod-when-volume-detached-unexpectedly: disable-scheduling-on-cordoned-node: replica-zone-soft-anti-affinity: volume-attachment-recovery-policy: + node-down-pod-deletion-policy: + allow-node-drain-with-last-healthy-replica: 
mkfs-ext4-parameters: + disable-replica-rebuild: + replica-replenishment-wait-interval: + disable-revision-counter: + system-managed-pods-image-pull-policy: + allow-volume-creation-with-degraded-availability: + auto-cleanup-system-generated-snapshot: --- apiVersion: policy/v1beta1 kind: PodSecurityPolicy @@ -290,6 +550,31 @@ subjects: name: default namespace: longhorn-system --- +apiVersion: v1 +kind: ConfigMap +metadata: + name: longhorn-storageclass + namespace: longhorn-system +data: + storageclass.yaml: | + kind: StorageClass + apiVersion: storage.k8s.io/v1 + metadata: + name: longhorn + provisioner: driver.longhorn.io + allowVolumeExpansion: true + reclaimPolicy: Delete + volumeBindingMode: Immediate + parameters: + numberOfReplicas: "3" + staleReplicaTimeout: "2880" + fromBackup: "" + # diskSelector: "ssd,fast" + # nodeSelector: "storage,fast" + # recurringJobs: '[{"name":"snap", "task":"snapshot", "cron":"*/1 * * * *", "retain":1}, + # {"name":"backup", "task":"backup", "cron":"*/2 * * * *", "retain":1, + # "labels": {"interval":"2m"}}]' +--- apiVersion: apps/v1 kind: DaemonSet metadata: @@ -308,7 +593,7 @@ spec: spec: containers: - name: longhorn-manager - image: longhornio/longhorn-manager:v1.0.2 + image: longhornio/longhorn-manager:master imagePullPolicy: IfNotPresent securityContext: privileged: true @@ -317,11 +602,13 @@ spec: - -d - daemon - --engine-image - - longhornio/longhorn-engine:v1.0.2 + - longhornio/longhorn-engine:master - --instance-manager-image - - longhornio/longhorn-instance-manager:v1_20200514 + - longhornio/longhorn-instance-manager:v1_20201021 + - --share-manager-image + - longhornio/longhorn-share-manager:v1_20201204 - --manager-image - - longhornio/longhorn-manager:v1.0.2 + - longhornio/longhorn-manager:master - --service-account - longhorn-service-account ports: @@ -418,7 +705,7 @@ spec: spec: containers: - name: longhorn-ui - image: longhornio/longhorn-ui:v1.0.2 + image: longhornio/longhorn-ui:master imagePullPolicy: IfNotPresent 
securityContext: runAsUser: 0 @@ -465,18 +752,18 @@ spec: spec: initContainers: - name: wait-longhorn-manager - image: longhornio/longhorn-manager:v1.0.2 + image: longhornio/longhorn-manager:master command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] containers: - name: longhorn-driver-deployer - image: longhornio/longhorn-manager:v1.0.2 + image: longhornio/longhorn-manager:master imagePullPolicy: IfNotPresent command: - longhorn-manager - -d - deploy-driver - --manager-image - - longhornio/longhorn-manager:v1.0.2 + - longhornio/longhorn-manager:master - --manager-url - http://longhorn-backend:9500/v1 env: @@ -498,13 +785,15 @@ spec: # For AirGap Installation # Replace PREFIX with your private registery #- name: CSI_ATTACHER_IMAGE - # value: PREFIX/csi-attacher:v2.0.0 + # value: PREFIX/csi-attacher:v2.2.1-lh1 #- name: CSI_PROVISIONER_IMAGE - # value: PREFIX/csi-provisioner:v1.4.0 + # value: PREFIX/csi-provisioner:v1.6.0-lh1 #- name: CSI_NODE_DRIVER_REGISTRAR_IMAGE - # value: PREFIX/csi-node-driver-registrar:v1.2.0 + # value: PREFIX/csi-node-driver-registrar:v1.2.0-lh1 #- name: CSI_RESIZER_IMAGE - # value: PREFIX/csi-resizer:v0.3.0 + # value: PREFIX/csi-resizer:v0.5.1-lh1 + #- name: CSI_SNAPSHOTTER_IMAGE + # value: PREFIX/csi-snapshotter:v2.1.1-lh1 # Manually specify number of CSI attacher replicas #- name: CSI_ATTACHER_REPLICA_COUNT # value: "3" @@ -513,25 +802,11 @@ spec: # value: "3" #- name: CSI_RESIZER_REPLICA_COUNT # value: "3" + #- name: CSI_SNAPSHOTTER_REPLICA_COUNT + # value: "3" #imagePullSecrets: #- name: serviceAccountName: longhorn-service-account securityContext: runAsUser: 0 --- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: longhorn -provisioner: driver.longhorn.io -allowVolumeExpansion: true -parameters: - numberOfReplicas: "3" - staleReplicaTimeout: "2880" - fromBackup: "" -# diskSelector: "ssd,fast" -# nodeSelector: 
"storage,fast" -# recurringJobs: '[{"name":"snap", "task":"snapshot", "cron":"*/1 * * * *", "retain":1}, -# {"name":"backup", "task":"backup", "cron":"*/2 * * * *", "retain":1, -# "labels": {"interval":"2m"}}]' ---- diff --git a/deploy/release-images.txt b/deploy/release-images.txt index 3dc686c..f545f54 100644 --- a/deploy/release-images.txt +++ b/deploy/release-images.txt @@ -1,8 +1,10 @@ -longhornio/longhorn-engine:v1.0.2 -longhornio/longhorn-instance-manager:v1_20200514 -longhornio/longhorn-manager:v1.0.2 -longhornio/longhorn-ui:v1.0.2 -quay.io/k8scsi/csi-attacher:v2.0.0 -quay.io/k8scsi/csi-node-driver-registrar:v1.2.0 -quay.io/k8scsi/csi-provisioner:v1.4.0 -quay.io/k8scsi/csi-resizer:v0.3.0 +longhornio/longhorn-engine:master +longhornio/longhorn-instance-manager:v1_20201021 +longhornio/longhorn-share-manager:v1_20201204 +longhornio/longhorn-manager:master +longhornio/longhorn-ui:master +longhornio/csi-attacher:v2.2.1-lh1 +longhornio/csi-provisioner:v1.6.0-lh1 +longhornio/csi-resizer:v0.5.1-lh1 +longhornio/csi-snapshotter:v2.1.1-lh1 +longhornio/csi-node-driver-registrar:v1.2.0-lh1 diff --git a/examples/block_volume.yaml b/examples/block_volume.yaml index 58a9dad..257ff16 100644 --- a/examples/block_volume.yaml +++ b/examples/block_volume.yaml @@ -2,6 +2,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: name: longhorn-block-vol + namespace: default spec: accessModes: - ReadWriteOnce diff --git a/examples/csi/example_pv.yaml b/examples/csi/example_pv.yaml index b901dd1..211b561 100644 --- a/examples/csi/example_pv.yaml +++ b/examples/csi/example_pv.yaml @@ -22,6 +22,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: name: longhorn-vol-pvc + namespace: default spec: accessModes: - ReadWriteOnce diff --git a/examples/deployment.yaml b/examples/deployment.yaml index fffe4da..5de2162 100644 --- a/examples/deployment.yaml +++ b/examples/deployment.yaml @@ -15,6 +15,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: name: mysql-pvc + namespace: 
default spec: accessModes: - ReadWriteOnce @@ -29,6 +30,7 @@ metadata: name: mysql labels: app: mysql + namespace: default spec: selector: matchLabels: @@ -51,9 +53,6 @@ spec: - /var/lib/mysql/lost+found initialDelaySeconds: 5 periodSeconds: 5 - env: - - name: MYSQL_ROOT_PASSWORD - value: changeme ports: - containerPort: 3306 name: mysql diff --git a/examples/pod_with_pvc.yaml b/examples/pod_with_pvc.yaml index 4e172be..1376c25 100644 --- a/examples/pod_with_pvc.yaml +++ b/examples/pod_with_pvc.yaml @@ -2,6 +2,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: name: longhorn-volv-pvc + namespace: default spec: accessModes: - ReadWriteOnce diff --git a/examples/rwx/01-security.yaml b/examples/rwx/01-security.yaml index be930ef..e11c910 100644 --- a/examples/rwx/01-security.yaml +++ b/examples/rwx/01-security.yaml @@ -44,7 +44,7 @@ rules: verbs: ["get"] - apiGroups: ["extensions"] resources: ["podsecuritypolicies"] - resourceNames: ["nfs-provisioner"] + resourceNames: ["longhorn-nfs-provisioner"] verbs: ["use"] --- kind: ClusterRoleBinding diff --git a/examples/rwx/03-rwx-test.yaml b/examples/rwx/03-rwx-test.yaml index 7403403..d138dea 100644 --- a/examples/rwx/03-rwx-test.yaml +++ b/examples/rwx/03-rwx-test.yaml @@ -2,6 +2,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: name: nfs-test + namespace: default spec: accessModes: - ReadWriteMany @@ -16,6 +17,7 @@ metadata: name: nfs-test labels: app: nfs-test + namespace: default spec: replicas: 4 selector: diff --git a/examples/simple_pvc.yaml b/examples/simple_pvc.yaml index 72e864d..01c6165 100644 --- a/examples/simple_pvc.yaml +++ b/examples/simple_pvc.yaml @@ -2,6 +2,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: name: longhorn-simple-pvc + namespace: default spec: accessModes: - ReadWriteOnce diff --git a/examples/snapshot/existing_backup.yaml b/examples/snapshot/existing_backup.yaml new file mode 100644 index 0000000..43989b0 --- /dev/null +++ b/examples/snapshot/existing_backup.yaml @@ 
-0,0 +1,14 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshotContent +metadata: + name: test-existing-backup +spec: + volumeSnapshotClassName: longhorn + driver: driver.longhorn.io + deletionPolicy: Delete + source: + # NOTE: change this to point to an existing backup on the backupstore + snapshotHandle: bs://test-vol/backup-625159fb469e492e + volumeSnapshotRef: + name: test-snapshot-existing-backup + namespace: default \ No newline at end of file diff --git a/examples/snapshot/restore_existing_backup.yaml b/examples/snapshot/restore_existing_backup.yaml new file mode 100644 index 0000000..0769fe5 --- /dev/null +++ b/examples/snapshot/restore_existing_backup.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: test-restore-existing-backup + namespace: default +spec: + storageClassName: longhorn + dataSource: + name: test-snapshot-existing-backup + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi \ No newline at end of file diff --git a/examples/snapshot/restore_pvc_snapshot.yaml b/examples/snapshot/restore_pvc_snapshot.yaml new file mode 100644 index 0000000..c58ccce --- /dev/null +++ b/examples/snapshot/restore_pvc_snapshot.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: test-restore-snapshot-pvc + namespace: default +spec: + storageClassName: longhorn + dataSource: + name: test-snapshot-pvc + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi \ No newline at end of file diff --git a/examples/snapshot/snapshot_existing.yaml b/examples/snapshot/snapshot_existing.yaml new file mode 100644 index 0000000..bd698be --- /dev/null +++ b/examples/snapshot/snapshot_existing.yaml @@ -0,0 +1,8 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshot +metadata: + name: test-snapshot-existing-backup +spec: + 
volumeSnapshotClassName: longhorn + source: + volumeSnapshotContentName: test-existing-backup diff --git a/examples/snapshot/snapshot_pvc.yaml b/examples/snapshot/snapshot_pvc.yaml new file mode 100644 index 0000000..aa1f373 --- /dev/null +++ b/examples/snapshot/snapshot_pvc.yaml @@ -0,0 +1,8 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshot +metadata: + name: test-snapshot-pvc +spec: + volumeSnapshotClassName: longhorn + source: + persistentVolumeClaimName: test-vol diff --git a/examples/snapshot/snapshotclass.yaml b/examples/snapshot/snapshotclass.yaml new file mode 100644 index 0000000..5d72aa8 --- /dev/null +++ b/examples/snapshot/snapshotclass.yaml @@ -0,0 +1,9 @@ +kind: VolumeSnapshotClass +apiVersion: snapshot.storage.k8s.io/v1beta1 +metadata: + name: longhorn +driver: driver.longhorn.io +deletionPolicy: Delete +#parameters: +# csi.storage.k8s.io/snapshotter-secret-name: mysecret +# csi.storage.k8s.io/snapshotter-secret-namespace: mysecretnamespace \ No newline at end of file diff --git a/examples/statefulset.yaml b/examples/statefulset.yaml index 9926b93..6e967bb 100644 --- a/examples/statefulset.yaml +++ b/examples/statefulset.yaml @@ -16,6 +16,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: name: web + namespace: default spec: selector: matchLabels: diff --git a/uninstall/uninstall.yaml b/uninstall/uninstall.yaml index 46fffe8..ad626a3 100644 --- a/uninstall/uninstall.yaml +++ b/uninstall/uninstall.yaml @@ -16,7 +16,7 @@ rules: verbs: - "*" - apiGroups: [""] - resources: ["pods", "persistentvolumes", "persistentvolumeclaims", "nodes"] + resources: ["pods", "persistentvolumes", "persistentvolumeclaims", "nodes", "configmaps"] verbs: ["*"] - apiGroups: ["apps"] resources: ["daemonsets", "statefulsets", "deployments"] @@ -24,15 +24,21 @@ rules: - apiGroups: ["batch"] resources: ["jobs", "cronjobs"] verbs: ["*"] + - apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["*"] - apiGroups: ["scheduling.k8s.io"] resources: 
["priorityclasses"] verbs: ["watch", "list"] - apiGroups: ["storage.k8s.io"] - resources: ["csidrivers"] + resources: ["csidrivers", "storageclasses"] verbs: ["*"] - apiGroups: ["longhorn.io"] resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers"] verbs: ["*"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["*"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -61,7 +67,7 @@ spec: spec: containers: - name: longhorn-uninstall - image: longhornio/longhorn-manager:v1.0.2 + image: longhornio/longhorn-manager:master imagePullPolicy: Always command: - longhorn-manager From 7f0790fc8084f3325d88577af83637ca6821281a Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Sun, 6 Dec 2020 10:43:48 -0800 Subject: [PATCH 06/33] Longhorn v1.0.2-rc1 release Signed-off-by: Sheng Yang --- chart/Chart.yaml | 4 ++-- chart/questions.yml | 6 +++--- chart/values.yaml | 6 +++--- deploy/longhorn-images.txt | 6 +++--- deploy/longhorn.yaml | 14 +++++++------- deploy/release-images.txt | 6 +++--- uninstall/uninstall.yaml | 2 +- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/chart/Chart.yaml b/chart/Chart.yaml index ce617fe..9a3a78d 100644 --- a/chart/Chart.yaml +++ b/chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: longhorn -version: 1.0.2 -appVersion: v1.0.2 +version: 1.1.0-rc1 +appVersion: v1.1.0-rc1 kubeVersion: ">=v1.14.0-r0" description: Longhorn is a distributed block storage system for Kubernetes. 
keywords: diff --git a/chart/questions.yml b/chart/questions.yml index 2b423f7..cf69f1e 100644 --- a/chart/questions.yml +++ b/chart/questions.yml @@ -17,7 +17,7 @@ questions: label: Longhorn Manager Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.manager.tag - default: v1.0.2 + default: v1.1.0-rc1 description: "Specify Longhorn Manager Image Tag" type: string label: Longhorn Manager Image Tag @@ -29,7 +29,7 @@ questions: label: Longhorn Engine Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.engine.tag - default: v1.0.2 + default: v1.1.0-rc1 description: "Specify Longhorn Engine Image Tag" type: string label: Longhorn Engine Image Tag @@ -41,7 +41,7 @@ questions: label: Longhorn UI Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.ui.tag - default: v1.0.2 + default: v1.1.0-rc1 description: "Specify Longhorn UI Image Tag" type: string label: Longhorn UI Image Tag diff --git a/chart/values.yaml b/chart/values.yaml index b6476a3..90777e5 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -9,13 +9,13 @@ image: longhorn: engine: repository: longhornio/longhorn-engine - tag: v1.0.2 + tag: v1.1.0-rc1 manager: repository: longhornio/longhorn-manager - tag: v1.0.2 + tag: v1.1.0-rc1 ui: repository: longhornio/longhorn-ui - tag: v1.0.2 + tag: v1.1.0-rc1 instanceManager: repository: longhornio/longhorn-instance-manager tag: v1_20200514 diff --git a/deploy/longhorn-images.txt b/deploy/longhorn-images.txt index f545f54..515e79f 100644 --- a/deploy/longhorn-images.txt +++ b/deploy/longhorn-images.txt @@ -1,8 +1,8 @@ -longhornio/longhorn-engine:master +longhornio/longhorn-engine:v1.1.0-rc1 longhornio/longhorn-instance-manager:v1_20201021 longhornio/longhorn-share-manager:v1_20201204 -longhornio/longhorn-manager:master -longhornio/longhorn-ui:master +longhornio/longhorn-manager:v1.1.0-rc1 +longhornio/longhorn-ui:v1.1.0-rc1 longhornio/csi-attacher:v2.2.1-lh1 
longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 diff --git a/deploy/longhorn.yaml b/deploy/longhorn.yaml index 5590525..1edcad1 100644 --- a/deploy/longhorn.yaml +++ b/deploy/longhorn.yaml @@ -593,7 +593,7 @@ spec: spec: containers: - name: longhorn-manager - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0-rc1 imagePullPolicy: IfNotPresent securityContext: privileged: true @@ -602,13 +602,13 @@ spec: - -d - daemon - --engine-image - - longhornio/longhorn-engine:master + - longhornio/longhorn-engine:v1.1.0-rc1 - --instance-manager-image - longhornio/longhorn-instance-manager:v1_20201021 - --share-manager-image - longhornio/longhorn-share-manager:v1_20201204 - --manager-image - - longhornio/longhorn-manager:master + - longhornio/longhorn-manager:v1.1.0-rc1 - --service-account - longhorn-service-account ports: @@ -705,7 +705,7 @@ spec: spec: containers: - name: longhorn-ui - image: longhornio/longhorn-ui:master + image: longhornio/longhorn-ui:v1.1.0-rc1 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -752,18 +752,18 @@ spec: spec: initContainers: - name: wait-longhorn-manager - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0-rc1 command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] containers: - name: longhorn-driver-deployer - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0-rc1 imagePullPolicy: IfNotPresent command: - longhorn-manager - -d - deploy-driver - --manager-image - - longhornio/longhorn-manager:master + - longhornio/longhorn-manager:v1.1.0-rc1 - --manager-url - http://longhorn-backend:9500/v1 env: diff --git a/deploy/release-images.txt b/deploy/release-images.txt index f545f54..515e79f 100644 --- a/deploy/release-images.txt +++ b/deploy/release-images.txt @@ -1,8 +1,8 @@ -longhornio/longhorn-engine:master 
+longhornio/longhorn-engine:v1.1.0-rc1 longhornio/longhorn-instance-manager:v1_20201021 longhornio/longhorn-share-manager:v1_20201204 -longhornio/longhorn-manager:master -longhornio/longhorn-ui:master +longhornio/longhorn-manager:v1.1.0-rc1 +longhornio/longhorn-ui:v1.1.0-rc1 longhornio/csi-attacher:v2.2.1-lh1 longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 diff --git a/uninstall/uninstall.yaml b/uninstall/uninstall.yaml index ad626a3..50e82e1 100644 --- a/uninstall/uninstall.yaml +++ b/uninstall/uninstall.yaml @@ -67,7 +67,7 @@ spec: spec: containers: - name: longhorn-uninstall - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0-rc1 imagePullPolicy: Always command: - longhorn-manager From 3a257953daa3e249209bcde2d5a1c84143b65ed1 Mon Sep 17 00:00:00 2001 From: Shuo Wu Date: Mon, 7 Dec 2020 13:32:05 +0800 Subject: [PATCH 07/33] chart: Introduce longhorn share manager Longhorn #2043 Signed-off-by: Shuo Wu --- chart/templates/clusterrole.yaml | 3 ++- chart/templates/crds.yaml | 45 +++++++++++++++++++++++++++++++ chart/templates/daemonset-sa.yaml | 2 ++ chart/values.yaml | 3 +++ 4 files changed, 52 insertions(+), 1 deletion(-) diff --git a/chart/templates/clusterrole.yaml b/chart/templates/clusterrole.yaml index f6fafbe..c697617 100644 --- a/chart/templates/clusterrole.yaml +++ b/chart/templates/clusterrole.yaml @@ -36,7 +36,8 @@ rules: verbs: ["*"] - apiGroups: ["longhorn.io"] resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", - "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status"] + "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status", + "sharemanagers", "sharemanagers/status"] verbs: ["*"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] diff --git a/chart/templates/crds.yaml b/chart/templates/crds.yaml index 9fd23c1..78635e5 
100644 --- a/chart/templates/crds.yaml +++ b/chart/templates/crds.yaml @@ -349,3 +349,48 @@ spec: - name: Age type: date jsonPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: ShareManager + annotations: + helm.sh/resource-policy: keep + name: sharemanagers.longhorn.io +spec: + group: longhorn.io + names: + kind: ShareManager + listKind: ShareManagerList + plural: sharemanagers + shortNames: + - lhsm + singular: sharemanager + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: The state of the share manager + jsonPath: .status.state + - name: Node + type: string + description: The node that the share manager is owned by + jsonPath: .status.ownerID + - name: Age + type: date + jsonPath: .metadata.creationTimestamp diff --git a/chart/templates/daemonset-sa.yaml b/chart/templates/daemonset-sa.yaml index e4c4ff1..e407935 100644 --- a/chart/templates/daemonset-sa.yaml +++ b/chart/templates/daemonset-sa.yaml @@ -28,6 +28,8 @@ spec: - "{{ template "registry_url" . }}{{ .Values.image.longhorn.engine.repository }}:{{ .Values.image.longhorn.engine.tag }}" - --instance-manager-image - "{{ template "registry_url" . }}{{ .Values.image.longhorn.instanceManager.repository }}:{{ .Values.image.longhorn.instanceManager.tag }}" + - --share-manager-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.shareManager.repository }}:{{ .Values.image.longhorn.shareManager.tag }}" - --manager-image - "{{ template "registry_url" . 
}}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}" - --service-account diff --git a/chart/values.yaml b/chart/values.yaml index 90777e5..4c35779 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -19,6 +19,9 @@ image: instanceManager: repository: longhornio/longhorn-instance-manager tag: v1_20200514 + shareManager: + repository: longhornio/longhorn-share-manager + tag: v1_20201204 csi: attacher: repository: longhornio/csi-attacher From 6a301741dcfe3304a42a52c828dfaa68b7d99417 Mon Sep 17 00:00:00 2001 From: Shuo Wu Date: Mon, 7 Dec 2020 13:33:39 +0800 Subject: [PATCH 08/33] chart: Update image tag for longhorn instance manager Signed-off-by: Shuo Wu --- chart/questions.yml | 2 +- chart/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/chart/questions.yml b/chart/questions.yml index cf69f1e..bb36330 100644 --- a/chart/questions.yml +++ b/chart/questions.yml @@ -53,7 +53,7 @@ questions: label: Longhorn Instance Manager Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.instanceManager.tag - default: v1_20200514 + default: v1_20201021 description: "Specify Longhorn Instance Manager Image Tag" type: string label: Longhorn Instance Manager Image Tag diff --git a/chart/values.yaml b/chart/values.yaml index 4c35779..10ac312 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -18,7 +18,7 @@ image: tag: v1.1.0-rc1 instanceManager: repository: longhornio/longhorn-instance-manager - tag: v1_20200514 + tag: v1_20201021 shareManager: repository: longhornio/longhorn-share-manager tag: v1_20201204 From e7fb431e3c4e4226ffaf74b6c3078fbd4e712ab0 Mon Sep 17 00:00:00 2001 From: Nicholas Novak Date: Mon, 26 Oct 2020 11:51:35 -0700 Subject: [PATCH 09/33] Fixed some spelling and grammatical errors in the READMEs Signed-off-by: Nicholas Novak --- README.md | 2 +- chart/README.md | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 
c5d73a3..1a87dc3 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ Longhorn can be installed on a Kubernetes cluster in several ways: The official Longhorn documentation is [here.](https://longhorn.io/docs) ## Community -Longhorn is an open source software, so contribution are greatly welcome. Please read [Code of Conduct](./CODE_OF_CONDUCT.md) and [Contributing Guideline](./CONTRIBUTING.md) before contributing. +Longhorn is open source software, so contributions are greatly welcome. Please read [Code of Conduct](./CODE_OF_CONDUCT.md) and [Contributing Guideline](./CONTRIBUTING.md) before contributing. Contributing code is not the only way of contributing. We value feedbacks very much and many of the Longhorn features are originated from users' feedback. If you have any feedbacks, feel free to [file an issue](https://github.com/longhorn/longhorn/issues/new?title=*Summarize%20your%20issue%20here*&body=*Describe%20your%20issue%20here*%0A%0A---%0AVersion%3A%20``) and talk to the developers at the [CNCF](https://slack.cncf.io/) [#longhorn](https://cloud-native.slack.com/messages/longhorn) slack channel. diff --git a/chart/README.md b/chart/README.md index b8519eb..afba135 100644 --- a/chart/README.md +++ b/chart/README.md @@ -1,6 +1,6 @@ # Longhorn Chart -> **Important**: Please install Longhorn chart in `longhorn-system` namespace only. +> **Important**: Please install the Longhorn chart in `longhorn-system` namespace only. > **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version. @@ -21,7 +21,7 @@ Longhorn is 100% open source software. Project source code is spread across a nu 4. Make sure `open-iscsi` has been installed in all nodes of the Kubernetes cluster. For GKE, recommended Ubuntu as guest OS image since it contains `open-iscsi` already. ## Installation -1. Add Longhorn char repository. +1. Add Longhorn chart repository. 
``` helm repo add longhorn https://charts.longhorn.io ``` @@ -32,11 +32,11 @@ helm repo update ``` 3. Install Longhorn chart. -- With Helm 2, the following command will create `longhorn-system` namespaceand install Longhorn chart together. +- With Helm 2, the following command will create the `longhorn-system` namespace and install the Longhorn chart together. ``` helm install longhorn/longhorn --name longhorn --namespace longhorn-system ``` -- With Helm 3, the following commands will create `longhorn-system` namespace first, then install Longhorn chart. +- With Helm 3, the following commands will create the `longhorn-system` namespace first, then install the Longhorn chart. ``` kubectl create namespace longhorn-system From a1754a0906bd3792cf48a98835a865bf3be72a60 Mon Sep 17 00:00:00 2001 From: Nicholas Novak Date: Mon, 26 Oct 2020 11:54:54 -0700 Subject: [PATCH 10/33] Fixed a grammatical error that I missed in the README Signed-off-by: Nicholas Novak --- chart/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chart/README.md b/chart/README.md index afba135..13e6312 100644 --- a/chart/README.md +++ b/chart/README.md @@ -1,6 +1,6 @@ # Longhorn Chart -> **Important**: Please install the Longhorn chart in `longhorn-system` namespace only. +> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only. > **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version. From 250fd3206616130e532589b916079ceb8c272aa8 Mon Sep 17 00:00:00 2001 From: Bo Tao Date: Fri, 30 Oct 2020 22:13:28 -0700 Subject: [PATCH 11/33] Add iscsi installtion daemonset yaml file Add iscsi installation yaml file to provide a convenient way to install iscsi on every host. 
Longhorn #1741 Signed-off-by: Bo Tao --- deploy/iscsi/longhorn-iscsi-installation.yaml | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 deploy/iscsi/longhorn-iscsi-installation.yaml diff --git a/deploy/iscsi/longhorn-iscsi-installation.yaml b/deploy/iscsi/longhorn-iscsi-installation.yaml new file mode 100644 index 0000000..8e2a157 --- /dev/null +++ b/deploy/iscsi/longhorn-iscsi-installation.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: longhorn-iscsi-installation + labels: + app: longhorn-iscsi-installation + annotations: + command: &cmd OS=$(grep "ID_LIKE" /etc/os-release | cut -d '=' -f 2); if [[ $OS == *"debian"* ]]; then apt-get update -qy && apt-get install -qy open-iscsi && sudo systemctl enable iscsid && sudo systemctl start iscsid; else yum install iscsi-initiator-utils -y && sudo systemctl enable iscsid && sudo systemctl start iscsid; fi && if [ $? -eq 0 ]; then echo "iscsi install successfully"; else echo "iscsi install failed error code " $?; fi +spec: + selector: + matchLabels: + app: longhorn-iscsi-installation + template: + metadata: + labels: + app: longhorn-iscsi-installation + spec: + hostNetwork: true + containers: + - name: iscsi-installation + command: + - nsenter + - --mount=/proc/1/ns/mnt + - -- + - sh + - -c + - *cmd + image: alpine:3.7 + securityContext: + privileged: true + hostPID: true From 0e30e1b057c98c7478bffeeb594e05f9653a0480 Mon Sep 17 00:00:00 2001 From: Shuo Wu Date: Fri, 21 Aug 2020 19:29:33 +0800 Subject: [PATCH 12/33] enhancements: Add LEP 'Rebuild replica with existing data' Longhorn #1304 Signed-off-by: Shuo Wu --- ...0821-rebuild-replica-with-existing-data.md | 140 ++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 enhancements/20200821-rebuild-replica-with-existing-data.md diff --git a/enhancements/20200821-rebuild-replica-with-existing-data.md b/enhancements/20200821-rebuild-replica-with-existing-data.md new file mode 100644 index 
0000000..df9d28f --- /dev/null +++ b/enhancements/20200821-rebuild-replica-with-existing-data.md @@ -0,0 +1,140 @@ +# Rebuild replica with existing data + +## Summary +Longhorn could reuse the existing data of failed replicas to speed up rebuild progress as well as save bandwidth. + +### Related Issues +https://github.com/longhorn/longhorn/issues/1304 + +## Motivation +### Goals +1. The (data of) failed replicas can be reused during the replica rebuild. +2. The rebuild won't be blocked when the data of failed replicas are completely corrupted, or there is no existing replica. +3. With the existing data, some of the data transferring can be skipped, and replica rebuild may speed up. + +## Proposal +1. Add a new setting `ReplicaReplenishmentWaitInterval` to delay the replica rebuild. + - If the failed replica currently is unavailable but it may be able to be reused later(we call it potential reusable failed replica), Longhorn may need to delay the new replica replenishment so that there is a chance to reuse this kind of replica. + - For eviction/data locality/new volume cases, a new replica should be recreated immediately hence this setting won't be applied. +2. In order to reuse the existing data, Longhorn can directly reuse the failed replica objects for the rebuild. +3. Add max retry count for the replica rebuild with failed replicas. Otherwise, the rebuild will get stuck of the reusing the failed replicas there if the data of failed replicas are completely corrupted. +4. Add backoff interval for the retry of the failed replica reuse. + +### User Stories +#### Rebuild replica for a large volume after network fluctuation/node reboot +Before the enhancement, there is no chance to reuse the failed replicas on the node, and the rebuild can take a long time with heavy bandwidth usage. + +After the enhancement, the replica rebuild won't start until the new worker nodes with old disks are up. 
Then the failed replicas will be reused during the rebuild, and the rebuild can be pretty fast. + +### User Experience In Detail +Users don't need to do anything except for setting `ReplicaReplenishmentWaitInterval` + +### API Changes +No API change is required. + +## Design +### Implementation Overview +#### longhorn-manager: +1. Add a setting `ReplicaReplenishmentWaitInterval`. + - This will block the rebuilding when there is a failed replica that is temporarily unavailable in the volume. + - Add a field `volume.Status.LastDegradedAt` so that we can determine if `ReplicaReplenishmentWaitInterval` is passed. +2. Add field `Replica.Spec.RebuildRetryCount` to indicate how many times Longhorn tries to reuse this failed replica for the rebuild. +3. In Volume Controller && Replica Scheduler: + 1. Check if there is a reusable failed replica and if the replica reuse is not in the backoff window. If YES, directly try to reuse the failed replica. + 2. Otherwise, replenishing a new replica is required for one of the following cases: + 1. the volume is a new volume (volume.Status.Robustness is Empty) + 2. data locality is required (hardNodeAffinity is not Empty and volume.Status.Robustness is Healthy) + 3. replica eviction happens (volume.Status.Robustness is Healthy) + 4. there is no potential reusable replica + 5. there is a potential reusable replica but the replica replenishment wait interval is passed. + 3. Reuse the failed replica by cleaning up `ReplicaSpec.HealthyAt` and `ReplicaSpec.FailedAt`. And `Replica.Spec.RebuildRetryCount` will be increased by 1. + 4. Clean up the related record in `Replica.Spec.RebuildRetryCount` when the rebuilding replica becomes mode `RW`. + 5. Guarantee the reused failed replica will be stopped before re-launching it. + +### Test Plan +#### Manual Test Plan +##### Rebuild replica for a large volume after network fluctuation/node reboot +1. Set `ReplicaReplenishmentWaitInterval`. Make sure it's longer than the node recovery interval. +2.
Create and attach a large volume. Set a short `staleReplicaTimeout` for the volume, e.g., 1 minute. +3. Write a large amount of data then take a snapshot. +4. Repeat step 3 several times. +5. Reboot/Temporarily disconnect a node contains replica only. +6. According to the `ReplicaReplenishmentWaitInterval` and the node recovery interval: + - Verify the failed replica is reused and there is no new replica for the rebuild after the node recovery. + - Verify the replica rebuild only takes a relatively short time. + +##### Replenish replicas when failed replicas cannot be reused +1. Create and attach a large volume. +2. Write data then take snapshots. +3. Hack into one replica directory and make the directory and files read-only. +4. Crash the related replica process and wait for the replica failure. +5. Wait and check if Longhorn tries to reuse the corrupted replica but always fail. Since there is backoff mechanism, this will take a long time(8 ~ 10min). +6. Check if Longhorn will create a new replica and succeeds to finish the rebuild when the max retry count is reached. +7. Verify the data content. And check if the volume still works fine. + +##### Replenish replicas when failed there is a potential replica and the replenishment wait interval is passed +1. Set `ReplicaReplenishmentWaitInterval` to 60s. +2. Create and attach a large volume. +3. Write data then take snapshots. +4. Shut down a node containing replica only for 60s. +5. Wait and check if Longhorn tries to reuse the failed replica for 2~3 times but always fail. +6. Check if Longhorn will create a new replica once the replenishment wait interval is passed. +7. Verify the data content. And check if the volume still works fine. + +#### Reuse failed replicas for an old degraded volume after live upgrade: +1. Deploy Longhorn v1.0.2. +2. Create and attach a volume. Write data to the volume. +3. Disable scheduling for 1 node. +4. Crash the replica on the node. +5. Upgrade Longhorn to the latest. 
Verify the volume robustness `Degraded`. +6. Enable scheduling for the node. Verify the failed replica of the existing degraded volume will be reused. +7. Verify the data content, and the volume r/w still works fine. + +#### Failed replicas reusage backoff won't block replica replenishment +1. Deploy the latest Longhorn. +2. Create and attach a volume. Write data to the volume. +3. Update `Replica Replenishment Wait Interval` to 60s. +4. Crash a replica: removing the volume head file and creating a directory with the volume head file name. Then the replica reuse will continuously fail. e.g., `rm volume-head-001.img && mkdir volume-head-001.img` +5. Verify: + 1. There is a backoff interval for the failed replica reuse. + 2. A new replica will be created after (around) 60s despite the failed replica reuse is in backoff. + 3. the data content. + 4. the volume r/w still works fine. + +#### Integration Test Plan +##### Reuse the failed replicas when the replica data is messed up +1. Set a long wait interval for setting `replica-replenishment-wait-interval`. +2. Disable the setting soft node anti-affinity. +3. Create and attach a volume. Then write data to the volume. +4. Disable the scheduling for a node. +5. Mess up the data of a random snapshot or the volume head for a replica. Then crash the replica on the node. + --> Verify Longhorn won't create a new replica on the node for the volume. +6. Update setting `replica-replenishment-wait-interval` to a small value. +7. Verify Longhorn starts to create a new replica for the volume. + Notice that the new replica scheduling will fail. +8. Update setting `replica-replenishment-wait-interval` to a large value. +9. Delete the newly created replica. + --> Verify Longhorn won't create a new replica on the node + for the volume. +10. Enable the scheduling for the node. +11. Verify the failed replica (in step 5) will be reused. +12. Verify the volume r/w still works fine. + +#### Reuse the failed replicas with scheduling check +1. 
Set a long wait interval for setting `replica-replenishment-wait-interval`. +2. Disable the setting soft node anti-affinity. +3. Add tags for all nodes and disks. +4. Create and attach a volume with node and disk selectors. Then write data to the volume. +5. Disable the scheduling for the 2 nodes (node1 and node2). +6. Crash the replicas on the node1 and node2. + --> Verify Longhorn won't create new replicas on the nodes. +7. Remove tags for node1 and the related disks. +8. Enable the scheduling for node1 and node2. +9. Verify the only failed replica on node2 is reused. +10. Add the tags back for node1 and the related disks. +11. Verify the failed replica on node1 is reused. +12. Verify the volume r/w still works fine. + +### Upgrade strategy +Need to update `volume.Status.LastDegradedAt` for existing degraded volumes during live upgrade. + From c95e9481397b58e9361dcf00e239d9785d53fc8f Mon Sep 17 00:00:00 2001 From: Phan Le Date: Wed, 9 Sep 2020 14:50:48 -0700 Subject: [PATCH 13/33] enhancement: Add LEP for Prometheus support Longhorn#1180 Signed-off-by: Phan Le --- enhancements/20200909-prometheus-support.md | 462 ++++++++++++++++++++ 1 file changed, 462 insertions(+) create mode 100644 enhancements/20200909-prometheus-support.md diff --git a/enhancements/20200909-prometheus-support.md b/enhancements/20200909-prometheus-support.md new file mode 100644 index 0000000..2407c24 --- /dev/null +++ b/enhancements/20200909-prometheus-support.md @@ -0,0 +1,462 @@ +# Prometheus Support + +## Summary + + +We currently do not have a way for users to monitor and alert about events happen in Longhorn such as volume is full, backup is failed, CPU usage, memory consumption. +This enhancement exports Prometheus metrics so that users can use Prometheus or other monitoring systems to monitor Longhorn. + +### Related Issues + +https://github.com/longhorn/longhorn/issues/1180 + +## Motivation + +### Goals + +We are planing to expose 22 metrics in this release: +1. 
longhorn_volume_capacity_bytes +1. longhorn_volume_actual_size_bytes +1. longhorn_volume_state +1. longhorn_volume_robustness + +1. longhorn_node_status +1. longhorn_node_count_total +1. longhorn_node_cpu_capacity_millicpu +1. longhorn_node_cpu_usage_millicpu +1. longhorn_node_memory_capacity_bytes +1. longhorn_node_memory_usage_bytes +1. longhorn_node_storage_capacity_bytes +1. longhorn_node_storage_usage_bytes +1. longhorn_node_storage_reservation_bytes + +1. longhorn_disk_capacity_bytes +1. longhorn_disk_usage_bytes +1. longhorn_disk_reservation_bytes + +1. longhorn_instance_manager_cpu_usage_millicpu +1. longhorn_instance_manager_cpu_requests_millicpu +1. longhorn_instance_manager_memory_usage_bytes +1. longhorn_instance_manager_memory_requests_bytes + +1. longhorn_manager_cpu_usage_millicpu +1. longhorn_manager_memory_usage_bytes + + + + +See the [User Experience In Detail](#user-experience-in-detail) section for definition of each metric. + +### Non-goals + +We are not planing to expose 6 metrics in this release: +1. longhorn_backup_stats_number_failed_backups +1. longhorn_backup_stats_number_succeed_backups +1. longhorn_backup_stats_backup_status (status for this backup (0=InProgress,1=Done,2=Failed)) +1. longhorn_volume_io_ops +1. longhorn_volume_io_read_throughput +1. longhorn_volume_io_write_throughput + +## Proposal + +### User Stories + +Longhorn already has a great UI with many useful information. +However, Longhorn doesn't have any alert/notification mechanism yet. +Also, we don't have any dashboard or graphing support so that users can have overview picture of the storage system. +This enhancement will address both of the above issues. + +#### Story 1 +In many cases, a problem/issue can be quickly discovered if we have a monitoring dashboard. +For example, there are many times users ask us for supporting and the problems were that the Longhorn engines were killed due to over-use CPU limit. 
+If there is a CPU monitoring dashboard for instance managers, those problems can be quickly detected. + +#### Story 2 +Users want to be notified about abnormal events such as disk space limit approaching. +We can expose metrics providing information about it and users can scrape the metrics and set up an alert system. + +### User Experience In Detail + +After this enhancement is merged, Longhorn exposes metrics at end point `/metrics` in Prometheus' [text-based format](https://prometheus.io/docs/instrumenting/exposition_formats/). +Users can use Prometheus or other monitoring systems to collect those metrics by scraping the end point `/metrics` in longhorn manager. +Then, users can display the collected data using tools such as Grafana. +Users can also set up alerts by using tools such as Prometheus Alertmanager. + +Below are the descriptions of metrics which Longhorn exposes and how users can use them: + +1. longhorn_volume_capacity_bytes + + This metric reports the configured size in bytes for each volume which is managed by the current longhorn manager. + + This metric contains 2 labels (dimensions): + * `node`: the node of the longhorn manager which is managing this volume + * `volume`: the name of this volume + + Example of a sample of this metric could be: + ``` + longhorn_volume_capacity_bytes{node="worker-2",volume="testvol"} 6.442450944e+09 + ``` + Users can use this metric to draw graphs and quickly see the big volumes in the storage system. + +1.
longhorn_volume_actual_size_bytes + + This metric reports the actual space used by each replica of the volume on the corresponding nodes + + This metric contains 2 labels (dimensions): + * `node`: the node of the longhorn manager which is managing this volume + * `volume`: the name of this volume + + Example of a sample of this metric could be: + ``` + longhorn_volume_actual_size_bytes{node="worker-2",volume="testvol"} 1.1917312e+08 + ``` + Users can use this metrics to the actual size occupied on disks of Longhorn volumes + +1. longhorn_volume_state + + This metric reports the state of the volume. The states are: 1=creating, 2=attached, 3=Detached, 4=Attaching, 5=Detaching, 6=Deleting. + + This metric contains 2 labels (dimensions): + * `node`: the node of the longhorn manager which is managing this volume + * `volume`: the name of this volume + + Example of a sample of this metric could be: + ``` + longhorn_volume_state{node="worker-3",volume="testvol1"} 2 + ``` + +1. longhorn_volume_robustness + + This metric reports the robustness of the volume. Possible values are: 0=unknown, 1=healthy, 2=degraded, 3=faulted + + This metric contains 2 labels (dimensions): + * `node`: the node of the longhorn manager which is managing this volume + * `volume`: the name of this volume + + Example of a sample of this metric could be: + ``` + longhorn_volume_robustness{node="worker-3",volume="testvol1"} 1 + ``` + +1. longhorn_node_status + + This metric reports the `ready`, `schedulable`, `mountPropagation` condition for the current node. 
+ + This metric contains 3 labels (dimensions): + * `node` + * `condition`: the name of the condition (`ready`, `schedulable`, `mountPropagation`) + * `condition_reason` + + Example of a sample of this metric could be: + ``` + longhorn_node_status{condition="allowScheduling",condition_reason="",node="worker-3"} 1 + longhorn_node_status{condition="mountpropagation",condition_reason="",node="worker-3"} 1 + longhorn_node_status{condition="ready",condition_reason="",node="worker-3"} 1 + longhorn_node_status{condition="schedulable",condition_reason="",node="worker-3"} 1 + ``` + Users can use this metrics to setup alert about node status. + +1. longhorn_node_count_total + + This metric reports the total nodes in Longhorn system. + + Example of a sample of this metric could be: + ``` + longhorn_node_count_total 3 + ``` + Users can use this metric to detect the number of down nodes + +1. longhorn_node_cpu_capacity_millicpu + + Report the maximum allocatable cpu on this node + + Example of a sample of this metric could be: + ``` + longhorn_node_cpu_capacity_millicpu{node="worker-3"} 2000 + ``` + +1. longhorn_node_cpu_usage_millicpu + + Report the cpu usage on this node + + Example of a sample of this metric could be: + ``` + longhorn_node_cpu_usage_millicpu{node="worker-3"} 149 + ``` + +1. longhorn_node_memory_capacity_bytes + + Report the maximum allocatable memory on this node + + Example of a sample of this metric could be: + ``` + longhorn_node_memory_capacity_bytes{node="worker-3"} 4.031217664e+09 + ``` + +1. longhorn_node_memory_usage_bytes + + Report the memory usage on this node + + Example of a sample of this metric could be: + ``` + longhorn_node_memory_usage_bytes{node="worker-3"} 1.643794432e+09 + ``` + +1. longhorn_node_storage_capacity_bytes + + Report the storage capacity of this node + + Example of a sample of this metric could be: + ``` + longhorn_node_storage_capacity_bytes{node="worker-3"} 8.3987283968e+10 + ``` + +1. 
longhorn_node_storage_usage_bytes + + Report the used storage of this node + + Example of a sample of this metric could be: + ``` + longhorn_node_storage_usage_bytes{node="worker-3"} 9.060212736e+09 + ``` + +1. longhorn_node_storage_reservation_bytes + + Report the reserved storage for other applications and system on this node + + Example of a sample of this metric could be: + ``` + longhorn_node_storage_reservation_bytes{node="worker-3"} 2.519618519e+10 + ``` + +1. longhorn_disk_capacity_bytes + + Report the storage capacity of this disk. + + Example of a sample of this metric could be: + ``` + longhorn_disk_capacity_bytes{disk="default-disk-8b28ee3134628183",node="worker-3"} 8.3987283968e+10 + ``` + +1. longhorn_disk_usage_bytes + + Report the used storage of this disk + + Example of a sample of this metric could be: + ``` + longhorn_disk_usage_bytes{disk="default-disk-8b28ee3134628183",node="worker-3"} 9.060212736e+09 + ``` + +1. longhorn_disk_reservation_bytes + + Report the reserved storage for other applications and system on this disk + + Example of a sample of this metric could be: + ``` + longhorn_disk_reservation_bytes{disk="default-disk-8b28ee3134628183",node="worker-3"} 2.519618519e+10 + ``` + +1. longhorn_instance_manager_cpu_requests_millicpu + + This metric reports the requested CPU resources in Kubernetes of the Longhorn instance managers on the current node. + The unit of this metric is milliCPU. See more about the unit at https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/#cpu-units + + This metric contains 3 labels (dimensions): + * `node` + * `instance_manager` + * `instance_manager_type` + + Example of a sample of this metric could be: + ``` + longhorn_instance_manager_cpu_requests_millicpu{instance_manager="instance-manager-r-61ffe369",instance_manager_type="replica",node="worker-3"} 250 + ``` + +1. 
longhorn_instance_manager_cpu_usage_millicpu + + This metric reports the CPU usage of the Longhorn instance managers on the current node. + The unit of this metric is milliCPU. See more about the unit at https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/#cpu-units + + This metric contains 3 labels (dimensions): + * `node` + * `instance_manager` + * `instance_manager_type` + + Example of a sample of this metric could be: + ``` + longhorn_instance_manager_cpu_usage_millicpu{instance_manager="instance-manager-r-61ffe369",instance_manager_type="replica",node="worker-3"} 0 + ``` + +1. longhorn_instance_manager_memory_requests_bytes + + This metric reports the requested memory in Kubernetes of the Longhorn instance managers on the current node. + + This metric contains 3 labels (dimensions): + * `node` + * `instance_manager` + * `instance_manager_type` + + Example of a sample of this metric could be: + ``` + longhorn_instance_manager_memory_requests_bytes{instance_manager="instance-manager-e-0a67975b",instance_manager_type="engine",node="worker-3"} 0 + ``` + +1. longhorn_instance_manager_memory_usage_bytes + + This metric reports the memory usage of the Longhorn instance managers on the current node. + + This metric contains 3 labels (dimensions): + * `node` + * `instance_manager` + * `instance_manager_type` + + Example of a sample of this metric could be: + ``` + longhorn_instance_manager_memory_usage_bytes{instance_manager="instance-manager-e-0a67975b",instance_manager_type="engine",node="worker-3"} 1.374208e+07 + ``` + +1. longhorn_manager_cpu_usage_millicpu + + This metric reports the CPU usage of the Longhorn manager on the current node. + The unit of this metric is milliCPU.
See more about the unit at https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/#cpu-units + + This metric contains 2 labels (dimensions): + * `node` + * `manager` + + Example of a sample of this metric could be: + ``` + longhorn_manager_cpu_usage_millicpu{manager="longhorn-manager-x5cjj",node="phan-cluster-23-worker-3"} 15 + ``` + +1. longhorn_manager_memory_usage_bytes + + This metric reports the memory usage of the Longhorn manager on the current node. + + This metric contains 2 labels (dimensions): + * `node` + * `manager` + + Example of a sample of this metric could be: + ``` + longhorn_manager_memory_usage_bytes{manager="longhorn-manager-x5cjj",node="worker-3"} 2.7979776e+07 + ``` + +### API changes +We add a new end point `/metrics` to exposes all longhorn Prometheus metrics. +## Design + +### Implementation Overview +We follow the [Prometheus best practice](https://prometheus.io/docs/instrumenting/writing_exporters/#deployment), each Longhorn manager reports information about the components it manages. +Prometheus can use service discovery mechanisim to find all longhorn-manager pods in longhorn-backend service. + +We create a new collector for each type (volumeCollector, backupCollector, nodeCollector, etc..) and have a common baseCollector. +This structure is similar to the controller package: we have volumeController, nodeController, etc.. which have a common baseController. +The end result is a structure like a tree: +``` +a custom registry <- many custom collectors share the same base collector <- many metrics in each custom collector +``` +When a scrape request is made to endpoint `/metric`, a handler gathers data in the Longhorn custom registry, which in turn gathers data in custom collectors, which in turn gathers data in all metrics. + +Below are how we collect data for each metric: + +1. longhorn_volume_capacity_bytes + + We get the information about volumes' capacity by reading volume CRD from datastore. 
+ When a volume moves to a different node, the current longhorn manager stops reporting the vol. + The volume will be reported by a new longhorn manager. + +1. longhorn_volume_actual_size_bytes + + We get the information about volumes' actual size by reading volume CRD from datastore. + When a volume moves to a different node, the current longhorn manager stops reporting the vol. + The volume will be reported by a new longhorn manager. + +1. longhorn_volume_state + + We get the information about volumes' state by reading volume CRD from datastore. + +1. longhorn_volume_robustness + + We get the information about volumes' robustness by reading volume CRD from datastore. + +1. longhorn_node_status + + We get the information about node status by reading node CRD from datastore. + Nodes don't move like volumes, so we don't have to decide which longhorn manager reports which node. + +1. longhorn_node_count_total + + We get the information about the total number of nodes by reading from datastore + +1. longhorn_node_cpu_capacity_millicpu + + We get the information about the maximum allocatable cpu on this node by reading Kubernetes node resource + +1. longhorn_node_cpu_usage_millicpu + + We get the information about the cpu usage on this node from metric client + +1. longhorn_node_memory_capacity_bytes + + We get the information about the maximum allocatable memory on this node by reading Kubernetes node resource + +1. longhorn_node_memory_usage_bytes + + We get the information about the memory usage on this node from metric client + +1. longhorn_node_storage_capacity_bytes + + We get the information by reading node CRD from datastore + +1. longhorn_node_storage_usage_bytes + + We get the information by reading node CRD from datastore + +1. longhorn_node_storage_reservation_bytes + + We get the information by reading node CRD from datastore + +1. longhorn_disk_capacity_bytes + + We get the information by reading node CRD from datastore + +1.
longhorn_disk_usage_bytes + + We get the information by reading node CRD from datastore + +1. longhorn_disk_reservation_bytes + + We get the information by reading node CRD from datastore + +1. longhorn_instance_manager_cpu_requests_millicpu + + We get the information by reading instance manager Pod objects from datastore. + +1. longhorn_instance_manager_cpu_usage_millicpu + + We get the information by using kubernetes metric client. + +1. longhorn_instance_manager_memory_usage_bytes + + We get the information by using kubernetes metric client. + +1. longhorn_instance_manager_memory_requests_bytes + + We get the information by reading instance manager Pod objects from datastore. + +1. longhorn_manager_cpu_usage_millicpu + + We get the information by using kubernetes metric client. + +1. longhorn_manager_memory_usage_bytes + + We get the information by using kubernetes metric client. + + +### Test plan + +The manual test plan is detailed at [here](https://github.com/longhorn/longhorn-tests/blob/master/docs/content/manual/release-specific/v1.1.0/prometheus_support.md) + +### Upgrade strategy + +This enhancement doesn't require any upgrade. 
From bf7a7ab92a39c277c12a83057b104790f19b64ed Mon Sep 17 00:00:00 2001 From: Shuo Wu Date: Fri, 6 Nov 2020 20:14:04 +0800 Subject: [PATCH 14/33] enhancement: Add a new enhancement 'disk-reconnection' Longhorn #1269 Signed-off-by: Shuo Wu --- enhancements/20201106-disk-reconnection.md | 104 +++++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 enhancements/20201106-disk-reconnection.md diff --git a/enhancements/20201106-disk-reconnection.md b/enhancements/20201106-disk-reconnection.md new file mode 100644 index 0000000..382d928 --- /dev/null +++ b/enhancements/20201106-disk-reconnection.md @@ -0,0 +1,104 @@ +# Disk Reconnection + +## Summary +When disks are reconnected/migrated to other Longhorn nodes, Longhorn should be able to figure out the disk reconnection and update the node ID as well as the data path for the related replicas (including failed replicas). + +### Related Issues +https://github.com/longhorn/longhorn/issues/1269 + +## Motivation +### Goals +The goal of this feature is to reuse the existing data of the failed replica when the corresponding disk is back. + +### Non-Goals +As for how to reuse the existing data and handle rebuild related feature, it is already implemented in #1304, which is not the intention of this enhancement. + +## Proposal +Identifying the disk that is previously used in Longhorn is not the the key point. The essential of this feature is that Longhorn should know where to reuse existing data of all related replicas when the disk is reconnected. +In other words, the fields that indicating the replica data position should be updated when the disk is reconnected. + +### User Stories +#### Migrate the existing disks to new nodes +Before the enhancement, there is no way to reuse the existing data when a disk is reconnected/migrated. + +After the enhancement, this can be done by: +1. detach the volumes using the disk +2. Reconnect the disk to the another node (both nodes keep running) +3. 
reattach the related volumes + +#### Scale down the node but reuse the disks on the node +Before the enhancement, there is no chance to reuse the failed replicas on the node. + +After the enhancement, Longhorn will update the path and node id for all failed replicas using the disks, then Longhorn can reuse the failed replicas during rebuilding. + +### User Experience In Detail +#### Migrate the existing disks to new nodes +1. Detach all related volumes using the disk before the disk migration. +2. Directly move the disk to the new node (physically or in cloud vendor) and mount the disk. +3. Add the disk with the new mount point to the corresponding new Longhorn node in Longhorn Node page. +4. Attach the volumes for the workloads. + +#### Scale down the node but reuse the disks on the node +1. Directly shut down the node when there are replicas on the node. Then the replicas on the node will fail. +2. Move the disks on the down node to other running nodes (physically or in cloud vendor). +3. Add the disk with the new mount point to the corresponding new Longhorn node in Longhorn Node page. +4. Wait then verify the failed replicas using the disk will be reused, and the node ID & path info will be updated. + +### API Changes +There is no API change. + +## Design +### Implementation Overview +#### longhorn-manager: +1. When a disk is ready, Longhorn can list all related replicas via `replica.Spec.DiskID` then sync up node ID and path info for these replicas. + - If a disk is not ready, the scheduling info will be cleaned up. Longhorn won't be confused of updating replicas if multiple disconnected disks using the same Disk UUID. + - Need to add a disk related label for replicas. +2. Store DiskUUID rather than the disk name in `replica.Spec.DiskID` + - Need to update `DiskID` for existing replicas during upgrade. +3. Since the disk path of a replica may get changed but the data directory name is immutable. 
It's better to split `replica.Spec.DataPath` to `replica.Spec.DiskPath` and `replica.Spec.DataDirectoryName`. Then it's more convenient to sync up the disk path for replicas. + - Need to update the path fields for existing replicas during upgrade. + +### Test Plan +#### Integration Tests +##### Disk migration +1. Disable the node soft anti-affinity. +2. Create a new host disk. +3. Disable the default disk and add the extra disk with scheduling enabled for the current node. +4. Launch a Longhorn volume with 1 replica. + Then verify the only replica is scheduled to the new disk. +5. Write random data to the volume then verify the data. +6. Detach the volume. +7. Unmount then remount the disk to another path. (disk migration) +8. Create another Longhorn disk based on the migrated path. +9. Verify the Longhorn disk state. + - The Longhorn disk added before the migration should become "unschedulable". + - The Longhorn disk created after the migration should become "schedulable". +10. Verify the replica DiskID and the path is updated. +11. Attach the volume. Then verify the state and the data. + +#### Manual Tests +##### Some Longhorn worker nodes in AWS Auto Scaling group is in replacement +1. Set `ReplicaReplenishmentWaitInterval`. Make sure it's longer than the time needs for node replacement. +2. Launch a Kubernetes cluster with the nodes in AWS Auto Scaling group. Then Deploy Longhorn. +3. Deploy some workloads using Longhorn volumes. +4. Wait for/Trigger the ASG instance replacement. +5. Verify new replicas won't be created before reaching `ReplicaReplenishmentWaitInterval`. +6. Verify the failed replicas are reused after the node recovery. +7. Verify if workloads still work fine with the volumes after the recovery. + +##### Longhorn upgrade with node down and removal +1. Launch Longhorn v1.0.x +2. Create and attach a volume, then write data to the volume. +3. Directly remove a Kubernetes node, and shut down a node. +4. Wait for the related replicas failure. 
Then record `replica.Spec.DiskID` for the failed replicas. +5. Upgrade to Longhorn master +6. Verify the Longhorn node related to the removed node is gone. +7. Verify + 1. `replica.Spec.DiskID` on the down node is updated and the field of the replica on the gone node is unchanged. + 2. `replica.Spec.DataPath` for all replicas becomes empty. +8. Remove all unscheduled replicas. +9. Power on the down node. Wait for the failed replica on the down node being reused. +10. Wait for a new replica being replenished and available. + +### Upgrade strategy +Need to update disk ID and data path for existing replicas. From dcc4ac54cb42101447f48551469401780e5e2ecb Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 12 Nov 2020 08:46:07 -0800 Subject: [PATCH 15/33] Update README.md Remove the Astronomer badge since it was archived by the author. Signed-off-by: Sheng Yang --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1a87dc3..096b713 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Longhorn [![Astronomer](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Fastronomer.ullaakut.eu%2Fshields%3Fowner%3Dlonghorn%26name%3Dlonghorn)](https://github.com/Ullaakut/astronomer) +# Longhorn ### Build Status * Engine: [![Build Status](https://drone-publish.longhorn.io/api/badges/longhorn/longhorn-engine/status.svg)](https://drone-publish.longhorn.io/longhorn/longhorn-engine) [![Go Report Card](https://goreportcard.com/badge/github.com/longhorn/longhorn-engine)](https://goreportcard.com/report/github.com/longhorn/longhorn-engine) From 82f774c59428c948557d27e7378e0754b6b62571 Mon Sep 17 00:00:00 2001 From: William Jimenez Date: Fri, 13 Nov 2020 18:55:22 -0800 Subject: [PATCH 16/33] Update README.md highlight community events Signed-off-by: William Jimenez --- README.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 096b713..eccc930 100644 --- a/README.md +++ b/README.md 
@@ -8,7 +8,7 @@ * Test: [![Build Status](http://drone-publish.longhorn.io/api/badges/longhorn/longhorn-tests/status.svg)](http://drone-publish.longhorn.io/longhorn/longhorn-tests) ### Overview -Longhorn is a distributed block storage system for Kubernetes. +Longhorn is a distributed block storage system for Kubernetes. Longhorn is cloud native storage because it is built using Kubernetes and container primatives. Longhorn is lightweight, reliable, and powerful. You can install Longhorn on an existing Kubernetes cluster with one `kubectl apply` command or using Helm charts. Once Longhorn is installed, it adds persistent volume support to the Kubernetes cluster. @@ -23,6 +23,13 @@ Longhorn implements distributed block storage using containers and microservices You can read more technical details of Longhorn [here](https://longhorn.io/). + +## Get Involved +**Community Meeting and Office Hours**!: Hosted by the core maintainers of Longhorn: 2nd Friday of the every month at 09:00 Pacific Time (PT)/12:00 Eastern Time (ET) on Zoom: http://bit.ly/longhorn-community-meeting. Gcal event: http://bit.ly/longhorn-events +**Longhorn Mailing List**!: Stay up to date on the latest news and events: https://lists.cncf.io/g/cncf-longhorn + +You can read more about the community and its events here: https://github.com/longhorn/community + ## Current status The latest release of Longhorn is **v1.0.2**. From e6b58b5102b881141937299f6f2383c2cd850db9 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 19 Nov 2020 15:48:45 -0800 Subject: [PATCH 17/33] Update bug_report.md Add more questions regarding the node config and underlying infrastructure. 
Signed-off-by: Sheng Yang --- .github/ISSUE_TEMPLATE/bug_report.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index b39b446..b653681 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -27,7 +27,13 @@ If applicable, add the Longhorn managers' log when the issue happens. **Environment:** - Longhorn version: - Kubernetes version: - - Node OS type and version: + - Node config + - OS type and version + - CPU per node: + - Memory per node: + - Disk type + - Network bandwidth and latency between the nodes: + - Underlying Infrastructure (e.g. on AWS/GCE, EKS/GKE, VMWare/KVM, Baremetal): **Additional context** Add any other context about the problem here. From 2d6c06a791cd051151636645ca5167d9c1a176b1 Mon Sep 17 00:00:00 2001 From: Phan Le Date: Thu, 3 Dec 2020 16:02:41 -0800 Subject: [PATCH 18/33] Fix crash loop error in longhorn-iscsi-installation We install iscsi in the init container then sleep in the main container. 
This avoids crash loop after finishing installing iscsi Longhorn #1741 Signed-off-by: Phan Le --- deploy/iscsi/longhorn-iscsi-installation.yaml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/deploy/iscsi/longhorn-iscsi-installation.yaml b/deploy/iscsi/longhorn-iscsi-installation.yaml index 8e2a157..02d201c 100644 --- a/deploy/iscsi/longhorn-iscsi-installation.yaml +++ b/deploy/iscsi/longhorn-iscsi-installation.yaml @@ -16,7 +16,8 @@ spec: app: longhorn-iscsi-installation spec: hostNetwork: true - containers: + hostPID: true + initContainers: - name: iscsi-installation command: - nsenter @@ -28,4 +29,8 @@ spec: image: alpine:3.7 securityContext: privileged: true - hostPID: true + containers: + - name: sleep + image: k8s.gcr.io/pause:3.1 + updateStrategy: + type: RollingUpdate From 8bcf391dae739f62a47c203ca3ae7ea822e5b9f9 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Sun, 6 Dec 2020 09:22:16 -0800 Subject: [PATCH 19/33] Update README.md Update the build badge for Share Manager. 
Signed-off-by: Sheng Yang --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index eccc930..1b816d6 100644 --- a/README.md +++ b/README.md @@ -2,8 +2,9 @@ ### Build Status * Engine: [![Build Status](https://drone-publish.longhorn.io/api/badges/longhorn/longhorn-engine/status.svg)](https://drone-publish.longhorn.io/longhorn/longhorn-engine) [![Go Report Card](https://goreportcard.com/badge/github.com/longhorn/longhorn-engine)](https://goreportcard.com/report/github.com/longhorn/longhorn-engine) -* Instance Manager: [![Build Status](http://drone-publish.longhorn.io/api/badges/longhorn/longhorn-instance-manager/status.svg)](http://drone-publish.longhorn.io/longhorn/longhorn-instance-manager)[![Go Report Card](https://goreportcard.com/badge/github.com/longhorn/longhorn-instance-manager)](https://goreportcard.com/report/github.com/longhorn/longhorn-instance-manager) * Manager: [![Build Status](https://drone-publish.longhorn.io/api/badges/longhorn/longhorn-manager/status.svg)](https://drone-publish.longhorn.io/longhorn/longhorn-manager)[![Go Report Card](https://goreportcard.com/badge/github.com/longhorn/longhorn-manager)](https://goreportcard.com/report/github.com/longhorn/longhorn-manager) +* Instance Manager: [![Build Status](http://drone-publish.longhorn.io/api/badges/longhorn/longhorn-instance-manager/status.svg)](http://drone-publish.longhorn.io/longhorn/longhorn-instance-manager)[![Go Report Card](https://goreportcard.com/badge/github.com/longhorn/longhorn-instance-manager)](https://goreportcard.com/report/github.com/longhorn/longhorn-instance-manager) +* Share Manager: [![Build Status](http://drone-publish.longhorn.io/api/badges/longhorn/longhorn-share-manager/status.svg)](http://drone-publish.longhorn.io/longhorn/longhorn-share-manager)[![Go Report Card](https://goreportcard.com/badge/github.com/longhorn/longhorn-share-manager)](https://goreportcard.com/report/github.com/longhorn/longhorn-share-manager) * UI: 
[![Build Status](https://drone-publish.longhorn.io/api/badges/longhorn/longhorn-ui/status.svg)](https://drone-publish.longhorn.io/longhorn/longhorn-ui) * Test: [![Build Status](http://drone-publish.longhorn.io/api/badges/longhorn/longhorn-tests/status.svg)](http://drone-publish.longhorn.io/longhorn/longhorn-tests) From 9cf7cc57aca2026f80701f42ef5d6d92460bae26 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Mon, 7 Dec 2020 18:35:55 -0800 Subject: [PATCH 20/33] Update question.md Signed-off-by: Sheng Yang --- .github/ISSUE_TEMPLATE/question.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md index 8488260..783fb15 100644 --- a/.github/ISSUE_TEMPLATE/question.md +++ b/.github/ISSUE_TEMPLATE/question.md @@ -6,5 +6,18 @@ labels: question assignees: '' --- +**Question** +**Environment:** + - Longhorn version: + - Kubernetes version: + - Node config + - OS type and version + - CPU per node: + - Memory per node: + - Disk type + - Network bandwidth and latency between the nodes: + - Underlying Infrastructure (e.g. on AWS/GCE, EKS/GKE, VMWare/KVM, Baremetal): +**Additional context** +Add any other context about the problem here. From d62bcccfc793200939d604e406644750d4a1cfe9 Mon Sep 17 00:00:00 2001 From: Shuo Wu Date: Fri, 11 Dec 2020 20:29:31 +0800 Subject: [PATCH 21/33] chart: Remove the annotation which causes CRDs not removed during uninstallation Will find another way to migrate the CRD file later. Longhorn #2035 Signed-off-by: Shuo Wu --- chart/templates/crds.yaml | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/chart/templates/crds.yaml b/chart/templates/crds.yaml index 78635e5..04cf9ed 100644 --- a/chart/templates/crds.yaml +++ b/chart/templates/crds.yaml @@ -3,8 +3,6 @@ kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . 
| nindent 4 }} longhorn-manager: Engine - annotations: - helm.sh/resource-policy: keep name: engines.longhorn.io spec: group: longhorn.io @@ -56,8 +54,6 @@ kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . | nindent 4 }} longhorn-manager: Replica - annotations: - helm.sh/resource-policy: keep name: replicas.longhorn.io spec: group: longhorn.io @@ -113,8 +109,6 @@ kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . | nindent 4 }} longhorn-manager: Setting - annotations: - helm.sh/resource-policy: keep name: settings.longhorn.io spec: group: longhorn.io @@ -147,8 +141,6 @@ kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . | nindent 4 }} longhorn-manager: Volume - annotations: - helm.sh/resource-policy: keep name: volumes.longhorn.io spec: group: longhorn.io @@ -204,8 +196,6 @@ kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . | nindent 4 }} longhorn-manager: EngineImage - annotations: - helm.sh/resource-policy: keep name: engineimages.longhorn.io spec: group: longhorn.io @@ -257,8 +247,6 @@ kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . | nindent 4 }} longhorn-manager: Node - annotations: - helm.sh/resource-policy: keep name: nodes.longhorn.io spec: group: longhorn.io @@ -306,8 +294,6 @@ kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . | nindent 4 }} longhorn-manager: InstanceManager - annotations: - helm.sh/resource-policy: keep name: instancemanagers.longhorn.io spec: group: longhorn.io @@ -355,8 +341,6 @@ kind: CustomResourceDefinition metadata: labels: {{- include "longhorn.labels" . 
| nindent 4 }} longhorn-manager: ShareManager - annotations: - helm.sh/resource-policy: keep name: sharemanagers.longhorn.io spec: group: longhorn.io From 2eeb93afb50b890eb91ee0564408446c761a8b68 Mon Sep 17 00:00:00 2001 From: Shuo Wu Date: Fri, 11 Dec 2020 20:15:02 +0800 Subject: [PATCH 22/33] chart: Correct the inconsistent for question.yml Longhorn #2082 Signed-off-by: Shuo Wu --- chart/questions.yml | 42 ++++++++++++++++++++++++++++++++---------- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/chart/questions.yml b/chart/questions.yml index bb36330..b094878 100644 --- a/chart/questions.yml +++ b/chart/questions.yml @@ -58,6 +58,18 @@ questions: type: string label: Longhorn Instance Manager Image Tag group: "Longhorn Images Settings" + - variable: image.longhorn.shareManager.repository + default: longhornio/longhorn-share-manager + description: "Specify Longhorn Share Manager Image Repository" + type: string + label: Longhorn Share Manager Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.shareManager.tag + default: v1_20201204 + description: "Specify Longhorn Share Manager Image Tag" + type: string + label: Longhorn Share Manager Image Tag + group: "Longhorn Images Settings" - variable: image.csi.attacher.repository default: longhornio/csi-attacher description: "Specify CSI attacher image repository. Leave blank to autodetect." @@ -158,7 +170,7 @@ questions: group: "Longhorn CSI Driver Settings" - variable: csi.attacherReplicaCount type: int - default: + default: 3 min: 1 max: 10 description: "Specify replica count of CSI Attacher. By default 3." @@ -166,7 +178,7 @@ questions: group: "Longhorn CSI Driver Settings" - variable: csi.provisionerReplicaCount type: int - default: + default: 3 min: 1 max: 10 description: "Specify replica count of CSI Provisioner. By default 3." 
@@ -174,7 +186,7 @@ questions: group: "Longhorn CSI Driver Settings" - variable: csi.resizerReplicaCount type: int - default: + default: 3 min: 1 max: 10 description: "Specify replica count of CSI Resizer. By default 3." @@ -182,7 +194,7 @@ questions: group: "Longhorn CSI Driver Settings" - variable: csi.snapshotterReplicaCount type: int - default: + default: 3 min: 1 max: 10 description: "Specify replica count of CSI Snapshotter. By default 3." @@ -269,10 +281,13 @@ The available modes are: default: 3 - variable: defaultSettings.guaranteedEngineCPU label: Guaranteed Engine CPU - description: 'Allow Longhorn Instance Managers to have guaranteed CPU allocation. The value is how many CPUs should be reserved for each Engine/Replica Instance Manager Pod created by Longhorn. For example, 0.1 means one-tenth of a CPU. This will help maintain engine stability during high node workload. It only applies to the Engine/Replica Manager Pods created after the setting took effect. -WARNING: After this setting is changed, all the instance managers on all the nodes will be automatically restarted. -WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. -By default 0.25.' + description: "Allow Longhorn Instance Managers to have guaranteed CPU allocation. By default 0.25. The value is how many CPUs should be reserved for each Engine/Replica Instance Manager Pod created by Longhorn. For example, 0.1 means one-tenth of a CPU. This will help maintain engine stability during high node workload. It only applies to the Engine/Replica Instance Manager Pods created after the setting took effect. +In order to prevent unexpected volume crash, you can use the following formula to calculate an appropriate value for this setting: +'Guaranteed Engine CPU = The estimated max Longhorn volume/replica count on a node * 0.1'. +The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. 
To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting. +If it's hard to estimate the volume/replica count now, you can leave it with the default value, or allocate 1/8 of total CPU of a node. Then you can tune it when there is no running workload using Longhorn volumes. +WARNING: After this setting is changed, all the instance managers on all the nodes will be automatically restarted +WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES." group: "Longhorn Default Settings" type: float default: 0.25 @@ -291,8 +306,15 @@ By default 0.25.' default: 300 - variable: defaultSettings.taintToleration label: Kubernetes Taint Toleration - description: 'To dedicate nodes to store Longhorn replicas and reject other general workloads, set tolerations for Longhorn and add taints for the storage nodes. All Longhorn volumes should be detached before modifying toleration settings. We recommend setting tolerations during Longhorn deployment because the Longhorn system cannot be operated during the update. Multiple tolerations can be set here, and these tolerations are separated by semicolon. For example, `key1=value1:NoSchedule; key2:NoExecute`. Because `kubernetes.io` is used as the key of all Kubernetes default tolerations, it should not be used in the toleration settings. -WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES.' + description: "To dedicate nodes to store Longhorn replicas and reject other general workloads, set tolerations for Longhorn and add taints for the storage nodes. +All Longhorn volumes should be detached before modifying toleration settings. +We recommend setting tolerations during Longhorn deployment because the Longhorn system cannot be operated during the update. +Multiple tolerations can be set here, and these tolerations are separated by semicolon. 
For example: +* `key1=value1:NoSchedule; key2:NoExecute` +* `:` this toleration tolerates everything because an empty key with operator `Exists` matches all keys, values and effects +* `key1=value1:` this toleration has empty effect. It matches all effects with key `key1` +Because `kubernetes.io` is used as the key of all Kubernetes default tolerations, it should not be used in the toleration settings. +WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES!" group: "Longhorn Default Settings" type: string default: "" From 5ec0c5a261f4b0efb9f65f4a11a194279b491cff Mon Sep 17 00:00:00 2001 From: Shuo Wu Date: Wed, 9 Dec 2020 14:47:13 +0800 Subject: [PATCH 23/33] chart: Update Kubernetes version requirement for chart Since we update the CRD apiVersion to apiextensions.k8s.io/v1, the minimal Kubernetes version requirement is v1.16 now. Longhorn #2061 Signed-off-by: Shuo Wu --- chart/Chart.yaml | 2 +- chart/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/chart/Chart.yaml b/chart/Chart.yaml index 9a3a78d..2172538 100644 --- a/chart/Chart.yaml +++ b/chart/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 name: longhorn version: 1.1.0-rc1 appVersion: v1.1.0-rc1 -kubeVersion: ">=v1.14.0-r0" +kubeVersion: ">=v1.16.0-r0" description: Longhorn is a distributed block storage system for Kubernetes. keywords: - longhorn diff --git a/chart/README.md b/chart/README.md index 13e6312..79d07dc 100644 --- a/chart/README.md +++ b/chart/README.md @@ -16,7 +16,7 @@ Longhorn is 100% open source software. Project source code is spread across a nu ## Prerequisites 1. Docker v1.13+ -2. Kubernetes v1.15+ +2. Kubernetes v1.16+ 3. Make sure `curl`, `findmnt`, `grep`, `awk` and `blkid` has been installed in all nodes of the Kubernetes cluster. 4. Make sure `open-iscsi` has been installed in all nodes of the Kubernetes cluster. For GKE, recommended Ubuntu as guest OS image since it contains `open-iscsi` already. 
From 3da26d195e2a48a79a9eafc05f307b862e9a9c9f Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Mon, 14 Dec 2020 20:07:24 -0800 Subject: [PATCH 24/33] Sync with manager commit eb98fc29d8ab37ec3c0650150a75d73ed22a4f93 Author: Sheng Yang Date: Mon Dec 14 19:29:12 2020 -0800 Longhorn v1.1.0-rc2 release Signed-off-by: Sheng Yang Signed-off-by: Sheng Yang --- deploy/longhorn-images.txt | 6 +++--- deploy/longhorn.yaml | 20 +++++++------------- deploy/release-images.txt | 6 +++--- uninstall/uninstall.yaml | 6 +++--- 4 files changed, 16 insertions(+), 22 deletions(-) diff --git a/deploy/longhorn-images.txt b/deploy/longhorn-images.txt index 515e79f..f545f54 100644 --- a/deploy/longhorn-images.txt +++ b/deploy/longhorn-images.txt @@ -1,8 +1,8 @@ -longhornio/longhorn-engine:v1.1.0-rc1 +longhornio/longhorn-engine:master longhornio/longhorn-instance-manager:v1_20201021 longhornio/longhorn-share-manager:v1_20201204 -longhornio/longhorn-manager:v1.1.0-rc1 -longhornio/longhorn-ui:v1.1.0-rc1 +longhornio/longhorn-manager:master +longhornio/longhorn-ui:master longhornio/csi-attacher:v2.2.1-lh1 longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 diff --git a/deploy/longhorn.yaml b/deploy/longhorn.yaml index 1edcad1..8febee9 100644 --- a/deploy/longhorn.yaml +++ b/deploy/longhorn.yaml @@ -593,7 +593,7 @@ spec: spec: containers: - name: longhorn-manager - image: longhornio/longhorn-manager:v1.1.0-rc1 + image: longhornio/longhorn-manager:master imagePullPolicy: IfNotPresent securityContext: privileged: true @@ -602,13 +602,13 @@ spec: - -d - daemon - --engine-image - - longhornio/longhorn-engine:v1.1.0-rc1 + - longhornio/longhorn-engine:master - --instance-manager-image - longhornio/longhorn-instance-manager:v1_20201021 - --share-manager-image - longhornio/longhorn-share-manager:v1_20201204 - --manager-image - - longhornio/longhorn-manager:v1.1.0-rc1 + - longhornio/longhorn-manager:master - --service-account - longhorn-service-account ports: @@ -622,9 +622,6 @@ spec: 
mountPath: /host/dev/ - name: proc mountPath: /host/proc/ - - name: varrun - mountPath: /var/run/ - mountPropagation: Bidirectional - name: longhorn mountPath: /var/lib/longhorn/ mountPropagation: Bidirectional @@ -653,9 +650,6 @@ spec: - name: proc hostPath: path: /proc/ - - name: varrun - hostPath: - path: /var/run/ - name: longhorn hostPath: path: /var/lib/longhorn/ @@ -705,7 +699,7 @@ spec: spec: containers: - name: longhorn-ui - image: longhornio/longhorn-ui:v1.1.0-rc1 + image: longhornio/longhorn-ui:master imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -752,18 +746,18 @@ spec: spec: initContainers: - name: wait-longhorn-manager - image: longhornio/longhorn-manager:v1.1.0-rc1 + image: longhornio/longhorn-manager:master command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] containers: - name: longhorn-driver-deployer - image: longhornio/longhorn-manager:v1.1.0-rc1 + image: longhornio/longhorn-manager:master imagePullPolicy: IfNotPresent command: - longhorn-manager - -d - deploy-driver - --manager-image - - longhornio/longhorn-manager:v1.1.0-rc1 + - longhornio/longhorn-manager:master - --manager-url - http://longhorn-backend:9500/v1 env: diff --git a/deploy/release-images.txt b/deploy/release-images.txt index 515e79f..f545f54 100644 --- a/deploy/release-images.txt +++ b/deploy/release-images.txt @@ -1,8 +1,8 @@ -longhornio/longhorn-engine:v1.1.0-rc1 +longhornio/longhorn-engine:master longhornio/longhorn-instance-manager:v1_20201021 longhornio/longhorn-share-manager:v1_20201204 -longhornio/longhorn-manager:v1.1.0-rc1 -longhornio/longhorn-ui:v1.1.0-rc1 +longhornio/longhorn-manager:master +longhornio/longhorn-ui:master longhornio/csi-attacher:v2.2.1-lh1 longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 diff --git a/uninstall/uninstall.yaml b/uninstall/uninstall.yaml index 50e82e1..0ac3806 100644 --- a/uninstall/uninstall.yaml 
+++ b/uninstall/uninstall.yaml @@ -16,7 +16,7 @@ rules: verbs: - "*" - apiGroups: [""] - resources: ["pods", "persistentvolumes", "persistentvolumeclaims", "nodes", "configmaps"] + resources: ["pods", "persistentvolumes", "persistentvolumeclaims", "nodes", "configmaps", "secrets", "services", "endpoints"] verbs: ["*"] - apiGroups: ["apps"] resources: ["daemonsets", "statefulsets", "deployments"] @@ -34,7 +34,7 @@ rules: resources: ["csidrivers", "storageclasses"] verbs: ["*"] - apiGroups: ["longhorn.io"] - resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers"] + resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers", "sharemanagers"] verbs: ["*"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] @@ -67,7 +67,7 @@ spec: spec: containers: - name: longhorn-uninstall - image: longhornio/longhorn-manager:v1.1.0-rc1 + image: longhornio/longhorn-manager:master imagePullPolicy: Always command: - longhorn-manager From 880bc5ed0ff8cc842dae69985d68e4f5754f4019 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Mon, 14 Dec 2020 20:08:17 -0800 Subject: [PATCH 25/33] Update version to v1.1.0-rc2 Signed-off-by: Sheng Yang --- chart/Chart.yaml | 4 ++-- chart/questions.yml | 6 +++--- chart/values.yaml | 6 +++--- deploy/longhorn-images.txt | 6 +++--- deploy/longhorn.yaml | 14 +++++++------- deploy/release-images.txt | 6 +++--- uninstall/uninstall.yaml | 2 +- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/chart/Chart.yaml b/chart/Chart.yaml index 2172538..b2471ae 100644 --- a/chart/Chart.yaml +++ b/chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: longhorn -version: 1.1.0-rc1 -appVersion: v1.1.0-rc1 +version: 1.1.0-rc2 +appVersion: v1.1.0-rc2 kubeVersion: ">=v1.16.0-r0" description: Longhorn is a distributed block storage system for Kubernetes. 
keywords: diff --git a/chart/questions.yml b/chart/questions.yml index b094878..647dceb 100644 --- a/chart/questions.yml +++ b/chart/questions.yml @@ -17,7 +17,7 @@ questions: label: Longhorn Manager Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.manager.tag - default: v1.1.0-rc1 + default: v1.1.0-rc2 description: "Specify Longhorn Manager Image Tag" type: string label: Longhorn Manager Image Tag @@ -29,7 +29,7 @@ questions: label: Longhorn Engine Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.engine.tag - default: v1.1.0-rc1 + default: v1.1.0-rc2 description: "Specify Longhorn Engine Image Tag" type: string label: Longhorn Engine Image Tag @@ -41,7 +41,7 @@ questions: label: Longhorn UI Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.ui.tag - default: v1.1.0-rc1 + default: v1.1.0-rc2 description: "Specify Longhorn UI Image Tag" type: string label: Longhorn UI Image Tag diff --git a/chart/values.yaml b/chart/values.yaml index 10ac312..a253860 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -9,13 +9,13 @@ image: longhorn: engine: repository: longhornio/longhorn-engine - tag: v1.1.0-rc1 + tag: v1.1.0-rc2 manager: repository: longhornio/longhorn-manager - tag: v1.1.0-rc1 + tag: v1.1.0-rc2 ui: repository: longhornio/longhorn-ui - tag: v1.1.0-rc1 + tag: v1.1.0-rc2 instanceManager: repository: longhornio/longhorn-instance-manager tag: v1_20201021 diff --git a/deploy/longhorn-images.txt b/deploy/longhorn-images.txt index f545f54..d938e9a 100644 --- a/deploy/longhorn-images.txt +++ b/deploy/longhorn-images.txt @@ -1,8 +1,8 @@ -longhornio/longhorn-engine:master +longhornio/longhorn-engine:v1.1.0-rc2 longhornio/longhorn-instance-manager:v1_20201021 longhornio/longhorn-share-manager:v1_20201204 -longhornio/longhorn-manager:master -longhornio/longhorn-ui:master +longhornio/longhorn-manager:v1.1.0-rc2 +longhornio/longhorn-ui:v1.1.0-rc2 longhornio/csi-attacher:v2.2.1-lh1 
longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 diff --git a/deploy/longhorn.yaml b/deploy/longhorn.yaml index 8febee9..db112f1 100644 --- a/deploy/longhorn.yaml +++ b/deploy/longhorn.yaml @@ -593,7 +593,7 @@ spec: spec: containers: - name: longhorn-manager - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0-rc2 imagePullPolicy: IfNotPresent securityContext: privileged: true @@ -602,13 +602,13 @@ spec: - -d - daemon - --engine-image - - longhornio/longhorn-engine:master + - longhornio/longhorn-engine:v1.1.0-rc2 - --instance-manager-image - longhornio/longhorn-instance-manager:v1_20201021 - --share-manager-image - longhornio/longhorn-share-manager:v1_20201204 - --manager-image - - longhornio/longhorn-manager:master + - longhornio/longhorn-manager:v1.1.0-rc2 - --service-account - longhorn-service-account ports: @@ -699,7 +699,7 @@ spec: spec: containers: - name: longhorn-ui - image: longhornio/longhorn-ui:master + image: longhornio/longhorn-ui:v1.1.0-rc2 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -746,18 +746,18 @@ spec: spec: initContainers: - name: wait-longhorn-manager - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0-rc2 command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] containers: - name: longhorn-driver-deployer - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0-rc2 imagePullPolicy: IfNotPresent command: - longhorn-manager - -d - deploy-driver - --manager-image - - longhornio/longhorn-manager:master + - longhornio/longhorn-manager:v1.1.0-rc2 - --manager-url - http://longhorn-backend:9500/v1 env: diff --git a/deploy/release-images.txt b/deploy/release-images.txt index f545f54..d938e9a 100644 --- a/deploy/release-images.txt +++ b/deploy/release-images.txt @@ -1,8 +1,8 @@ -longhornio/longhorn-engine:master 
+longhornio/longhorn-engine:v1.1.0-rc2 longhornio/longhorn-instance-manager:v1_20201021 longhornio/longhorn-share-manager:v1_20201204 -longhornio/longhorn-manager:master -longhornio/longhorn-ui:master +longhornio/longhorn-manager:v1.1.0-rc2 +longhornio/longhorn-ui:v1.1.0-rc2 longhornio/csi-attacher:v2.2.1-lh1 longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 diff --git a/uninstall/uninstall.yaml b/uninstall/uninstall.yaml index 0ac3806..1caac69 100644 --- a/uninstall/uninstall.yaml +++ b/uninstall/uninstall.yaml @@ -67,7 +67,7 @@ spec: spec: containers: - name: longhorn-uninstall - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0-rc2 imagePullPolicy: Always command: - longhorn-manager From 2c25009c8a5f96b80c5aba50c31c1f63b66b6bf7 Mon Sep 17 00:00:00 2001 From: Shuo Wu Date: Tue, 15 Dec 2020 14:24:39 +0800 Subject: [PATCH 26/33] Chart: Add share manager info Signed-off-by: Shuo Wu --- chart/Chart.yaml | 1 + chart/README.md | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/chart/Chart.yaml b/chart/Chart.yaml index b2471ae..e123b60 100644 --- a/chart/Chart.yaml +++ b/chart/Chart.yaml @@ -16,6 +16,7 @@ sources: - https://github.com/longhorn/longhorn - https://github.com/longhorn/longhorn-engine - https://github.com/longhorn/longhorn-instance-manager +- https://github.com/longhorn/longhorn-share-manager - https://github.com/longhorn/longhorn-manager - https://github.com/longhorn/longhorn-ui - https://github.com/longhorn/longhorn-tests diff --git a/chart/README.md b/chart/README.md index 79d07dc..d2f6d05 100644 --- a/chart/README.md +++ b/chart/README.md @@ -10,7 +10,8 @@ Longhorn is 100% open source software. Project source code is spread across a nu 1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine 2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager -3. 
Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager +3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager +4. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager 4. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui ## Prerequisites From d242a624a7f9f3b7652a7f21bfcb4438dcd46f44 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Wed, 16 Dec 2020 21:11:33 -0800 Subject: [PATCH 27/33] Sync with Longhorn manager commit 7eddd32279f08c40e0ed979313e689ec703146b8 Author: Sheng Yang Date: Wed Dec 16 20:38:45 2020 -0800 Longhorn v1.1.0-rc3 release Signed-off-by: Sheng Yang Signed-off-by: Sheng Yang --- deploy/longhorn-images.txt | 8 ++++---- deploy/longhorn.yaml | 16 ++++++++-------- deploy/release-images.txt | 8 ++++---- examples/data_migration.yaml | 34 ++++++++++++++++++++++++++++++++++ uninstall/uninstall.yaml | 2 +- 5 files changed, 51 insertions(+), 17 deletions(-) create mode 100644 examples/data_migration.yaml diff --git a/deploy/longhorn-images.txt b/deploy/longhorn-images.txt index d938e9a..d1d2174 100644 --- a/deploy/longhorn-images.txt +++ b/deploy/longhorn-images.txt @@ -1,8 +1,8 @@ -longhornio/longhorn-engine:v1.1.0-rc2 -longhornio/longhorn-instance-manager:v1_20201021 +longhornio/longhorn-engine:master +longhornio/longhorn-instance-manager:v1_20201216 longhornio/longhorn-share-manager:v1_20201204 -longhornio/longhorn-manager:v1.1.0-rc2 -longhornio/longhorn-ui:v1.1.0-rc2 +longhornio/longhorn-manager:master +longhornio/longhorn-ui:master longhornio/csi-attacher:v2.2.1-lh1 longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 diff --git a/deploy/longhorn.yaml b/deploy/longhorn.yaml index db112f1..b22152a 100644 --- a/deploy/longhorn.yaml +++ b/deploy/longhorn.yaml @@ -593,7 +593,7 @@ spec: spec: 
containers: - name: longhorn-manager - image: longhornio/longhorn-manager:v1.1.0-rc2 + image: longhornio/longhorn-manager:master imagePullPolicy: IfNotPresent securityContext: privileged: true @@ -602,13 +602,13 @@ spec: - -d - daemon - --engine-image - - longhornio/longhorn-engine:v1.1.0-rc2 + - longhornio/longhorn-engine:master - --instance-manager-image - - longhornio/longhorn-instance-manager:v1_20201021 + - longhornio/longhorn-instance-manager:v1_20201216 - --share-manager-image - longhornio/longhorn-share-manager:v1_20201204 - --manager-image - - longhornio/longhorn-manager:v1.1.0-rc2 + - longhornio/longhorn-manager:master - --service-account - longhorn-service-account ports: @@ -699,7 +699,7 @@ spec: spec: containers: - name: longhorn-ui - image: longhornio/longhorn-ui:v1.1.0-rc2 + image: longhornio/longhorn-ui:master imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -746,18 +746,18 @@ spec: spec: initContainers: - name: wait-longhorn-manager - image: longhornio/longhorn-manager:v1.1.0-rc2 + image: longhornio/longhorn-manager:master command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] containers: - name: longhorn-driver-deployer - image: longhornio/longhorn-manager:v1.1.0-rc2 + image: longhornio/longhorn-manager:master imagePullPolicy: IfNotPresent command: - longhorn-manager - -d - deploy-driver - --manager-image - - longhornio/longhorn-manager:v1.1.0-rc2 + - longhornio/longhorn-manager:master - --manager-url - http://longhorn-backend:9500/v1 env: diff --git a/deploy/release-images.txt b/deploy/release-images.txt index d938e9a..d1d2174 100644 --- a/deploy/release-images.txt +++ b/deploy/release-images.txt @@ -1,8 +1,8 @@ -longhornio/longhorn-engine:v1.1.0-rc2 -longhornio/longhorn-instance-manager:v1_20201021 +longhornio/longhorn-engine:master +longhornio/longhorn-instance-manager:v1_20201216 longhornio/longhorn-share-manager:v1_20201204 
-longhornio/longhorn-manager:v1.1.0-rc2 -longhornio/longhorn-ui:v1.1.0-rc2 +longhornio/longhorn-manager:master +longhornio/longhorn-ui:master longhornio/csi-attacher:v2.2.1-lh1 longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 diff --git a/examples/data_migration.yaml b/examples/data_migration.yaml new file mode 100644 index 0000000..cd2d724 --- /dev/null +++ b/examples/data_migration.yaml @@ -0,0 +1,34 @@ +apiVersion: batch/v1 +kind: Job +metadata: + namespace: default # namespace where the pvc's exist + name: volume-migration +spec: + completions: 1 + parallelism: 1 + backoffLimit: 3 + template: + metadata: + name: volume-migration + labels: + name: volume-migration + spec: + restartPolicy: Never + containers: + - name: volume-migration + image: ubuntu:xenial + tty: true + command: [ "/bin/sh" ] + args: [ "-c", "cp -r -v /mnt/old /mnt/new" ] + volumeMounts: + - name: old-vol + mountPath: /mnt/old + - name: new-vol + mountPath: /mnt/new + volumes: + - name: old-vol + persistentVolumeClaim: + claimName: data-source-pvc # change to data source pvc + - name: new-vol + persistentVolumeClaim: + claimName: data-target-pvc # change to data target pvc \ No newline at end of file diff --git a/uninstall/uninstall.yaml b/uninstall/uninstall.yaml index 1caac69..0ac3806 100644 --- a/uninstall/uninstall.yaml +++ b/uninstall/uninstall.yaml @@ -67,7 +67,7 @@ spec: spec: containers: - name: longhorn-uninstall - image: longhornio/longhorn-manager:v1.1.0-rc2 + image: longhornio/longhorn-manager:master imagePullPolicy: Always command: - longhorn-manager From 696e6588f56fbee8334d13229e9251f0a1fd1094 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Wed, 16 Dec 2020 21:13:18 -0800 Subject: [PATCH 28/33] Update instance manager to v1_20201216 Signed-off-by: Sheng Yang --- chart/questions.yml | 4 ++-- chart/values.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/chart/questions.yml b/chart/questions.yml index 647dceb..4a1e93a 100644 --- 
a/chart/questions.yml +++ b/chart/questions.yml @@ -53,7 +53,7 @@ questions: label: Longhorn Instance Manager Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.instanceManager.tag - default: v1_20201021 + default: v1_20201216 description: "Specify Longhorn Instance Manager Image Tag" type: string label: Longhorn Instance Manager Image Tag @@ -509,4 +509,4 @@ Warning: This option works only when there is a failed replica in the volume. An description: "Setup a pod security policy for Longhorn workloads." label: Pod Security Policy type: boolean - group: "Other Settings" \ No newline at end of file + group: "Other Settings" diff --git a/chart/values.yaml b/chart/values.yaml index a253860..967a47a 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -18,7 +18,7 @@ image: tag: v1.1.0-rc2 instanceManager: repository: longhornio/longhorn-instance-manager - tag: v1_20201021 + tag: v1_20201216 shareManager: repository: longhornio/longhorn-share-manager tag: v1_20201204 From e834f3177c0f80aef35ffe67833ec0939687bb34 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Wed, 16 Dec 2020 21:12:13 -0800 Subject: [PATCH 29/33] Update version to v1.1.0-rc3 Signed-off-by: Sheng Yang --- chart/Chart.yaml | 4 ++-- chart/questions.yml | 6 +++--- chart/values.yaml | 6 +++--- deploy/longhorn-images.txt | 6 +++--- deploy/longhorn.yaml | 14 +++++++------- deploy/release-images.txt | 6 +++--- uninstall/uninstall.yaml | 2 +- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/chart/Chart.yaml b/chart/Chart.yaml index e123b60..7b26b68 100644 --- a/chart/Chart.yaml +++ b/chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: longhorn -version: 1.1.0-rc2 -appVersion: v1.1.0-rc2 +version: 1.1.0-rc3 +appVersion: v1.1.0-rc3 kubeVersion: ">=v1.16.0-r0" description: Longhorn is a distributed block storage system for Kubernetes. 
keywords: diff --git a/chart/questions.yml b/chart/questions.yml index 4a1e93a..010cd63 100644 --- a/chart/questions.yml +++ b/chart/questions.yml @@ -17,7 +17,7 @@ questions: label: Longhorn Manager Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.manager.tag - default: v1.1.0-rc2 + default: v1.1.0-rc3 description: "Specify Longhorn Manager Image Tag" type: string label: Longhorn Manager Image Tag @@ -29,7 +29,7 @@ questions: label: Longhorn Engine Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.engine.tag - default: v1.1.0-rc2 + default: v1.1.0-rc3 description: "Specify Longhorn Engine Image Tag" type: string label: Longhorn Engine Image Tag @@ -41,7 +41,7 @@ questions: label: Longhorn UI Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.ui.tag - default: v1.1.0-rc2 + default: v1.1.0-rc3 description: "Specify Longhorn UI Image Tag" type: string label: Longhorn UI Image Tag diff --git a/chart/values.yaml b/chart/values.yaml index 967a47a..a5d75e6 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -9,13 +9,13 @@ image: longhorn: engine: repository: longhornio/longhorn-engine - tag: v1.1.0-rc2 + tag: v1.1.0-rc3 manager: repository: longhornio/longhorn-manager - tag: v1.1.0-rc2 + tag: v1.1.0-rc3 ui: repository: longhornio/longhorn-ui - tag: v1.1.0-rc2 + tag: v1.1.0-rc3 instanceManager: repository: longhornio/longhorn-instance-manager tag: v1_20201216 diff --git a/deploy/longhorn-images.txt b/deploy/longhorn-images.txt index d1d2174..3af2641 100644 --- a/deploy/longhorn-images.txt +++ b/deploy/longhorn-images.txt @@ -1,8 +1,8 @@ -longhornio/longhorn-engine:master +longhornio/longhorn-engine:v1.1.0-rc3 longhornio/longhorn-instance-manager:v1_20201216 longhornio/longhorn-share-manager:v1_20201204 -longhornio/longhorn-manager:master -longhornio/longhorn-ui:master +longhornio/longhorn-manager:v1.1.0-rc3 +longhornio/longhorn-ui:v1.1.0-rc3 longhornio/csi-attacher:v2.2.1-lh1 
longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 diff --git a/deploy/longhorn.yaml b/deploy/longhorn.yaml index b22152a..4d82be6 100644 --- a/deploy/longhorn.yaml +++ b/deploy/longhorn.yaml @@ -593,7 +593,7 @@ spec: spec: containers: - name: longhorn-manager - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0-rc3 imagePullPolicy: IfNotPresent securityContext: privileged: true @@ -602,13 +602,13 @@ spec: - -d - daemon - --engine-image - - longhornio/longhorn-engine:master + - longhornio/longhorn-engine:v1.1.0-rc3 - --instance-manager-image - longhornio/longhorn-instance-manager:v1_20201216 - --share-manager-image - longhornio/longhorn-share-manager:v1_20201204 - --manager-image - - longhornio/longhorn-manager:master + - longhornio/longhorn-manager:v1.1.0-rc3 - --service-account - longhorn-service-account ports: @@ -699,7 +699,7 @@ spec: spec: containers: - name: longhorn-ui - image: longhornio/longhorn-ui:master + image: longhornio/longhorn-ui:v1.1.0-rc3 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -746,18 +746,18 @@ spec: spec: initContainers: - name: wait-longhorn-manager - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0-rc3 command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] containers: - name: longhorn-driver-deployer - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0-rc3 imagePullPolicy: IfNotPresent command: - longhorn-manager - -d - deploy-driver - --manager-image - - longhornio/longhorn-manager:master + - longhornio/longhorn-manager:v1.1.0-rc3 - --manager-url - http://longhorn-backend:9500/v1 env: diff --git a/deploy/release-images.txt b/deploy/release-images.txt index d1d2174..3af2641 100644 --- a/deploy/release-images.txt +++ b/deploy/release-images.txt @@ -1,8 +1,8 @@ -longhornio/longhorn-engine:master 
+longhornio/longhorn-engine:v1.1.0-rc3 longhornio/longhorn-instance-manager:v1_20201216 longhornio/longhorn-share-manager:v1_20201204 -longhornio/longhorn-manager:master -longhornio/longhorn-ui:master +longhornio/longhorn-manager:v1.1.0-rc3 +longhornio/longhorn-ui:v1.1.0-rc3 longhornio/csi-attacher:v2.2.1-lh1 longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 diff --git a/uninstall/uninstall.yaml b/uninstall/uninstall.yaml index 0ac3806..116b4f6 100644 --- a/uninstall/uninstall.yaml +++ b/uninstall/uninstall.yaml @@ -67,7 +67,7 @@ spec: spec: containers: - name: longhorn-uninstall - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0-rc3 imagePullPolicy: Always command: - longhorn-manager From 4891c1ef3b8fc58e428b7f9d0a3ce809f7043bd1 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 17 Dec 2020 18:08:23 -0800 Subject: [PATCH 30/33] Sync with manager commit 278ff44085b967923d6f07dfb43a95a7b2974470 Author: Sheng Yang Date: Thu Dec 17 15:04:10 2020 -0800 Longhorn v1.1.0 release Signed-off-by: Sheng Yang Signed-off-by: Sheng Yang --- deploy/longhorn-images.txt | 6 +++--- deploy/longhorn.yaml | 14 +++++++------- deploy/release-images.txt | 6 +++--- uninstall/uninstall.yaml | 2 +- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/deploy/longhorn-images.txt b/deploy/longhorn-images.txt index 3af2641..d1d2174 100644 --- a/deploy/longhorn-images.txt +++ b/deploy/longhorn-images.txt @@ -1,8 +1,8 @@ -longhornio/longhorn-engine:v1.1.0-rc3 +longhornio/longhorn-engine:master longhornio/longhorn-instance-manager:v1_20201216 longhornio/longhorn-share-manager:v1_20201204 -longhornio/longhorn-manager:v1.1.0-rc3 -longhornio/longhorn-ui:v1.1.0-rc3 +longhornio/longhorn-manager:master +longhornio/longhorn-ui:master longhornio/csi-attacher:v2.2.1-lh1 longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 diff --git a/deploy/longhorn.yaml b/deploy/longhorn.yaml index 4d82be6..b22152a 100644 --- 
a/deploy/longhorn.yaml +++ b/deploy/longhorn.yaml @@ -593,7 +593,7 @@ spec: spec: containers: - name: longhorn-manager - image: longhornio/longhorn-manager:v1.1.0-rc3 + image: longhornio/longhorn-manager:master imagePullPolicy: IfNotPresent securityContext: privileged: true @@ -602,13 +602,13 @@ spec: - -d - daemon - --engine-image - - longhornio/longhorn-engine:v1.1.0-rc3 + - longhornio/longhorn-engine:master - --instance-manager-image - longhornio/longhorn-instance-manager:v1_20201216 - --share-manager-image - longhornio/longhorn-share-manager:v1_20201204 - --manager-image - - longhornio/longhorn-manager:v1.1.0-rc3 + - longhornio/longhorn-manager:master - --service-account - longhorn-service-account ports: @@ -699,7 +699,7 @@ spec: spec: containers: - name: longhorn-ui - image: longhornio/longhorn-ui:v1.1.0-rc3 + image: longhornio/longhorn-ui:master imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -746,18 +746,18 @@ spec: spec: initContainers: - name: wait-longhorn-manager - image: longhornio/longhorn-manager:v1.1.0-rc3 + image: longhornio/longhorn-manager:master command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] containers: - name: longhorn-driver-deployer - image: longhornio/longhorn-manager:v1.1.0-rc3 + image: longhornio/longhorn-manager:master imagePullPolicy: IfNotPresent command: - longhorn-manager - -d - deploy-driver - --manager-image - - longhornio/longhorn-manager:v1.1.0-rc3 + - longhornio/longhorn-manager:master - --manager-url - http://longhorn-backend:9500/v1 env: diff --git a/deploy/release-images.txt b/deploy/release-images.txt index 3af2641..d1d2174 100644 --- a/deploy/release-images.txt +++ b/deploy/release-images.txt @@ -1,8 +1,8 @@ -longhornio/longhorn-engine:v1.1.0-rc3 +longhornio/longhorn-engine:master longhornio/longhorn-instance-manager:v1_20201216 longhornio/longhorn-share-manager:v1_20201204 
-longhornio/longhorn-manager:v1.1.0-rc3 -longhornio/longhorn-ui:v1.1.0-rc3 +longhornio/longhorn-manager:master +longhornio/longhorn-ui:master longhornio/csi-attacher:v2.2.1-lh1 longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 diff --git a/uninstall/uninstall.yaml b/uninstall/uninstall.yaml index 116b4f6..0ac3806 100644 --- a/uninstall/uninstall.yaml +++ b/uninstall/uninstall.yaml @@ -67,7 +67,7 @@ spec: spec: containers: - name: longhorn-uninstall - image: longhornio/longhorn-manager:v1.1.0-rc3 + image: longhornio/longhorn-manager:master imagePullPolicy: Always command: - longhorn-manager From 1489feee7b0d60582c85b3eda494358ae897ee79 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 17 Dec 2020 18:09:01 -0800 Subject: [PATCH 31/33] Update version to v1.1.0 Signed-off-by: Sheng Yang --- chart/Chart.yaml | 4 ++-- chart/questions.yml | 6 +++--- chart/values.yaml | 6 +++--- deploy/longhorn-images.txt | 6 +++--- deploy/longhorn.yaml | 14 +++++++------- deploy/release-images.txt | 6 +++--- uninstall/uninstall.yaml | 2 +- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/chart/Chart.yaml b/chart/Chart.yaml index 7b26b68..8e15810 100644 --- a/chart/Chart.yaml +++ b/chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: longhorn -version: 1.1.0-rc3 -appVersion: v1.1.0-rc3 +version: 1.1.0 +appVersion: v1.1.0 kubeVersion: ">=v1.16.0-r0" description: Longhorn is a distributed block storage system for Kubernetes. 
keywords: diff --git a/chart/questions.yml b/chart/questions.yml index 010cd63..ef33761 100644 --- a/chart/questions.yml +++ b/chart/questions.yml @@ -17,7 +17,7 @@ questions: label: Longhorn Manager Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.manager.tag - default: v1.1.0-rc3 + default: v1.1.0 description: "Specify Longhorn Manager Image Tag" type: string label: Longhorn Manager Image Tag @@ -29,7 +29,7 @@ questions: label: Longhorn Engine Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.engine.tag - default: v1.1.0-rc3 + default: v1.1.0 description: "Specify Longhorn Engine Image Tag" type: string label: Longhorn Engine Image Tag @@ -41,7 +41,7 @@ questions: label: Longhorn UI Image Repository group: "Longhorn Images Settings" - variable: image.longhorn.ui.tag - default: v1.1.0-rc3 + default: v1.1.0 description: "Specify Longhorn UI Image Tag" type: string label: Longhorn UI Image Tag diff --git a/chart/values.yaml b/chart/values.yaml index a5d75e6..d3345d4 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -9,13 +9,13 @@ image: longhorn: engine: repository: longhornio/longhorn-engine - tag: v1.1.0-rc3 + tag: v1.1.0 manager: repository: longhornio/longhorn-manager - tag: v1.1.0-rc3 + tag: v1.1.0 ui: repository: longhornio/longhorn-ui - tag: v1.1.0-rc3 + tag: v1.1.0 instanceManager: repository: longhornio/longhorn-instance-manager tag: v1_20201216 diff --git a/deploy/longhorn-images.txt b/deploy/longhorn-images.txt index d1d2174..25bb23f 100644 --- a/deploy/longhorn-images.txt +++ b/deploy/longhorn-images.txt @@ -1,8 +1,8 @@ -longhornio/longhorn-engine:master +longhornio/longhorn-engine:v1.1.0 longhornio/longhorn-instance-manager:v1_20201216 longhornio/longhorn-share-manager:v1_20201204 -longhornio/longhorn-manager:master -longhornio/longhorn-ui:master +longhornio/longhorn-manager:v1.1.0 +longhornio/longhorn-ui:v1.1.0 longhornio/csi-attacher:v2.2.1-lh1 longhornio/csi-provisioner:v1.6.0-lh1 
longhornio/csi-resizer:v0.5.1-lh1 diff --git a/deploy/longhorn.yaml b/deploy/longhorn.yaml index b22152a..2a112c1 100644 --- a/deploy/longhorn.yaml +++ b/deploy/longhorn.yaml @@ -593,7 +593,7 @@ spec: spec: containers: - name: longhorn-manager - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0 imagePullPolicy: IfNotPresent securityContext: privileged: true @@ -602,13 +602,13 @@ spec: - -d - daemon - --engine-image - - longhornio/longhorn-engine:master + - longhornio/longhorn-engine:v1.1.0 - --instance-manager-image - longhornio/longhorn-instance-manager:v1_20201216 - --share-manager-image - longhornio/longhorn-share-manager:v1_20201204 - --manager-image - - longhornio/longhorn-manager:master + - longhornio/longhorn-manager:v1.1.0 - --service-account - longhorn-service-account ports: @@ -699,7 +699,7 @@ spec: spec: containers: - name: longhorn-ui - image: longhornio/longhorn-ui:master + image: longhornio/longhorn-ui:v1.1.0 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -746,18 +746,18 @@ spec: spec: initContainers: - name: wait-longhorn-manager - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0 command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] containers: - name: longhorn-driver-deployer - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0 imagePullPolicy: IfNotPresent command: - longhorn-manager - -d - deploy-driver - --manager-image - - longhornio/longhorn-manager:master + - longhornio/longhorn-manager:v1.1.0 - --manager-url - http://longhorn-backend:9500/v1 env: diff --git a/deploy/release-images.txt b/deploy/release-images.txt index d1d2174..25bb23f 100644 --- a/deploy/release-images.txt +++ b/deploy/release-images.txt @@ -1,8 +1,8 @@ -longhornio/longhorn-engine:master +longhornio/longhorn-engine:v1.1.0 
longhornio/longhorn-instance-manager:v1_20201216 longhornio/longhorn-share-manager:v1_20201204 -longhornio/longhorn-manager:master -longhornio/longhorn-ui:master +longhornio/longhorn-manager:v1.1.0 +longhornio/longhorn-ui:v1.1.0 longhornio/csi-attacher:v2.2.1-lh1 longhornio/csi-provisioner:v1.6.0-lh1 longhornio/csi-resizer:v0.5.1-lh1 diff --git a/uninstall/uninstall.yaml b/uninstall/uninstall.yaml index 0ac3806..b9d1f20 100644 --- a/uninstall/uninstall.yaml +++ b/uninstall/uninstall.yaml @@ -67,7 +67,7 @@ spec: spec: containers: - name: longhorn-uninstall - image: longhornio/longhorn-manager:master + image: longhornio/longhorn-manager:v1.1.0 imagePullPolicy: Always command: - longhorn-manager From 55cfc3482aa8e51e5c9c3860467b2b3d4f98a06b Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 17 Dec 2020 18:21:15 -0800 Subject: [PATCH 32/33] Remove `examples/rwx` Remove it to prevent misleading users. It should be removed from Longhorn manager as well. Signed-off-by: Sheng Yang --- examples/rwx/01-security.yaml | 85 --------- examples/rwx/02-longhorn-nfs-provisioner.yaml | 178 ------------------ examples/rwx/03-rwx-test.yaml | 59 ------ 3 files changed, 322 deletions(-) delete mode 100644 examples/rwx/01-security.yaml delete mode 100644 examples/rwx/02-longhorn-nfs-provisioner.yaml delete mode 100644 examples/rwx/03-rwx-test.yaml diff --git a/examples/rwx/01-security.yaml b/examples/rwx/01-security.yaml deleted file mode 100644 index e11c910..0000000 --- a/examples/rwx/01-security.yaml +++ /dev/null @@ -1,85 +0,0 @@ -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: longhorn-nfs-provisioner -spec: - fsGroup: - rule: RunAsAny - allowedCapabilities: - - DAC_READ_SEARCH - - SYS_RESOURCE - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - configMap - - downwardAPI - - emptyDir - - persistentVolumeClaim - - secret - - hostPath ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 
-metadata: - name: longhorn-nfs-provisioner -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "update", "patch"] - - apiGroups: [""] - resources: ["services", "endpoints"] - verbs: ["get"] - - apiGroups: ["extensions"] - resources: ["podsecuritypolicies"] - resourceNames: ["longhorn-nfs-provisioner"] - verbs: ["use"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: longhorn-nfs-provisioner -subjects: - - kind: ServiceAccount - name: longhorn-nfs-provisioner - namespace: longhorn-system -roleRef: - kind: ClusterRole - name: longhorn-nfs-provisioner - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: leader-locking-longhorn-nfs-provisioner - namespace: longhorn-system -rules: - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "list", "watch", "create", "update", "patch"] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: leader-locking-longhorn-nfs-provisioner - namespace: longhorn-system -subjects: - - kind: ServiceAccount - name: longhorn-nfs-provisioner - namespace: longhorn-system -roleRef: - kind: Role - name: leader-locking-longhorn-nfs-provisioner - apiGroup: rbac.authorization.k8s.io diff --git a/examples/rwx/02-longhorn-nfs-provisioner.yaml b/examples/rwx/02-longhorn-nfs-provisioner.yaml deleted file mode 100644 index 3a8016a..0000000 --- a/examples/rwx/02-longhorn-nfs-provisioner.yaml +++ /dev/null @@ -1,178 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: longhorn-nfs-provisioner - namespace: longhorn-system ---- -kind: Service -apiVersion: v1 -metadata: - 
name: longhorn-nfs-provisioner - namespace: longhorn-system - labels: - app: longhorn-nfs-provisioner -spec: - # hardcode a cluster ip for the service - # so that on delete & recreate of the service the previous pv's still point - # to this nfs-provisioner, pick a new ip for each new nfs provisioner - clusterIP: 10.43.111.111 - ports: - - name: nfs - port: 2049 - - name: nfs-udp - port: 2049 - protocol: UDP - - name: nlockmgr - port: 32803 - - name: nlockmgr-udp - port: 32803 - protocol: UDP - - name: mountd - port: 20048 - - name: mountd-udp - port: 20048 - protocol: UDP - - name: rquotad - port: 875 - - name: rquotad-udp - port: 875 - protocol: UDP - - name: rpcbind - port: 111 - - name: rpcbind-udp - port: 111 - protocol: UDP - - name: statd - port: 662 - - name: statd-udp - port: 662 - protocol: UDP - selector: - app: longhorn-nfs-provisioner ---- -kind: Deployment -apiVersion: apps/v1 -metadata: - name: longhorn-nfs-provisioner - namespace: longhorn-system -spec: - selector: - matchLabels: - app: longhorn-nfs-provisioner - replicas: 1 - strategy: - type: Recreate - template: - metadata: - labels: - app: longhorn-nfs-provisioner - spec: - serviceAccount: longhorn-nfs-provisioner - containers: - - name: longhorn-nfs-provisioner - image: quay.io/kubernetes_incubator/nfs-provisioner:latest - ports: - - name: nfs - containerPort: 2049 - - name: nfs-udp - containerPort: 2049 - protocol: UDP - - name: nlockmgr - containerPort: 32803 - - name: nlockmgr-udp - containerPort: 32803 - protocol: UDP - - name: mountd - containerPort: 20048 - - name: mountd-udp - containerPort: 20048 - protocol: UDP - - name: rquotad - containerPort: 875 - - name: rquotad-udp - containerPort: 875 - protocol: UDP - - name: rpcbind - containerPort: 111 - - name: rpcbind-udp - containerPort: 111 - protocol: UDP - - name: statd - containerPort: 662 - - name: statd-udp - containerPort: 662 - protocol: UDP - securityContext: - capabilities: - add: - - DAC_READ_SEARCH - - SYS_RESOURCE - args: - - 
"-provisioner=nfs.longhorn.io" - - "-device-based-fsids=false" - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: SERVICE_NAME - value: longhorn-nfs-provisioner - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - imagePullPolicy: "IfNotPresent" - readinessProbe: - exec: - command: - - ls - - /export - initialDelaySeconds: 5 - periodSeconds: 5 - livenessProbe: - exec: - command: - - ls - - /export - initialDelaySeconds: 5 - periodSeconds: 5 - volumeMounts: - - name: export-volume - mountPath: /export - volumes: - - name: export-volume - persistentVolumeClaim: - claimName: longhorn-nfs-provisioner - # we want really quick failover - terminationGracePeriodSeconds: 30 - tolerations: - - effect: NoExecute - key: node.kubernetes.io/not-ready - operator: Exists - tolerationSeconds: 60 - - effect: NoExecute - key: node.kubernetes.io/unreachable - operator: Exists - tolerationSeconds: 60 ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: longhorn-nfs-provisioner # longhorn backing pvc - namespace: longhorn-system -spec: - storageClassName: longhorn - accessModes: - - ReadWriteOnce - resources: - requests: - storage: "20G" # make this 10% bigger then the workload pvc ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: longhorn-nfs # workload storage class -provisioner: nfs.longhorn.io -mountOptions: - - "vers=4.1" - - "noresvport" diff --git a/examples/rwx/03-rwx-test.yaml b/examples/rwx/03-rwx-test.yaml deleted file mode 100644 index d138dea..0000000 --- a/examples/rwx/03-rwx-test.yaml +++ /dev/null @@ -1,59 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: nfs-test - namespace: default -spec: - accessModes: - - ReadWriteMany - storageClassName: longhorn-nfs - resources: - requests: - storage: 1Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nfs-test - labels: - app: nfs-test - namespace: default -spec: - replicas: 4 - selector: - 
matchLabels: - app: nfs-test - strategy: - type: Recreate - template: - metadata: - labels: - app: nfs-test - spec: - containers: - - image: ubuntu:xenial - imagePullPolicy: Always - command: ["/bin/sh", "-c"] - args: - - sleep 30; touch /mnt/nfs-test/test.log; while true; do date >> /mnt/nfs-test/test.log; sleep 1; done; - name: nfs-test - stdin: true - tty: true - livenessProbe: - exec: - command: - - timeout - - "10" - - ls - - /mnt/nfs-test - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 10 - volumeMounts: - - mountPath: /mnt/nfs-test - name: nfs-test - restartPolicy: Always - volumes: - - name: nfs-test - persistentVolumeClaim: - claimName: nfs-test From 000a124d0feb2a8616b2fe59266e3cc0a9e0b042 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 17 Dec 2020 19:32:16 -0800 Subject: [PATCH 33/33] Longhorn v1.1.0 release Signed-off-by: Sheng Yang --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1b816d6..7407bb7 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ You can read more about the community and its events here: https://github.com/lo ## Current status -The latest release of Longhorn is **v1.0.2**. +The latest release of Longhorn is **v1.1.0**. ## Source code Longhorn is 100% open source software. Project source code is spread across a number of repos: