Compare commits

...

21 Commits

Author SHA1 Message Date
David Ko
cfa6cd727f Release 1.3.1 RC2
Signed-off-by: David Ko <dko@suse.com>
2022-08-08 20:48:06 +08:00
David Ko
564286098c Update share and backing images
Signed-off-by: David Ko <dko@suse.com>
(cherry picked from commit 743fa08e8f)
2022-08-08 17:12:36 +08:00
Shuo Wu
2def71350e Update longhorn-instance-manager tag
Signed-off-by: Shuo Wu <shuo.wu@suse.com>
2022-08-08 17:05:42 +08:00
David Ko
5976459490 Update share and backing images
Signed-off-by: David Ko <dko@suse.com>
2022-08-08 14:22:56 +08:00
Derek Su
038b5b35e0 Update charts
Longhorn 4332

Signed-off-by: Derek Su <derek.su@suse.com>
2022-08-02 15:42:07 +08:00
David Ko
3ad971e1c3 Release 1.3.1 RC1
Signed-off-by: David Ko <dko@suse.com>
2022-07-29 16:21:32 +08:00
Serge Tkatchouk
87ded08cd7 Add Gentoo support to environment_check.sh
This addition will allow Gentoo users to run this script and get sensible error messages in case they forgot to install required packages.

Signed-off-by: Serge Tkatchouk <sp1j3t@gmail.com>
(cherry picked from commit c1b93f5531)
2022-07-29 13:32:31 +08:00
David Ko
eb186ec432 Add update-manifests-dev-version.sh
Signed-off-by: David Ko <dko@suse.com>
(cherry picked from commit 0a275ab34f)
Signed-off-by: David Ko <dko@suse.com>
2022-07-27 17:53:45 +08:00
lodufqa
be276a9de9 Update chart/values.yaml
Co-authored-by: David Ko <dko@suse.com>
Signed-off-by: Amadeusz Kryze <amadeusz.kryze@gmail.com>
(cherry picked from commit eda558c0d5)
2022-07-27 17:53:45 +08:00
Amadeusz Kryze
7956c08eb5 Annotation for service LonghornManager will be configurable.
Signed-off-by: Amadeusz Kryze <amadeusz.kryze@gmail.com>
(cherry picked from commit 1e7289dfe0)
2022-07-27 17:53:45 +08:00
Sebastian Podjasek
c2293062db Add value to set manager log in json format
Signed-off-by: Sebastian Podjasek <sebastian.podjasek@intelliway.pl>
(cherry picked from commit d48e95b8c3)
2022-07-27 17:53:45 +08:00
Dante Foulke
af3f2220f7 bugfix for issue #4216
Signed-off-by: Dante Foulke <flkdnt@gmail.com>
(cherry picked from commit edc1b83c5f)
2022-07-27 17:53:45 +08:00
Phan Le
36fc0c41b1 Add rancher chart migration script
The script helps to migrate Longhorn installed in the old Rancher
catalog UI to the new chart in Rancher app&marketplace UI

Longhorn-3714

Signed-off-by: Phan Le <phan.le@suse.com>
(cherry picked from commit 0614c55fc3)
2022-07-27 17:53:45 +08:00
Łukasz Sowa
a879d34d27 Separate tls ingress option from secure backend
Signed-off-by: Łukasz Sowa <lukasz@owlsome.dev>
(cherry picked from commit fe5565dbcf)
2022-07-27 17:53:45 +08:00
Andru Cherny
709aa3b8ed move replicas from UI and driver to values
Signed-off-by: Andru Cherny <wiroatom@gmail.com>
(cherry picked from commit 368d8363da)
2022-07-27 17:53:45 +08:00
tgfree
4cef751aca fix some typo on doc
Signed-off-by: tgfree <tgfree7@gmail.com>

(cherry picked from commit 1e8dd33559)
2022-07-27 17:53:45 +08:00
Chris Chien
1363461b35 rename generate-longhorm-yaml.sh to generate-longhorn-yaml.sh
Signed-off-by: Chris Chien <chris.chien@suse.com>
2022-07-27 15:07:09 +08:00
Derek Su
bc06b10c34 chart: fix the conditions of Rancher deployed Windows Cluster
Longhorn 4289

Signed-off-by: Derek Su <derek.su@suse.com>
2022-07-27 12:33:17 +08:00
Tristan Lins
80c0c2d8c6 Add preserveUnknownFields: false to crd specs
Signed-off-by: Tristan Lins <tristan.lins@chamaeleon.de>
2022-07-27 10:59:37 +08:00
c3y1huang
1907172f05 chart: support Rancher deployed Windows Cluster
Longhorn-4262

Signed-off-by: Chin-Ya Huang <chin-ya.huang@suse.com>
2022-07-22 19:11:18 +08:00
Derek Su
3973f871c8 Update longhorn-instance-manager to v1_20220722
Longhorn 4136

Signed-off-by: Derek Su <derek.su@suse.com>
2022-07-22 17:25:41 +08:00
37 changed files with 522 additions and 143 deletions

View File

@@ -31,7 +31,8 @@ The latest release of Longhorn is [![Releases](https://img.shields.io/github/rel
 ## Release Status
 | Release | Version | Type |
-| --------|---------|----------------|
+|---------|---------|--------|
+| 1.3 | 1.3.0 | Latest |
 | 1.2 | 1.2.4 | Stable |
 | 1.1 | 1.1.3 | Stable |

View File

@@ -1,7 +1,7 @@
 apiVersion: v1
 name: longhorn
-version: 1.3.0
-appVersion: v1.3.0
+version: 1.3.1-rc2
+appVersion: v1.3.1-rc2
 kubeVersion: ">=1.18.0-0"
 description: Longhorn is a distributed block storage system for Kubernetes.
 keywords:

View File

@@ -17,7 +17,7 @@ questions:
 label: Longhorn Manager Image Repository
 group: "Longhorn Images Settings"
 - variable: image.longhorn.manager.tag
-default: v1.3.0
+default: v1.3.1-rc2
 description: "Specify Longhorn Manager Image Tag"
 type: string
 label: Longhorn Manager Image Tag
@@ -29,7 +29,7 @@ questions:
 label: Longhorn Engine Image Repository
 group: "Longhorn Images Settings"
 - variable: image.longhorn.engine.tag
-default: v1.3.0
+default: v1.3.1-rc2
 description: "Specify Longhorn Engine Image Tag"
 type: string
 label: Longhorn Engine Image Tag
@@ -41,7 +41,7 @@ questions:
 label: Longhorn UI Image Repository
 group: "Longhorn Images Settings"
 - variable: image.longhorn.ui.tag
-default: v1.3.0
+default: v1.3.1-rc2
 description: "Specify Longhorn UI Image Tag"
 type: string
 label: Longhorn UI Image Tag
@@ -53,7 +53,7 @@ questions:
 label: Longhorn Instance Manager Image Repository
 group: "Longhorn Images Settings"
 - variable: image.longhorn.instanceManager.tag
-default: v1_20220611
+default: v1_20220808
 description: "Specify Longhorn Instance Manager Image Tag"
 type: string
 label: Longhorn Instance Manager Image Tag
@@ -65,7 +65,7 @@ questions:
 label: Longhorn Share Manager Image Repository
 group: "Longhorn Images Settings"
 - variable: image.longhorn.shareManager.tag
-default: v1_20220531
+default: v1_20220808
 description: "Specify Longhorn Share Manager Image Tag"
 type: string
 label: Longhorn Share Manager Image Tag
@@ -77,7 +77,7 @@ questions:
 label: Longhorn Backing Image Manager Image Repository
 group: "Longhorn Images Settings"
 - variable: image.longhorn.backingImageManager.tag
-default: v3_20220609
+default: v3_20220808
 description: "Specify Longhorn Backing Image Manager Image Tag"
 type: string
 label: Longhorn Backing Image Manager Image Tag
@@ -670,3 +670,9 @@ WARNING:
 label: Pod Security Policy
 type: boolean
 group: "Other Settings"
+- variable: global.cattle.windowsCluster.enabled
+default: "false"
+description: "Enable this to allow Longhorn to run on the Rancher deployed Windows cluster."
+label: Rancher Windows Cluster
+type: boolean
+group: "Other Settings"

View File

@@ -1055,6 +1055,7 @@ metadata:
 longhorn-manager: ""
 name: engineimages.longhorn.io
 spec:
+preserveUnknownFields: false
 conversion:
 strategy: Webhook
 webhook:
@@ -1086,7 +1087,7 @@ spec:
 jsonPath: .spec.image
 name: Image
 type: string
-- description: Number of volumes are using the engine image
+- description: Number of resources using the engine image
 jsonPath: .status.refCount
 name: RefCount
 type: integer
@@ -1128,7 +1129,7 @@ spec:
 jsonPath: .spec.image
 name: Image
 type: string
-- description: Number of volumes are using the engine image
+- description: Number of resources using the engine image
 jsonPath: .status.refCount
 name: RefCount
 type: integer
@@ -1725,6 +1726,7 @@ metadata:
 longhorn-manager: ""
 name: nodes.longhorn.io
 spec:
+preserveUnknownFields: false
 conversion:
 strategy: Webhook
 webhook:
@@ -2714,6 +2716,7 @@ metadata:
 longhorn-manager: ""
 name: volumes.longhorn.io
 spec:
+preserveUnknownFields: false
 conversion:
 strategy: Webhook
 webhook:
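Note: `preserveUnknownFields: false` makes the API server prune fields not declared in the CRD's structural schema, which is a prerequisite for the `conversion.strategy: Webhook` used by these CRDs. A minimal sketch of the resulting spec shape, with field names taken from the hunks above (the apiVersion is an assumption, it is not shown in this diff):

apiVersion: apiextensions.k8s.io/v1beta1   # assumption; not visible in this diff
kind: CustomResourceDefinition
metadata:
  name: engineimages.longhorn.io
spec:
  preserveUnknownFields: false   # prune fields missing from the structural schema
  conversion:
    strategy: Webhook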

View File

@@ -31,6 +31,9 @@ spec:
 command:
 - longhorn-manager
 - -d
+{{- if eq .Values.longhornManager.log.format "json" }}
+- -j
+{{- end }}
 - daemon
 - --engine-image
 - "{{ template "registry_url" . }}{{ .Values.image.longhorn.engine.repository }}:{{ .Values.image.longhorn.engine.tag }}"
@@ -94,14 +97,24 @@ spec:
 {{- if .Values.longhornManager.priorityClass }}
 priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
 {{- end }}
-{{- if .Values.longhornManager.tolerations }}
+{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
 tolerations:
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+{{- end }}
+{{- if .Values.longhornManager.tolerations }}
 {{ toYaml .Values.longhornManager.tolerations | indent 6 }}
 {{- end }}
-{{- if .Values.longhornManager.nodeSelector }}
+{{- end }}
+{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
 nodeSelector:
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+{{- end }}
+{{- if .Values.longhornManager.nodeSelector }}
 {{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
 {{- end }}
+{{- end }}
 serviceAccountName: longhorn-service-account
 updateStrategy:
 rollingUpdate:
@@ -114,6 +127,10 @@ metadata:
 app: longhorn-manager
 name: longhorn-backend
 namespace: {{ include "release_namespace" . }}
+{{- if .Values.longhornManager.serviceAnnotations }}
+annotations:
+{{ toYaml .Values.longhornManager.serviceAnnotations | indent 4 }}
+{{- end }}
 spec:
 type: {{ .Values.service.manager.type }}
 sessionAffinity: ClientIP
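Taken together, this template now exposes two new chart knobs: a JSON log format that renders the extra `-j` flag, and annotations copied onto the longhorn-backend Service. A minimal values sketch (the annotation key is hypothetical, for illustration only):

longhornManager:
  log:
    format: json                  # "plain" is the default; "json" adds the -j flag above
  serviceAnnotations:             # rendered into the longhorn-backend Service metadata
    example.com/scrape: "true"    # hypothetical annotation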

View File

@@ -20,8 +20,26 @@ data:
 {{ if not (kindIs "invalid" .Values.defaultSettings.defaultDataLocality) }}default-data-locality: {{ .Values.defaultSettings.defaultDataLocality }}{{ end }}
 {{ if not (kindIs "invalid" .Values.defaultSettings.defaultLonghornStaticStorageClass) }}default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }}{{ end }}
 {{ if not (kindIs "invalid" .Values.defaultSettings.backupstorePollInterval) }}backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }}{{ end }}
-{{ if not (kindIs "invalid" .Values.defaultSettings.taintToleration) }}taint-toleration: {{ .Values.defaultSettings.taintToleration }}{{ end }}
-{{ if not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector) }}system-managed-components-node-selector: {{ .Values.defaultSettings.systemManagedComponentsNodeSelector }}{{ end }}
+{{- if or (not (kindIs "invalid" .Values.defaultSettings.taintToleration)) (.Values.global.cattle.windowsCluster.enabled) }}
+taint-toleration: {{ $windowsDefaultSettingTaintToleration := list }}{{ $defaultSettingTaintToleration := list -}}
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}}
+{{- $windowsDefaultSettingTaintToleration = .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}}
+{{- end -}}
+{{- if not (kindIs "invalid" .Values.defaultSettings.taintToleration) -}}
+{{- $defaultSettingTaintToleration = .Values.defaultSettings.taintToleration -}}
+{{- end -}}
+{{- $taintToleration := list $windowsDefaultSettingTaintToleration $defaultSettingTaintToleration }}{{ join ";" (compact $taintToleration) -}}
+{{- end }}
+{{- if or (not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector)) (.Values.global.cattle.windowsCluster.enabled) }}
+system-managed-components-node-selector: {{ $windowsDefaultSettingNodeSelector := list }}{{ $defaultSettingNodeSelector := list -}}
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}}
+{{ $windowsDefaultSettingNodeSelector = .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}}
+{{- end -}}
+{{- if not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector) -}}
+{{- $defaultSettingNodeSelector = .Values.defaultSettings.systemManagedComponentsNodeSelector -}}
+{{- end -}}
+{{- $nodeSelector := list $windowsDefaultSettingNodeSelector $defaultSettingNodeSelector }}{{ join ";" (compact $nodeSelector) -}}
+{{- end }}
 {{ if not (kindIs "invalid" .Values.defaultSettings.priorityClass) }}priority-class: {{ .Values.defaultSettings.priorityClass }}{{ end }}
 {{ if not (kindIs "invalid" .Values.defaultSettings.autoSalvage) }}auto-salvage: {{ .Values.defaultSettings.autoSalvage }}{{ end }}
 {{ if not (kindIs "invalid" .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly) }}auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }}{{ end }}
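The template above merges the Windows-cluster defaults with any user-supplied value and joins the non-empty entries with a semicolon via `join ";" (compact ...)`. As a sketch, with `windowsCluster.enabled: true` plus a hypothetical user toleration of `key1=value1:NoSchedule`, the ConfigMap data would render roughly as:

taint-toleration: cattle.io/os=linux:NoSchedule;key1=value1:NoSchedule
system-managed-components-node-selector: kubernetes.io/os:linux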

View File

@@ -91,14 +91,24 @@ spec:
 {{- if .Values.longhornDriver.priorityClass }}
 priorityClassName: {{ .Values.longhornDriver.priorityClass | quote }}
 {{- end }}
-{{- if .Values.longhornDriver.tolerations }}
+{{- if or .Values.longhornDriver.tolerations .Values.global.cattle.windowsCluster.enabled }}
 tolerations:
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+{{- end }}
+{{- if .Values.longhornDriver.tolerations }}
 {{ toYaml .Values.longhornDriver.tolerations | indent 6 }}
 {{- end }}
-{{- if .Values.longhornDriver.nodeSelector }}
+{{- end }}
+{{- if or .Values.longhornDriver.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
 nodeSelector:
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+{{- end }}
+{{- if .Values.longhornDriver.nodeSelector }}
 {{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }}
 {{- end }}
+{{- end }}
 serviceAccountName: longhorn-service-account
 securityContext:
 runAsUser: 0

View File

@@ -6,7 +6,7 @@ metadata:
 name: longhorn-ui
 namespace: {{ include "release_namespace" . }}
 spec:
-replicas: 1
+replicas: {{ .Values.longhornUI.replicas }}
 selector:
 matchLabels:
 app: longhorn-ui
@@ -48,14 +48,24 @@ spec:
 {{- if .Values.longhornUI.priorityClass }}
 priorityClassName: {{ .Values.longhornUI.priorityClass | quote }}
 {{- end }}
-{{- if .Values.longhornUI.tolerations }}
+{{- if or .Values.longhornUI.tolerations .Values.global.cattle.windowsCluster.enabled }}
 tolerations:
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+{{- end }}
+{{- if .Values.longhornUI.tolerations }}
 {{ toYaml .Values.longhornUI.tolerations | indent 6 }}
 {{- end }}
-{{- if .Values.longhornUI.nodeSelector }}
+{{- end }}
+{{- if or .Values.longhornUI.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
 nodeSelector:
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+{{- end }}
+{{- if .Values.longhornUI.nodeSelector }}
 {{ toYaml .Values.longhornUI.nodeSelector | indent 8 }}
 {{- end }}
+{{- end }}
 ---
 kind: Service
 apiVersion: v1
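With the replica count templated rather than hardcoded, the UI deployment can be scaled from values; a minimal sketch:

longhornUI:
  replicas: 2   # was fixed at 1 before this change; chart default remains 1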

View File

@@ -56,14 +56,24 @@ spec:
 {{- if .Values.longhornDriver.priorityClass }}
 priorityClassName: {{ .Values.longhornDriver.priorityClass | quote }}
 {{- end }}
-{{- if .Values.longhornDriver.tolerations }}
+{{- if or .Values.longhornDriver.tolerations .Values.global.cattle.windowsCluster.enabled }}
 tolerations:
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+{{- end }}
+{{- if .Values.longhornDriver.tolerations }}
 {{ toYaml .Values.longhornDriver.tolerations | indent 6 }}
 {{- end }}
-{{- if .Values.longhornDriver.nodeSelector }}
+{{- end }}
+{{- if or .Values.longhornDriver.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
 nodeSelector:
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+{{- end }}
+{{- if .Values.longhornDriver.nodeSelector }}
 {{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }}
 {{- end }}
+{{- end }}
 serviceAccountName: longhorn-service-account
 ---
 apiVersion: apps/v1
@@ -135,12 +145,22 @@ spec:
 {{- if .Values.longhornDriver.priorityClass }}
 priorityClassName: {{ .Values.longhornDriver.priorityClass | quote }}
 {{- end }}
-{{- if .Values.longhornDriver.tolerations }}
+{{- if or .Values.longhornDriver.tolerations .Values.global.cattle.windowsCluster.enabled }}
 tolerations:
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+{{- end }}
+{{- if .Values.longhornDriver.tolerations }}
 {{ toYaml .Values.longhornDriver.tolerations | indent 6 }}
 {{- end }}
-{{- if .Values.longhornDriver.nodeSelector }}
+{{- end }}
+{{- if or .Values.longhornDriver.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
 nodeSelector:
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+{{- end }}
+{{- if or .Values.longhornDriver.nodeSelector }}
 {{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }}
 {{- end }}
+{{- end }}
 serviceAccountName: longhorn-service-account

View File

@@ -11,7 +11,7 @@ metadata:
 labels: {{- include "longhorn.labels" . | nindent 4 }}
 app: longhorn-ingress
 annotations:
-{{- if .Values.ingress.tls }}
+{{- if .Values.ingress.secureBackends }}
 ingress.kubernetes.io/secure-backends: "true"
 {{- end }}
 {{- range $key, $value := .Values.ingress.annotations }}
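Previously the `ingress.kubernetes.io/secure-backends` annotation was tied to `ingress.tls`; the two concerns can now be toggled independently, e.g.:

ingress:
  enabled: true        # assumption: pre-existing chart toggle, not shown in this hunk
  tls: true            # TLS on the ingress record only
  secureBackends: true # additionally connect to the backend service on port 443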

View File

@@ -38,11 +38,21 @@ spec:
 priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
 {{- end }}
 serviceAccountName: longhorn-service-account
-{{- if .Values.longhornManager.tolerations }}
+{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
 tolerations:
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+{{- end }}
+{{- if .Values.longhornManager.tolerations }}
 {{ toYaml .Values.longhornManager.tolerations | indent 6 }}
 {{- end }}
-{{- if .Values.longhornManager.nodeSelector }}
+{{- end }}
+{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
 nodeSelector:
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+{{- end }}
+{{- if .Values.longhornManager.nodeSelector }}
 {{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
 {{- end }}
+{{- end }}

View File

@@ -39,11 +39,21 @@ spec:
 priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
 {{- end }}
 serviceAccountName: longhorn-service-account
-{{- if .Values.longhornManager.tolerations }}
+{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
 tolerations:
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
+{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
+{{- end }}
+{{- if .Values.longhornManager.tolerations }}
 {{ toYaml .Values.longhornManager.tolerations | indent 6 }}
 {{- end }}
-{{- if .Values.longhornManager.nodeSelector }}
+{{- end }}
+{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
 nodeSelector:
+{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
+{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
+{{- end }}
+{{- if or .Values.longhornManager.nodeSelector }}
 {{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
 {{- end }}
+{{- end }}

View File

@@ -4,27 +4,43 @@
 global:
 cattle:
 systemDefaultRegistry: ""
+windowsCluster:
+# Enable this to allow Longhorn to run on the Rancher deployed Windows cluster
+enabled: false
+# Tolerate Linux node taint
+tolerations:
+- key: "cattle.io/os"
+value: "linux"
+effect: "NoSchedule"
+operator: "Equal"
+# Select Linux nodes
+nodeSelector:
+kubernetes.io/os: "linux"
+# Recognize toleration and node selector for Longhorn run-time created components
+defaultSetting:
+taintToleration: cattle.io/os=linux:NoSchedule
+systemManagedComponentsNodeSelector: kubernetes.io/os:linux
 image:
 longhorn:
 engine:
 repository: longhornio/longhorn-engine
-tag: v1.3.0
+tag: v1.3.1-rc2
 manager:
 repository: longhornio/longhorn-manager
-tag: v1.3.0
+tag: v1.3.1-rc2
 ui:
 repository: longhornio/longhorn-ui
-tag: v1.3.0
+tag: v1.3.1-rc2
 instanceManager:
 repository: longhornio/longhorn-instance-manager
-tag: v1_20220611
+tag: v1_20220808
 shareManager:
 repository: longhornio/longhorn-share-manager
-tag: v1_20220531
+tag: v1_20220808
 backingImageManager:
 repository: longhornio/backing-image-manager
-tag: v3_20220609
+tag: v3_20220808
 csi:
 attacher:
 repository: longhornio/csi-attacher
@@ -125,6 +141,9 @@ privateRegistry:
 registrySecret: ~
 longhornManager:
+log:
+## Allowed values are `plain` or `json`.
+format: plain
 priorityClass: ~
 tolerations: []
 ## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
@@ -138,6 +157,11 @@ longhornManager:
 ## and uncomment this example block
 # label-key1: "label-value1"
 # label-key2: "label-value2"
+serviceAnnotations: {}
+## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
+## and uncomment this example block
+# annotation-key1: "annotation-value1"
+# annotation-key2: "annotation-value2"

 longhornDriver:
 priorityClass: ~
@@ -155,6 +179,7 @@ longhornDriver:
 # label-key2: "label-value2"

 longhornUI:
+replicas: 1
 priorityClass: ~
 tolerations: []
 ## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
@@ -193,9 +218,11 @@ ingress:
 host: sslip.io
 ## Set this to true in order to enable TLS on the ingress record
-## A side effect of this will be that the backend service will be connected at port 443
 tls: false
+## Enable this in order to enable that the backend service will be connected at port 443
+secureBackends: false
 ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
 tlsSecret: longhorn.local-tls
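For a Rancher-deployed Windows cluster, a single values override is enough to pick up the Linux-only scheduling defaults shipped above; a minimal sketch:

global:
  cattle:
    windowsCluster:
      enabled: true
      # the chart then tolerates the cattle.io/os=linux:NoSchedule taint, pins
      # chart-deployed components to kubernetes.io/os: linux nodes, and seeds the
      # taint-toleration and system-managed-components-node-selector default settings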

View File

@@ -3,9 +3,9 @@ longhornio/csi-provisioner:v2.1.2
 longhornio/csi-resizer:v1.2.0
 longhornio/csi-snapshotter:v3.0.3
 longhornio/csi-node-driver-registrar:v2.5.0
-longhornio/backing-image-manager:v3_20220609
-longhornio/longhorn-engine:v1.3.0
-longhornio/longhorn-instance-manager:v1_20220611
-longhornio/longhorn-manager:v1.3.0
-longhornio/longhorn-share-manager:v1_20220531
-longhornio/longhorn-ui:v1.3.0
+longhornio/backing-image-manager:v3_20220808
+longhornio/longhorn-engine:v1.3.1-rc2
+longhornio/longhorn-instance-manager:v1_20220808
+longhornio/longhorn-manager:v1.3.1-rc2
+longhornio/longhorn-share-manager:v1_20220808
+longhornio/longhorn-ui:v1.3.1-rc2

View File

@@ -13,7 +13,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 spec:
 privileged: true
 allowPrivilegeEscalation: true
@@ -49,7 +49,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 ---
 # Source: longhorn/templates/default-setting.yaml
 apiVersion: v1
@@ -60,7 +60,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 data:
 default-setting.yaml: |-
 ---
@@ -73,7 +73,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 data:
 storageclass.yaml: |
 kind: StorageClass
@@ -103,7 +103,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: backingimagedatasources.longhorn.io
 spec:
@@ -274,7 +274,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: backingimagemanagers.longhorn.io
 spec:
@@ -459,7 +459,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: backingimages.longhorn.io
 spec:
@@ -634,7 +634,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: backups.longhorn.io
 spec:
@@ -827,7 +827,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: backuptargets.longhorn.io
 spec:
@@ -1010,7 +1010,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: backupvolumes.longhorn.io
 spec:
@@ -1174,10 +1174,11 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: engineimages.longhorn.io
 spec:
+preserveUnknownFields: false
 conversion:
 strategy: Webhook
 webhook:
@@ -1209,7 +1210,7 @@ spec:
 jsonPath: .spec.image
 name: Image
 type: string
-- description: Number of volumes are using the engine image
+- description: Number of resources using the engine image
 jsonPath: .status.refCount
 name: RefCount
 type: integer
@@ -1251,7 +1252,7 @@ spec:
 jsonPath: .spec.image
 name: Image
 type: string
-- description: Number of volumes are using the engine image
+- description: Number of resources using the engine image
 jsonPath: .status.refCount
 name: RefCount
 type: integer
@@ -1365,7 +1366,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: engines.longhorn.io
 spec:
@@ -1686,7 +1687,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: instancemanagers.longhorn.io
 spec:
@@ -1856,10 +1857,11 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: nodes.longhorn.io
 spec:
+preserveUnknownFields: false
 conversion:
 strategy: Webhook
 webhook:
@@ -2086,7 +2088,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: orphans.longhorn.io
 spec:
@@ -2352,7 +2354,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: replicas.longhorn.io
 spec:
@@ -2544,7 +2546,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: settings.longhorn.io
 spec:
@@ -2635,7 +2637,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: sharemanagers.longhorn.io
 spec:
@@ -2746,7 +2748,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: snapshots.longhorn.io
 spec:
@@ -2870,10 +2872,11 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 longhorn-manager: ""
 name: volumes.longhorn.io
 spec:
+preserveUnknownFields: false
 conversion:
 strategy: Webhook
 webhook:
@@ -3201,7 +3204,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 rules:
 - apiGroups:
 - apiextensions.k8s.io
@@ -3262,7 +3265,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 roleRef:
 apiGroup: rbac.authorization.k8s.io
 kind: ClusterRole
@@ -3280,7 +3283,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 namespace: longhorn-system
 rules:
 - apiGroups:
@@ -3300,7 +3303,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 namespace: longhorn-system
 roleRef:
 apiGroup: rbac.authorization.k8s.io
@@ -3321,7 +3324,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 app: longhorn-manager
 name: longhorn-backend
 namespace: longhorn-system
@@ -3342,7 +3345,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 app: longhorn-ui
 name: longhorn-frontend
 namespace: longhorn-system
@@ -3363,7 +3366,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 app: longhorn-conversion-webhook
 name: longhorn-conversion-webhook
 namespace: longhorn-system
@@ -3384,7 +3387,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 app: longhorn-admission-webhook
 name: longhorn-admission-webhook
 namespace: longhorn-system
@@ -3405,7 +3408,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 name: longhorn-engine-manager
 namespace: longhorn-system
 spec:
@@ -3421,7 +3424,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 name: longhorn-replica-manager
 namespace: longhorn-system
 spec:
@@ -3437,7 +3440,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 app: longhorn-manager
 name: longhorn-manager
 namespace: longhorn-system
@@ -3450,16 +3453,16 @@ spec:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 app: longhorn-manager
 spec:
 initContainers:
 - name: wait-longhorn-admission-webhook
-image: longhornio/longhorn-manager:v1.3.0
+image: longhornio/longhorn-manager:v1.3.1-rc2
 command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" -k https://longhorn-admission-webhook:9443/v1/healthz) != "200" ]; do echo waiting; sleep 2; done']
 containers:
 - name: longhorn-manager
-image: longhornio/longhorn-manager:v1.3.0
+image: longhornio/longhorn-manager:v1.3.1-rc2
 imagePullPolicy: IfNotPresent
 securityContext:
 privileged: true
@@ -3468,15 +3471,15 @@ spec:
 - -d
 - daemon
 - --engine-image
-- "longhornio/longhorn-engine:v1.3.0"
+- "longhornio/longhorn-engine:v1.3.1-rc2"
 - --instance-manager-image
-- "longhornio/longhorn-instance-manager:v1_20220611"
+- "longhornio/longhorn-instance-manager:v1_20220808"
 - --share-manager-image
-- "longhornio/longhorn-share-manager:v1_20220531"
+- "longhornio/longhorn-share-manager:v1_20220808"
 - --backing-image-manager-image
-- "longhornio/backing-image-manager:v3_20220609"
+- "longhornio/backing-image-manager:v3_20220808"
 - --manager-image
-- "longhornio/longhorn-manager:v1.3.0"
+- "longhornio/longhorn-manager:v1.3.1-rc2"
 - --service-account
 - longhorn-service-account
 ports:
@@ -3536,7 +3539,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 spec:
 replicas: 1
 selector:
@@ -3547,23 +3550,23 @@ spec:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 app: longhorn-driver-deployer
 spec:
 initContainers:
 - name: wait-longhorn-manager
-image: longhornio/longhorn-manager:v1.3.0
+image: longhornio/longhorn-manager:v1.3.1-rc2
 command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
 containers:
 - name: longhorn-driver-deployer
-image: longhornio/longhorn-manager:v1.3.0
+image: longhornio/longhorn-manager:v1.3.1-rc2
 imagePullPolicy: IfNotPresent
 command:
 - longhorn-manager
 - -d
 - deploy-driver
 - --manager-image
-- "longhornio/longhorn-manager:v1.3.0"
+- "longhornio/longhorn-manager:v1.3.1-rc2"
 - --manager-url
 - http://longhorn-backend:9500/v1
 env:
@@ -3600,7 +3603,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 app: longhorn-ui
 name: longhorn-ui
 namespace: longhorn-system
@@ -3614,12 +3617,12 @@ spec:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 app: longhorn-ui
 spec:
 containers:
 - name: longhorn-ui
-image: longhornio/longhorn-ui:v1.3.0
+image: longhornio/longhorn-ui:v1.3.1-rc2
 imagePullPolicy: IfNotPresent
 volumeMounts:
 - name : nginx-cache
@@ -3651,7 +3654,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 app: longhorn-conversion-webhook
 name: longhorn-conversion-webhook
 namespace: longhorn-system
@@ -3665,7 +3668,7 @@ spec:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 app: longhorn-conversion-webhook
 spec:
 affinity:
@@ -3682,7 +3685,7 @@ spec:
 topologyKey: kubernetes.io/hostname
 containers:
 - name: longhorn-conversion-webhook
-image: longhornio/longhorn-manager:v1.3.0
+image: longhornio/longhorn-manager:v1.3.1-rc2
 imagePullPolicy: IfNotPresent
 securityContext:
 runAsUser: 2000
@@ -3711,7 +3714,7 @@ metadata:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 app: longhorn-admission-webhook
 name: longhorn-admission-webhook
 namespace: longhorn-system
@@ -3725,7 +3728,7 @@ spec:
 labels:
 app.kubernetes.io/name: longhorn
 app.kubernetes.io/instance: longhorn
-app.kubernetes.io/version: v1.3.0
+app.kubernetes.io/version: v1.3.1-rc2
 app: longhorn-admission-webhook
 spec:
 affinity:
@@ -3742,14 +3745,14 @@ spec:
 topologyKey: kubernetes.io/hostname
 initContainers:
 - name: wait-longhorn-conversion-webhook
-image: longhornio/longhorn-manager:v1.3.0
+image: longhornio/longhorn-manager:v1.3.1-rc2
 command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" -k https://longhorn-conversion-webhook:9443/v1/healthz) != "200" ]; do echo waiting; sleep 2; done']
 imagePullPolicy: IfNotPresent
 securityContext:
 runAsUser: 2000
 containers:
 - name: longhorn-admission-webhook
-image: longhornio/longhorn-manager:v1.3.0
+image: longhornio/longhorn-manager:v1.3.1-rc2
 imagePullPolicy: IfNotPresent
 securityContext:
 runAsUser: 2000

View File

@@ -15,7 +15,7 @@ https://github.com/longhorn/longhorn/issues/972
 1. Previously Longhorn is using filesystem ID as keys to the map of disks on the node. But we found there is no guarantee that filesystem ID won't change after the node reboots for certain filesystems e.g. XFS.
 1. We want to enable the ability to configure CRD directly, prepare for the CRD based API access in the future
 1. We also need to make sure previously implemented safe guards are not impacted by this change:
-1. If a disk was accidentally umounted on the node, we should detect that and stop replica from scheduling into it.
+1. If a disk was accidentally unmounted on the node, we should detect that and stop replica from scheduling into it.
 1. We shouldn't allow user to add two disks pointed to the same filesystem
 ### Non-goals

View File

@@ -75,4 +75,4 @@ No special upgrade strategy is necessary. Once the user upgrades to the new vers
 ### Notes
 - There is interest in allowing the user to decide on whether or not to retain the `Persistent Volume` (and possibly `Persistent Volume Claim`) for certain use cases such as restoring from a `Backup`. However, this would require changes to the way `go-rancher` generates the `Go` client that we use so that `Delete` requests against resources are able to take inputs.
-- In the case that a `Volume` is provisioned from a `Storage Class` (and set to be `Deleted` once the `Persistent Volume Claim` utilizing that `Volume` has been deleted), the `Volume` should still be deleted properly regardless of how the deletion was initiated. If the `Volume` is deleted from the UI, the call that the `Volume Controller` makes to delete the `Persistent Volume` would only trigger one more deletion call from the `CSI` server to delete the `Volume`, which would return successfully and allow the `Persistent Volume` to be deleted and the `Volume` to be deleted as wekk. If the `Volume` is deleted because of the `Persistent Volume Claim`, the `CSI` server would be able to successfully make a `Volume` deletion call before deleting the `Persistent Volume`. The `Volume Controller` would have no additional resources to delete and be able to finish deletion of the `Volume`.
+- In the case that a `Volume` is provisioned from a `Storage Class` (and set to be `Deleted` once the `Persistent Volume Claim` utilizing that `Volume` has been deleted), the `Volume` should still be deleted properly regardless of how the deletion was initiated. If the `Volume` is deleted from the UI, the call that the `Volume Controller` makes to delete the `Persistent Volume` would only trigger one more deletion call from the `CSI` server to delete the `Volume`, which would return successfully and allow the `Persistent Volume` to be deleted and the `Volume` to be deleted as well. If the `Volume` is deleted because of the `Persistent Volume Claim`, the `CSI` server would be able to successfully make a `Volume` deletion call before deleting the `Persistent Volume`. The `Volume Controller` would have no additional resources to delete and be able to finish deletion of the `Volume`.

View File

@ -16,7 +16,7 @@ https://github.com/longhorn/longhorn/issues/298
## Proposal ## Proposal
1. Add `Eviction Requested` with `true` and `false` selection buttons for disks and nodes. This is for user to evict or cancel the eviction of the disks or the nodes. 1. Add `Eviction Requested` with `true` and `false` selection buttons for disks and nodes. This is for user to evict or cancel the eviction of the disks or the nodes.
2. Add new `evictionRequested` field to `Node.Spec`, `Node.Spec.disks` Spec and `Replica.Status`. These will help tracking the request from user and trigger replica controller to update `Replica.Status` and volume controler to do the eviction. And this will reconcile with `scheduledReplica` of selected disks on the nodes. 2. Add new `evictionRequested` field to `Node.Spec`, `Node.Spec.disks` Spec and `Replica.Status`. These will help tracking the request from user and trigger replica controller to update `Replica.Status` and volume controller to do the eviction. And this will reconcile with `scheduledReplica` of selected disks on the nodes.
3. Display `fail to evict` error message to `Dashboard` and any other eviction errors to the `Event log`. 3. Display `fail to evict` error message to `Dashboard` and any other eviction errors to the `Event log`.
### User Stories ### User Stories
@@ -47,7 +47,7 @@ From an API perspective, the call to set `Eviction Requested` to `true` or `fals
### Implementation Overview
1. On the `Longhorn UI` `Node` page, for node eviction, add `Eviction Requested` `true` and `false` options in the `Edit Node` sub-selection, next to `Node Scheduling`. For disk eviction, add `Eviction Requested` `true` and `false` options in the `Edit node and disks` sub-selection under the `Operation` column, next to each disk's `Scheduling` options. This is for the user to evict or cancel the eviction of the disks or the nodes.
2. Add a new `evictionRequested` field to `Node.Spec`, `Node.Spec.disks` Spec and `Replica.Status`. These fields will help track the request from the user and trigger the replica controller to update `Replica.Status` and the volume controller to do the eviction. And this will reconcile with `scheduledReplica` of the selected disks on the nodes.
3. Add an informer in `Replica Controller` to get this information and update the `evictionRequested` field in `Replica.Status`.
4. Once `Eviction Requested` has been set to `true` for disks or nodes, the `evictionRequested` fields for those disks and nodes will be set to `true` (default is `false`; see the CLI sketch after this list).
5. `Replica Controller` will update the `evictionRequested` field in `Replica.Status`, and `Volume Controller` will get this information from its replicas.
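As a rough CLI illustration of step 4, setting the node-level flag directly on the custom resource might look like this (the `nodes.longhorn.io` resource name and the field path are assumptions based on Longhorn's CRD naming):
```
kubectl -n longhorn-system patch nodes.longhorn.io <node-name> \
  --type merge -p '{"spec": {"evictionRequested": true}}'
```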
@@ -61,7 +61,7 @@ From an API perspective, the call to set `Eviction Requested` to `true` or `fals
#### Manual Test Plan For Disks and Nodes Eviction
Positive Case:
Test with `Replica Node Level Soft Anti-Affinity` both enabled and disabled. Also, the volume can be 'Attached' or 'Detached'.
1. User can select one or more disks or nodes for eviction. Set `Eviction Requested` to `true` on the disabled disks or nodes; Longhorn should start rebuilding replicas for the volumes which have replicas on the eviction disks or nodes, and after the rebuild succeeds, the replica number on the evicted disks or nodes should be 0. E.g. when there are 3 nodes in the cluster, and `Replica Node Level Soft Anti-Affinity` is set to `false`, disable one node, and create a volume with replica count 2. Then evict one of the nodes; the eviction should get stuck. Then set `Replica Node Level Soft Anti-Affinity` to `true`; the eviction should go through.
Negative Cases:
@@ -73,10 +73,10 @@ For `Replica Node Level Soft Anti-Affinity` is enabled, create 2 replicas on the
With `Replica Node Level Soft Anti-Affinity` disabled, create 1 replica on a disk, and evict this disk or node; the replica should go to another disk or node.
For node eviction, Longhorn will process the eviction based on the disks of the node; this is like disk eviction. After the eviction succeeds, the replica number on the evicted node should be 0.
#### Error Indication
During the eviction, the user can click the `Replicas Number` on the `Node` page to see which replicas are left to evict, and clicking the `Replica Name` will redirect the user to the `Volume` page to see if there is any error for this volume. If there is any error during the rebuild, Longhorn should display the error message in the UI. The error could be `failed to schedule a replica` due to disk space, or, based on the scheduling policy, not being able to find a valid disk to put the replica on.
### Upgrade strategy
No special upgrade strategy is necessary. Once the user upgrades to the new version of `Longhorn`, these new capabilities will be accessible from the `longhorn-ui` without any special work.


@@ -61,12 +61,12 @@ Same as the Design
### Test plan
1. Set up a cluster of 3 nodes
1. Install Longhorn and set `Default Replica Count = 2` (because we will turn off one node)
1. Create a StatefulSet with 2 pods using the command:
```
kubectl create -f https://raw.githubusercontent.com/longhorn/longhorn/master/examples/statefulset.yaml
```
1. Create a volume + pv + pvc named `vol1` and create a deployment of default ubuntu named `shell` with the pvc `vol1` mounted under `/mnt/vol1` (a sample manifest is sketched after this list)
1. Find the node which contains one pod of the StatefulSet/Deployment. Power off the node
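One possible manifest for the `shell` deployment described above (the image choice and labels are illustrative; the PVC `vol1` is assumed to exist already):
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: shell
spec:
  replicas: 1
  selector:
    matchLabels:
      app: shell
  template:
    metadata:
      labels:
        app: shell
    spec:
      containers:
      - name: shell
        image: ubuntu:20.04
        command: ["sleep", "infinity"]   # keep the pod running for manual checks
        volumeMounts:
        - name: vol1
          mountPath: /mnt/vol1
      volumes:
      - name: vol1
        persistentVolumeClaim:
          claimName: vol1
```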
#### StatefulSet
##### if `NodeDownPodDeletionPolicy` is set to `do-nothing` | `delete-deployment-pod`


@@ -119,7 +119,7 @@ UI modification:
* On the right volume info panel, add a <div> to display `selectedVolume.dataLocality`
* On the right volume panel, in the Health row, add an icon for data locality status.
Specifically, if `dataLocality=best-effort` but there is no local replica, display a warning icon.
Similar to the replica node redundancy warning [here](https://github.com/longhorn/longhorn-ui/blob/0a52c1f0bef172d8ececdf4e1e953bfe78c86f29/src/routes/volume/detail/VolumeInfo.js#L47)
* In the volume's actions dropdown, add a new action to update `dataLocality`
1. In Rancher UI, add a parameter `dataLocality` when creating a storage class using the Longhorn provisioner.
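For reference, a StorageClass carrying this parameter might look like the following sketch (the `dataLocality` parameter name comes from this proposal; the other fields are common Longhorn defaults):
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: longhorn-best-effort
provisioner: driver.longhorn.io
parameters:
  numberOfReplicas: "2"
  dataLocality: "best-effort"   # default is "disabled"
```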


@@ -15,7 +15,7 @@ https://github.com/longhorn/longhorn/issues/508
1. By default 'DisableRevisionCounter' is 'false', but Longhorn provides an option for the user to disable it.
2. Once the user sets 'DisableRevisionCounter' to 'true' globally or individually, this will improve Longhorn data path performance.
3. And with 'DisableRevisionCounter' set to 'true', Longhorn will keep the ability to find the most suitable replica to recover the volume when the engine is faulted (all the replicas are in 'ERR' state).
4. Also, during Longhorn Engine starting, it's unlikely to find out-of-sync replicas with only the head file information, so the check will be skipped.
## Proposal
@@ -41,7 +41,7 @@ Or from StorageClass yaml file, user can set 'parameters' 'revisionCounterDisabl
Users can also set 'DisableRevisionCounter' for each individual volume created via the Longhorn UI; this individual setting will overwrite the global setting.
Once the volume has 'DisableRevisionCounter' set to 'true', there won't be a revision counter file. And when 'Automatic salvage' is 'true' and the engine is faulted, the engine will pick the most suitable replica as the 'Source of Truth' to recover the volume.
### API changes
@@ -63,12 +63,12 @@ And for the API compatibility issues, always check the 'EngineImage.Status.cliAP
1. Add 'Volume.Spec.RevisionCounterDisabled', 'Replica.Spec.RevisionCounterDisabled' and 'Engine.Spec.RevisionCounterDisabled' to the volume, replica and engine objects.
2. Once 'RevisionCounterDisabled' is 'true', the volume controller will set 'Volume.Spec.RevisionCounterDisabled' to true, and 'Replica.Spec.RevisionCounterDisabled' and 'Engine.Spec.RevisionCounterDisabled' will be set to true. During 'ReplicaProcessCreate' and 'EngineProcessCreate', this will be passed to the engine replica process and engine controller process to start a replica and controller without the revision counter.
3. During 'ReplicaProcessCreate' and 'EngineProcessCreate', if 'Replica.Spec.RevisionCounterDisabled' or 'Engine.Spec.RevisionCounterDisabled' is true, an extra parameter will be passed to the engine replica to start the replica without the revision counter, or to the engine controller to start the controller without revision counter support; otherwise keep it the same as current, and the engine replica will use the default value 'false' for this extra parameter. This is the same as the engine controller setting the 'salvageRequested' flag.
4. Add 'RevisionCounterDisabled' in 'ReplicaInfo'; when the engine controller starts, it will get all replica information.
5. For engine controller starting cases:
- If the revision counter is not disabled, stay with the current logic.
- If the revision counter is disabled, the engine will not check the synchronization of the replicas.
- In the unexpected case (the engine controller has the revision counter disabled but any of the replicas doesn't, or the engine controller has the revision counter enabled but any of the replicas doesn't), the engine controller will log this as an error and mark the unmatched replicas as 'ERR'.
#### Add New Logic for Salvage


@@ -47,7 +47,7 @@ No API change is required.
3. replica eviction happens (volume.Status.Robustness is Healthy)
4. there is no potential reusable replica
5. there is a potential reusable replica but the replica replenishment wait interval has passed.
3. Reuse the failed replica by cleaning up `ReplicaSpec.HealthyAt` and `ReplicaSpec.FailedAt`. And `Replica.Spec.RebuildRetryCount` will be increased by 1.
4. Clean up the related record in `Replica.Spec.RebuildRetryCount` when the rebuilding replica becomes mode `RW`.
5. Guarantee the reused failed replica will be stopped before re-launching it.


@@ -72,7 +72,7 @@ For example, there are many times users ask us for supporting and the problems w
If there is a CPU monitoring dashboard for instance managers, those problems can be quickly detected.
#### Story 2
Users want to be notified about abnormal events such as disk space limits approaching.
We can expose metrics providing information about it, and users can scrape the metrics and set up an alert system.
### User Experience In Detail
@@ -82,7 +82,7 @@ Users can use Prometheus or other monitoring systems to collect those metrics by
Then, users can display the collected data using tools such as Grafana.
Users can also set up alerts by using tools such as Prometheus Alertmanager.
Below are the descriptions of the metrics which Longhorn exposes and how users can use them:
1. longhorn_volume_capacity_bytes
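For instance, a Prometheus alerting rule built on this metric might look like the following sketch (the companion metric `longhorn_volume_actual_size_bytes` and the `volume` label are assumptions here, and the 90% threshold is illustrative):
```yaml
groups:
- name: longhorn-volume-alerts
  rules:
  - alert: LonghornVolumeAlmostFull
    # Fire when a volume's actual usage exceeds 90% of its capacity for 5 minutes.
    expr: longhorn_volume_actual_size_bytes / longhorn_volume_capacity_bytes > 0.9
    for: 5m
    annotations:
      description: Longhorn volume {{ $labels.volume }} is over 90% full.
```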
@@ -347,7 +347,7 @@ We add a new endpoint `/metrics` to expose all Longhorn Prometheus metrics.
### Implementation Overview
We follow the [Prometheus best practice](https://prometheus.io/docs/instrumenting/writing_exporters/#deployment): each Longhorn manager reports information about the components it manages.
Prometheus can use its service discovery mechanism to find all longhorn-manager pods in the longhorn-backend service.
We create a new collector for each type (volumeCollector, backupCollector, nodeCollector, etc.) and have a common baseCollector.
This structure is similar to the controller package: we have volumeController, nodeController, etc., which have a common baseController.
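A scrape configuration in that spirit could look like this sketch (the `longhorn-backend` service name comes from the text above; the namespace and relabeling details are assumptions):
```yaml
scrape_configs:
- job_name: longhorn-manager
  kubernetes_sd_configs:
  - role: endpoints
    namespaces:
      names: [longhorn-system]
  relabel_configs:
  # Keep only the endpoints that belong to the longhorn-backend service.
  - source_labels: [__meta_kubernetes_service_name]
    regex: longhorn-backend
    action: keep
```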


@@ -45,7 +45,7 @@ For part 2, we upgrade engine image for a volume when the following conditions a
### User Stories
Before this enhancement, users have to manually upgrade engine images for volumes after upgrading the Longhorn system to a newer version.
If there are thousands of volumes in the system, this is significant manual work.
After this enhancement users either have to do nothing (in case live upgrade is possible)
or they only have to scale down/up the workload (in case there is a new default IM image)


@@ -70,7 +70,7 @@ spec:
    url: https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
```
Afterwards deploy the `cirros-rwx-blk.yaml` to create a live migratable virtual machine.
```yaml
apiVersion: kubevirt.io/v1alpha3
kind: VirtualMachine


@@ -155,14 +155,14 @@ With an example of cluster set for 2 zones and default of 2 replicas volume:
- The default value is `ignored`.
- In Volume Controller `syncVolume` -> `ReconcileEngineReplicaState` -> `replenishReplicas`, calculate and add the number of replicas to be rebalanced to `replenishCount`.
  > The logic ignores all `soft-anti-affinity` settings. This will always try to achieve zone balance then node balance. And the creation of replicas is left to the ReplicaScheduler to determine the candidates.
  1. Skip volume replica rebalance when volume spec `replicaAutoBalance` is `disabled`.
  2. Skip if volume `Robustness` is not `healthy`.
  3. For `least-effort`, try to get the replica rebalance count.
     1. For `zone` duplicates, get the replenish number.
        1. List all the occupied node zones with volume replicas running.
           - The zone is balanced when this is equal to volume spec `NumberOfReplicas`.
        2. List all available and schedulable nodes in non-occupied zones.
           - The zone is balanced when no available nodes are found.
        3. Get the number of replicas off-balanced:
           - number of replicas in volume spec - number of occupied node zones.
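As a usage sketch, the per-volume field described above might be set directly on the volume custom resource like this (the `volumes.longhorn.io` resource name is an assumption; `vol1` is illustrative):
```
kubectl -n longhorn-system patch volumes.longhorn.io vol1 \
  --type merge -p '{"spec": {"replicaAutoBalance": "least-effort"}}'
```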


@@ -354,7 +354,7 @@ Labels
[labels/2]: [b]
```
- `Name` field should be immutable.
- `Task` field should be immutable.
*And* the user edits the fields in the form.


@@ -337,7 +337,7 @@ After the enhancement, users can directly specify the BackingImage during volume
- BackingImageDataSource has not been created. Adding a retry would solve this case.
- BackingImageDataSource is gone but BackingImage has not been cleaned up. Longhorn can ignore BackingImageDataSource when the BackingImage deletion timestamp is set.
- BackingImage disk cleanup:
- This cannot break the HA besides attaching replicas. The main idea is similar to the cleanup in BackingImage Controller.
9. In CSI:
- Check the backing image during the volume creation.
- The missing BackingImage will be created when both BackingImage name and data source info are provided.
@@ -370,7 +370,7 @@ After the enhancement, users can directly specify the BackingImage during volume
- Similar to `Fetch`, the image will try to reuse existing files.
- The manager is responsible for managing all ports. The image will use the functions provided by the manager to get then release ports.
- API `Send`: Send a backing image file to a receiver. This should be similar to replica rebuilding.
- API `Delete`: Unregister the image then delete the image work directory. Make sure syncing or pulling will be cancelled if it exists.
- API `Get`/`List`: Collect the status of one backing image file/all backing image files.
- API `Watch`: Establish a streaming connection to report BackingImage file info.
- As I mentioned above, we will use the BackingImage UUID to generate work directories for each BackingImage. The work directory is like:


@@ -190,7 +190,7 @@ Using those methods, the Sparse-tools know where is a data/hole interval to tran
### Longhorn CSI plugin
* Advertise that the Longhorn CSI driver has the ability to clone a volume, `csi.ControllerServiceCapability_RPC_CLONE_VOLUME`
* When receiving a volume create request, inspect `req.GetVolumeContentSource()` to see if it is from another volume.
If so, create a new Longhorn volume with the appropriate `DataSource` set so the Longhorn volume controller can start cloning later on.
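From the user's side, the content source that arrives in `req.GetVolumeContentSource()` originates from a standard PVC clone manifest such as this one (names and sizes are illustrative):
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cloned-pvc
spec:
  storageClassName: longhorn
  dataSource:
    kind: PersistentVolumeClaim
    name: source-pvc        # must be in the same namespace and storage class
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
```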
### Test plan


@@ -66,7 +66,7 @@ After the enhancement, Longhorn automatically finds out the orphaned replica dir
- Users can enable the global auto-deletion on the setting page. By default, the auto-deletion is disabled.
- Via `kubectl`
  - Users can list the orphaned replica directories by `kubectl -n longhorn-system get orphans`.
  - Users can delete the orphaned replica directories by `kubectl -n longhorn-system delete orphan <name>`.
  - Users can enable the global auto-deletion by `kubectl -n longhorn-system edit settings orphan-auto-deletion`


@@ -29,7 +29,7 @@ What is out of scope for this enhancement? Listing non-goals helps to focus disc
This is where we get down to the nitty-gritty of what the proposal actually is.
### User Stories
Detail the things that people will be able to do if this enhancement is implemented. A good practice is including a comparison of what the user cannot do before the enhancement is implemented, why the user would want the enhancement, and what the user needs to do after, to make it clear why the enhancement is beneficial to the user.
The experience details should be in the `User Experience In Detail` section later.


@@ -71,9 +71,9 @@ detect_node_os()
{
  local pod="$1"
  OS=`kubectl exec -i $pod -- nsenter --mount=/proc/1/ns/mnt -- bash -c 'grep -E "^ID_LIKE=" /etc/os-release | cut -d= -f2'`
  if [[ -z "${OS}" ]]; then
    OS=`kubectl exec -i $pod -- nsenter --mount=/proc/1/ns/mnt -- bash -c 'grep -E "^ID=" /etc/os-release | cut -d= -f2'`
  fi
  echo "$OS"
}
@@ -97,6 +97,10 @@ set_packages_and_check_cmd()
      CHECK_CMD='pacman -Q'
      PACKAGES=(nfs-utils open-iscsi)
      ;;
    *"gentoo"* )
      CHECK_CMD='qlist -I'
      PACKAGES=(net-fs/nfs-utils sys-block/open-iscsi)
      ;;
    *)
      CHECK_CMD=''
      PACKAGES=()
@@ -227,7 +231,7 @@ check_package_installed() {
  for ((i=0; i<${#PACKAGES[@]}; i++)); do
    local package=${PACKAGES[$i]}
    kubectl exec -i $pod -- nsenter --mount=/proc/1/ns/mnt -- timeout 30 bash -c "$CHECK_CMD $package" > /dev/null 2>&1
    if [ $? != 0 ]; then
      allFound=false
      node=`kubectl get ${pod} --no-headers -o=custom-columns=:.spec.nodeName`


@@ -0,0 +1,201 @@
#!/bin/bash
#set -x
kubectl get-all version &> /dev/null
if [ $? -ne 0 ]; then
echo "ERROR: command (kubectl get-all) is not found. Please install it here: https://github.com/corneliusweig/ketall#installation"
exit 1
fi
set -e
usage() {
echo ""
echo "The migration includes:"
echo "1. Running the script with --type=migrate to migrate the labels and annotations for Longhorn resources"
echo "2. Manually installing Longhorn chart in app&marketplace UI"
echo "3. Running script with --type=cleanup to remove the old Longhorn chart from old catalog UI"
echo ""
echo "usage:"
echo "$0 [options]"
echo " -u | --upstream-kubeconfig: upstream rancher cluster kubeconfig path"
echo " -d | --downstream-kubeconfig: downstream cluster kubeconfig path"
echo " -t | --type: specify the type you want to run (migrate or cleanup)"
echo " --dry-run: do not run migriation"
echo ""
echo "example:"
echo " $0 -u /path/to/upstream/rancher/cluster/kubeconfig -d /path/to/downstream/cluster/kubeconfig"
}
SCRIPT_DIR="$(dirname "$0")"
UPSTREAM_KUBECONFIG=""
DOWNSTREAM_KUBECONFIG=""
KUBECTL_DRY_RUN=""
while [ "$1" != "" ]; do
case $1 in
-u | --upstream-kubeconfig)
shift
UPSTREAM_KUBECONFIG="$1"
;;
-d | --downstream-kubeconfig)
shift
DOWNSTREAM_KUBECONFIG="$1"
;;
-t | --type)
shift
TYPE="$1"
;;
--dry-run)
KUBECTL_DRY_RUN="--dry-run=client"
;;
*)
usage
exit 1
;;
esac
shift
done
if [ -z "$UPSTREAM_KUBECONFIG" ]; then
echo "--upstream-kubeconfig is mandatory"
usage
exit 1
fi
if [ -z "$DOWNSTREAM_KUBECONFIG" ]; then
echo "--downstream-kubeconfig is mandatory"
usage
exit 1
fi
if [ "$TYPE" != "migrate" ] && [ "$TYPE" != "cleanup" ] ; then
echo "--type must be set to migrate or cleanup"
usage
exit 1
fi
# Longhorn Namespace
RELEASE_NAMESPACE=longhorn-system
# Longhorn Release Name
RELEASE_NAME=longhorn-system
echo "Looking up Rancher Project App '${RELEASE_NAME}' ..."
DOWNSTREAMCLUSTERID=$(cat ${DOWNSTREAM_KUBECONFIG} | grep "server:.*https://.*/k8s/clusters/.*" | awk -F'/' '{print $(NF)}' | awk -F'"' '{print $1}')
RANCHERAPP=$(kubectl --kubeconfig ${UPSTREAM_KUBECONFIG} get --all-namespaces apps.project.cattle.io -o jsonpath='{range.items[*]}{.metadata.namespace} {.metadata.name} {.spec.targetNamespace} {.spec.projectName} {.spec.externalId}{"\n"}{end}' | grep -s "${RELEASE_NAME} ${RELEASE_NAMESPACE} ${DOWNSTREAMCLUSTERID}")
RANCHERAPPNS=$(echo "${RANCHERAPP}" | awk '{print $1}')
RANCHERAPPEXTERNALID=$(echo "${RANCHERAPP}" | awk '{print $5}')
RANCHERAPPCATALOG=$(echo "${RANCHERAPPEXTERNALID}" | sed -n 's/.*catalog=\(.*\)/\1/p' | awk -F '&' '{print $1}' | sed 's/migrated-//')
RANCHERAPPTEMPLATE=$(echo "${RANCHERAPPEXTERNALID}" | sed -n 's/.*template=\(.*\)/\1/p' | awk -F '&' '{print $1}')
RANCHERAPPTEMPLATEVERSION=$(echo "${RANCHERAPPEXTERNALID}" | sed -n 's/.*version=\(.*\)/\1/p' | awk -F '&' '{print $1}')
RANCHERAPPVALUES=""
RANCHERAPPANSWERS=""
if [ -z "$DOWNSTREAMCLUSTERID" ] || [ -z "$RANCHERAPP" ] || [ -z "$RANCHERAPPNS" ] || [ -z "$RANCHERAPPCATALOG" ] || [ -z "$RANCHERAPPTEMPLATE" ] || [ -z "$RANCHERAPPTEMPLATEVERSION" ]; then
echo "Rancher Project App '${RELEASE_NAME}' not found!"
exit 1
fi
RANCHERAPPVALUES=$(kubectl --kubeconfig ${UPSTREAM_KUBECONFIG} -n ${RANCHERAPPNS} get apps.project.cattle.io ${RELEASE_NAME} -o go-template='{{if .spec.valuesYaml}}{{.spec.valuesYaml}}{{end}}')
if [ -z "${RANCHERAPPVALUES}" ]; then
RANCHERAPPANSWERS=$(kubectl --kubeconfig ${UPSTREAM_KUBECONFIG} -n ${RANCHERAPPNS} get apps.project.cattle.io ${RELEASE_NAME} -o go-template='{{if .spec.answers}}{{range $key,$value := .spec.answers}}{{$key}}: {{$value}}{{"\n"}}{{end}}{{end}}' | sed 's/: /=/' | sed 's/$/,/' | sed '$ s/.$//' | tr -d '\n')
fi
if [ -z "${RANCHERAPPVALUES:-$RANCHERAPPANSWERS}" ]; then
echo "No valid answers found!"
exit 1
fi
echo ""
echo "Rancher Project App '${RELEASE_NAME}' found:"
echo " Project-Namespace: ${RANCHERAPPNS}"
echo " Downstream-Cluster: ${DOWNSTREAMCLUSTERID}"
echo " Catalog: ${RANCHERAPPCATALOG}"
echo " Template: ${RANCHERAPPTEMPLATE} (${RANCHERAPPTEMPLATEVERSION})"
echo " Answers:"
printf '%s\n' "${RANCHERAPPVALUES:-$RANCHERAPPANSWERS}"
echo ""
if [ "$TYPE" == "cleanup" ] ; then
MANAGER=$(kubectl --kubeconfig ${DOWNSTREAM_KUBECONFIG} -n ${RELEASE_NAMESPACE} get ds longhorn-manager -ojsonpath="{.metadata.labels['app\.kubernetes\.io/managed-by']}")
if [ "$MANAGER" != "Helm" ] ; then
echo "Labels have not been migrated. Did you run the part 1 by specifying the flag --type=migrate ?"
exit 1
fi
echo ""
echo "Patching Project App Catalog ..."
kubectl --kubeconfig ${UPSTREAM_KUBECONFIG} -n ${RANCHERAPPNS} ${KUBECTL_DRY_RUN} patch apps.project.cattle.io ${RELEASE_NAME} --type=merge --patch-file=/dev/stdin <<-EOF
{
"metadata": {
"annotations": {
"cattle.io/skipUninstall": "true",
"catalog.cattle.io/ui-source-repo": "helm3-library",
"catalog.cattle.io/ui-source-repo-type": "cluster",
"apps.cattle.io/migrated": "true"
}
}
}
EOF
if [ $? -ne 0 ]; then
echo "Failed Patching Project App Catalog"
exit 1
fi
echo ""
echo "Deleting Project App Catalog ..."
kubectl --kubeconfig ${UPSTREAM_KUBECONFIG} -n ${RANCHERAPPNS} ${KUBECTL_DRY_RUN} delete apps.project.cattle.io ${RELEASE_NAME}
exit 0
fi
echo ""
echo ""
echo "Checking concurrent-automatic-engine-upgrade-per-node-limit setting ..."
SETTING=$(kubectl --kubeconfig ${DOWNSTREAM_KUBECONFIG} -n ${RELEASE_NAMESPACE} get settings.longhorn.io concurrent-automatic-engine-upgrade-per-node-limit -ojsonpath="{.value}")
if [ "$SETTING" != "0" ]; then
echo "concurrent-automatic-engine-upgrade-per-node-limit must be set to 0 before the migration"
exit 1
fi
echo ""
echo ""
echo "Looking up existing Resources ..."
RESOURCES=$(kubectl get-all --kubeconfig ${DOWNSTREAM_KUBECONFIG} --exclude AppRevision -o name -l io.cattle.field/appId=${RELEASE_NAME} 2>/dev/null | sort)
if [[ "$RESOURCES" == "No resources"* ]]; then
RESOURCES=""
fi
echo ""
echo "Patching CRD Resources ..."
for resource in $RESOURCES; do
if [[ $resource == "customresourcedefinition.apiextensions.k8s.io/"* ]]; then
kubectl --kubeconfig ${DOWNSTREAM_KUBECONFIG} -n ${RELEASE_NAMESPACE} ${KUBECTL_DRY_RUN} annotate --overwrite ${resource} "meta.helm.sh/release-name"="longhorn-crd" "meta.helm.sh/release-namespace"="${RELEASE_NAMESPACE}" "helm.sh/resource-policy"="keep"
kubectl --kubeconfig ${DOWNSTREAM_KUBECONFIG} -n ${RELEASE_NAMESPACE} ${KUBECTL_DRY_RUN} label --overwrite ${resource} "app.kubernetes.io/managed-by"="Helm"
fi
done
echo ""
echo "Patching Other Resources ..."
for resource in $RESOURCES; do
if [[ $resource == "customresourcedefinition.apiextensions.k8s.io/"* ]]; then
continue
fi
kubectl --kubeconfig ${DOWNSTREAM_KUBECONFIG} -n ${RELEASE_NAMESPACE} ${KUBECTL_DRY_RUN} annotate --overwrite ${resource} "meta.helm.sh/release-name"="longhorn" "meta.helm.sh/release-namespace"="${RELEASE_NAMESPACE}"
kubectl --kubeconfig ${DOWNSTREAM_KUBECONFIG} -n ${RELEASE_NAMESPACE} ${KUBECTL_DRY_RUN} label --overwrite ${resource} "app.kubernetes.io/managed-by"="Helm"
done
echo ""
echo "-----------------------------"
echo "Successfully updated the annotations and labels for the resources!"
echo "Next step:"
echo " 1. Go to Rancher UI -> Go to the downstream cluster -> App&Marketplace -> Charts"
echo " 2. Find and select the Longhorn chart"
echo " 3. Select the chart version corresponding the Longhorn version ${RANCHERAPPTEMPLATEVERSION}"
echo " 4. Install the chart with the correct helm values. Here are the helm values of your old charts: "
printf '%s\n' "${RANCHERAPPVALUES:-$RANCHERAPPANSWERS}"
echo " 5. Verify that the migrated charts are working ok"
echo " 6. Run this script again with the flag --type=cleanup to remove the old chart from the legacy UI"


@@ -0,0 +1,39 @@
#!/usr/bin/env bash
# Example:
# ./scripts/update-manifests-dev-version.sh 1.3.0 1.4.0
#
# Result:
# Chart version will be updated to 1.4.0-dev
# Images (manager, engine, ui) will be updated to master-head
#
set -o errexit
set -o nounset
PRJ_DIR=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/.." 2>/dev/null || realpath "$(dirname "${BASH_SOURCE[0]}")/.." 2>/dev/null)
CURRENT_VERSION=${CURRENT_VERSION:-$1}
NEW_VERSION=${NEW_VERSION:-$2}-dev
mapfile -t manifests < <(find "$PRJ_DIR" -type f -a \( -name '*.yaml' -o -name 'longhorn-images.txt' \))
if [[ ${#manifests[@]} -le 0 ]]; then
  echo "No manifests found to update from $PRJ_DIR" >&2
  exit 1
fi
echo "Updating $CURRENT_VERSION -> $NEW_VERSION with master-head images in the manifests below"
for f in "${manifests[@]}"; do
  f_name=$(basename "$f")
  if [[ $f_name == "Chart.yaml" ]]; then
    sed -i "s#\(version: \)${CURRENT_VERSION}#\1${NEW_VERSION}#g" "$f"
    sed -i "s#\(appVersion: v\)${CURRENT_VERSION}#\1${NEW_VERSION}#g" "$f"
  else
    sed -i "s#\(:\s*\)v${CURRENT_VERSION}#\1master-head#g" "$f"
  fi
  echo "$f updated"
done
. "$PRJ_DIR"/scripts/generate-longhorn-yaml.sh


@@ -106,7 +106,7 @@ spec:
    spec:
      containers:
      - name: longhorn-uninstall
        image: longhornio/longhorn-manager:v1.3.1-rc2
        imagePullPolicy: IfNotPresent
        securityContext:
          privileged: true