Compare commits

...

60 Commits

Author SHA1 Message Date
David Ko
b5d5f69477 release: 1.3.3-rc2
Signed-off-by: David Ko <dko@suse.com>
2023-03-22 21:25:35 +08:00
David Ko
ca9a600c0b release: 1.3.3-rc1
Signed-off-by: David Ko <dko@suse.com>
2023-03-20 21:27:06 +08:00
Derek Su
f6163cb068 chart: add conditions in engine/replica status.instanceStatus
Longhorn 1330

Signed-off-by: Derek Su <derek.su@suse.com>
(cherry picked from commit 13ac2a6641)
2023-03-20 20:19:40 +08:00
David Ko
5bc615d9bc chore(build): update system managed images for 1.3
Signed-off-by: David Ko <dko@suse.com>
2023-03-20 18:43:52 +08:00
Jack Lin
8b179fde12 chore(version-bump): backing-image-manager
Ref: longhorn/longhorn 5443

Signed-off-by: Jack Lin <jack.lin@suse.com>
2023-03-20 17:51:06 +08:00
James Lu
3459840bc6 feat(recurring-job): update chart for new tasks
Ref: 4898

Signed-off-by: James Lu <james.lu@suse.com>
(cherry picked from commit 15701bbe26)
2023-03-20 16:21:14 +08:00
James Lu
1a573f8492 feat(recurring-job): update YAML for new tasks
Ref: 4898

Signed-off-by: James Lu <james.lu@suse.com>
(cherry picked from commit 6c6cb23be1)
2023-03-20 16:21:14 +08:00
Phan Le
e1f6e72c15 Add nodeDrainPolicy setting
longhorn-5549

Signed-off-by: Phan Le <phan.le@suse.com>
(cherry picked from commit 86d06696df)
2023-03-18 08:28:18 +08:00
James Lu
d64d62e405 chore(version-bump): longhorn-instance-manager
Update YAML and chart files from v1_20230310 to v1_20230317.

Ref: 5265

Signed-off-by: James Lu <james.lu@suse.com>
2023-03-17 23:09:32 +08:00
James Lu
4a645b8edb chore(version-bump): longhorn-instance-manager
Update YAML and chart files from v1_20230223 to v1_20230310.

Ref: 5265

Signed-off-by: James Lu <james.lu@suse.com>
2023-03-13 14:20:17 +08:00
ChanYiLin
ad927264aa doc: update prerequisites in chart readme to make it consistent with documentation
Signed-off-by: Jack Lin <jack.lin@suse.com>
2023-03-13 10:32:40 +08:00
Viktor Hedefalk
39ce8a1645 Update data_migration.yaml
Fixes #5484

(cherry picked from commit 92fd5b54ed)
2023-03-08 15:59:42 +08:00
Rayan Das
3d13d08d52 update k8s.gcr.io to registry.k8s.io
Signed-off-by: Rayan Das <rayandas91@gmail.com>
(cherry picked from commit e1ea3d7515)
2023-03-01 12:49:47 +08:00
Chin-Ya Huang
9d8f2bef07 feat(recurring-job): update YAML for new tasks
Ref: 3836

Signed-off-by: Chin-Ya Huang <chin-ya.huang@suse.com>
(cherry picked from commit 2ea5513286)
2023-02-22 12:25:13 +08:00
Chin-Ya Huang
15f5c97e24 feat(recurring-job): update chart for new tasks
Ref: 3836

Signed-off-by: Chin-Ya Huang <chin-ya.huang@suse.com>
(cherry picked from commit 761abc7611)
2023-02-22 12:25:13 +08:00
achims311
9148325775 Fix for bug #5304 (second version including POSIX way to call subroutine) (#5314)
* Fix for bug #5304.

It uses the same technology to get the kernel release as was used
before to get the OS of the node

Signed-off-by: Achim Schaefer <longhorn@schaefer-home.eu>

* used a lower case variable name as suggested by innobead

Signed-off-by: Achim Schaefer <longhorn@schaefer-home.eu>

---------

Signed-off-by: Achim Schaefer <longhorn@schaefer-home.eu>
Co-authored-by: David Ko <dko@suse.com>
(cherry picked from commit 94a23e5b05)
2023-02-07 15:00:57 +08:00
Haribo112
d447a5ad6e Made environment_check.sh POSIX compliant (#5310)
Made environment_check.sh POSIX compliant

Signed-off-by: Harold Holsappel <h.holsappel@iwink.nl>
Co-authored-by: Harold Holsappel <h.holsappel@iwink.nl>
(cherry picked from commit 5a071e502c)
2023-02-06 18:05:51 +08:00
Thomas Fenzl
dab68907cc update iscsi installation image to latest alpine.
(cherry picked from commit 674cdd0df0)
2023-02-05 23:17:19 +08:00
Derek Su
7495f22de6 environment check: precisely check kernel option
Longhorn 3157

Signed-off-by: Derek Su <derek.su@suse.com>
(cherry picked from commit 62998adab2)
2022-12-26 20:24:22 +08:00
Derek Su
9061c1f458 environment_check.sh: add nfs client kernel support
Longhorn 3157

Signed-off-by: Derek Su <derek.su@suse.com>
(cherry picked from commit c83497b685)
2022-12-26 16:38:41 +08:00
Derek Su
3ca190e1f5 environment_check.sh: check the hostname uniqueness
Longhorn 5012

Signed-off-by: Derek Su <derek.su@suse.com>
(cherry picked from commit 939ac11774)
2022-12-26 16:38:41 +08:00
James Lu
db383e4ec4 chore(ui): modify Affinity of UI for helm chart
Change the number of UI replicas from 1 to 2 in the Helm chart

Ref: 4987

Signed-off-by: James Lu <james.lu@suse.com>
2022-12-15 18:42:42 +08:00
James Lu
1b7c7fc45b chore(ui): modify Affinity of UI in deploy.yaml
Change the number of UI replicas from 1 to 2.

Ref: 4987

Signed-off-by: James Lu <james.lu@suse.com>
2022-12-15 18:42:42 +08:00
Chin-Ya Huang
b2c607d4ae feat(csi-liveness-probe): update air-gap images
Ref: 3907

Signed-off-by: Chin-Ya Huang <chin-ya.huang@suse.com>
(cherry picked from commit 400b8cd097)
2022-11-29 12:51:53 +08:00
Chin-Ya Huang
b41b9abbb5 feat(csi-liveness-probe): update YAML
Ref: 3907

Signed-off-by: Chin-Ya Huang <chin-ya.huang@suse.com>
(cherry picked from commit f3525fe363)
2022-11-29 12:51:53 +08:00
Chin-Ya Huang
60f9d635fd feat(csi-liveness-probe): chart update
Ref: 3907

Signed-off-by: Chin-Ya Huang <chin-ya.huang@suse.com>
(cherry picked from commit e847b7f62c)
2022-11-29 12:51:53 +08:00
Ray Chang
44041acf6c fix: modify parameter names, output-file and version, in help function
Longhorn 1521

Signed-off-by: Ray Chang <ray.chang@suse.com>
(cherry picked from commit e68f633053122fd8dadd87e140ae104a00ce62ff)
2022-11-01 16:30:36 +08:00
Ray Chang
b8312dbbdc improvement(restore): script for restoring from backup file
Longhorn 1521

Signed-off-by: Ray Chang <ray.chang@suse.com>
(cherry picked from commit 7805007c99c7197b8d860db1850f24c373982d0f)
2022-10-21 08:06:22 +08:00
David Ko
a425df9a8b Release 1.3.2
Signed-off-by: David Ko <dko@suse.com>
2022-10-14 16:34:14 +08:00
David Ko
0dd8d0bba9 Release 1.3.2-rc2
Signed-off-by: David Ko <dko@suse.com>
2022-10-11 13:53:19 +08:00
Phan Le
47012ea980 Fix: update outdated csi-image tags in chart/questions.yaml
Signed-off-by: Phan Le <phan.le@suse.com>
2022-10-04 08:03:18 +08:00
Ray Chang
199849b17b Add K8s version limitation to < 1.4 chart
Longhorn 4525

Signed-off-by: Ray Chang <ray.chang@suse.com>
2022-10-03 17:59:41 +08:00
David Ko
2753477993 Release 1.3.2-rc1
Signed-off-by: David Ko <dko@suse.com>
2022-10-03 16:15:09 +08:00
David Ko
e6463acced Update system managed component images for v1.3
Signed-off-by: David Ko <dko@suse.com>
2022-10-03 15:45:55 +08:00
Ray Chang
6c010cfc06 Remove resources block in values.yaml
Longhorn 4601

Signed-off-by: Ray Chang <ray.chang@suse.com>
(cherry picked from commit 61ad5112c120cc049cd9a6af70cc76ceca585e41)
2022-09-29 14:02:46 +08:00
James Lu
cefb54500f Update LEP: Failed-Backups-Cleanup
Add failed backup TTL to enable and disable the auto-deletion.

Signed-off-by: James Lu <james.lu@suse.com>
2022-08-24 17:11:52 +08:00
James Lu
685fd9d5d5 Failed backups cleanup: update deploy YAML
Add a new option `failed-backup-ttl` and update the LEP for failed
backup cleanup.

Longhorn 3898

Signed-off-by: James Lu <james.lu@suse.com>
2022-08-24 17:11:52 +08:00
Phan Le
a44c828db6 Add volumeattachment to uninstaller's service account
longhorn-4405

Signed-off-by: Phan Le <phan.le@suse.com>
2022-08-12 06:58:11 +08:00
David Ko
4ba39a989b Release 1.3.1
Signed-off-by: David Ko <dko@suse.com>
2022-08-11 21:11:38 +08:00
David Ko
cfa6cd727f Release 1.3.1 RC2
Signed-off-by: David Ko <dko@suse.com>
2022-08-08 20:48:06 +08:00
David Ko
564286098c Update share and backing images
Signed-off-by: David Ko <dko@suse.com>
(cherry picked from commit 743fa08e8f)
2022-08-08 17:12:36 +08:00
Shuo Wu
2def71350e Update longhorn-instance-manager tag
Signed-off-by: Shuo Wu <shuo.wu@suse.com>
2022-08-08 17:05:42 +08:00
David Ko
5976459490 Update share and backing images
Signed-off-by: David Ko <dko@suse.com>
2022-08-08 14:22:56 +08:00
Derek Su
038b5b35e0 Update charts
Longhorn 4332

Signed-off-by: Derek Su <derek.su@suse.com>
2022-08-02 15:42:07 +08:00
David Ko
3ad971e1c3 Release 1.3.1 RC1
Signed-off-by: David Ko <dko@suse.com>
2022-07-29 16:21:32 +08:00
Serge Tkatchouk
87ded08cd7 Add Gentoo support to environment_check.sh
This addition will allow Gentoo users to run this script and get sensible error messages in case they forget to install required packages.

Signed-off-by: Serge Tkatchouk <sp1j3t@gmail.com>
(cherry picked from commit c1b93f5531)
2022-07-29 13:32:31 +08:00
David Ko
eb186ec432 Add update-manifests-dev-version.sh
Signed-off-by: David Ko <dko@suse.com>
(cherry picked from commit 0a275ab34f)
Signed-off-by: David Ko <dko@suse.com>
2022-07-27 17:53:45 +08:00
lodufqa
be276a9de9 Update chart/values.yaml
Co-authored-by: David Ko <dko@suse.com>
Signed-off-by: Amadeusz Kryze <amadeusz.kryze@gmail.com>
(cherry picked from commit eda558c0d5)
2022-07-27 17:53:45 +08:00
Amadeusz Kryze
7956c08eb5 Make the annotation for the LonghornManager service configurable.
Signed-off-by: Amadeusz Kryze <amadeusz.kryze@gmail.com>
(cherry picked from commit 1e7289dfe0)
2022-07-27 17:53:45 +08:00
Sebastian Podjasek
c2293062db Add value to set manager log in json format
Signed-off-by: Sebastian Podjasek <sebastian.podjasek@intelliway.pl>
(cherry picked from commit d48e95b8c3)
2022-07-27 17:53:45 +08:00
Dante Foulke
af3f2220f7 bugfix for issue #4216
Signed-off-by: Dante Foulke <flkdnt@gmail.com>
(cherry picked from commit edc1b83c5f)
2022-07-27 17:53:45 +08:00
Phan Le
36fc0c41b1 Add rancher chart migration script
The script helps migrate a Longhorn installation from the old Rancher
catalog UI to the new chart in the Rancher apps & marketplace UI

Longhorn-3714

Signed-off-by: Phan Le <phan.le@suse.com>
(cherry picked from commit 0614c55fc3)
2022-07-27 17:53:45 +08:00
Łukasz Sowa
a879d34d27 Separate tls ingress option from secure backend
Signed-off-by: Łukasz Sowa <lukasz@owlsome.dev>
(cherry picked from commit fe5565dbcf)
2022-07-27 17:53:45 +08:00
Andru Cherny
709aa3b8ed move replicas from UI and driver to values
Signed-off-by: Andru Cherny <wiroatom@gmail.com>
(cherry picked from commit 368d8363da)
2022-07-27 17:53:45 +08:00
tgfree
4cef751aca fix some typos in docs
Signed-off-by: tgfree <tgfree7@gmail.com>

(cherry picked from commit 1e8dd33559)
2022-07-27 17:53:45 +08:00
Chris Chien
1363461b35 rename generate-longhorm-yaml.sh to generate-longhorn-yaml.sh
Signed-off-by: Chris Chien <chris.chien@suse.com>
2022-07-27 15:07:09 +08:00
Derek Su
bc06b10c34 chart: fix the conditions of Rancher deployed Windows Cluster
Longhorn 4289

Signed-off-by: Derek Su <derek.su@suse.com>
2022-07-27 12:33:17 +08:00
Tristan Lins
80c0c2d8c6 Add preserveUnknownFields: false to crd specs
Signed-off-by: Tristan Lins <tristan.lins@chamaeleon.de>
2022-07-27 10:59:37 +08:00
c3y1huang
1907172f05 chart: support Rancher deployed Windows Cluster
Longhorn-4262

Signed-off-by: Chin-Ya Huang <chin-ya.huang@suse.com>
2022-07-22 19:11:18 +08:00
Derek Su
3973f871c8 Update longhorn-instance-manager to v1_20220722
Longhorn 4136

Signed-off-by: Derek Su <derek.su@suse.com>
2022-07-22 17:25:41 +08:00
45 changed files with 1057 additions and 210 deletions

View File

@ -30,10 +30,11 @@ The latest release of Longhorn is [![Releases](https://img.shields.io/github/rel
## Release Status
| Release | Version | Type |
| --------|---------|----------------|
| 1.2 | 1.2.4 | Stable |
| 1.1 | 1.1.3 | Stable |
| Release | Version | Type |
|---------|---------|--------|
| 1.3 | 1.3.0 | Latest |
| 1.2 | 1.2.4 | Stable |
| 1.1 | 1.1.3 | Stable |
## Get Involved

View File

@ -1,8 +1,8 @@
apiVersion: v1
name: longhorn
version: 1.3.0
appVersion: v1.3.0
kubeVersion: ">=1.18.0-0"
version: 1.3.3-rc2
appVersion: v1.3.3-rc2
kubeVersion: ">=1.18.0-0 <1.25.0-0"
description: Longhorn is a distributed block storage system for Kubernetes.
keywords:
- longhorn

View File

@ -18,7 +18,7 @@ Longhorn is 100% open source software. Project source code is spread across a nu
## Prerequisites
1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.)
2. Kubernetes v1.18+
2. Kubernetes >= v1.18 and <= v1.24
3. Make sure `bash`, `curl`, `findmnt`, `grep`, `awk` and `blkid` has been installed in all nodes of the Kubernetes cluster.
4. Make sure `open-iscsi` has been installed, and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, recommended Ubuntu as guest OS image since it contains `open-iscsi` already.

View File

@ -17,7 +17,7 @@ questions:
label: Longhorn Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.manager.tag
default: v1.3.0
default: v1.3.3-rc2
description: "Specify Longhorn Manager Image Tag"
type: string
label: Longhorn Manager Image Tag
@ -29,7 +29,7 @@ questions:
label: Longhorn Engine Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.engine.tag
default: v1.3.0
default: v1.3.3-rc2
description: "Specify Longhorn Engine Image Tag"
type: string
label: Longhorn Engine Image Tag
@ -41,7 +41,7 @@ questions:
label: Longhorn UI Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.ui.tag
default: v1.3.0
default: v1.3.3-rc2
description: "Specify Longhorn UI Image Tag"
type: string
label: Longhorn UI Image Tag
@ -53,7 +53,7 @@ questions:
label: Longhorn Instance Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.instanceManager.tag
default: v1_20220611
default: v1_20230317
description: "Specify Longhorn Instance Manager Image Tag"
type: string
label: Longhorn Instance Manager Image Tag
@ -65,7 +65,7 @@ questions:
label: Longhorn Share Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.shareManager.tag
default: v1_20220531
default: v1_20230320
description: "Specify Longhorn Share Manager Image Tag"
type: string
label: Longhorn Share Manager Image Tag
@ -77,7 +77,7 @@ questions:
label: Longhorn Backing Image Manager Image Repository
group: "Longhorn Images Settings"
- variable: image.longhorn.backingImageManager.tag
default: v3_20220609
default: v3_20230320
description: "Specify Longhorn Backing Image Manager Image Tag"
type: string
label: Longhorn Backing Image Manager Image Tag
@ -89,7 +89,7 @@ questions:
label: Longhorn CSI Attacher Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.attacher.tag
default: v3.2.1
default: v3.4.0
description: "Specify CSI attacher image tag. Leave blank to autodetect."
type: string
label: Longhorn CSI Attacher Image Tag
@ -113,7 +113,7 @@ questions:
label: Longhorn CSI Node Driver Registrar Image Repository
group: "Longhorn CSI Driver Images"
- variable: image.csi.nodeDriverRegistrar.tag
default: v2.3.0
default: v2.5.0
description: "Specify CSI Node Driver Registrar image tag. Leave blank to autodetect."
type: string
label: Longhorn CSI Node Driver Registrar Image Tag
@ -142,6 +142,18 @@ questions:
type: string
label: Longhorn CSI Driver Snapshotter Image Tag
group: "Longhorn CSI Driver Images"
- variable: image.csi.livenessProbe.repository
default: longhornio/livenessprobe
description: "Specify CSI liveness probe image repository. Leave blank to autodetect."
type: string
label: Longhorn CSI Liveness Probe Image Repository
group: "Longhorn CSI Liveness Probe Images"
- variable: image.csi.livenessProbe.tag
default: v2.8.0
description: "Specify CSI liveness probe image tag. Leave blank to autodetect."
type: string
label: Longhorn CSI Liveness Probe Image Tag
group: "Longhorn CSI Driver Images"
- variable: privateRegistry.registryUrl
label: Private registry URL
description: "URL of private registry. Leave blank to apply system default registry."
@ -330,6 +342,16 @@ The available volume spec options are:
type: int
min: 0
default: 300
- variable: defaultSettings.failedBackupTTL
label: Failed Backup Time to Live
description: "In minutes. This setting determines how long Longhorn will keep the backup resource that was failed. Set to 0 to disable the auto-deletion.
Failed backups will be checked and cleaned up during backupstore polling which is controlled by **Backupstore Poll Interval** setting.
Hence this value determines the minimal wait interval of the cleanup. And the actual cleanup interval is multiple of **Backupstore Poll Interval**.
Disabling **Backupstore Poll Interval** also means to disable failed backup auto-deletion."
group: "Longhorn Default Settings"
type: int
min: 0
default: 1440
- variable: defaultSettings.autoSalvage
label: Automatic salvage
description: "If enabled, volumes will be automatically salvaged when all the replicas become faulty e.g. due to network disconnection. Longhorn will try to figure out which replica(s) are usable, then use them for the volume. By default true."
@ -378,6 +400,19 @@ If this setting is enabled, Longhorn will **not** block `kubectl drain` action o
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.nodeDrainPolicy
label: Node Drain Policy
description: "Define the policy to use when a node with the last healthy replica of a volume is drained.
- **block-if-contains-last-replica** Longhorn will block the drain when the node contains the last healthy replica of a volume.
- **allow-if-replica-is-stopped** Longhorn will allow the drain when the node contains the last healthy replica of a volume but the replica is stopped. WARNING: possible data loss if the node is removed after draining. Select this option if you want to drain the node and do in-place upgrade/maintenance.
- **always-allow** Longhorn will allow the drain even though the node contains the last healthy replica of a volume. WARNING: possible data loss if the node is removed after draining. Also possible data corruption if the last replica was running during the draining."
group: "Longhorn Default Settings"
type: enum
options:
- "block-if-contains-last-replica"
- "allow-if-replica-is-stopped"
- "always-allow"
default: "block-if-contains-last-replica"
- variable: defaultSettings.mkfsExt4Parameters
label: Custom mkfs.ext4 parameters
description: "Allows setting additional filesystem creation parameters for ext4. For older host kernels it might be necessary to disable the optional ext4 metadata_csum feature by specifying `-O ^64bit,^metadata_csum`."
@ -670,3 +705,9 @@ WARNING:
label: Pod Security Policy
type: boolean
group: "Other Settings"
- variable: global.cattle.windowsCluster.enabled
default: "false"
description: "Enable this to allow Longhorn to run on the Rancher deployed Windows cluster."
label: Rancher Windows Cluster
type: boolean
group: "Other Settings"

View File

@ -1055,6 +1055,7 @@ metadata:
longhorn-manager: ""
name: engineimages.longhorn.io
spec:
preserveUnknownFields: false
conversion:
strategy: Webhook
webhook:
@ -1086,7 +1087,7 @@ spec:
jsonPath: .spec.image
name: Image
type: string
- description: Number of volumes are using the engine image
- description: Number of resources using the engine image
jsonPath: .status.refCount
name: RefCount
type: integer
@ -1128,7 +1129,7 @@ spec:
jsonPath: .spec.image
name: Image
type: string
- description: Number of volumes are using the engine image
- description: Number of resources using the engine image
jsonPath: .status.refCount
name: RefCount
type: integer
@ -1411,6 +1412,30 @@ spec:
type: object
nullable: true
type: object
conditions:
items:
properties:
lastProbeTime:
description: Last time we probed the condition.
type: string
lastTransitionTime:
description: Last time the condition transitioned from one status to another.
type: string
message:
description: Human-readable message indicating details about last transition.
type: string
reason:
description: Unique, one-word, CamelCase reason for the condition's last transition.
type: string
status:
description: Status is the status of the condition. Can be True, False, Unknown.
type: string
type:
description: Type is the type of the condition.
type: string
type: object
nullable: true
type: array
currentImage:
type: string
currentReplicaAddressMap:
@ -1725,6 +1750,7 @@ metadata:
longhorn-manager: ""
name: nodes.longhorn.io
spec:
preserveUnknownFields: false
conversion:
strategy: Webhook
webhook:
@ -2115,7 +2141,7 @@ spec:
jsonPath: .spec.groups
name: Groups
type: string
- description: Should be one of "backup" or "snapshot"
- description: Should be one of "snapshot", "snapshot-force-create", "snapshot-cleanup", "snapshot-delete", "backup" or "backup-force-create"
jsonPath: .spec.task
name: Task
type: string
@ -2177,10 +2203,14 @@ spec:
description: The retain count of the snapshot/backup.
type: integer
task:
description: The recurring job type. Can be "snapshot" or "backup".
description: The recurring job task. Can be "snapshot", "snapshot-force-create", "snapshot-cleanup", "snapshot-delete", "backup" or "backup-force-create".
enum:
- snapshot
- snapshot-force-create
- snapshot-cleanup
- snapshot-delete
- backup
- backup-force-create
type: string
type: object
status:
@ -2355,6 +2385,30 @@ spec:
status:
description: ReplicaStatus defines the observed state of the Longhorn replica
properties:
conditions:
items:
properties:
lastProbeTime:
description: Last time we probed the condition.
type: string
lastTransitionTime:
description: Last time the condition transitioned from one status to another.
type: string
message:
description: Human-readable message indicating details about last transition.
type: string
reason:
description: Unique, one-word, CamelCase reason for the condition's last transition.
type: string
status:
description: Status is the status of the condition. Can be True, False, Unknown.
type: string
type:
description: Type is the type of the condition.
type: string
type: object
nullable: true
type: array
currentImage:
type: string
currentState:
@ -2714,6 +2768,7 @@ metadata:
longhorn-manager: ""
name: volumes.longhorn.io
spec:
preserveUnknownFields: false
conversion:
strategy: Webhook
webhook:
@ -2897,7 +2952,11 @@ spec:
task:
enum:
- snapshot
- snapshot-force-create
- snapshot-cleanup
- snapshot-delete
- backup
- backup-force-create
type: string
type: object
type: array
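
The expanded task enum above (snapshot-force-create, snapshot-cleanup, snapshot-delete, backup-force-create) is consumed by RecurringJob resources. A hypothetical manifest — assuming the v1beta2 API served by Longhorn 1.3 and a made-up job name — could look like:

# Hypothetical RecurringJob using one of the new task values added above.
apiVersion: longhorn.io/v1beta2
kind: RecurringJob
metadata:
  name: hourly-snapshot-cleanup
  namespace: longhorn-system
spec:
  name: hourly-snapshot-cleanup
  task: snapshot-cleanup        # new task type from the enum above
  cron: "0 * * * *"
  retain: 1
  concurrency: 1
  groups:
    - default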

View File

@ -31,6 +31,9 @@ spec:
command:
- longhorn-manager
- -d
{{- if eq .Values.longhornManager.log.format "json" }}
- -j
{{- end }}
- daemon
- --engine-image
- "{{ template "registry_url" . }}{{ .Values.image.longhorn.engine.repository }}:{{ .Values.image.longhorn.engine.tag }}"
@ -92,15 +95,25 @@ spec:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornManager.priorityClass }}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote}}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
serviceAccountName: longhorn-service-account
updateStrategy:
@ -114,6 +127,10 @@ metadata:
app: longhorn-manager
name: longhorn-backend
namespace: {{ include "release_namespace" . }}
{{- if .Values.longhornManager.serviceAnnotations }}
annotations:
{{ toYaml .Values.longhornManager.serviceAnnotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.service.manager.type }}
sessionAffinity: ClientIP
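
The daemonset template above now renders the extra "-j" flag when longhornManager.log.format is "json" and copies longhornManager.serviceAnnotations onto the longhorn-backend Service. A hypothetical override (the annotation key is only an example) might be:

longhornManager:
  log:
    format: json                   # adds "-j" to the longhorn-manager command above
  serviceAnnotations:
    example.com/scrape: "true"     # hypothetical annotation, applied to the longhorn-backend Service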

View File

@ -20,8 +20,27 @@ data:
{{ if not (kindIs "invalid" .Values.defaultSettings.defaultDataLocality) }}default-data-locality: {{ .Values.defaultSettings.defaultDataLocality }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.defaultLonghornStaticStorageClass) }}default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.backupstorePollInterval) }}backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.taintToleration) }}taint-toleration: {{ .Values.defaultSettings.taintToleration }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector) }}system-managed-components-node-selector: {{ .Values.defaultSettings.systemManagedComponentsNodeSelector }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.failedBackupTTL) }}failed-backup-ttl: {{ .Values.defaultSettings.failedBackupTTL }}{{ end }}
{{- if or (not (kindIs "invalid" .Values.defaultSettings.taintToleration)) (.Values.global.cattle.windowsCluster.enabled) }}
taint-toleration: {{ $windowsDefaultSettingTaintToleration := list }}{{ $defaultSettingTaintToleration := list -}}
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}}
{{- $windowsDefaultSettingTaintToleration = .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}}
{{- end -}}
{{- if not (kindIs "invalid" .Values.defaultSettings.taintToleration) -}}
{{- $defaultSettingTaintToleration = .Values.defaultSettings.taintToleration -}}
{{- end -}}
{{- $taintToleration := list $windowsDefaultSettingTaintToleration $defaultSettingTaintToleration }}{{ join ";" (compact $taintToleration) -}}
{{- end }}
{{- if or (not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector)) (.Values.global.cattle.windowsCluster.enabled) }}
system-managed-components-node-selector: {{ $windowsDefaultSettingNodeSelector := list }}{{ $defaultSettingNodeSelector := list -}}
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}}
{{ $windowsDefaultSettingNodeSelector = .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}}
{{- end -}}
{{- if not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector) -}}
{{- $defaultSettingNodeSelector = .Values.defaultSettings.systemManagedComponentsNodeSelector -}}
{{- end -}}
{{- $nodeSelector := list $windowsDefaultSettingNodeSelector $defaultSettingNodeSelector }}{{ join ";" (compact $nodeSelector) -}}
{{- end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.priorityClass) }}priority-class: {{ .Values.defaultSettings.priorityClass }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.autoSalvage) }}auto-salvage: {{ .Values.defaultSettings.autoSalvage }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly) }}auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }}{{ end }}
@ -29,6 +48,7 @@ data:
{{ if not (kindIs "invalid" .Values.defaultSettings.replicaZoneSoftAntiAffinity) }}replica-zone-soft-anti-affinity: {{ .Values.defaultSettings.replicaZoneSoftAntiAffinity }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.nodeDownPodDeletionPolicy) }}node-down-pod-deletion-policy: {{ .Values.defaultSettings.nodeDownPodDeletionPolicy }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.allowNodeDrainWithLastHealthyReplica) }}allow-node-drain-with-last-healthy-replica: {{ .Values.defaultSettings.allowNodeDrainWithLastHealthyReplica }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.nodeDrainPolicy) }}node-drain-policy: {{ .Values.defaultSettings.nodeDrainPolicy }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.mkfsExt4Parameters) }}mkfs-ext4-parameters: {{ .Values.defaultSettings.mkfsExt4Parameters }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.disableReplicaRebuild) }}disable-replica-rebuild: {{ .Values.defaultSettings.disableReplicaRebuild }}{{ end }}
{{ if not (kindIs "invalid" .Values.defaultSettings.replicaReplenishmentWaitInterval) }}replica-replenishment-wait-interval: {{ .Values.defaultSettings.replicaReplenishmentWaitInterval }}{{ end }}

View File

@ -67,6 +67,10 @@ spec:
- name: CSI_SNAPSHOTTER_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.snapshotter.repository }}:{{ .Values.image.csi.snapshotter.tag }}"
{{- end }}
{{- if and .Values.image.csi.livenessProbe.repository .Values.image.csi.livenessProbe.tag }}
- name: CSI_LIVENESS_PROBE_IMAGE
value: "{{ template "registry_url" . }}{{ .Values.image.csi.livenessProbe.repository }}:{{ .Values.image.csi.livenessProbe.tag }}"
{{- end }}
{{- if .Values.csi.attacherReplicaCount }}
- name: CSI_ATTACHER_REPLICA_COUNT
value: {{ .Values.csi.attacherReplicaCount | quote }}
@ -89,15 +93,25 @@ spec:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornDriver.priorityClass }}
priorityClassName: {{ .Values.longhornDriver.priorityClass | quote}}
priorityClassName: {{ .Values.longhornDriver.priorityClass | quote }}
{{- end }}
{{- if .Values.longhornDriver.tolerations }}
{{- if or .Values.longhornDriver.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornDriver.tolerations }}
{{ toYaml .Values.longhornDriver.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if .Values.longhornDriver.nodeSelector }}
{{- if or .Values.longhornDriver.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornDriver.nodeSelector }}
{{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
serviceAccountName: longhorn-service-account
securityContext:

View File

@ -6,7 +6,7 @@ metadata:
name: longhorn-ui
namespace: {{ include "release_namespace" . }}
spec:
replicas: 1
replicas: {{ .Values.longhornUI.replicas }}
selector:
matchLabels:
app: longhorn-ui
@ -15,6 +15,18 @@ spec:
labels: {{- include "longhorn.labels" . | nindent 8 }}
app: longhorn-ui
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- longhorn-ui
topologyKey: kubernetes.io/hostname
containers:
- name: longhorn-ui
image: {{ template "registry_url" . }}{{ .Values.image.longhorn.ui.repository }}:{{ .Values.image.longhorn.ui.tag }}
@ -46,15 +58,25 @@ spec:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornUI.priorityClass }}
priorityClassName: {{ .Values.longhornUI.priorityClass | quote}}
priorityClassName: {{ .Values.longhornUI.priorityClass | quote }}
{{- end }}
{{- if .Values.longhornUI.tolerations }}
{{- if or .Values.longhornUI.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornUI.tolerations }}
{{ toYaml .Values.longhornUI.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if .Values.longhornUI.nodeSelector }}
{{- if or .Values.longhornUI.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornUI.nodeSelector }}
{{ toYaml .Values.longhornUI.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
---
kind: Service
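
With this change the UI Deployment takes its replica count from longhornUI.replicas (now defaulting to 2) and spreads the pods with a preferred anti-affinity on kubernetes.io/hostname. A hedged override to fall back to a single UI pod would simply be:

longhornUI:
  replicas: 1    # chart default is now 2; the anti-affinity is preferred, not required, so 1 also works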

View File

@ -54,15 +54,25 @@ spec:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornDriver.priorityClass }}
priorityClassName: {{ .Values.longhornDriver.priorityClass | quote}}
priorityClassName: {{ .Values.longhornDriver.priorityClass | quote }}
{{- end }}
{{- if .Values.longhornDriver.tolerations }}
{{- if or .Values.longhornDriver.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornDriver.tolerations }}
{{ toYaml .Values.longhornDriver.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if .Values.longhornDriver.nodeSelector }}
{{- if or .Values.longhornDriver.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornDriver.nodeSelector }}
{{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
serviceAccountName: longhorn-service-account
---
@ -133,14 +143,24 @@ spec:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornDriver.priorityClass }}
priorityClassName: {{ .Values.longhornDriver.priorityClass | quote}}
priorityClassName: {{ .Values.longhornDriver.priorityClass | quote }}
{{- end }}
{{- if .Values.longhornDriver.tolerations }}
{{- if or .Values.longhornDriver.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornDriver.tolerations }}
{{ toYaml .Values.longhornDriver.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if .Values.longhornDriver.nodeSelector }}
{{- if or .Values.longhornDriver.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if or .Values.longhornDriver.nodeSelector }}
{{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }}
{{- end }}
{{- end }}
serviceAccountName: longhorn-service-account

View File

@ -11,7 +11,7 @@ metadata:
labels: {{- include "longhorn.labels" . | nindent 4 }}
app: longhorn-ingress
annotations:
{{- if .Values.ingress.tls }}
{{- if .Values.ingress.secureBackends }}
ingress.kubernetes.io/secure-backends: "true"
{{- end }}
{{- range $key, $value := .Values.ingress.annotations }}
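
After this change the ingress.kubernetes.io/secure-backends annotation is driven by ingress.secureBackends instead of ingress.tls, so TLS termination at the ingress and HTTPS to the backend can be toggled independently. A hypothetical values snippet (the host name is only an example; the keys appear in values.yaml further down in this diff):

ingress:
  enabled: true
  host: longhorn.example.com        # hypothetical host
  tls: true                         # terminate TLS at the ingress (port 443)
  tlsSecret: longhorn.local-tls
  secureBackends: false             # set true only if the backend itself serves HTTPS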

View File

@ -35,14 +35,24 @@ spec:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornManager.priorityClass }}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote}}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
{{- end }}
serviceAccountName: longhorn-service-account
{{- if .Values.longhornManager.tolerations }}
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}

View File

@ -21,10 +21,10 @@ data:
staleReplicaTimeout: "30"
fromBackup: ""
{{- if .Values.persistence.defaultFsType }}
fsType: "{{.Values.persistence.defaultFsType}}"
fsType: "{{ .Values.persistence.defaultFsType }}"
{{- end }}
{{- if .Values.persistence.migratable }}
migratable: "{{.Values.persistence.migratable}}"
migratable: "{{ .Values.persistence.migratable }}"
{{- end }}
{{- if .Values.persistence.backingImage.enable }}
backingImage: {{ .Values.persistence.backingImage.name }}

View File

@ -36,14 +36,24 @@ spec:
- name: {{ .Values.privateRegistry.registrySecret }}
{{- end }}
{{- if .Values.longhornManager.priorityClass }}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote}}
priorityClassName: {{ .Values.longhornManager.priorityClass | quote }}
{{- end }}
serviceAccountName: longhorn-service-account
{{- if .Values.longhornManager.tolerations }}
{{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }}
tolerations:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }}
{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }}
{{- end }}
{{- if .Values.longhornManager.tolerations }}
{{ toYaml .Values.longhornManager.tolerations | indent 6 }}
{{- end }}
{{- end }}
{{- if .Values.longhornManager.nodeSelector }}
{{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }}
nodeSelector:
{{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }}
{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }}
{{- end }}
{{- if or .Values.longhornManager.nodeSelector }}
{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }}
{{- end }}
{{- end }}

View File

@ -4,27 +4,43 @@
global:
cattle:
systemDefaultRegistry: ""
windowsCluster:
# Enable this to allow Longhorn to run on the Rancher deployed Windows cluster
enabled: false
# Tolerate Linux node taint
tolerations:
- key: "cattle.io/os"
value: "linux"
effect: "NoSchedule"
operator: "Equal"
# Select Linux nodes
nodeSelector:
kubernetes.io/os: "linux"
# Recognize toleration and node selector for Longhorn run-time created components
defaultSetting:
taintToleration: cattle.io/os=linux:NoSchedule
systemManagedComponentsNodeSelector: kubernetes.io/os:linux
image:
longhorn:
engine:
repository: longhornio/longhorn-engine
tag: v1.3.0
tag: v1.3.3-rc2
manager:
repository: longhornio/longhorn-manager
tag: v1.3.0
tag: v1.3.3-rc2
ui:
repository: longhornio/longhorn-ui
tag: v1.3.0
tag: v1.3.3-rc2
instanceManager:
repository: longhornio/longhorn-instance-manager
tag: v1_20220611
tag: v1_20230317
shareManager:
repository: longhornio/longhorn-share-manager
tag: v1_20220531
tag: v1_20230320
backingImageManager:
repository: longhornio/backing-image-manager
tag: v3_20220609
tag: v3_20230320
csi:
attacher:
repository: longhornio/csi-attacher
@ -41,6 +57,9 @@ image:
snapshotter:
repository: longhornio/csi-snapshotter
tag: v3.0.3
livenessProbe:
repository: longhornio/livenessprobe
tag: v2.8.0
pullPolicy: IfNotPresent
service:
@ -92,6 +111,7 @@ defaultSettings:
defaultReplicaCount: ~
defaultLonghornStaticStorageClass: ~
backupstorePollInterval: ~
failedBackupTTL: ~
taintToleration: ~
systemManagedComponentsNodeSelector: ~
priorityClass: ~
@ -101,6 +121,7 @@ defaultSettings:
replicaZoneSoftAntiAffinity: ~
nodeDownPodDeletionPolicy: ~
allowNodeDrainWithLastHealthyReplica: ~
nodeDrainPolicy : ~
mkfsExt4Parameters: ~
disableReplicaRebuild: ~
replicaReplenishmentWaitInterval: ~
@ -125,6 +146,9 @@ privateRegistry:
registrySecret: ~
longhornManager:
log:
## Allowed values are `plain` or `json`.
format: plain
priorityClass: ~
tolerations: []
## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above
@ -138,6 +162,11 @@ longhornManager:
## and uncomment this example block
# label-key1: "label-value1"
# label-key2: "label-value2"
serviceAnnotations: {}
## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above
## and uncomment this example block
# annotation-key1: "annotation-value1"
# annotation-key2: "annotation-value2"
longhornDriver:
priorityClass: ~
@ -155,6 +184,7 @@ longhornDriver:
# label-key2: "label-value2"
longhornUI:
replicas: 2
priorityClass: ~
tolerations: []
## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above
@ -169,19 +199,6 @@ longhornUI:
# label-key1: "label-value1"
# label-key2: "label-value2"
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
#
ingress:
## Set to true to enable ingress record generation
enabled: false
@ -193,9 +210,11 @@ ingress:
host: sslip.io
## Set this to true in order to enable TLS on the ingress record
## A side effect of this will be that the backend service will be connected at port 443
tls: false
## Enable this in order to enable that the backend service will be connected at port 443
secureBackends: false
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: longhorn.local-tls

View File

@ -3,9 +3,10 @@ longhornio/csi-provisioner:v2.1.2
longhornio/csi-resizer:v1.2.0
longhornio/csi-snapshotter:v3.0.3
longhornio/csi-node-driver-registrar:v2.5.0
longhornio/backing-image-manager:v3_20220609
longhornio/longhorn-engine:v1.3.0
longhornio/longhorn-instance-manager:v1_20220611
longhornio/longhorn-manager:v1.3.0
longhornio/longhorn-share-manager:v1_20220531
longhornio/longhorn-ui:v1.3.0
longhornio/livenessprobe:v2.8.0
longhornio/backing-image-manager:v3_20230320
longhornio/longhorn-engine:v1.3.3-rc2
longhornio/longhorn-instance-manager:v1_20230317
longhornio/longhorn-manager:v1.3.3-rc2
longhornio/longhorn-share-manager:v1_20230320
longhornio/longhorn-ui:v1.3.3-rc2

View File

@ -13,7 +13,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
spec:
privileged: true
allowPrivilegeEscalation: true
@ -49,7 +49,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
---
# Source: longhorn/templates/default-setting.yaml
apiVersion: v1
@ -60,7 +60,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
data:
default-setting.yaml: |-
---
@ -73,7 +73,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
data:
storageclass.yaml: |
kind: StorageClass
@ -103,7 +103,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: backingimagedatasources.longhorn.io
spec:
@ -274,7 +274,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: backingimagemanagers.longhorn.io
spec:
@ -459,7 +459,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: backingimages.longhorn.io
spec:
@ -634,7 +634,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: backups.longhorn.io
spec:
@ -827,7 +827,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: backuptargets.longhorn.io
spec:
@ -1010,7 +1010,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: backupvolumes.longhorn.io
spec:
@ -1174,10 +1174,11 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: engineimages.longhorn.io
spec:
preserveUnknownFields: false
conversion:
strategy: Webhook
webhook:
@ -1209,7 +1210,7 @@ spec:
jsonPath: .spec.image
name: Image
type: string
- description: Number of volumes are using the engine image
- description: Number of resources using the engine image
jsonPath: .status.refCount
name: RefCount
type: integer
@ -1251,7 +1252,7 @@ spec:
jsonPath: .spec.image
name: Image
type: string
- description: Number of volumes are using the engine image
- description: Number of resources using the engine image
jsonPath: .status.refCount
name: RefCount
type: integer
@ -1365,7 +1366,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: engines.longhorn.io
spec:
@ -1538,6 +1539,30 @@ spec:
type: object
nullable: true
type: object
conditions:
items:
properties:
lastProbeTime:
description: Last time we probed the condition.
type: string
lastTransitionTime:
description: Last time the condition transitioned from one status to another.
type: string
message:
description: Human-readable message indicating details about last transition.
type: string
reason:
description: Unique, one-word, CamelCase reason for the condition's last transition.
type: string
status:
description: Status is the status of the condition. Can be True, False, Unknown.
type: string
type:
description: Type is the type of the condition.
type: string
type: object
nullable: true
type: array
currentImage:
type: string
currentReplicaAddressMap:
@ -1686,7 +1711,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: instancemanagers.longhorn.io
spec:
@ -1856,10 +1881,11 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: nodes.longhorn.io
spec:
preserveUnknownFields: false
conversion:
strategy: Webhook
webhook:
@ -2086,7 +2112,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: orphans.longhorn.io
spec:
@ -2255,7 +2281,7 @@ spec:
jsonPath: .spec.groups
name: Groups
type: string
- description: Should be one of "backup" or "snapshot"
- description: Should be one of "snapshot", "snapshot-force-create", "snapshot-cleanup", "snapshot-delete", "backup" or "backup-force-create"
jsonPath: .spec.task
name: Task
type: string
@ -2317,10 +2343,14 @@ spec:
description: The retain count of the snapshot/backup.
type: integer
task:
description: The recurring job type. Can be "snapshot" or "backup".
description: The recurring job task. Can be "snapshot", "snapshot-force-create", "snapshot-cleanup", "snapshot-delete", "backup" or "backup-force-create".
enum:
- snapshot
- snapshot-force-create
- snapshot-cleanup
- snapshot-delete
- backup
- backup-force-create
type: string
type: object
status:
@ -2352,7 +2382,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: replicas.longhorn.io
spec:
@ -2499,6 +2529,30 @@ spec:
status:
description: ReplicaStatus defines the observed state of the Longhorn replica
properties:
conditions:
items:
properties:
lastProbeTime:
description: Last time we probed the condition.
type: string
lastTransitionTime:
description: Last time the condition transitioned from one status to another.
type: string
message:
description: Human-readable message indicating details about last transition.
type: string
reason:
description: Unique, one-word, CamelCase reason for the condition's last transition.
type: string
status:
description: Status is the status of the condition. Can be True, False, Unknown.
type: string
type:
description: Type is the type of the condition.
type: string
type: object
nullable: true
type: array
currentImage:
type: string
currentState:
@ -2544,7 +2598,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: settings.longhorn.io
spec:
@ -2635,7 +2689,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: sharemanagers.longhorn.io
spec:
@ -2746,7 +2800,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: snapshots.longhorn.io
spec:
@ -2870,10 +2924,11 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
longhorn-manager: ""
name: volumes.longhorn.io
spec:
preserveUnknownFields: false
conversion:
strategy: Webhook
webhook:
@ -3057,7 +3112,11 @@ spec:
task:
enum:
- snapshot
- snapshot-force-create
- snapshot-cleanup
- snapshot-delete
- backup
- backup-force-create
type: string
type: object
type: array
@ -3201,7 +3260,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
rules:
- apiGroups:
- apiextensions.k8s.io
@ -3262,7 +3321,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@ -3280,7 +3339,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
namespace: longhorn-system
rules:
- apiGroups:
@ -3300,7 +3359,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
namespace: longhorn-system
roleRef:
apiGroup: rbac.authorization.k8s.io
@ -3321,7 +3380,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
app: longhorn-manager
name: longhorn-backend
namespace: longhorn-system
@ -3342,7 +3401,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
app: longhorn-ui
name: longhorn-frontend
namespace: longhorn-system
@ -3363,7 +3422,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
app: longhorn-conversion-webhook
name: longhorn-conversion-webhook
namespace: longhorn-system
@ -3384,7 +3443,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
app: longhorn-admission-webhook
name: longhorn-admission-webhook
namespace: longhorn-system
@ -3405,7 +3464,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
name: longhorn-engine-manager
namespace: longhorn-system
spec:
@ -3421,7 +3480,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
name: longhorn-replica-manager
namespace: longhorn-system
spec:
@ -3437,7 +3496,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
app: longhorn-manager
name: longhorn-manager
namespace: longhorn-system
@ -3450,16 +3509,16 @@ spec:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
app: longhorn-manager
spec:
initContainers:
- name: wait-longhorn-admission-webhook
image: longhornio/longhorn-manager:v1.3.0
image: longhornio/longhorn-manager:v1.3.3-rc2
command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" -k https://longhorn-admission-webhook:9443/v1/healthz) != "200" ]; do echo waiting; sleep 2; done']
containers:
- name: longhorn-manager
image: longhornio/longhorn-manager:v1.3.0
image: longhornio/longhorn-manager:v1.3.3-rc2
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
@ -3468,15 +3527,15 @@ spec:
- -d
- daemon
- --engine-image
- "longhornio/longhorn-engine:v1.3.0"
- "longhornio/longhorn-engine:v1.3.3-rc2"
- --instance-manager-image
- "longhornio/longhorn-instance-manager:v1_20220611"
- "longhornio/longhorn-instance-manager:v1_20230317"
- --share-manager-image
- "longhornio/longhorn-share-manager:v1_20220531"
- "longhornio/longhorn-share-manager:v1_20230320"
- --backing-image-manager-image
- "longhornio/backing-image-manager:v3_20220609"
- "longhornio/backing-image-manager:v3_20230320"
- --manager-image
- "longhornio/longhorn-manager:v1.3.0"
- "longhornio/longhorn-manager:v1.3.3-rc2"
- --service-account
- longhorn-service-account
ports:
@ -3536,7 +3595,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
spec:
replicas: 1
selector:
@ -3547,23 +3606,23 @@ spec:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
app: longhorn-driver-deployer
spec:
initContainers:
- name: wait-longhorn-manager
image: longhornio/longhorn-manager:v1.3.0
image: longhornio/longhorn-manager:v1.3.3-rc2
command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
containers:
- name: longhorn-driver-deployer
image: longhornio/longhorn-manager:v1.3.0
image: longhornio/longhorn-manager:v1.3.3-rc2
imagePullPolicy: IfNotPresent
command:
- longhorn-manager
- -d
- deploy-driver
- --manager-image
- "longhornio/longhorn-manager:v1.3.0"
- "longhornio/longhorn-manager:v1.3.3-rc2"
- --manager-url
- http://longhorn-backend:9500/v1
env:
@ -3589,6 +3648,8 @@ spec:
value: "longhornio/csi-resizer:v1.2.0"
- name: CSI_SNAPSHOTTER_IMAGE
value: "longhornio/csi-snapshotter:v3.0.3"
- name: CSI_LIVENESS_PROBE_IMAGE
value: "longhornio/livenessprobe:v2.8.0"
serviceAccountName: longhorn-service-account
securityContext:
runAsUser: 0
@ -3600,12 +3661,12 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
app: longhorn-ui
name: longhorn-ui
namespace: longhorn-system
spec:
replicas: 1
replicas: 2
selector:
matchLabels:
app: longhorn-ui
@ -3614,12 +3675,24 @@ spec:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
app: longhorn-ui
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- longhorn-ui
topologyKey: kubernetes.io/hostname
containers:
- name: longhorn-ui
image: longhornio/longhorn-ui:v1.3.0
image: longhornio/longhorn-ui:v1.3.3-rc2
imagePullPolicy: IfNotPresent
volumeMounts:
- name : nginx-cache
@ -3651,7 +3724,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
app: longhorn-conversion-webhook
name: longhorn-conversion-webhook
namespace: longhorn-system
@ -3665,7 +3738,7 @@ spec:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
app: longhorn-conversion-webhook
spec:
affinity:
@ -3682,7 +3755,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: longhorn-conversion-webhook
image: longhornio/longhorn-manager:v1.3.0
image: longhornio/longhorn-manager:v1.3.3-rc2
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 2000
@ -3711,7 +3784,7 @@ metadata:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
app: longhorn-admission-webhook
name: longhorn-admission-webhook
namespace: longhorn-system
@ -3725,7 +3798,7 @@ spec:
labels:
app.kubernetes.io/name: longhorn
app.kubernetes.io/instance: longhorn
app.kubernetes.io/version: v1.3.0
app.kubernetes.io/version: v1.3.3-rc2
app: longhorn-admission-webhook
spec:
affinity:
@ -3742,14 +3815,14 @@ spec:
topologyKey: kubernetes.io/hostname
initContainers:
- name: wait-longhorn-conversion-webhook
image: longhornio/longhorn-manager:v1.3.0
image: longhornio/longhorn-manager:v1.3.3-rc2
command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" -k https://longhorn-conversion-webhook:9443/v1/healthz) != "200" ]; do echo waiting; sleep 2; done']
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 2000
containers:
- name: longhorn-admission-webhook
image: longhornio/longhorn-manager:v1.3.0
image: longhornio/longhorn-manager:v1.3.3-rc2
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 2000

View File

@ -26,11 +26,11 @@ spec:
- bash
- -c
- *cmd
image: alpine:3.12
image: alpine:3.17
securityContext:
privileged: true
containers:
- name: sleep
image: k8s.gcr.io/pause:3.1
image: registry.k8s.io/pause:3.1
updateStrategy:
type: RollingUpdate

View File

@ -31,6 +31,6 @@ spec:
privileged: true
containers:
- name: sleep
image: k8s.gcr.io/pause:3.1
image: registry.k8s.io/pause:3.1
updateStrategy:
type: RollingUpdate

View File

@ -15,7 +15,7 @@ https://github.com/longhorn/longhorn/issues/972
1. Previously, Longhorn used the filesystem ID as the key to the map of disks on the node. But we found there is no guarantee that the filesystem ID won't change after the node reboots for certain filesystems, e.g. XFS.
1. We want to enable the ability to configure the CRD directly, preparing for CRD-based API access in the future
1. We also need to make sure previously implemented safeguards are not impacted by this change:
1. If a disk was accidentally umounted on the node, we should detect that and stop replica from scheduling into it.
1. If a disk was accidentally unmounted on the node, we should detect that and stop replica from scheduling into it.
1. We shouldn't allow users to add two disks pointing to the same filesystem
### Non-goals

View File

@ -75,4 +75,4 @@ No special upgrade strategy is necessary. Once the user upgrades to the new vers
### Notes
- There is interest in allowing the user to decide on whether or not to retain the `Persistent Volume` (and possibly `Persistent Volume Claim`) for certain use cases such as restoring from a `Backup`. However, this would require changes to the way `go-rancher` generates the `Go` client that we use so that `Delete` requests against resources are able to take inputs.
- In the case that a `Volume` is provisioned from a `Storage Class` (and set to be `Deleted` once the `Persistent Volume Claim` utilizing that `Volume` has been deleted), the `Volume` should still be deleted properly regardless of how the deletion was initiated. If the `Volume` is deleted from the UI, the call that the `Volume Controller` makes to delete the `Persistent Volume` would only trigger one more deletion call from the `CSI` server to delete the `Volume`, which would return successfully and allow the `Persistent Volume` to be deleted and the `Volume` to be deleted as wekk. If the `Volume` is deleted because of the `Persistent Volume Claim`, the `CSI` server would be able to successfully make a `Volume` deletion call before deleting the `Persistent Volume`. The `Volume Controller` would have no additional resources to delete and be able to finish deletion of the `Volume`.
- In the case that a `Volume` is provisioned from a `Storage Class` (and set to be `Deleted` once the `Persistent Volume Claim` utilizing that `Volume` has been deleted), the `Volume` should still be deleted properly regardless of how the deletion was initiated. If the `Volume` is deleted from the UI, the call that the `Volume Controller` makes to delete the `Persistent Volume` would only trigger one more deletion call from the `CSI` server to delete the `Volume`, which would return successfully and allow the `Persistent Volume` to be deleted and the `Volume` to be deleted as well. If the `Volume` is deleted because of the `Persistent Volume Claim`, the `CSI` server would be able to successfully make a `Volume` deletion call before deleting the `Persistent Volume`. The `Volume Controller` would have no additional resources to delete and be able to finish deletion of the `Volume`.

View File

@ -16,7 +16,7 @@ https://github.com/longhorn/longhorn/issues/298
## Proposal
1. Add `Eviction Requested` with `true` and `false` selection buttons for disks and nodes. This is for user to evict or cancel the eviction of the disks or the nodes.
2. Add new `evictionRequested` field to `Node.Spec`, `Node.Spec.disks` Spec and `Replica.Status`. These will help tracking the request from user and trigger replica controller to update `Replica.Status` and volume controler to do the eviction. And this will reconcile with `scheduledReplica` of selected disks on the nodes.
2. Add new `evictionRequested` field to `Node.Spec`, `Node.Spec.disks` Spec and `Replica.Status`. These will help tracking the request from user and trigger replica controller to update `Replica.Status` and volume controller to do the eviction. And this will reconcile with `scheduledReplica` of selected disks on the nodes.
3. Display `fail to evict` error message to `Dashboard` and any other eviction errors to the `Event log`.
### User Stories
@ -47,7 +47,7 @@ From an API perspective, the call to set `Eviction Requested` to `true` or `fals
### Implementation Overview
1. On `Longhorn UI` `Node` page, for nodes eviction, adding `Eviction Requested` `true` and `false` options in the `Edit Node` sub-selection, next to `Node Scheduling`. For disks eviction, adding `Eviction Requested` `true` and `false` options in `Edit node and disks` sub-selection under `Operation` column next to each disk `Scheduling` options. This is for user to evict or cancel the eviction of the disks or the nodes.
2. Add new `evictionRequested` field to `Node.Spec`, `Node.Spec.disks` Spec and `Replica.Status`. These will help tracking the request from user and trigger replica controller to update `Replica.Status` and volume controler to do the eviction. And this will reconcile with `scheduledReplica` of selected disks on the nodes.
2. Add new `evictionRequested` field to `Node.Spec`, `Node.Spec.disks` Spec and `Replica.Status`. These will help tracking the request from user and trigger replica controller to update `Replica.Status` and volume controller to do the eviction. And this will reconcile with `scheduledReplica` of selected disks on the nodes.
3. Add an informer in `Replica Controller` to get this information and update the `evictionRequested` field in `Replica.Status`.
4. Once `Eviction Requested` has been set to `true` for disks or nodes, the `evictionRequested` fields for the disks and nodes will be set to `true` (default is `false`).
5. `Replica Controller` will update the `evictionRequested` field in `Replica.Status`, and `Volume Controller` will get this information from its replicas.
@ -61,7 +61,7 @@ From an API perspective, the call to set `Eviction Requested` to `true` or `fals
#### Manual Test Plan For Disks and Nodes Eviction
Positive Case:
For both `Replica Node Level Soft Anti-Affinity` has been enabled and disabled. Also the volume can be 'Attaced' or 'Detached'.
For both `Replica Node Level Soft Anti-Affinity` has been enabled and disabled. Also the volume can be 'Attached' or 'Detached'.
1. User can select one or more disks or nodes for eviction. Select `Eviction Requested` to `true` on the disabled disks or nodes, Longhorn should start rebuild replicas for the volumes which have replicas on the eviction disks or nodes, and after rebuild success, the replica number on the evicted disks or nodes should be 0. E.g. When there are 3 nodes in the cluster, and with `Replica Node Level Soft Anti-Affinity` is set to `false`, disable one node, and create a volume with replica count 2. And then evict one of them, the eviction should get stuck, then set `Replica Node Level Soft Anti-Affinity` to `true`, the eviction should go through.
Negative Cases:
@ -73,10 +73,10 @@ For `Replica Node Level Soft Anti-Affinity` is enabled, create 2 replicas on the
For `Replica Node Level Soft Anti-Affinity` is disabled, create 1 replica on a disk, and evict this disk or node, the replica should go to the other disk or node.
For node eviction, Longhorn will process the evition based on the disks for the node, this is like disk eviction. After eviction success, the replica number on the evicted node should be 0.
For node eviction, Longhorn will process the eviction based on the disks for the node, this is like disk eviction. After eviction success, the replica number on the evicted node should be 0.
#### Error Indication
During the eviction, user can click the `Replicas Number` on the `Node` page, and set which replicas are left from eviction, and click the `Replica Name` will redirect user to the `Volume` page to set if there is any error for this volume. If there is any error during the rebuild, Longhorn should display the error message from UI. The error could be `failed to schedule a replica` due to disk space or based on schedule policy, can not find a valid disk to put the relica.
During the eviction, user can click the `Replicas Number` on the `Node` page, and set which replicas are left from eviction, and click the `Replica Name` will redirect user to the `Volume` page to set if there is any error for this volume. If there is any error during the rebuild, Longhorn should display the error message from UI. The error could be `failed to schedule a replica` due to disk space or based on schedule policy, can not find a valid disk to put the replica.
### Upgrade strategy
No special upgrade strategy is necessary. Once the user upgrades to the new version of `Longhorn`, these new capabilities will be accessible from the `longhorn-ui` without any special work.

View File

@ -61,12 +61,12 @@ Same as the Design
### Test plan
1. Setup a cluster of 3 nodes
1. Install Longhorn and set `Default Replica Count = 2` (because we will turn off one node)
1. Create a SetfullSet with 2 pods using the command:
1. Create a StatefulSet with 2 pods using the command:
```
kubectl create -f https://raw.githubusercontent.com/longhorn/longhorn/master/examples/statefulset.yaml
```
1. Create a volume + pv + pvc named `vol1` and create a deployment of default ubuntu named `shell` with the usage of pvc `vol1` mounted under `/mnt/vol1`
1. Find the node which contains one pod of the StatefullSet/Deployment. Power off the node
1. Find the node which contains one pod of the StatefulSet/Deployment. Power off the node
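For that last step, a quick way to find which node hosts a given pod; the placeholder pod name is illustrative and not taken from the example manifests:
```bash
kubectl get pods -o wide                                    # the NODE column shows the hosting node
kubectl get pod <pod-name> -o jsonpath='{.spec.nodeName}'  # or query a single pod directly
```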
#### StatefulSet
##### if `NodeDownPodDeletionPolicy ` is set to `do-nothing ` | `delete-deployment-pod`

View File

@ -119,7 +119,7 @@ UI modification:
* On the right volume info panel, add a <div> to display `selectedVolume.dataLocality`
* On the right volume panel, in the Health row, add an icon for data locality status.
Specifically, if `dataLocality=best-effort` but there is not a local replica then display a warning icon.
Similar to the replica node redundancy wanring [here](https://github.com/longhorn/longhorn-ui/blob/0a52c1f0bef172d8ececdf4e1e953bfe78c86f29/src/routes/volume/detail/VolumeInfo.js#L47)
Similar to the replica node redundancy warning [here](https://github.com/longhorn/longhorn-ui/blob/0a52c1f0bef172d8ececdf4e1e953bfe78c86f29/src/routes/volume/detail/VolumeInfo.js#L47)
* In the volume's actions dropdown, add a new action to update `dataLocality`
1. In Rancher UI, add a parameter `dataLocality` when creating a storage class using the Longhorn provisioner.
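As a rough illustration only (not part of this proposal's diff), a StorageClass using the Longhorn provisioner could pass the new parameter like this; the name and the other parameter values are placeholders:
```bash
kubectl apply -f - <<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: longhorn-best-effort-locality   # placeholder name
provisioner: driver.longhorn.io
parameters:
  numberOfReplicas: "2"
  dataLocality: "best-effort"
EOF
```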

View File

@ -15,7 +15,7 @@ https://github.com/longhorn/longhorn/issues/508
1. By default 'DisableRevisionCounter' is 'false', but Longhorn provides an option for users to disable it.
2. Once users set 'DisableRevisionCounter' to 'true' globally or individually, this will improve Longhorn data path performance.
3. And when 'DisableRevisionCounter' is 'true', Longhorn will keep the ability to find the most suitable replica to recover the volume when the engine is faulted (all the replicas are in 'ERR' state).
4. Also during Longhorn Engine starting, with head file information it's unlikly to find out out of synced replicas. So will skip the check.
4. Also during Longhorn Engine starting, with head file information it's unlikely to find out out of synced replicas. So will skip the check.
## Proposal
@ -41,7 +41,7 @@ Or from StorageClass yaml file, user can set 'parameters' 'revisionCounterDisabl
Users can also set 'DisableRevisionCounter' for each individual volume created by the Longhorn UI; this individual setting will override the global setting.
Once the volume has 'DisableRevisionCounter' to 'true', there won't be revision counter file. And the 'Automatic salvage' is 'true', when the engine is fauled, the engine will pick the most suitable replica as 'Source of Truth' to recover the volume.
Once the volume has 'DisableRevisionCounter' to 'true', there won't be revision counter file. And the 'Automatic salvage' is 'true', when the engine is faulted, the engine will pick the most suitable replica as 'Source of Truth' to recover the volume.
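A minimal sketch of the StorageClass route mentioned above, using the `revisionCounterDisabled` parameter name from this proposal; everything else is illustrative:
```bash
kubectl apply -f - <<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: longhorn-no-revision-counter   # placeholder name
provisioner: driver.longhorn.io
parameters:
  numberOfReplicas: "3"
  revisionCounterDisabled: "true"
EOF
```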
### API changes
@ -63,12 +63,12 @@ And for the API compatibility issues, always check the 'EngineImage.Statue.cliAP
1. Add 'Volume.Spec.RevisionCounterDisabled', 'Replica.Spec.RevisionCounterDisabled' and 'Engine.Spec.RevisionCounterDisabled' to volume, replica and engine objects.
2. Once 'RevisionCounterDisabled' is 'true', the volume controller will set 'Volume.Spec.RevisionCounterDisabled' to true, and 'Replica.Spec.RevisionCounterDisabled' and 'Engine.Spec.RevisionCounterDisabled' will be set to true. And during 'ReplicaProcessCreate' and 'EngineProcessCreate', this will be passed to the engine replica process and engine controller process to start a replica and controller without a revision counter.
3. During 'ReplicaProcessCreate' and 'EngineProcessCreate', if 'Replica.Spec.RevisionCounterDisabled' or 'Engine.Spec.RevisionCounterDisabled' is true, it will pass extra parameter to engine replica to start replica without revision counter or to engine controller to start controller without revision counter support, otherwise keep it the same as current and engine replica will use the default value 'false' for this extra paramter. This is the same as the engine controller to set the 'salvageRequested' flag.
3. During 'ReplicaProcessCreate' and 'EngineProcessCreate', if 'Replica.Spec.RevisionCounterDisabled' or 'Engine.Spec.RevisionCounterDisabled' is true, it will pass extra parameter to engine replica to start replica without revision counter or to engine controller to start controller without revision counter support, otherwise keep it the same as current and engine replica will use the default value 'false' for this extra parameter. This is the same as the engine controller to set the 'salvageRequested' flag.
4. Add 'RevisionCounterDisabled' in 'ReplicaInfo'; when the engine controller starts, it will get all replica information.
4. For engine controlloer starting cases:
4. For engine controller starting cases:
- If revision counter is not disabled, stay with the current logic.
- If revision counter is disabled, engine will not check the synchronization of the replicas.
- If unexpected case (engine controller has revision counter diabled but any of the replica doesn't, or engine controller has revision counter enabled, but any of the replica doesn't), engine controller will log this as error and mark unmatched replicas to 'ERR'.
- If unexpected case (engine controller has revision counter disabled but any of the replica doesn't, or engine controller has revision counter enabled, but any of the replica doesn't), engine controller will log this as error and mark unmatched replicas to 'ERR'.
#### Add New Logic for Salvage

View File

@ -47,7 +47,7 @@ No API change is required.
3. replica eviction happens (volume.Status.Robustness is Healthy)
4. there is no potential reusable replica
5. there is a potential reusable replica but the replica replenishment wait interval is passed.
3. Reuse the failed replica by cleaning up `ReplicaSpec.HealthyAt` and `ReplicaSpec.FailedAt`. And `Replica.Spec.RebuildRetryCount` will be increasd by 1.
3. Reuse the failed replica by cleaning up `ReplicaSpec.HealthyAt` and `ReplicaSpec.FailedAt`. And `Replica.Spec.RebuildRetryCount` will be increased by 1.
4. Clean up the related record in `Replica.Spec.RebuildRetryCount` when the rebuilding replica's mode becomes `RW`.
5. Guarantee the reused failed replica will be stopped before re-launching it.

View File

@ -72,7 +72,7 @@ For example, there are many times users ask us for supporting and the problems w
If there is a CPU monitoring dashboard for instance managers, those problems can be quickly detected.
#### Story 2
User want to be notified about abnomal event such as disk space limit approaching.
User want to be notified about abnormal event such as disk space limit approaching.
We can expose metrics that provide information about it, and users can scrape the metrics and set up an alert system.
### User Experience In Detail
@ -82,7 +82,7 @@ Users can use Prometheus or other monitoring systems to collect those metrics by
Then, users can display the collected data using tools such as Grafana.
Users can also set up alerts by using tools such as Prometheus Alertmanager.
Below are the desciptions of metrics which Longhorn exposes and how users can use them:
Below are the descriptions of metrics which Longhorn exposes and how users can use them:
1. longhorn_volume_capacity_bytes
@ -347,7 +347,7 @@ We add a new end point `/metrics` to exposes all longhorn Prometheus metrics.
### Implementation Overview
We follow the [Prometheus best practice](https://prometheus.io/docs/instrumenting/writing_exporters/#deployment), each Longhorn manager reports information about the components it manages.
Prometheus can use service discovery mechanisim to find all longhorn-manager pods in longhorn-backend service.
Prometheus can use service discovery mechanism to find all longhorn-manager pods in longhorn-backend service.
We create a new collector for each type (volumeCollector, backupCollector, nodeCollector, etc..) and have a common baseCollector.
This structure is similar to the controller package: we have volumeController, nodeController, etc.. which have a common baseController.
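As a hedged sanity check (an editorial sketch, not part of the proposal), the endpoint can be scraped ad hoc from inside the cluster; the service name and port come from the longhorn-backend service in the manifests above, and the metric name from the list in this proposal:
```bash
kubectl -n longhorn-system run metrics-check --rm -i --restart=Never \
  --image=curlimages/curl --command -- \
  curl -s http://longhorn-backend:9500/metrics \
  | grep '^longhorn_volume_capacity_bytes'
```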

View File

@ -45,7 +45,7 @@ For part 2, we upgrade engine image for a volume when the following conditions a
### User Stories
Before this enhancement, users have to manually upgrade engine images for volume after upgrading Longhorn system to a newer version.
If there are thoudsands of volumes in the system, this is a significant manual work.
If there are thousands of volumes in the system, this is a significant manual work.
After this enhancement users either have to do nothing (in case live upgrade is possible)
or they only have to scale down/up the workload (in case there is a new default IM image)

View File

@ -70,7 +70,7 @@ spec:
url: https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
```
Afterwards deploy the `cirros-rwx-blk.yaml` to create a live migratabale virtual machine.
Afterwards deploy the `cirros-rwx-blk.yaml` to create a live migratable virtual machine.
```yaml
apiVersion: kubevirt.io/v1alpha3
kind: VirtualMachine

View File

@ -155,14 +155,14 @@ With an example of cluster set for 2 zones and default of 2 replicas volume:
- The default value is `ignored`.
- In Volume Controller `syncVolume` -> `ReconcileEngineReplicaState` -> `replenishReplicas`, calculate and add number of replicas to be rebalanced to `replenishCount`.
> The logic ignores all `soft-anti-affinity` settings. This will always try to achieve zone balance then node balance. And creating for replicas will leave for ReplicaScheduler to determine for the canidates.
> The logic ignores all `soft-anti-affinity` settings. This will always try to achieve zone balance then node balance. And creating for replicas will leave for ReplicaScheduler to determine for the candidates.
1. Skip volume replica rebalance when volume spec `replicaAutoBalance` is `disabled`.
2. Skip if volume `Robustness` is not `healthy`.
3. For `least-effort`, try to get the replica rebalance count.
1. For `zone` duplicates, get the replenish number.
1. List all the occupied node zones with volume replicas running.
- The zone is balanced when this is equal to volume spec `NumberOfReplicas`.
2. List all available and schedulabled nodes in non-occupied zones.
2. List all available and schedulable nodes in non-occupied zones.
- The zone is balanced when no available nodes are found.
3. Get the number of replicas off-balanced:
- number of replicas in volume spec - number of occupied node zones.

View File

@ -354,7 +354,7 @@ Labels
[labels/2]: [b]
```
- `Name` field should be immutable.
- `Task` field should be imuutable.
- `Task` field should be immutable.
*And* user edit the fields in the form.

View File

@ -337,7 +337,7 @@ After the enhancement, users can directly specify the BackingImage during volume
- BackingImageDataSource has not been created. Add retry would solve this case.
- BackingImageDataSource is gone but BackingImage has not been cleaned up. Longhorn can ignore BackingImageDataSource when BackingImage deletion timestamp is set.
- BackingImage disk cleanup:
- This cannot break the HA besides affacting replicas. The main idea is similar to the cleanup in BackingImage Controller.
- This cannot break the HA besides attaching replicas. The main idea is similar to the cleanup in BackingImage Controller.
9. In CSI:
- Check the backing image during the volume creation.
- The missing BackingImage will be created when both BackingImage name and data source info are provided.
@ -370,7 +370,7 @@ After the enhancement, users can directly specify the BackingImage during volume
- Similar to `Fetch`, the image will try to reuse existing files.
- The manager is responsible for managing all ports. The image will use the functions provided by the manager to get and then release ports.
- API `Send`: Send a backing image file to a receiver. This should be similar to replica rebuilding.
- API `Delete`: Unregister the image then delete the imge work directory. Make sure syncing or pulling will be cancelled if exists.
- API `Delete`: Unregister the image then delete the image work directory. Make sure syncing or pulling will be cancelled if exists.
- API `Get`/`List`: Collect the status of one backing image file/all backing image files.
- API `Watch`: establish a streaming connection to report BackingImage file info.
- As I mentioned above, we will use BackingImage UUID to generate work directories for each BackingImage. The work directory is like:

View File

@ -190,7 +190,7 @@ Using those methods, the Sparse-tools know where is a data/hole interval to tran
### Longhorn CSI plugin
* Advertise that Longhorn CSI driver has ability to clone a volume, `csi.ControllerServiceCapability_RPC_CLONE_VOLUME`
* When receiving a volume create request, inspect `req.GetVolumeContentSource()` to see if it is from anther volume.
* When receiving a volume create request, inspect `req.GetVolumeContentSource()` to see if it is from another volume.
If so, create a new Longhorn volume with appropriate `DataSource` set so Longhorn volume controller can start cloning later on.
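For illustration, a clone request from the user's side is just a standard CSI `dataSource` of kind `PersistentVolumeClaim`; the names and size below are placeholders:
```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cloned-pvc          # placeholder
spec:
  storageClassName: longhorn
  dataSource:
    name: source-pvc        # existing Longhorn-backed PVC (placeholder)
    kind: PersistentVolumeClaim
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
EOF
```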
### Test plan

View File

@ -66,7 +66,7 @@ After the enhancement, Longhorn automatically finds out the orphaned replica dir
- Users can enable the global auto-deletion on setting page. By default, the auto-deletion is disabled.
- Via `kubectl`
- Users can list the orphaned replica directoris by `kubectl -n longhorn-system get orphans`.
- Users can list the orphaned replica directories by `kubectl -n longhorn-system get orphans`.
- Users can delete the orphaned replica directories by `kubectl -n longhorn-system delete orphan <name>`.
- Users can enable the global auto-deletion by `kubectl -n longhorn-system edit settings orphan-auto-deletion`

View File

@ -0,0 +1,110 @@
# Failed Backup Clean Up
## Summary
Longhorn leaves failed backups behind and does not delete them automatically until the backup target is removed. Failed backup cleanup should occur when a backup to the remote backup target fails. This LEP will trigger the deletion of failed backups automatically.
### Related Issues
[[IMPROVEMENT] Support failed/obsolete orphaned backup cleanup](https://github.com/longhorn/longhorn/issues/3898)
## Motivation
### Goals
- Support the auto-deletion of failed backups that exceeded the TTL.
- Support the global auto-deletion option of failed backups cleanup for users.
- The process should not be stuck in the reconciliation of the controllers.
### Non-goals [optional]
- Clean up unknown files or directories on the remote backup target.
## Proposal
1. The `backup_volume_controller` will be responsible for deleting a Backup CR when there is a backup whose state is `Error` or `Unknown`.
The reconciliation procedure of the `backup_volume_controller` gets the latest failed backups from the datastore and deletes them.
```text
queue ┌───────────────┐
┌┐ ┌┐ ┌┐ │ │
... ││ ││ ││ ──────► │ syncHandler() |
└┘ └┘ └┘ │ │
└───────┬───────┘
┌──────────▼───────────┐
│ │
│ reconcile() |
│ │
└──────────┬───────────┘
┌──────────▼───────────┐
│ │
│ get failed backups │
│ |
| then delete them │
│ │
└──────────────────────┘
```
### User Stories
When a user or recurring job tries to make a backup and store it in the remote backup target, many situations can cause the backup procedure to fail. In some cases, failed backups remain in the Longhorn system and are not handled until the user removes the backup target, or users manage the failed backups manually via the Longhorn GUI or command line tools.
After the enhancement, Longhorn can delete the failed backups automatically after enabling auto-deletion.
### User Experience In Detail
- Via Longhorn GUI
- Users can see that a backup failed if auto-deletion is disabled.
- Users can check the event log to understand why the backup failed and was deleted.
- Via `kubectl`
- Users can list the failed backups by `kubectl -n longhorn-system get backups` if auto-deletion is disabled.
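For the `kubectl` route above, a rough sketch of filtering for failed backups; the `status.state` field name is assumed from the Backup CRD, and the state strings follow the ones named in this LEP:
```bash
kubectl -n longhorn-system get backups.longhorn.io -o json \
  | jq -r '.items[]
           | select(.status.state == "Error" or .status.state == "Unknown")
           | .metadata.name'
```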
## Design
### Implementation Overview
**Settings**
- Add setting `failed-backup-ttl`. The default value is `1440` minutes; set it to `0` to disable the auto-deletion.
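A hedged example of inspecting or changing the proposed setting, mirroring the `kubectl edit settings` pattern used for `orphan-auto-deletion` elsewhere in these proposals; the setting name is taken from this LEP:
```bash
kubectl -n longhorn-system get settings failed-backup-ttl
kubectl -n longhorn-system edit settings failed-backup-ttl   # set the value to "0" to disable auto-deletion
```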
**Failed Backup**
- Backups in the state `longhorn.BackupStateError` or `longhorn.BackupStateUnknown`.
**Backup Controller**
- Start the monitor and sync the backup status with the monitor in each reconcile loop.
- Update the backup status.
- Trigger `backup_volume_controller` to delete the failed backups.
**Backup Volume controller**
- The reconcile loop is usually triggered after backupstore polling, which is controlled by the **Backupstore Poll Interval** setting.
- Get all backups in each reconcile loop.
- Identify failed backups among all backups and try to delete them by default.
- Update the backup volume CR status.
### Test plan
**Integration tests**
- `backups` CRs with `Error` or `Unknown` state will be removed by `backup_volume_controller` triggered by backupstore polling when the `backup_monitor` detects the backup failed.
- `backups` CRs with `Error` or `Unknown` state will not be handled if the auto-deletion is disabled.
## Note [optional]
### Why not leverage the current orphan framework
1. We already have the backup CR to handle backup resources, and a failed backup is not like an orphaned replica, which is not owned by any volume at the beginning.
2. Cascading deletion of an orphan CR and a backup CR would be more complicated than just handling the failed backups immediately when the backup procedure fails. In both this LEP and the orphan framework, we would delete the failed backups via the `backup_volume_controller`.
3. Listing orphaned backups and failed backups on the two UI pages `Orphaned Data` and `Backup` might be confusing for users. Deleting items manually on either page would run into the complication mentioned in statement 2.

View File

@ -29,7 +29,7 @@ What is out of scope for this enhancement? Listing non-goals helps to focus disc
This is where we get down to the nitty gritty of what the proposal actually is.
### User Stories
Detail the things that people will be able to do if this enhancement is implemented. A good practise is including a comparsion of what user cannot do before the enhancement implemented, why user would want an enhancement and what user need to do after, to make it clear why the enhancement beneficial to the user.
Detail the things that people will be able to do if this enhancement is implemented. A good practise is including a comparison of what user cannot do before the enhancement implemented, why user would want an enhancement and what user need to do after, to make it clear why the enhancement beneficial to the user.
The experience details should be in the `User Experience In Detail` later.

View File

@ -19,7 +19,7 @@ spec:
image: ubuntu:xenial
tty: true
command: [ "/bin/sh" ]
args: [ "-c", "cp -r -v /mnt/old/* /mnt/new" ]
args: [ "-c", "cp -r -v /mnt/old/. /mnt/new" ]
volumeMounts:
- name: old-vol
mountPath: /mnt/old

View File

@ -16,7 +16,7 @@ spec:
--output-file '/tmp/restore/<OUTPUT_FILE>'
--output-format <OUTPUT_FORMAT>
# the version of longhorn engine should be v0.4.1 or higher
image: rancher/longhorn-engine:v0.4.1
image: longhornio/longhorn-engine:<LONGHORN_VERSION>
imagePullPolicy: IfNotPresent
securityContext:
privileged: true

View File

@ -32,7 +32,7 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: k8s.gcr.io/nginx-slim:0.8
image: registry.k8s.io/nginx-slim:0.8
livenessProbe:
exec:
command:

View File

@ -1,5 +1,8 @@
#!/bin/bash
######################################################
# Log
######################################################
export RED='\x1b[0;31m'
export GREEN='\x1b[38;5;22m'
export CYAN='\x1b[36m'
@ -67,17 +70,9 @@ error() {
fi
}
detect_node_os()
{
local pod="$1"
OS=`kubectl exec -it $pod -- nsenter --mount=/proc/1/ns/mnt -- bash -c 'grep -E "^ID_LIKE=" /etc/os-release | cut -d= -f2'`
if [[ -z "${OS}" ]]; then
OS=`kubectl exec -it $pod -- nsenter --mount=/proc/1/ns/mnt -- bash -c 'grep -E "^ID=" /etc/os-release | cut -d= -f2'`
fi
echo "$OS"
}
######################################################
# Check logics
######################################################
set_packages_and_check_cmd()
{
case $OS in
@ -97,6 +92,10 @@ set_packages_and_check_cmd()
CHECK_CMD='pacman -Q'
PACKAGES=(nfs-utils open-iscsi)
;;
*"gentoo"* )
CHECK_CMD='qlist -I'
PACKAGES=(net-fs/nfs-utils sys-block/open-iscsi)
;;
*)
CHECK_CMD=''
PACKAGES=()
@ -106,23 +105,45 @@ set_packages_and_check_cmd()
esac
}
check_dependencies() {
detect_node_kernel_release()
{
local pod="$1"
KERNEL_RELEASE=$(kubectl exec -i $pod -- nsenter --mount=/proc/1/ns/mnt -- bash -c 'uname -r')
echo "$KERNEL_RELEASE"
}
detect_node_os()
{
local pod="$1"
OS=$(kubectl exec -i $pod -- nsenter --mount=/proc/1/ns/mnt -- bash -c 'grep -E "^ID_LIKE=" /etc/os-release | cut -d= -f2')
if [[ -z "${OS}" ]]; then
OS=$(kubectl exec -i $pod -- nsenter --mount=/proc/1/ns/mnt -- bash -c 'grep -E "^ID=" /etc/os-release | cut -d= -f2')
fi
echo "$OS"
}
check_local_dependencies() {
local targets=($@)
local allFound=true
local all_found=true
for ((i=0; i<${#targets[@]}; i++)); do
local target=${targets[$i]}
if [ "$(which $target)" == "" ]; then
allFound=false
all_found=false
error "Not found: $target"
fi
done
if [ "$allFound" == "false" ]; then
error "Please install missing dependencies."
if [ "$all_found" == "false" ]; then
msg="Please install missing dependencies: ${targets[@]}."
info "$msg"
exit 2
else
info "Required dependencies are installed."
fi
msg="Required dependencies '${targets[@]}' are installed."
info "$msg"
}
create_ds() {
@ -211,12 +232,12 @@ check_mount_propagation() {
}
check_package_installed() {
local pods=$(kubectl get pods -o name | grep longhorn-environment-check)
local pods=$(kubectl get pods -o name -l app=longhorn-environment-check)
local allFound=true
local all_found=true
for pod in ${pods}; do
OS=`detect_node_os $pod`
OS=$(detect_node_os $pod)
if [ x"$OS" == x"" ]; then
error "Unable to detect OS on node $node."
exit 2
@ -227,72 +248,137 @@ check_package_installed() {
for ((i=0; i<${#PACKAGES[@]}; i++)); do
local package=${PACKAGES[$i]}
kubectl exec -it $pod -- nsenter --mount=/proc/1/ns/mnt -- timeout 30 bash -c "$CHECK_CMD $package" > /dev/null 2>&1
kubectl exec -i $pod -- nsenter --mount=/proc/1/ns/mnt -- timeout 30 bash -c "$CHECK_CMD $package" > /dev/null 2>&1
if [ $? != 0 ]; then
allFound=false
node=`kubectl get ${pod} --no-headers -o=custom-columns=:.spec.nodeName`
all_found=false
node=$(kubectl get ${pod} --no-headers -o=custom-columns=:.spec.nodeName)
error "$package is not found in $node."
fi
done
done
if [ "$allFound" == "false" ]; then
if [ "$all_found" == "false" ]; then
error "Please install missing packages."
exit 2
else
info "Required packages are installed."
fi
info "Required packages are installed."
}
check_hostname_uniqueness() {
hostnames=$(kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="Hostname")].address}')
declare -A deduplicate_hostnames
num_nodes=0
for hostname in ${hostnames}; do
num_nodes=$((num_nodes+1))
deduplicate_hostnames["${hostname}"]="${hostname}"
done
if [ "${#deduplicate_hostnames[@]}" != "${num_nodes}" ]; then
error "Nodes do not have unique hostnames."
exit 2
fi
info "Hostname uniqueness check is passed."
}
check_multipathd() {
local pods=$(kubectl get pods -o name | grep longhorn-environment-check)
local allNotFound=true
local pods=$(kubectl get pods -o name -l app=longhorn-environment-check)
local all_not_found=true
for pod in ${pods}; do
kubectl exec -t $pod -- nsenter --mount=/proc/1/ns/mnt -- bash -c "systemctl status --no-pager multipathd.service" > /dev/null 2>&1
if [ $? = 0 ]; then
allNotFound=false
node=`kubectl get ${pod} --no-headers -o=custom-columns=:.spec.nodeName`
all_not_found=false
node=$(kubectl get ${pod} --no-headers -o=custom-columns=:.spec.nodeName)
warn "multipathd is running on $node."
fi
done
if [ "$allNotFound" == "false" ]; then
if [ "$all_not_found" == "false" ]; then
warn "multipathd would probably result in the Longhorn volume mount failure. Please refer to https://longhorn.io/kb/troubleshooting-volume-with-multipath for more information."
fi
}
check_iscsid() {
local pods=$(kubectl get pods -o name | grep longhorn-environment-check)
local allFound=true
local pods=$(kubectl get pods -o name -l app=longhorn-environment-check)
local all_found=true
for pod in ${pods}; do
kubectl exec -t $pod -- nsenter --mount=/proc/1/ns/mnt -- bash -c "systemctl status --no-pager iscsid.service" > /dev/null 2>&1
if [ $? != 0 ]; then
allFound=false
node=`kubectl get ${pod} --no-headers -o=custom-columns=:.spec.nodeName`
all_found=false
node=$(kubectl get ${pod} --no-headers -o=custom-columns=:.spec.nodeName)
error "iscsid is not running on $node."
fi
done
if [ "$allFound" == "false" ]; then
if [ "$all_found" == "false" ]; then
exit 2
fi
}
DEPENDENCIES=(kubectl jq mktemp)
check_dependencies ${DEPENDENCIES[@]}
check_nfs_client_kernel_support() {
local pods=$(kubectl get pods -o name -l app=longhorn-environment-check)
local all_found=true
local nfs_client_kernel_configs=("CONFIG_NFS_V4_1" "CONFIG_NFS_V4_2")
for config in "${nfs_client_kernel_configs[@]}"; do
declare -A nodes=()
for pod in ${pods}; do
local kernel_release=$(detect_node_kernel_release $pod)
if [ x"$kernel_release" == x"" ]; then
error "Unable to detect kernel release on node $node."
exit 2
fi
node=$(kubectl get ${pod} --no-headers -o=custom-columns=:.spec.nodeName)
res=$(kubectl exec -t $pod -- nsenter --mount=/proc/1/ns/mnt -- bash -c "grep -E \"^# ${config} is not set\" /boot/config-${kernel_release}" > /dev/null 2>&1)
if [[ $? == 0 ]]; then
all_found=false
nodes["${node}"]="${node}"
else
res=$(kubectl exec -t $pod -- nsenter --mount=/proc/1/ns/mnt -- bash -c "grep -E \"^${config}=\" /boot/config-${kernel_release}" > /dev/null 2>&1)
if [[ $? != 0 ]]; then
all_found=false
warn "Unable to check kernel config ${config} on node ${node}"
fi
fi
done
if [ ${#nodes[@]} != 0 ]; then
warn ""${config}" kernel config is not enabled on nodes ${nodes[*]}."
fi
done
if [[ ${all_found} == false ]]; then
warn "NFS client kernel support, ${nfs_client_kernel_configs[*]}, is not enabled on Longhorn nodes. Please refer to https://longhorn.io/docs/1.4.0/deploy/install/#installing-nfsv4-client for more information."
fi
}
######################################################
# Main logics
######################################################
DEPENDENCIES=("kubectl" "jq" "mktemp")
check_local_dependencies "${DEPENDENCIES[@]}"
# Check the each host has a unique hostname (for RWX volume)
check_hostname_uniqueness
# Create a daemonset for checking the requirements in each node
TEMP_DIR=$(mktemp -d)
trap cleanup EXIT
create_ds
wait_ds_ready
check_nfs_client_kernel_support
check_package_installed
check_iscsid
check_multipathd
check_mount_propagation
exit 0
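As an editorial aside (not part of this diff): assuming the script lives at `scripts/environment_check.sh` in this repository, it is run directly against the cluster that the current `kubectl` context points to and relies on the local dependencies checked above (`kubectl`, `jq`, `mktemp`):
```bash
bash scripts/environment_check.sh
```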

View File

@ -0,0 +1,201 @@
#!/bin/bash
#set -x
kubectl get-all version &> /dev/null
if [ $? -ne 0 ]; then
echo "ERROR: command (kubectl get-all) is not found. Please install it here: https://github.com/corneliusweig/ketall#installation"
exit 1
fi
set -e
usage() {
echo ""
echo "The migration includes:"
echo "1. Running the script with --type=migrate to migrate the labels and annotations for Longhorn resources"
echo "2. Manually installing Longhorn chart in app&marketplace UI"
echo "3. Running script with --type=cleanup to remove the old Longhorn chart from old catalog UI"
echo ""
echo "usage:"
echo "$0 [options]"
echo " -u | --upstream-kubeconfig: upstream rancher cluster kubeconfig path"
echo " -d | --downstream-kubeconfig: downstream cluster kubeconfig path"
echo " -t | --type: specify the type you want to run (migrate or cleanup)"
echo " --dry-run: do not run migriation"
echo ""
echo "example:"
echo " $0 -u /path/to/upstream/rancher/cluster/kubeconfig -d /path/to/downstream/cluster/kubeconfig"
}
SCRIPT_DIR="$(dirname "$0")"
UPSTREAM_KUBECONFIG=""
DOWNSTREAM_KUBECONFIG=""
KUBECTL_DRY_RUN=""
while [ "$1" != "" ]; do
case $1 in
-u | --upstream-kubeconfig)
shift
UPSTREAM_KUBECONFIG="$1"
;;
-d | --downstream-kubeconfig)
shift
DOWNSTREAM_KUBECONFIG="$1"
;;
-t | --type)
shift
TYPE="$1"
;;
--dry-run)
KUBECTL_DRY_RUN="--dry-run=client"
;;
*)
usage
exit 1
;;
esac
shift
done
if [ -z "$UPSTREAM_KUBECONFIG" ]; then
echo "--upstream-kubeconfig is mandatory"
usage
exit 1
fi
if [ -z "$DOWNSTREAM_KUBECONFIG" ]; then
echo "--downstream-kubeconfig is mandatory"
usage
exit 1
fi
if [ "$TYPE" != "migrate" ] && [ "$TYPE" != "cleanup" ] ; then
echo "--type must be set to migrate or cleanup"
usage
exit 1
fi
# Longhorn Namespace
RELEASE_NAMESPACE=longhorn-system
# Longhorn Release Name
RELEASE_NAME=longhorn-system
echo "Looking up Rancher Project App '${RELEASE_NAME}' ..."
DOWNSTREAMCLUSTERID=$(cat ${DOWNSTREAM_KUBECONFIG} | grep "server:.*https://.*/k8s/clusters/.*" | awk -F'/' '{print $(NF)}' | awk -F'"' '{print $1}')
RANCHERAPP=$(kubectl --kubeconfig ${UPSTREAM_KUBECONFIG} get --all-namespaces apps.project.cattle.io -o jsonpath='{range.items[*]}{.metadata.namespace} {.metadata.name} {.spec.targetNamespace} {.spec.projectName} {.spec.externalId}{"\n"}{end}' | grep -s "${RELEASE_NAME} ${RELEASE_NAMESPACE} ${DOWNSTREAMCLUSTERID}")
RANCHERAPPNS=$(echo "${RANCHERAPP}" | awk '{print $1}')
RANCHERAPPEXTERNALID=$(echo "${RANCHERAPP}" | awk '{print $5}')
RANCHERAPPCATALOG=$(echo "${RANCHERAPPEXTERNALID}" | sed -n 's/.*catalog=\(.*\)/\1/p' | awk -F '&' '{print $1}' | sed 's/migrated-//')
RANCHERAPPTEMPLATE=$(echo "${RANCHERAPPEXTERNALID}" | sed -n 's/.*template=\(.*\)/\1/p' | awk -F '&' '{print $1}')
RANCHERAPPTEMPLATEVERSION=$(echo "${RANCHERAPPEXTERNALID}" | sed -n 's/.*version=\(.*\)/\1/p' | awk -F '&' '{print $1}')
RANCHERAPPVALUES=""
RANCHERAPPANSWERS=""
if [ -z "$DOWNSTREAMCLUSTERID" ] || [ -z "$RANCHERAPP" ] || [ -z "$RANCHERAPPNS" ] || [ -z "$RANCHERAPPCATALOG" ] || [ -z "$RANCHERAPPTEMPLATE" ] || [ -z "$RANCHERAPPTEMPLATEVERSION" ]; then
echo "Rancher Project App '${RELEASE_NAME}' not found!"
exit 1
fi
RANCHERAPPVALUES=$(kubectl --kubeconfig ${UPSTREAM_KUBECONFIG} -n ${RANCHERAPPNS} get apps.project.cattle.io ${RELEASE_NAME} -o go-template='{{if .spec.valuesYaml}}{{.spec.valuesYaml}}{{end}}')
if [ -z "${RANCHERAPPVALUES}" ]; then
RANCHERAPPANSWERS=$(kubectl --kubeconfig ${UPSTREAM_KUBECONFIG} -n ${RANCHERAPPNS} get apps.project.cattle.io ${RELEASE_NAME} -o go-template='{{if .spec.answers}}{{range $key,$value := .spec.answers}}{{$key}}: {{$value}}{{"\n"}}{{end}}{{end}}' | sed 's/: /=/' | sed 's/$/,/' | sed '$ s/.$//' | tr -d '\n')
fi
if [ -z "${RANCHERAPPVALUES:-$RANCHERAPPANSWERS}" ]; then
echo "No valid answers found!"
exit 1
fi
echo ""
echo "Rancher Project App '${RELEASE_NAME}' found:"
echo " Project-Namespace: ${RANCHERAPPNS}"
echo " Downstream-Cluster: ${DOWNSTREAMCLUSTERID}"
echo " Catalog: ${RANCHERAPPCATALOG}"
echo " Template: ${RANCHERAPPTEMPLATE} (${RANCHERAPPTEMPLATEVERSION})"
echo " Answers:"
printf '%s\n' "${RANCHERAPPVALUES:-$RANCHERAPPANSWERS}"
echo ""
if [ "$TYPE" == "cleanup" ] ; then
MANAGER=$(kubectl --kubeconfig ${DOWNSTREAM_KUBECONFIG} -n ${RELEASE_NAMESPACE} get ds longhorn-manager -ojsonpath="{.metadata.labels['app\.kubernetes\.io/managed-by']}")
if [ "$MANAGER" != "Helm" ] ; then
echo "Labels have not been migrated. Did you run the part 1 by specifying the flag --type=migrate ?"
exit 1
fi
echo ""
echo "Patching Project App Catalog ..."
kubectl --kubeconfig ${UPSTREAM_KUBECONFIG} -n ${RANCHERAPPNS} ${KUBECTL_DRY_RUN} patch apps.project.cattle.io ${RELEASE_NAME} --type=merge --patch-file=/dev/stdin <<-EOF
{
"metadata": {
"annotations": {
"cattle.io/skipUninstall": "true",
"catalog.cattle.io/ui-source-repo": "helm3-library",
"catalog.cattle.io/ui-source-repo-type": "cluster",
"apps.cattle.io/migrated": "true"
}
}
}
EOF
if [ $? -ne 0 ]; then
echo "Failed Patching Project App Catalog"
exit 1
fi
echo ""
echo "Deleting Project App Catalog ..."
kubectl --kubeconfig ${UPSTREAM_KUBECONFIG} -n ${RANCHERAPPNS} ${KUBECTL_DRY_RUN} delete apps.project.cattle.io ${RELEASE_NAME}
exit 0
fi
echo ""
echo ""
echo "Checking concurrent-automatic-engine-upgrade-per-node-limit setting ..."
SETTING=$(kubectl --kubeconfig ${DOWNSTREAM_KUBECONFIG} -n ${RELEASE_NAMESPACE} get settings.longhorn.io concurrent-automatic-engine-upgrade-per-node-limit -ojsonpath="{.value}")
if [ "$SETTING" != "0" ]; then
echo "concurrent-automatic-engine-upgrade-per-node-limit must be set to 0 before the migration"
exit 1
fi
echo ""
echo ""
echo "Looking up existing Resources ..."
RESOURCES=$(kubectl get-all --kubeconfig ${DOWNSTREAM_KUBECONFIG} --exclude AppRevision -o name -l io.cattle.field/appId=${RELEASE_NAME} 2>/dev/null | sort)
if [[ "$RESOURCES" == "No resources"* ]]; then
RESOURCES=""
fi
echo ""
echo "Patching CRD Resources ..."
for resource in $RESOURCES; do
if [[ $resource == "customresourcedefinition.apiextensions.k8s.io/"* ]]; then
kubectl --kubeconfig ${DOWNSTREAM_KUBECONFIG} -n ${RELEASE_NAMESPACE} ${KUBECTL_DRY_RUN} annotate --overwrite ${resource} "meta.helm.sh/release-name"="longhorn-crd" "meta.helm.sh/release-namespace"="${RELEASE_NAMESPACE}" "helm.sh/resource-policy"="keep"
kubectl --kubeconfig ${DOWNSTREAM_KUBECONFIG} -n ${RELEASE_NAMESPACE} ${KUBECTL_DRY_RUN} label --overwrite ${resource} "app.kubernetes.io/managed-by"="Helm"
fi
done
echo ""
echo "Patching Other Resources ..."
for resource in $RESOURCES; do
if [[ $resource == "customresourcedefinition.apiextensions.k8s.io/"* ]]; then
continue
fi
kubectl --kubeconfig ${DOWNSTREAM_KUBECONFIG} -n ${RELEASE_NAMESPACE} ${KUBECTL_DRY_RUN} annotate --overwrite ${resource} "meta.helm.sh/release-name"="longhorn" "meta.helm.sh/release-namespace"="${RELEASE_NAMESPACE}"
kubectl --kubeconfig ${DOWNSTREAM_KUBECONFIG} -n ${RELEASE_NAMESPACE} ${KUBECTL_DRY_RUN} label --overwrite ${resource} "app.kubernetes.io/managed-by"="Helm"
done
echo ""
echo "-----------------------------"
echo "Successfully updated the annotations and labels for the resources!"
echo "Next step:"
echo " 1. Go to Rancher UI -> Go to the downstream cluster -> App&Marketplace -> Charts"
echo " 2. Find and select the Longhorn chart"
echo " 3. Select the chart version corresponding the Longhorn version ${RANCHERAPPTEMPLATEVERSION}"
echo " 4. Install the chart with the correct helm values. Here are the helm values of your old charts: "
printf '%s\n' "${RANCHERAPPVALUES:-$RANCHERAPPANSWERS}"
echo " 5. Verify that the migrated charts are working ok"
echo " 6. Run this script again with the flag --type=cleanup to remove the old chart from the legacy UI"

scripts/restore-backup-to-file.sh Executable file
View File

@ -0,0 +1,104 @@
#!/bin/bash
export RED='\x1b[0;31m'
export NO_COLOR='\x1b[0m'
usage () {
echo "USAGE: $0 --aws-access-key <your_aws_access_key> \ "
echo " --aws-secret-access-key <your_aws_secret_access_key> \ "
echo " --backup-url s3://backupbucket@ap-northeast-1/backupstore?backup=<backup_name>&volume=<volume_name> \ "
echo " --output-file volume.raw \ "
echo " --output-format raw \ "
echo " --version <longhorn_version>"
echo " --backing-file <backing_file_path>"
echo "Restore a Longhorn backup to a raw image or a qcow2 image."
echo ""
echo " -u, --backup-url (Required) Backups S3/NFS URL. e.g., s3://backupbucket@us-east-1/backupstore?backup=backup-bd326da2c4414b02&volume=volumeexamplename"
echo " -o, --output-file (Required) Output file, e.g., /tmp/restore/volume.raw"
echo " -f, --output-format (Required) Output file format, e.g., raw or qcow2"
echo " -v, --version (Required) Longhorn version, e.g., v1.3.2"
echo " --aws-access-key (Optional) AWS credentials access key"
echo " --aws-secret-access-key (Optional) AWS credentials access secret key"
echo " -b, --backing-file (Optional) backing image. e.g., /tmp/backingfile.qcow2"
echo " -h, --help Usage message"
}
error_invalid_params() {
echo -e "${RED}[ERROR]Invalid params. Check the required params.${NO_COLOR}"
usage
exit 1
}
while [[ "$#" -gt 0 ]]; do
key="$1"
case $key in
--aws-access-key)
aws_access_key="$2"
shift # past argument
shift # past value
;;
--aws-secret-access-key)
aws_secret_access_key="$2"
shift # past argument
shift # past value
;;
-u|--backup-url)
backup_url="$2"
shift # past argument
shift # past value
;;
-o|--output-file)
output_file="$2"
shift # past argument
shift # past value
;;
-f|--output-format)
output_format="$2"
shift # past argument
shift # past value
;;
-b|--backing-file)
backing_file="$2"
shift # past argument
shift # past value
;;
-v|--version)
version="$2"
shift # past argument
shift # past value
;;
-h|--help)
usage
exit 0
shift
;;
*)
error_invalid_params
;;
esac
done
# Check the required parameters exits
if [ -z "${backup_url}" ] || [ -z "${output_file}" ] || [ -z "${output_format}" ] || [ -z "${version}" ]; then
error_invalid_params
fi
if [[ "${backup_url}" =~ ^[Ss]3 ]]; then
if [ -z "${aws_access_key}" ] || [ -z "${aws_secret_access_key}" ]; then
error_invalid_params
fi
fi
# Compose the docker arguments
if [[ "${backup_url}" =~ ^[Ss]3 ]]; then
CUSTOMIZED_ARGS="-e AWS_ACCESS_KEY_ID="${aws_access_key}" -e AWS_SECRET_ACCESS_KEY="${aws_secret_access_key}" "
else
CUSTOMIZED_ARGS="--cap-add SYS_ADMIN --security-opt apparmor:unconfined"
fi
# Start restoring a backup to an image file.
docker run ${CUSTOMIZED_ARGS} -v /tmp/restore:/tmp/restore \
longhornio/longhorn-engine:"${version}" longhorn backup \
restore-to-file ""${backup_url}"" \
--output-file "/tmp/restore/${output_file}" \
--output-format "${output_format}" \
--backing-file "${backing_file}"
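A hedged example invocation with placeholder values; the flags mirror the usage text above, and the restored image lands under `/tmp/restore/` because of the bind mount in the `docker run` command:
```bash
./scripts/restore-backup-to-file.sh \
  --aws-access-key "<your_aws_access_key>" \
  --aws-secret-access-key "<your_aws_secret_access_key>" \
  --backup-url 's3://backupbucket@us-east-1/backupstore?backup=<backup_name>&volume=<volume_name>' \
  --output-file volume.raw \
  --output-format raw \
  --version v1.3.2
```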

View File

@ -0,0 +1,39 @@
#!/usr/bin/env bash
# Example:
# ./scripts/update-manifests-dev-version.sh 1.3.0 1.4.0
#
# Result:
# Chart version will be updated to 1.4.0-dev
# Images (manager, engine, ui) will be updated to master-head
#
set -o errexit
set -o nounset
PRJ_DIR=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/.." 2>/dev/null || realpath "$(dirname "${BASH_SOURCE[0]}")/.." 2>/dev/null)
CURRENT_VERSION=${CURRENT_VERSION:-$1}
NEW_VERSION=${NEW_VERSION:-$2}-dev
mapfile -t manifests < <(find "$PRJ_DIR" -type f -a \( -name '*.yaml' -o -name 'longhorn-images.txt' \))
if [[ ${#manifests[@]} -le 0 ]]; then
echo "No manifests found to update from $PRJ_DIR" >/dev/stderr
exit 1
fi
echo "Updating $CURRENT_VERSION -> $NEW_VERSION-dev with master-head images in below manifests"
for f in "${manifests[@]}"; do
f_name=$(basename "$f")
if [[ $f_name == "Chart.yaml" ]]; then
sed -i "s#\(version: \)${CURRENT_VERSION}#\1${NEW_VERSION}#g" "$f"
sed -i "s#\(appVersion: v\)${CURRENT_VERSION}#\1${NEW_VERSION}#g" "$f"
else
sed -i "s#\(:\s*\)v${CURRENT_VERSION}#\1master-head#g" "$f"
fi
echo "$f updated"
done
. "$PRJ_DIR"/scripts/generate-longhorn-yaml.sh

View File

@ -61,7 +61,7 @@ rules:
resources: ["priorityclasses"]
verbs: ["watch", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["csidrivers", "storageclasses"]
resources: ["csidrivers", "storageclasses", "volumeattachments"]
verbs: ["*"]
- apiGroups: ["longhorn.io"]
resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers", "sharemanagers",
@ -106,7 +106,7 @@ spec:
spec:
containers:
- name: longhorn-uninstall
image: longhornio/longhorn-manager:v1.3.0
image: longhornio/longhorn-manager:v1.3.3-rc2
imagePullPolicy: IfNotPresent
securityContext:
privileged: true