Merge pull request #752 from longhorn/v0.6.0

V0.6.0
This commit is contained in:
Sheng Yang 2019-09-21 15:55:26 -07:00 committed by GitHub
commit 7aa122b10e
32 changed files with 595 additions and 508 deletions

View File

@ -122,10 +122,6 @@ Noted that the UI is unauthenticated when you installed Longhorn using YAML file
# Upgrade # Upgrade
Since v0.3.3, Longhorn is able to perform fully-automated non-disruptive upgrades, meaning that the upgrade process won't disrupt running volumes. Existing volumes continue to run even as the software that implements them is upgraded.
If you're upgrading from Longhorn v0.3.0 or newer:
## Upgrade Longhorn manager ## Upgrade Longhorn manager
##### On Kubernetes clusters Managed by Rancher 2.1 or newer ##### On Kubernetes clusters Managed by Rancher 2.1 or newer
@ -143,7 +139,8 @@ helm upgrade longhorn ./longhorn/chart
## Upgrade Longhorn engine ## Upgrade Longhorn engine
After upgrading the manager, follow [the steps here](docs/upgrade.md#upgrade-longhorn-engine) to upgrade the Longhorn engine for existing volumes. After upgrading the manager, follow [the steps here](docs/upgrade.md#upgrade-longhorn-engine) to upgrade the Longhorn engine for existing volumes.
1. For non-disruptive upgrade, follow [the live upgrade steps here](./docs/upgrade.md#live-upgrade) 1. Upgrading from v0.5.0 to v0.6.0 requires an offline upgrade; live upgrade is not supported for this release.
2. For a non-disruptive upgrade from a previous version to v0.5.0, follow [the live upgrade steps here](./docs/upgrade.md#live-upgrade)
For more details about upgrade in Longhorn or upgrade from older versions, [see here](docs/upgrade.md). For more details about upgrade in Longhorn or upgrade from older versions, [see here](docs/upgrade.md).
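For a YAML-based install, upgrading the manager is typically just re-applying the updated manifest — a minimal sketch, assuming a `v0.6.0` tag and the `deploy/longhorn.yaml` path used elsewhere in these docs:
```
git clone --branch v0.6.0 https://github.com/longhorn/longhorn.git
kubectl apply -f longhorn/deploy/longhorn.yaml
```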
@ -204,10 +201,13 @@ More examples are available at `./examples/`
### [Snapshot and Backup](./docs/snapshot-backup.md) ### [Snapshot and Backup](./docs/snapshot-backup.md)
### [Volume operations](./docs/volume.md) ### [Volume operations](./docs/volume.md)
### [Multiple disks, including how to change the default path for storage](./docs/multidisk.md) ### [Settings](./docs/settings.md)
### [Multiple disks](./docs/multidisk.md)
### [iSCSI](./docs/iscsi.md) ### [iSCSI](./docs/iscsi.md)
### [Base image](./docs/base-image.md)
### [Kubernetes workload in Longhorn UI](./docs/k8s-workload.md) ### [Kubernetes workload in Longhorn UI](./docs/k8s-workload.md)
### [Storage Tags](./docs/storage-tags.md)
### [Customized default setting](./docs/customized-default-setting.md)
### [Taint Toleration](./docs/taint-toleration.md)
### [Restoring Stateful Set volumes](./docs/restore_statefulset.md) ### [Restoring Stateful Set volumes](./docs/restore_statefulset.md)
### [Google Kubernetes Engine](./docs/gke.md) ### [Google Kubernetes Engine](./docs/gke.md)

View File

@ -1,8 +1,8 @@
apiVersion: v1 apiVersion: v1
name: longhorn name: longhorn
version: 0.5.0 version: 0.6.0
appVersion: v0.5.0 appVersion: v0.6.0
kubeVersion: ">=v1.8.0-r0" kubeVersion: ">=v1.12.0-r0"
description: Longhorn is a distributed block storage system for Kubernetes powered by Rancher Labs. description: Longhorn is a distributed block storage system for Kubernetes powered by Rancher Labs.
keywords: keywords:
- longhorn - longhorn

View File

@ -12,7 +12,7 @@ questions:
- csi - csi
- flexvolume - flexvolume
label: Longhorn Kubernetes Driver label: Longhorn Kubernetes Driver
group: "Longhorn Settings" group: "Longhorn Driver Settings"
show_subquestion_if: flexvolume show_subquestion_if: flexvolume
subquestions: subquestions:
- variable: persistence.flexvolumePath - variable: persistence.flexvolumePath
@ -59,18 +59,102 @@ questions:
- variable: persistence.defaultClass - variable: persistence.defaultClass
default: "true" default: "true"
description: "Set as default StorageClass" description: "Set as default StorageClass"
group: "Longhorn Settings" group: "Longhorn Driver Settings"
type: boolean type: boolean
required: true required: true
label: Default Storage Class label: Default Storage Class
- variable: persistence.defaultClassReplicaCount - variable: persistence.defaultClassReplicaCount
description: "Set replica count for default StorageClass" description: "Set replica count for default StorageClass"
group: "Longhorn Settings" group: "Longhorn Driver Settings"
type: int type: int
default: 3 default: 3
min: 1 min: 1
max: 10 max: 10
label: Default Storage Class Replica Count label: Default Storage Class Replica Count
- variable: defaultSettings.backupTarget
label: Backup Target
description: "The target used for backup. Support NFS or S3."
group: "Longhorn Default Settings"
type: string
default:
- variable: defaultSettings.backupTargetCredentialSecret
label: Backup Target Credential Secret
description: "The Kubernetes secret associated with the backup target."
group: "Longhorn Default Settings"
type: string
default:
- variable: defaultSettings.createDefaultDiskLabeledNodes
label: Create Default Disk on Labeled Nodes
description: 'Create default Disk automatically only on Nodes with the label "node.longhorn.io/create-default-disk=true" if no other Disks exist. If disabled, a default Disk will be created on all new Nodes (only on the first add). By default false.'
group: "Longhorn Default Settings"
type: boolean
default: "false"
- variable: defaultSettings.defaultDataPath
label: Default Data Path
description: 'Default path to use for storing data on a host. By default "/var/lib/rancher/longhorn/"'
group: "Longhorn Default Settings"
type: string
default: "/var/lib/rancher/longhorn/"
- variable: defaultSettings.replicaSoftAntiAffinity
label: Replica Soft Anti-Affinity
description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default true.'
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.storageOverProvisioningPercentage
label: Storage Over Provisioning Percentage
description: "The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity. By default 500."
group: "Longhorn Default Settings"
type: int
min: 0
default: 500
- variable: defaultSettings.storageMinimalAvailablePercentage
label: Storage Minimal Available Percentage
description: "If one disk's available capacity to it's maximum capacity in % is less than the minimal available percentage, the disk would become unschedulable until more space freed up. By default 10."
group: "Longhorn Default Settings"
type: int
min: 0
max: 100
default: 10
- variable: defaultSettings.upgradeChecker
label: Enable Upgrade Checker
description: 'Upgrade Checker will check for a new Longhorn version periodically. When there is a new version available, it will notify the user in the UI. By default true.'
group: "Longhorn Default Settings"
type: boolean
default: "true"
- variable: defaultSettings.defaultReplicaCount
label: Default Replica Count
description: "The default number of replicas when creating the volume from Longhorn UI. For Kubernetes, update the `numberOfReplicas` in the StorageClass. By default 3."
group: "Longhorn Default Settings"
type: int
min: 1
max: 20
default: 3
- variable: defaultSettings.guaranteedEngineCPU
label: Guaranteed Engine CPU
description: '(EXPERIMENTAL FEATURE) Allow Longhorn Engine to have guaranteed CPU allocation. The value is how many CPUs should be reserved for each Engine/Replica Manager Pod created by Longhorn. For example, 0.1 means one-tenth of a CPU. This will help maintain engine stability during high node workload. It only applies to the Engine/Replica Manager Pods created after the setting takes effect. WARNING: Attaching the volume may fail or get stuck while using this feature due to the resource constraint. Disabled ("0") by default.'
group: "Longhorn Default Settings"
type: float
default: 0
- variable: defaultSettings.defaultLonghornStaticStorageClass
label: Default Longhorn Static StorageClass Name
description: "The 'storageClassName' is for PV/PVC when creating PV/PVC for an existing Longhorn volume. Notice that it's unnecessary for users create the related StorageClass object in Kubernetes since the StorageClass would only be used as matching labels for PVC bounding purpose. By default 'longhorn-static'."
group: "Longhorn Default Settings"
type: string
default: "longhorn-static"
- variable: defaultSettings.backupstorePollInterval
label: Backupstore Poll Interval
description: "In seconds. The interval to poll the backup store for updating volumes' Last Backup field. By default 300."
group: "Longhorn Default Settings"
type: int
min: 0
default: 300
- variable: defaultSettings.taintToleration
label: Kubernetes Taint Toleration
description: "By setting tolerations for Longhorn then adding taints for the nodes, the nodes with large storage can be dedicated to Longhorn only (to store replica data) and reject other general workloads. Multiple tolerations can be set here, and these tolerations are separated by semicolon. For example, \"key1=value1:NoSchedule; key2:NoExecute\". Notice that \"kubernetes.io\" is used as the key of all Kubernetes default tolerations, please do not contain this substring in your toleration setting."
group: "Longhorn Default Settings"
type: string
default: ""
- variable: ingress.enabled - variable: ingress.enabled
default: "false" default: "false"
description: "Expose app using Layer 7 Load Balancer - ingress" description: "Expose app using Layer 7 Load Balancer - ingress"

View File

@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1beta1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole kind: ClusterRole
metadata: metadata:
name: longhorn-role name: longhorn-role
@ -28,5 +28,5 @@ rules:
resources: ["csinodeinfos"] resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"] verbs: ["get", "list", "watch"]
- apiGroups: ["longhorn.rancher.io"] - apiGroups: ["longhorn.rancher.io"]
resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes"] resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers"]
verbs: ["*"] verbs: ["*"]

View File

@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1beta1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding kind: ClusterRoleBinding
metadata: metadata:
name: longhorn-bind name: longhorn-bind

View File

@ -105,3 +105,21 @@ spec:
singular: node singular: node
scope: Namespaced scope: Namespaced
version: v1alpha1 version: v1alpha1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
longhorn-manager: InstanceManager
name: instancemanagers.longhorn.rancher.io
spec:
group: longhorn.rancher.io
names:
kind: InstanceManager
listKind: InstanceManagerList
plural: instancemanagers
shortNames:
- lhim
singular: instancemanager
scope: Namespaced
version: v1alpha1
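Once this CRD is registered, instance manager objects can be listed via the `lhim` short name defined above — for example:
```
kubectl -n longhorn-system get lhim
# equivalent long form
kubectl -n longhorn-system get instancemanagers.longhorn.rancher.io
```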

View File

@ -1,4 +1,4 @@
apiVersion: apps/v1beta2 apiVersion: apps/v1
kind: DaemonSet kind: DaemonSet
metadata: metadata:
labels: labels:
@ -43,6 +43,8 @@ spec:
- name: longhorn - name: longhorn
mountPath: /var/lib/rancher/longhorn/ mountPath: /var/lib/rancher/longhorn/
mountPropagation: Bidirectional mountPropagation: Bidirectional
- name: longhorn-default-setting
mountPath: /var/lib/longhorn/setting/
env: env:
- name: POD_NAMESPACE - name: POD_NAMESPACE
valueFrom: valueFrom:
@ -58,6 +60,8 @@ spec:
fieldPath: spec.nodeName fieldPath: spec.nodeName
- name: LONGHORN_BACKEND_SVC - name: LONGHORN_BACKEND_SVC
value: longhorn-backend value: longhorn-backend
- name: DEFAULT_SETTING_PATH
value: /var/lib/longhorn/setting/default-setting.yaml
volumes: volumes:
- name: dev - name: dev
hostPath: hostPath:
@ -71,6 +75,9 @@ spec:
- name: longhorn - name: longhorn
hostPath: hostPath:
path: /var/lib/rancher/longhorn/ path: /var/lib/rancher/longhorn/
- name: longhorn-default-setting
configMap:
name: longhorn-default-setting
serviceAccountName: longhorn-service-account serviceAccountName: longhorn-service-account
--- ---
apiVersion: v1 apiVersion: v1

View File

@ -0,0 +1,20 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: longhorn-default-setting
namespace: {{ .Release.Namespace }}
data:
default-setting.yaml: |-
backup-target: {{ .Values.defaultSettings.backupTarget }}
backup-target-credential-secret: {{ .Values.defaultSettings.backupTargetCredentialSecret }}
create-default-disk-labeled-nodes: {{ .Values.defaultSettings.createDefaultDiskLabeledNodes }}
default-data-path: {{ .Values.defaultSettings.defaultDataPath }}
replica-soft-anti-affinity: {{ .Values.defaultSettings.replicaSoftAntiAffinity }}
storage-over-provisioning-percentage: {{ .Values.defaultSettings.storageOverProvisioningPercentage }}
storage-minimal-available-percentage: {{ .Values.defaultSettings.storageMinimalAvailablePercentage }}
upgrade-checker: {{ .Values.defaultSettings.upgradeChecker }}
default-replica-count: {{ .Values.defaultSettings.defaultReplicaCount }}
guaranteed-engine-cpu: {{ .Values.defaultSettings.guaranteedEngineCPU }}
default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }}
backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }}
taint-toleration: {{ .Values.defaultSettings.taintToleration }}
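Since the manager reads this file through the `DEFAULT_SETTING_PATH` mount added in the DaemonSet above, one way to sanity-check the wiring is to inspect the rendered ConfigMap and the mounted file — a sketch, with the pod name to be filled in:
```
kubectl -n longhorn-system get configmap longhorn-default-setting -o yaml
kubectl -n longhorn-system exec <longhorn-manager-pod> -- \
  cat /var/lib/longhorn/setting/default-setting.yaml
```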

View File

@ -1,4 +1,4 @@
apiVersion: apps/v1beta2 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
name: longhorn-driver-deployer name: longhorn-driver-deployer

View File

@ -1,4 +1,4 @@
apiVersion: apps/v1beta2 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
labels: labels:

View File

@ -3,12 +3,12 @@
# Declare variables to be passed into your templates. # Declare variables to be passed into your templates.
image: image:
longhorn: longhorn:
engine: rancher/longhorn-engine engine: longhornio/longhorn-engine
engineTag: v0.5.0 engineTag: v0.6.0
manager: rancher/longhorn-manager manager: longhornio/longhorn-manager
managerTag: v0.5.0 managerTag: v0.6.0
ui: rancher/longhorn-ui ui: longhornio/longhorn-ui
uiTag: v0.5.0 uiTag: v0.6.0
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
service: service:
@ -37,6 +37,21 @@ csi:
attacherReplicaCount: attacherReplicaCount:
provisionerReplicaCount: provisionerReplicaCount:
defaultSettings:
backupTarget:
backupTargetCredentialSecret:
createDefaultDiskLabeledNodes:
defaultDataPath:
replicaSoftAntiAffinity:
storageOverProvisioningPercentage:
storageMinimalAvailablePercentage:
upgradeChecker:
defaultReplicaCount:
guaranteedEngineCPU:
defaultLonghornStaticStorageClass:
backupstorePollInterval:
taintToleration:
resources: {} resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious # We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little # choice for the user. This also increases chances charts run on environments with little

View File

@ -2,6 +2,7 @@ apiVersion: v1
kind: Secret kind: Secret
metadata: metadata:
name: minio-secret name: minio-secret
namespace: default
type: Opaque type: Opaque
data: data:
AWS_ACCESS_KEY_ID: bG9uZ2hvcm4tdGVzdC1hY2Nlc3Mta2V5 # longhorn-test-access-key AWS_ACCESS_KEY_ID: bG9uZ2hvcm4tdGVzdC1hY2Nlc3Mta2V5 # longhorn-test-access-key
@ -24,6 +25,7 @@ apiVersion: v1
kind: Pod kind: Pod
metadata: metadata:
name: longhorn-test-minio name: longhorn-test-minio
namespace: default
labels: labels:
app: longhorn-test-minio app: longhorn-test-minio
spec: spec:
@ -55,6 +57,7 @@ apiVersion: v1
kind: Service kind: Service
metadata: metadata:
name: minio-service name: minio-service
namespace: default
spec: spec:
selector: selector:
app: longhorn-test-minio app: longhorn-test-minio

View File

@ -2,6 +2,7 @@ apiVersion: v1
kind: Pod kind: Pod
metadata: metadata:
name: longhorn-test-nfs name: longhorn-test-nfs
namespace: default
labels: labels:
app: longhorn-test-nfs app: longhorn-test-nfs
spec: spec:
@ -37,6 +38,7 @@ kind: Service
apiVersion: v1 apiVersion: v1
metadata: metadata:
name: longhorn-test-nfs-svc name: longhorn-test-nfs-svc
namespace: default
spec: spec:
selector: selector:
app: longhorn-test-nfs app: longhorn-test-nfs

View File

@ -9,7 +9,7 @@ metadata:
name: longhorn-service-account name: longhorn-service-account
namespace: longhorn-system namespace: longhorn-system
--- ---
apiVersion: rbac.authorization.k8s.io/v1beta1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole kind: ClusterRole
metadata: metadata:
name: longhorn-role name: longhorn-role
@ -39,10 +39,10 @@ rules:
resources: ["csinodeinfos"] resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"] verbs: ["get", "list", "watch"]
- apiGroups: ["longhorn.rancher.io"] - apiGroups: ["longhorn.rancher.io"]
resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes"] resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers"]
verbs: ["*"] verbs: ["*"]
--- ---
apiVersion: rbac.authorization.k8s.io/v1beta1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding kind: ClusterRoleBinding
metadata: metadata:
name: longhorn-bind name: longhorn-bind
@ -163,7 +163,46 @@ spec:
scope: Namespaced scope: Namespaced
version: v1alpha1 version: v1alpha1
--- ---
apiVersion: apps/v1beta2 apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
longhorn-manager: InstanceManager
name: instancemanagers.longhorn.rancher.io
spec:
group: longhorn.rancher.io
names:
kind: InstanceManager
listKind: InstanceManagerList
plural: instancemanagers
shortNames:
- lhim
singular: instancemanager
scope: Namespaced
version: v1alpha1
---
apiVersion: v1
kind: ConfigMap
metadata:
name: longhorn-default-setting
namespace: longhorn-system
data:
default-setting.yaml: |-
backup-target:
backup-target-credential-secret:
create-default-disk-labeled-nodes:
default-data-path:
replica-soft-anti-affinity:
storage-over-provisioning-percentage:
storage-minimal-available-percentage:
upgrade-checker:
default-replica-count:
guaranteed-engine-cpu:
default-longhorn-static-storage-class:
backupstore-poll-interval:
taint-toleration:
---
apiVersion: apps/v1
kind: DaemonSet kind: DaemonSet
metadata: metadata:
labels: labels:
@ -181,7 +220,7 @@ spec:
spec: spec:
containers: containers:
- name: longhorn-manager - name: longhorn-manager
image: rancher/longhorn-manager:v0.5.0 image: longhornio/longhorn-manager:v0.6.0
imagePullPolicy: Always imagePullPolicy: Always
securityContext: securityContext:
privileged: true privileged: true
@ -190,9 +229,9 @@ spec:
- -d - -d
- daemon - daemon
- --engine-image - --engine-image
- rancher/longhorn-engine:v0.5.0 - longhornio/longhorn-engine:v0.6.0
- --manager-image - --manager-image
- rancher/longhorn-manager:v0.5.0 - longhornio/longhorn-manager:v0.6.0
- --service-account - --service-account
- longhorn-service-account - longhorn-service-account
ports: ports:
@ -207,6 +246,8 @@ spec:
- name: longhorn - name: longhorn
mountPath: /var/lib/rancher/longhorn/ mountPath: /var/lib/rancher/longhorn/
mountPropagation: Bidirectional mountPropagation: Bidirectional
- name: longhorn-default-setting
mountPath: /var/lib/longhorn/setting/
env: env:
- name: POD_NAMESPACE - name: POD_NAMESPACE
valueFrom: valueFrom:
@ -220,6 +261,9 @@ spec:
valueFrom: valueFrom:
fieldRef: fieldRef:
fieldPath: spec.nodeName fieldPath: spec.nodeName
# Should be: mount path of the volume longhorn-default-setting + the key of the configmap data in 04-default-setting.yaml
- name: DEFAULT_SETTING_PATH
value: /var/lib/longhorn/setting/default-setting.yaml
volumes: volumes:
- name: dev - name: dev
hostPath: hostPath:
@ -233,6 +277,9 @@ spec:
- name: longhorn - name: longhorn
hostPath: hostPath:
path: /var/lib/rancher/longhorn/ path: /var/lib/rancher/longhorn/
- name: longhorn-default-setting
configMap:
name: longhorn-default-setting
serviceAccountName: longhorn-service-account serviceAccountName: longhorn-service-account
--- ---
kind: Service kind: Service
@ -250,7 +297,7 @@ spec:
targetPort: 9500 targetPort: 9500
sessionAffinity: ClientIP sessionAffinity: ClientIP
--- ---
apiVersion: apps/v1beta2 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
labels: labels:
@ -269,12 +316,13 @@ spec:
spec: spec:
containers: containers:
- name: longhorn-ui - name: longhorn-ui
image: rancher/longhorn-ui:v0.5.0 image: longhornio/longhorn-ui:v0.6.0
ports: ports:
- containerPort: 8000 - containerPort: 8000
env: env:
- name: LONGHORN_MANAGER_IP - name: LONGHORN_MANAGER_IP
value: "http://longhorn-backend:9500" value: "http://longhorn-backend:9500"
serviceAccountName: longhorn-service-account
--- ---
kind: Service kind: Service
apiVersion: v1 apiVersion: v1
@ -291,7 +339,7 @@ spec:
targetPort: 8000 targetPort: 8000
type: LoadBalancer type: LoadBalancer
--- ---
apiVersion: apps/v1beta2 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
name: longhorn-driver-deployer name: longhorn-driver-deployer
@ -308,18 +356,18 @@ spec:
spec: spec:
initContainers: initContainers:
- name: wait-longhorn-manager - name: wait-longhorn-manager
image: rancher/longhorn-manager:v0.5.0 image: longhornio/longhorn-manager:v0.6.0
command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
containers: containers:
- name: longhorn-driver-deployer - name: longhorn-driver-deployer
image: rancher/longhorn-manager:v0.5.0 image: longhornio/longhorn-manager:v0.6.0
imagePullPolicy: Always imagePullPolicy: Always
command: command:
- longhorn-manager - longhorn-manager
- -d - -d
- deploy-driver - deploy-driver
- --manager-image - --manager-image
- rancher/longhorn-manager:v0.5.0 - longhornio/longhorn-manager:v0.6.0
- --manager-url - --manager-url
- http://longhorn-backend:9500/v1 - http://longhorn-backend:9500/v1
# manually choose "flexvolume" or "csi" # manually choose "flexvolume" or "csi"

View File

@ -1,250 +0,0 @@
# Base Image Support
Longhorn supports creation of block devices backed by a base image. Longhorn
base images are packaged as Docker images. Public or private registries may
be used as a distribution mechanism for your Docker base images.
## Usage
Volumes backed by a base image can be created in three ways.
1. [UI](#ui) - Create Longhorn volumes exposed as block device or iSCSI target
2. [Flexvolume Driver](#flexvolume-driver) - Create Longhorn block devices and consume in Kubernetes pods
3. [CSI Driver](#csi-driver) - (Newer) Create Longhorn block devices and consume in Kubernetes pods
### UI
On the `Volume` tab, click the `Create Volume` button. The `Base Image` field
expects a Docker image name such as `rancher/vm-ubuntu:16.04.4-server-amd64`.
### Flexvolume Driver
The flexvolume driver supports volumes backed by base image. Below is a sample
Flexvolume definition including `baseImage` option.
```
name: flexvol
flexVolume:
driver: "rancher.io/longhorn"
fsType: "ext4"
options:
size: "32Mi"
numberOfReplicas: "3"
staleReplicaTimeout: "20"
fromBackup: ""
baseImage: "rancher/longhorn-test:baseimage-ext4"
```
You do not need to (and probably shouldn't) explicitly set filesystem type
`fsType` when base image is present. If you do, it must match the base image's
filesystem or the flexvolume driver will return an error.
Try it out for yourself. Make sure the Longhorn driver deployer specifies flag
`--driver flexvolume`, otherwise a different driver may be deployed. The
following example creates an nginx pod serving content from a flexvolume with
a base image and is accessible from a service.
```
kubectl create -f https://raw.githubusercontent.com/rancher/longhorn-manager/master/examples/flexvolume/example_baseimage.yaml
```
Wait until the pod is running.
```
kubectl get po/flexvol-baseimage -w
```
Query for the service you created.
```
kubectl get svc/flexvol-baseimage
```
Your service should look similar.
```
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/flexvol-baseimage LoadBalancer 10.43.153.186 <pending> 80:31028/TCP 2m
```
Now let's access something packaged inside the base image through the Nginx
webserver, exposed by the `LoadBalancer` service. If you have LoadBalancer
support and `EXTERNAL-IP` is set, navigate to the following URL.
```
http://<EXTERNAL-IP>/guests/hd/party-wizard.gif
```
Otherwise, navigate to the following URL where `NODE-IP` is the external IP
address of any Kubernetes node and `NODE-PORT` is the second port in the
service (`31028` in the example service above).
```
http://<NODE-IP>:<NODE-PORT>/guests/hd/party-wizard.gif
```
Finally, tear down the pod and service.
```
kubectl delete -f https://raw.githubusercontent.com/rancher/longhorn-manager/master/examples/flexvolume/example_baseimage.yaml
```
### CSI Driver
The CSI driver supports volumes backed by base image. Below is a sample
StorageClass definition including `baseImage` option.
```
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: example
provisioner: rancher.io/longhorn
parameters:
numberOfReplicas: '3'
staleReplicaTimeout: '30'
fromBackup: ''
baseImage: rancher/longhorn-test:baseimage-ext4
```
Let's walk through an example. First, ensure the CSI Plugin is deployed.
```
kubectl -n longhorn-system get daemonset.apps/longhorn-csi-plugin
```
The following example creates an nginx statefulset with two replicas serving
content from two csi-provisioned volumes backed by a base image. The
statefulset is accessible from a service.
```
kubectl create -f https://raw.githubusercontent.com/rancher/longhorn-manager/master/examples/provisioner_with_baseimage.yaml
```
Wait until both pods are running.
```
kubectl -l app=provisioner-baseimage get po -w
```
Query for the service you created.
```
kubectl get svc/csi-baseimage
```
Your service should look similar.
```
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
csi-baseimage LoadBalancer 10.43.47.129 <pending> 80:32768/TCP 4m
```
Now let's access something packaged inside the base image through the Nginx
webserver, exposed by the `LoadBalancer` service. If you have LoadBalancer
support and `EXTERNAL-IP` is set, navigate to the following URL.
```
http://<EXTERNAL-IP>/guests/hd/party-wizard.gif
```
Otherwise, navigate to the following URL where `NODE-IP` is the external IP
address of any Kubernetes node and `NODE-PORT` is the second port in the
service (`32768` in the example service above).
```
http://<NODE-IP>:<NODE-PORT>/guests/hd/party-wizard.gif
```
Finally, tear down the pod and service.
```
kubectl delete -f https://raw.githubusercontent.com/rancher/longhorn-manager/master/examples/provisioner_with_baseimage.yaml
```
## Building
Creating and packaging an empty base image is a very simple process.
1. [Install QEMU](https://en.wikibooks.org/wiki/QEMU/Installing_QEMU).
2. Create a qcow2 image.
```
qemu-img create -f qcow2 example.qcow2 4G
```
3. Create the `Dockerfile` file with the following contents:
```
FROM busybox
COPY example.qcow2 /base_image/example.qcow2
```
4. Build and publish the image:
```
DOCKERHUB_ACCT=rancher
docker build -t ${DOCKERHUB_ACCT}/longhorn-example:baseimage .
docker push ${DOCKERHUB_ACCT}/longhorn-example:baseimage
```
That's it! Your (empty) base image is ready for (no) use. Let's now explore
some use cases for a base image and what we should do to our `example.qcow2`
before building and publishing.
### Simple Filesystem
Suppose we want to store some static web assets in a volume. We have our qcow2
image and the web assets, but how to put the assets in the image?
On a Linux machine, load the network block device module.
```
sudo modprobe nbd
```
Use `qemu-nbd` to expose the image as a network block device.
```
sudo qemu-nbd -f qcow2 -c /dev/nbd0 example.qcow2
```
The raw block device needs a filesystem. Consider your infrastructure and
choose an appropriate filesystem. We will use EXT4 filesystem.
```
sudo mkfs -t ext4 /dev/nbd0
```
Mount the filesystem.
```
mkdir -p example
sudo mount /dev/nbd0 example
```
Copy web assets to filesystem.
```
cp /web/assets/* example/
```
Unmount the filesystem, shutdown `qemu-nbd`, cleanup.
```
sudo umount example
sudo killall qemu-nbd
rmdir example
```
Optionally, compress the image.
```
qemu-img convert -c -O qcow2 example.qcow2 example.compressed.qcow2
```
Follow the build and publish image steps and you are done. [Example script](https://raw.githubusercontent.com/rancher/longhorn-tests/master/manager/test_containers/baseimage/generate.sh).
### Virtual Machine
See [this document](https://github.com/rancher/vm/blob/master/docs/images.md) for the basic procedure of preparing Virtual Machine images.

View File

@ -0,0 +1,87 @@
# Customized Default Setting
## Overview
During Longhorn system deployment, users can customize the default settings for Longhorn, e.g. specify `Create Default Disk on Labeled Nodes` and `Default Data Path` before starting the Longhorn system.
## Usage
### Via Rancher UI
[Cluster] -> System -> Apps -> Launch -> longhorn -> LONGHORN DEFAULT SETTINGS
### Via Longhorn deployment yaml file
1. Download the longhorn repo:
```
git clone https://github.com/longhorn/longhorn.git
```
2. Modify the config map named `longhorn-default-setting` in the yaml file `longhorn/deploy/longhorn.yaml`. For example:
```
---
apiVersion: v1
kind: ConfigMap
metadata:
name: longhorn-default-setting
namespace: longhorn-system
data:
default-setting.yaml: |-
backup-target: s3://backupbucket@us-east-1/backupstore
backup-target-credential-secret: minio-secret
create-default-disk-labeled-nodes: true
default-data-path: /var/lib/rancher/longhorn-example/
replica-soft-anti-affinity: false
storage-over-provisioning-percentage: 600
storage-minimal-available-percentage: 15
upgrade-checker: false
default-replica-count: 2
guaranteed-engine-cpu:
default-longhorn-static-storage-class: longhorn-static-example
backupstore-poll-interval: 500
taint-toleration: key1=value1:NoSchedule; key2:NoExecute
---
```
### Via helm
1. Download the chart in the longhorn repo:
```
git clone https://github.com/longhorn/longhorn.git
```
2.1. Use the helm command with the `--set` flag to modify the default settings.
For example:
```
helm install ./longhorn/chart --name longhorn --namespace longhorn-system --set defaultSettings.taintToleration="key1=value1:NoSchedule; key2:NoExecute"
```
2.2. Or directly modify the default settings in the yaml file `longhorn/chart/values.yaml`, then use the helm command without `--set` to deploy Longhorn.
For example:
In `longhorn/chart/values.yaml`:
```
defaultSettings:
backupTarget: s3://backupbucket@us-east-1/backupstore
backupTargetCredentialSecret: minio-secret
createDefaultDiskLabeledNodes: true
defaultDataPath: /var/lib/rancher/longhorn-example/
replicaSoftAntiAffinity: false
storageOverProvisioningPercentage: 600
storageMinimalAvailablePercentage: 15
upgradeChecker: false
defaultReplicaCount: 2
guaranteedEngineCPU:
defaultLonghornStaticStorageClass: longhorn-static-example
backupstorePollInterval: 500
taintToleration: key1=value1:NoSchedule; key2:NoExecute
```
Then use helm command:
```
helm install ./longhorn/chart --name longhorn --namespace longhorn-system
```
For more info about using helm, see:
[Install-Longhorn-with-helm](../README.md#install-longhorn-with-helm)
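After deployment, the applied values can be checked through the Longhorn `settings` custom resources (the resource name comes from the RBAC rules in this repo; the setting names mirror the keys in `default-setting.yaml`) — a hedged example:
```
kubectl -n longhorn-system get settings.longhorn.rancher.io
kubectl -n longhorn-system get settings.longhorn.rancher.io backup-target -o yaml
```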
## History
[Original feature request](https://github.com/longhorn/longhorn/issues/623)
Available since v0.6.0

docs/settings.md Normal file
View File

@ -0,0 +1,79 @@
# Settings
## Customized Default Setting
To set up settings before installing Longhorn, see [Customized Default Setting](./customized-default-setting.md) for details.
## General
#### Backup Target
* Example: `s3://backupbucket@us-east-1/backupstore`
* Description: The target used for backup. Supports NFS or S3. See [Snapshot and Backup](./snapshot-backup.md) for details.
#### Backup Target Credential Secret
* Example: `s3-secret`
* Description: The Kubernetes secret associated with the backup target. See [Snapshot and Backup](./snapshot-backup.md) for details.
#### Backupstore Poll Interval
* Example: `300`
* Description: In seconds. The interval to poll the backup store for updating volumes' Last Backup field. Set to 0 to disable the polling. See [Disaster Recovery Volume](./dr-volume.md) for details.
#### Create Default Disk on Labeled Nodes
* Example: `false`
* Description: Create default Disk automatically only on Nodes with the Kubernetes label `node.longhorn.io/create-default-disk=true` if no other Disks exist. If disabled, the default Disk will be created on all new Nodes when the node is detected for the first time.
* Note: It's useful if the user wants to scale the cluster but doesn't want to use storage on the new nodes.
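For example, to opt a node in using the label from the description above:
```
kubectl label node <node-name> node.longhorn.io/create-default-disk=true
```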
#### Default Data Path
* Example: `/var/lib/rancher/longhorn`
* Description: Default path to use for storing data on a host
* Note: Can be used with the `Create Default Disk on Labeled Nodes` option to make Longhorn use only the nodes with dedicated storage mounted at, e.g., the `/opt/longhorn` directory when scaling the cluster.
#### Default Engine Image
* Example: `longhornio/longhorn-engine:v0.6.0`
* Description: The default engine image used by the manager. Can only be changed on the manager's starting command line
* Note: Every Longhorn release will ship with a new Longhorn engine image. If the current Longhorn volumes are not using the default engine, a green arrow will show up, indicating the volume needs to be upgraded to use the default engine.
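The command line in question is the manager DaemonSet entrypoint shown earlier in this change — roughly:
```
longhorn-manager -d daemon \
  --engine-image longhornio/longhorn-engine:v0.6.0 \
  --manager-image longhornio/longhorn-manager:v0.6.0 \
  --service-account longhorn-service-account
```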
#### Enable Upgrade Checker
* Example: `true`
* Description: Upgrade Checker will check for a new Longhorn version periodically. When there is a new version available, it will notify the user in the UI
#### Latest Longhorn Version
* Example: `v0.6.0`
* Description: The latest version of Longhorn available. Updated by the Upgrade Checker automatically
* Note: Only available if `Upgrade Checker` is enabled.
#### Default Replica Count
* Example: `3`
* Description: The default number of replicas when creating the volume from Longhorn UI. For Kubernetes, update the `numberOfReplicas` in the StorageClass
* Note: The recommended way of choosing the default replica count is: if you have more than three nodes for storage, use 3; otherwise use 2. Using a single replica on a single-node cluster is also OK, but the HA functionality wouldn't be available. You can still take snapshots/backups of the volume.
#### Guaranteed Engine CPU
* Example: `0.2`
* Description: (EXPERIMENTAL FEATURE) Allow Longhorn Engine to have guaranteed CPU allocation. The value is how many CPUs should be reserved for each Engine/Replica Manager Pod created by Longhorn. For example, 0.1 means one-tenth of a CPU. This will help maintain engine stability during high node workload. It only applies to the Instance Manager Pods created after the setting takes effect. WARNING: Starting the system may fail or get stuck while using this feature due to the resource constraint. Disabled (\"0\") by default.
* Note: Please set this to **no more than a quarter** of the node's available CPU resources, since the option applies to the two instance managers on the node (engine and replica), plus the future upgraded instance managers (another two for engine and replica).
#### Default Longhorn Static StorageClass Name
* Example: `longhorn-static`
* Description: The `storageClassName` is for the PV/PVC when creating a PV/PVC for an existing Longhorn volume. Notice that it's unnecessary for users to create the related StorageClass object in Kubernetes, since the StorageClass would only be used as matching labels for PVC binding purposes. By default 'longhorn-static'.
#### Kubernetes Taint Toleration
* Example: `nodetype=storage:NoSchedule`
* Description: By setting tolerations for Longhorn then adding taints for the nodes, the nodes with large storage can be dedicated to Longhorn only (to store replica data) and reject other general workloads.
Before modifying the toleration setting, all Longhorn volumes should be detached; Longhorn components will then be restarted to apply the new tolerations. The toleration update will take a while, and users cannot operate the Longhorn system during the update, so it's recommended to set tolerations during Longhorn deployment.
Multiple tolerations can be set here, separated by semicolons. For example, "key1=value1:NoSchedule; key2:NoExecute"
* Note: See [Taint Toleration](./taint-toleration.md) for details.
## Scheduling
#### Replica Soft Anti-Affinity
* Example: `true`
* Description: Allow scheduling on nodes with existing healthy replicas of the same volume
* Note: If users want to avoid replica rebuilds caused by temporary node downtime, they can set this option to `false`. The volume may then be kept in the `Degraded` state until another node that doesn't already have a replica scheduled comes online.
#### Storage Over Provisioning Percentage
* Example: `500`
* Description: The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity.
* Note: Users can set this to a lower value if they don't want to over-provision storage. See [Multiple Disks Support](./multidisk.md#configuration) for details. Also, a replica may take more space than the volume's size, since snapshots need storage space as well. Users can delete snapshots to reclaim space.
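For example, with the default `500`, a disk with 200 GiB of capacity can have up to 1 TiB of volumes scheduled onto it, on the assumption that thin-provisioned volumes rarely consume their full nominal size at once.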
#### Storage Minimal Available Percentage
* Example: `10`
* Description: If one disk's available capacity to its maximum capacity in % is less than the minimal available percentage, the disk would become unschedulable until more space is freed up.
* Note: See [Multiple Disks Support](./multidisk.md#configuration) for details.
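For example, with the default `10`, a 1 TiB disk stops accepting new replicas once less than roughly 100 GiB of it remains available.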

docs/storage-tags.md Normal file
View File

@ -0,0 +1,46 @@
# Storage Tags
## Overview
The storage tag feature enables the user to use only certain nodes or disks for storing Longhorn volume data. For example, performance-sensitive data can use only the high-performance disks tagged as `fast`, `ssd` or `nvme`, or only the high-performance nodes tagged as `baremetal`.
This feature supports both disks and nodes.
## Setup
The tag setup can be found at Longhorn UI:
1. *Node -> Select one node -> Edit Node and Disks*
2. Click `+New Node Tag` or `+New Disk Tag` to add new tags.
Existing replicas already scheduled on the node or disk won't be affected by the new tags.
## Usage
When multiple tags are specified for a volume, the disk and the node (the disk belongs to) must have all the specified tags to become usable.
### UI
When creating a volume, specify the disk tag and node tag in the UI.
### Kubernetes
Use Kubernetes StorageClass setting to specify tags.
For example:
```
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: longhorn-fast
provisioner: rancher.io/longhorn
parameters:
numberOfReplicas: "3"
staleReplicaTimeout: "480"
diskSelector: "ssd"
nodeSelector: "storage,fast"
```
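A PVC can then request storage from this class as usual — a minimal sketch (name and size are illustrative):
```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: fast-vol
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-fast
  resources:
    requests:
      storage: 2Gi
```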
## History
* [Original feature request](https://github.com/longhorn/longhorn/issues/311)
* Available since v0.6.0

docs/taint-toleration.md Normal file
View File

@ -0,0 +1,32 @@
# Taint Toleration
## Overview
If users want to create nodes with large storage spaces and/or CPU resources for Longhorn only (to store replica data) and reject other general workloads, they can taint those nodes and add tolerations for Longhorn components. Then Longhorn can be deployed on those nodes.
For more Kubernetes taint and toleration info, see:
[Kubernetes Taint & Tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)
## Setup
### During installing Longhorn
Follow the instructions to set initial taint tolerations: [Customize default settings](https://github.com/longhorn/longhorn/wiki/Feature:-Customized-Default-Setting#usage)
### After Longhorn has been installed
The taint toleration setting can be found at Longhorn UI:
Setting -> General -> Kubernetes Taint Toleration
Users can modify the existing tolerations or add more tolerations here, but note that doing so will result in all the Longhorn system components being recreated.
## Usage
1. Before modifying the toleration setting, users should make sure all Longhorn volumes are `detached`, since all Longhorn components will be restarted and the Longhorn system will be temporarily unavailable. If there are running Longhorn volumes in the system, the Longhorn system cannot restart its components and the request will be rejected.
2. While the Longhorn system is updating the toleration setting and restarting its components, users shouldn't operate the Longhorn system.
3. When setting tolerations, the setting shouldn't contain the substring `kubernetes.io`, which is used as the key of Kubernetes default tolerations.
4. Multiple tolerations can be set here, separated by semicolons. For example: `key1=value1:NoSchedule; key2:NoExecute` (see the sketch below).
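A minimal end-to-end sketch, assuming a dedicated storage node named `storage-node-1` (the taint key and value are illustrative):
```
# Repel general workloads from the dedicated node
kubectl taint nodes storage-node-1 nodetype=storage:NoSchedule
# Then set the Longhorn toleration setting (Setting -> General) to:
#   nodetype=storage:NoSchedule
```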
## History
[Original feature request](https://github.com/longhorn/longhorn/issues/584)
Available since v0.6.0

View File

@ -26,7 +26,7 @@ Make use of the Longhorn UI is a good start for the troubleshooting. For example
Also, the event logs in the UI dashboard provide some information on probable issues. Check for the event logs at the `Warning` level. Also, the event logs in the UI dashboard provide some information on probable issues. Check for the event logs at the `Warning` level.
### Manager and engines ### Manager and engines
You can get the logs from Longhorn Manager and Engines to help with the troubleshooting. The most useful logs are from `longhorn-manager-xxx`, and the logs inside Longhorn Engine, e.g. `<volname>-e-xxxx` and `<volname>-r-xxxx`. You can get the logs from Longhorn Manager and Engines to help with the troubleshooting. The most useful logs are from `longhorn-manager-xxx`, and the logs inside the Longhorn instance managers, e.g. `instance-manager-e-xxxx` and `instance-manager-r-xxxx`.
Since normally there are multiple Longhorn Managers running at the same time, we recommend using [kubetail](https://github.com/johanhaleby/kubetail), which is a great tool to keep track of the logs of multiple pods. You can use: Since normally there are multiple Longhorn Managers running at the same time, we recommend using [kubetail](https://github.com/johanhaleby/kubetail), which is a great tool to keep track of the logs of multiple pods. You can use:
``` ```

View File

@ -27,7 +27,7 @@ Otherwise you will need to download the yaml file from [the official Longhorn De
**ALWAYS MAKE BACKUPS BEFORE UPGRADING THE ENGINE IMAGES.** **ALWAYS MAKE BACKUPS BEFORE UPGRADING THE ENGINE IMAGES.**
### Offline upgrade ### Offline upgrade
If live upgrade is not available (e.g. before v0.3.3), or the volume is stuck in a degraded state: If live upgrade is not available (e.g. before v0.3.3 or from v0.5.0 to v0.6.0), or the volume is stuck in a degraded state:
1. Follow [the detach procedure for relevant workloads](upgrade.md#detach-volumes). 1. Follow [the detach procedure for relevant workloads](upgrade.md#detach-volumes).
2. Select all the volumes using batch selection. Click batch operation button 2. Select all the volumes using batch selection. Click batch operation button
`Upgrade Engine`, choose the engine image available in the list. It's `Upgrade Engine`, choose the engine image available in the list. It's
@ -37,7 +37,7 @@ Any volume not part of a Kubernetes workload must be attached from Longhorn UI.
### Live upgrade ### Live upgrade
Live upgrade is available since v0.3.3. Live upgrade is available since v0.3.3, with the exception of upgrading from v0.5.0 to v0.6.0.
Live upgrade should only be done with healthy volumes. Live upgrade should only be done with healthy volumes.

View File

@ -6,8 +6,27 @@ The default replica count can be changed in the setting.
Also, when a volume is attached, the user can change the replica count for the volume in the UI. Also, when a volume is attached, the user can change the replica count for the volume in the UI.
Longhorn will always try to maintain at least the given number of healthy replicas for each volume. If the current healthy Longhorn will always try to maintain at least the given number of healthy replicas for each volume.
replica count is less than the specified replica count, Longhorn will start rebuilding new replicas. If the current healthy 1. If the current healthy replica count is less than the specified replica count, Longhorn will start rebuilding new replicas.
replica count is more than the specified replica count, Longhorn will do nothing. In the latter situation, if the user deletes one 2. If the current healthy replica count is more than the specified replica count, Longhorn will do nothing. In this situation, if the user deletes one or more healthy replicas, or some healthy replicas fail, Longhorn won't start rebuilding new replicas as long as the total healthy replica count doesn't dip below the specified replica count.
or more healthy replicas, or there are healthy replicas failed, as long as the total healthy replica count doesn't dip
below the specified replica count, Longhorn won't start rebuilding new replicas. ### Volume size
Longhorn is a thin-provisioned storage system. That means a Longhorn volume will only take the space it needs at the moment. For example, if you allocate a 20GB volume but only use 1GB of it, the actual data size on your disk would be 1GB. You can see the actual data size in the volume details in the UI.
A Longhorn volume itself cannot shrink in size when you remove content from it. For example, if you create a volume of 20GB, use 10GB, then remove 9GB of content, the actual size on the disk would still be 10GB instead of 1GB. That's because Longhorn currently operates on the block level, not the filesystem level, so it doesn't know whether the user has removed content; that information is mostly kept at the filesystem level.
#### Space taken by the snapshots
Some users may find that a Longhorn volume's actual size is bigger than its nominal size. That's because Longhorn snapshots store the volume's historical data, which also takes space, depending on how much data was in each snapshot. The snapshot feature enables the user to revert back to a certain point in history and create backups to secondary storage. The snapshot feature is also part of Longhorn's rebuilding process: every time Longhorn detects a replica is down, it will automatically take a (system) snapshot and start rebuilding on another node.
To reduce the space taken by snapshots, users can schedule a recurring snapshot or backup with a retain number, which will
automatically create a new snapshot/backup on schedule, then clean up any excess snapshots/backups, as sketched below.
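A sketch of such a schedule, using the `recurringJobs` StorageClass parameter from the examples in this repo (the cron expression and retain count are illustrative):
```
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: longhorn-recurring
provisioner: rancher.io/longhorn
parameters:
  numberOfReplicas: "3"
  staleReplicaTimeout: "2880"
  # hourly snapshot, keep the latest 3
  recurringJobs: '[{"name":"snap", "task":"snapshot", "cron":"0 * * * *", "retain":3}]'
```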
Users can also delete unwanted snapshots manually through the UI. Any system-generated snapshots will be automatically marked for deletion when the deletion of any snapshot is triggered.
#### The latest snapshot
In Longhorn, the latest snapshot cannot be deleted. That's because whenever a snapshot is deleted, Longhorn coalesces its content with the next snapshot, making sure the next and later snapshots still have the correct content. But Longhorn cannot do that for the latest snapshot, since there is no snapshot after it. The next "snapshot" of the latest snapshot is the live volume (`volume-head`), which is being read/written by the user at the moment, so the coalescing process cannot happen. Instead, the latest snapshot will be marked as `removed` and will be cleaned up the next time it's possible.
If users want to clean up the latest snapshot, they can create a new snapshot first, then remove the previous "latest" snapshot.

View File

@ -14,7 +14,7 @@ spec:
fsType: ext4 fsType: ext4
volumeAttributes: volumeAttributes:
numberOfReplicas: '3' numberOfReplicas: '3'
staleReplicaTimeout: '30' staleReplicaTimeout: '2880'
volumeHandle: existing-longhorn-volume volumeHandle: existing-longhorn-volume
--- ---
apiVersion: v1 apiVersion: v1

View File

@ -23,13 +23,16 @@ spec:
requests: requests:
storage: 2Gi storage: 2Gi
--- ---
apiVersion: apps/v1beta1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
name: mysql name: mysql
labels: labels:
app: mysql app: mysql
spec: spec:
selector:
matchLabels:
app: mysql # has to match .spec.template.metadata.labels
strategy: strategy:
type: Recreate type: Recreate
template: template:

View File

@ -5,7 +5,7 @@ metadata:
provisioner: rancher.io/longhorn provisioner: rancher.io/longhorn
parameters: parameters:
numberOfReplicas: '3' numberOfReplicas: '3'
staleReplicaTimeout: '30' staleReplicaTimeout: '2880'
reclaimPolicy: Delete reclaimPolicy: Delete
--- ---
apiVersion: v1 apiVersion: v1

View File

@ -1,43 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: flexvol-baseimage
name: flexvol-baseimage
namespace: default
spec:
containers:
- name: flexvol-baseimage
image: nginx:stable-alpine
imagePullPolicy: IfNotPresent
volumeMounts:
- name: flexvol
mountPath: /usr/share/nginx/html
ports:
- containerPort: 80
volumes:
- name: flexvol
flexVolume:
driver: rancher.io/longhorn
options:
size: 32Mi
numberOfReplicas: "3"
staleReplicaTimeout: "20"
fromBackup: ""
baseImage: rancher/longhorn-test:baseimage-ext4
---
apiVersion: v1
kind: Service
metadata:
labels:
app: flexvol-baseimage
name: flexvol-baseimage
namespace: default
spec:
ports:
- name: web
port: 80
targetPort: 80
selector:
app: flexvol-baseimage
type: LoadBalancer

View File

@ -1,63 +0,0 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
labels:
app: provisioner-baseimage
name: baseimage-storageclass
provisioner: rancher.io/longhorn
parameters:
numberOfReplicas: '3'
staleReplicaTimeout: '30'
fromBackup: ''
baseImage: rancher/longhorn-test:baseimage-ext4
---
apiVersion: v1
kind: Service
metadata:
labels:
app: provisioner-baseimage
name: provisioner-baseimage-service
spec:
ports:
- port: 80
name: web
selector:
app: provisioner-baseimage
type: LoadBalancer
---
apiVersion: apps/v1beta2
kind: StatefulSet
metadata:
labels:
app: provisioner-baseimage
name: provisioner-baseimage-statefulset
spec:
selector:
matchLabels:
app: provisioner-baseimage
serviceName: provisioner-baseimage
replicas: 2
template:
metadata:
labels:
app: provisioner-baseimage
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: nginx:stable-alpine
imagePullPolicy: IfNotPresent
volumeMounts:
- name: baseimage-vol
mountPath: /usr/share/nginx/html
ports:
- containerPort: 80
volumeClaimTemplates:
- metadata:
name: baseimage-vol
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: baseimage-storageclass
resources:
requests:
storage: 32Mi

View File

@ -1,94 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: restore-to-file
namespace: longhorn-system
spec:
nodeName: <NODE_NAME>
initContainers:
- name: prime-base-image
# set base image here
command:
- /bin/sh
- -c
- echo primed-base-image
# set base image here
image: <BASE_IMAGE>
imagePullPolicy: Always
containers:
- name: base-image
command:
- /bin/sh
- -c
- mkdir -p /share/base_image &&
mount --bind /base_image/ /share/base_image &&
echo base image mounted at /share/base_image &&
trap 'umount /share/base_image && echo unmounted' TERM &&
while true; do $(ls /talk/done 2>&1); if [ $? -eq 0 ]; then break;
fi; echo waiting; sleep 1; done;
umount /share/base_image && echo unmounted
# set base image here
image: <BASE_IMAGE>
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
volumeMounts:
- name: share
mountPath: /share
mountPropagation: Bidirectional
- name: talk
mountPath: /talk
- name: restore-to-file
command:
# set restore-to-file arguments here
- /bin/sh
- -c
- while true; do list=$(ls /share/base_image/* 2>&1); if [ $? -eq 0 ]; then break;
fi; echo waiting; sleep 1; done; echo Directory found $list;
longhorn backup restore-to-file
'<BACKUP_URL>'
--backing-file $list
--output-file '/tmp/restore/<OUTPUT_FILE>'
--output-format <OUTPUT_FORMAT>
&& touch /talk/done && chmod 777 /talk/done && echo created /share/done
# the version of longhorn engine should be v0.4.1 or higher
image: rancher/longhorn-engine:v0.4.1
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
volumeMounts:
- name: share
mountPath: /share
mountPropagation: HostToContainer
readOnly: true
- name: talk
mountPath: /talk
- name: disk-directory
mountPath: /tmp/restore # the argument <output-file> should be in this directory
env:
# set Backup Target Credential Secret here.
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: <S3_SECRET_NAME>
key: AWS_ACCESS_KEY_ID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: <S3_SECRET_NAME>
key: AWS_SECRET_ACCESS_KEY
- name: AWS_ENDPOINTS
valueFrom:
secretKeyRef:
name: <S3_SECRET_NAME>
key: AWS_ENDPOINTS
volumes:
- name: share
emptyDir: {}
- name: talk
emptyDir: {}
# the output file can be found on this host path
- name: disk-directory
hostPath:
path: /tmp/restore
restartPolicy: Never

View File

@ -12,7 +12,7 @@ spec:
app: nginx app: nginx
type: NodePort type: NodePort
--- ---
apiVersion: apps/v1beta2 apiVersion: apps/v1
kind: StatefulSet kind: StatefulSet
metadata: metadata:
name: web name: web

View File

@ -5,7 +5,10 @@ metadata:
provisioner: rancher.io/longhorn provisioner: rancher.io/longhorn
parameters: parameters:
numberOfReplicas: "3" numberOfReplicas: "3"
staleReplicaTimeout: "30" staleReplicaTimeout: "2880"
fromBackup: "" fromBackup: ""
# diskSelector: "ssd,fast"
# nodeSelector: "storage,fast"
# recurringJobs: '[{"name":"snap", "task":"snapshot", "cron":"*/1 * * * *", "retain":1}, # recurringJobs: '[{"name":"snap", "task":"snapshot", "cron":"*/1 * * * *", "retain":1},
# {"name":"backup", "task":"backup", "cron":"*/2 * * * *", "retain":1}]' # {"name":"backup", "task":"backup", "cron":"*/2 * * * *", "retain":1,
# "labels": {"interval":"2m"}}]'

scripts/lhexec Executable file
View File

@ -0,0 +1,69 @@
#!/usr/bin/env bash
NS="longhorn-system"
print_usage() {
echo "Usage: ${0} [|-h|--help] volume_name longhorn_commands_arguments"
echo ""
echo "Examples:"
echo " ${0} test-vol snapshot ls"
echo " ${0} test-vol info"
echo ""
echo "Note: Must have Longhorn installed in "longhorn-system" namespace and have access to "kubectl" and the namespace"
echo ""
exit 0
}
# Verify the Longhorn volume CR exists in the longhorn-system namespace
check_volume_exist(){
VOLUME_NAME=${1}
kubectl -n ${NS} get lhv ${VOLUME_NAME} > /dev/null 2>&1
if [[ ${?} -ne 0 ]]; then
echo "Err: Volume ${VOLUME_NAME} not found"
exit 1
fi
}
# The engine must be in the running state before the CLI can reach it
check_engine_state(){
VOLUME_NAME=${1}
LHE_STATE_FILTER="{.items[?(@.spec.volumeName==\"${VOLUME_NAME}\")].status.currentState}"
LHE_STATE=`kubectl -n ${NS} get lhe --output=jsonpath="${LHE_STATE_FILTER}"`
if [[ ${LHE_STATE} != "running" ]]; then
echo "Err: Longhorn engine for volume ${VOLUME_NAME} is not running"
exit 1
fi
}
# Run the 'longhorn' CLI against the volume's engine inside its instance manager pod
exec_command() {
VOLUME_NAME=${1}
COMMAND_ARGS="${@:2}"
INSTANCE_MANAGER_NAME_FILTER="{.items[?(@.spec.volumeName==\"${VOLUME_NAME}\")].status.instanceManagerName}"
INSTANCE_MANAGER_NAME=`kubectl -n ${NS} get lhe --output=jsonpath="${INSTANCE_MANAGER_NAME_FILTER}"`
ENGINE_PORT_FILTER="{.items[?(@.spec.volumeName==\"${VOLUME_NAME}\")].status.port}"
ENGINE_PORT=`kubectl -n ${NS} get lhe --output=jsonpath="${ENGINE_PORT_FILTER}"`
kubectl -n ${NS} exec -it ${INSTANCE_MANAGER_NAME} -- bash -c "longhorn --url localhost:${ENGINE_PORT} ${COMMAND_ARGS}"
}
ARG=$1
case $ARG in
"" | "-h" | "--help")
print_usage
;;
*)
VOLUME_NAME=${ARG}
shift
COMMAND_ARGS="${@}"
if [[ ${COMMAND_ARGS} == "" ]]; then
COMMAND_ARGS="help"
fi
check_volume_exist ${VOLUME_NAME}
check_engine_state ${VOLUME_NAME}
exec_command ${VOLUME_NAME} ${COMMAND_ARGS}
;;
esac

View File

@ -2,8 +2,9 @@ apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: longhorn-uninstall-service-account name: longhorn-uninstall-service-account
namespace: default
--- ---
apiVersion: rbac.authorization.k8s.io/v1beta1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole kind: ClusterRole
metadata: metadata:
name: longhorn-uninstall-role name: longhorn-uninstall-role
@ -24,10 +25,10 @@ rules:
resources: ["jobs", "cronjobs"] resources: ["jobs", "cronjobs"]
verbs: ["*"] verbs: ["*"]
- apiGroups: ["longhorn.rancher.io"] - apiGroups: ["longhorn.rancher.io"]
resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes"] resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers"]
verbs: ["*"] verbs: ["*"]
--- ---
apiVersion: rbac.authorization.k8s.io/v1beta1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding kind: ClusterRoleBinding
metadata: metadata:
name: longhorn-uninstall-bind name: longhorn-uninstall-bind
@ -44,6 +45,7 @@ apiVersion: batch/v1
kind: Job kind: Job
metadata: metadata:
name: longhorn-uninstall name: longhorn-uninstall
namespace: default
spec: spec:
activeDeadlineSeconds: 900 activeDeadlineSeconds: 900
backoffLimit: 1 backoffLimit: 1
@ -53,7 +55,7 @@ spec:
spec: spec:
containers: containers:
- name: longhorn-uninstall - name: longhorn-uninstall
image: rancher/longhorn-manager:v0.5.0 image: longhornio/longhorn-manager:v0.6.0
imagePullPolicy: Always imagePullPolicy: Always
command: command:
- longhorn-manager - longhorn-manager