diff --git a/README.md b/README.md
index 9925467..15927e2 100644
--- a/README.md
+++ b/README.md
@@ -50,7 +50,7 @@ The easiest way to install Longhorn is to deploy Longhorn from Rancher Catalog.
 1. On Rancher UI, select the cluster and project you want to install Longhorn. We recommended to create a new project e.g. `Storage` for Longhorn.
 2. Navigate to the `Catalog Apps` screen. Select `Launch`, find Longhorn in the list. Select `View Details`, then click `Launch`. Longhorn will be installed in the `longhorn-system` namespace.
-
+
 After Longhorn has been successfully installed, you can access the Longhorn UI by navigating to the `Catalog Apps` screen.
 
 One benefit of installing Longhorn through Rancher catalog is Rancher provides authentication to Longhorn UI.
@@ -122,10 +122,6 @@ Noted that the UI is unauthenticated when you installed Longhorn using YAML file
 
 # Upgrade
 
-Since v0.3.3, Longhorn is able to perform fully-automated non-disruptive upgrades, meaning that the upgrade process won't disrupt the running volumes. Existing volumes continue to run even as the software that implements these volumes are upgraded.
-
-If you're upgrading from Longhorn v0.3.0 or newer:
-
 ## Upgrade Longhorn manager
 
 ##### On Kubernetes clusters Managed by Rancher 2.1 or newer
@@ -143,11 +139,12 @@ helm upgrade longhorn ./longhorn/chart
 ## Upgrade Longhorn engine
 
 After upgraded manager, follow [the steps here](docs/upgrade.md#upgrade-longhorn-engine) to upgrade Longhorn engine for existing volumes.
-    1. For non distruptive upgrade, follow [the live upgrade steps here](./docs/upgrade.md#live-upgrade)
+    1. Upgrading from v0.5.0 to v0.6.0 requires an offline upgrade; live upgrade is not supported for this release.
+    2. For a non-disruptive upgrade from a previous version to v0.5.0, follow [the live upgrade steps here](./docs/upgrade.md#live-upgrade)
 
 For more details about upgrade in Longhorn or upgrade from older versions, [see here](docs/upgrade.md).
 
-# Create Longhorn Volumes
+# Create Longhorn Volumes 
 
 Before you create Kubernetes volumes, you must first create a storage class. Use following command to create a StorageClass called `longhorn`.
 
@@ -204,10 +201,13 @@ More examples are available at `./examples/`
 
 ### [Snapshot and Backup](./docs/snapshot-backup.md)
 ### [Volume operations](./docs/volume.md)
-### [Multiple disks, including how to change the default path for storage](./docs/multidisk.md)
+### [Settings](./docs/settings.md)
+### [Multiple disks](./docs/multidisk.md)
 ### [iSCSI](./docs/iscsi.md)
-### [Base image](./docs/base-image.md)
 ### [Kubernetes workload in Longhorn UI](./docs/k8s-workload.md)
+### [Storage Tags](./docs/storage-tags.md)
+### [Customized default setting](./docs/customized-default-setting.md)
+### [Taint Toleration](./docs/taint-toleration.md)
 ### [Restoring Stateful Set volumes](./docs/restore_statefulset.md)
 ### [Google Kubernetes Engine](./docs/gke.md)
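The README's StorageClass step refers to a `longhorn` class whose manifest isn't visible in this hunk. For reference, a minimal sketch — the parameters mirror `examples/storageclass.yaml` as updated in this patch, so treat it as illustrative rather than the canonical manifest:

```
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: longhorn
provisioner: rancher.io/longhorn
parameters:
  numberOfReplicas: "3"
  staleReplicaTimeout: "2880"
  fromBackup: ""
```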
diff --git a/chart/Chart.yaml b/chart/Chart.yaml
index 2903fa3..ae4a615 100644
--- a/chart/Chart.yaml
+++ b/chart/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v1
 name: longhorn
-version: 0.5.0
-appVersion: v0.5.0
-kubeVersion: ">=v1.8.0-r0"
+version: 0.6.0
+appVersion: v0.6.0
+kubeVersion: ">=v1.12.0-r0"
 description: Longhorn is a distributed block storage system for Kubernetes powered by Rancher Labs.
 keywords:
 - longhorn
diff --git a/chart/questions.yml b/chart/questions.yml
index 6f29fca..2c6e6c8 100644
--- a/chart/questions.yml
+++ b/chart/questions.yml
@@ -12,7 +12,7 @@ questions:
     - csi
     - flexvolume
     label: Longhorn Kubernetes Driver
-    group: "Longhorn Settings"
+    group: "Longhorn Driver Settings"
    show_subquestion_if: flexvolume
    subquestions:
    - variable: persistence.flexvolumePath
@@ -59,18 +59,102 @@ questions:
 - variable: persistence.defaultClass
   default: "true"
   description: "Set as default StorageClass"
-  group: "Longhorn Settings"
+  group: "Longhorn Driver Settings"
   type: boolean
   required: true
   label: Default Storage Class
 - variable: persistence.defaultClassReplicaCount
   description: "Set replica count for default StorageClass"
-  group: "Longhorn Settings"
+  group: "Longhorn Driver Settings"
   type: int
   default: 3
   min: 1
   max: 10
   label: Default Storage Class Replica Count
+- variable: defaultSettings.backupTarget
+  label: Backup Target
+  description: "The target used for backup. Supports NFS and S3."
+  group: "Longhorn Default Settings"
+  type: string
+  default:
+- variable: defaultSettings.backupTargetCredentialSecret
+  label: Backup Target Credential Secret
+  description: "The Kubernetes secret associated with the backup target."
+  group: "Longhorn Default Settings"
+  type: string
+  default:
+- variable: defaultSettings.createDefaultDiskLabeledNodes
+  label: Create Default Disk on Labeled Nodes
+  description: 'Create default Disk automatically only on Nodes with the label "node.longhorn.io/create-default-disk=true" if no other Disks exist. If disabled, default Disk will be created on all new Nodes (only on first add). By default false.'
+  group: "Longhorn Default Settings"
+  type: boolean
+  default: "false"
+- variable: defaultSettings.defaultDataPath
+  label: Default Data Path
+  description: 'Default path to use for storing data on a host. By default "/var/lib/rancher/longhorn/"'
+  group: "Longhorn Default Settings"
+  type: string
+  default: "/var/lib/rancher/longhorn/"
+- variable: defaultSettings.replicaSoftAntiAffinity
+  label: Replica Soft Anti-Affinity
+  description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default true.'
+  group: "Longhorn Default Settings"
+  type: boolean
+  default: "true"
+- variable: defaultSettings.storageOverProvisioningPercentage
+  label: Storage Over Provisioning Percentage
+  description: "The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity. By default 500."
+  group: "Longhorn Default Settings"
+  type: int
+  min: 0
+  default: 500
+- variable: defaultSettings.storageMinimalAvailablePercentage
+  label: Storage Minimal Available Percentage
+  description: "If the ratio of one disk's available capacity to its maximum capacity in % is less than the minimal available percentage, the disk becomes unschedulable until more space is freed up. By default 10."
+  group: "Longhorn Default Settings"
+  type: int
+  min: 0
+  max: 100
+  default: 10
+- variable: defaultSettings.upgradeChecker
+  label: Enable Upgrade Checker
+  description: 'Upgrade Checker will check for new Longhorn version periodically. When there is a new version available, it will notify the user in the UI. By default true.'
+  group: "Longhorn Default Settings"
+  type: boolean
+  default: "true"
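+# Note (illustrative): each defaultSettings.* variable in this file maps directly
+# to a Helm value, so it can also be set at install time without the Rancher UI,
+# e.g. (command form taken from docs/customized-default-setting.md in this patch):
+#   helm install ./longhorn/chart --name longhorn --namespace longhorn-system \
+#     --set defaultSettings.upgradeChecker=false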
+- variable: defaultSettings.defaultReplicaCount
+  label: Default Replica Count
+  description: "The default number of replicas when creating the volume from Longhorn UI. For Kubernetes, update the `numberOfReplicas` in the StorageClass. By default 3."
+  group: "Longhorn Default Settings"
+  type: int
+  min: 1
+  max: 20
+  default: 3
+- variable: defaultSettings.guaranteedEngineCPU
+  label: Guaranteed Engine CPU
+  description: '(EXPERIMENTAL FEATURE) Allow Longhorn Engine to have guaranteed CPU allocation. The value is how many CPUs should be reserved for each Engine/Replica Manager Pod created by Longhorn. For example, 0.1 means one-tenth of a CPU. This will help maintain engine stability during high node workload. It only applies to the Engine/Replica Manager Pods created after the setting took effect. WARNING: Attaching the volume may fail or get stuck while using this feature due to the resource constraint. Disabled ("0") by default.'
+  group: "Longhorn Default Settings"
+  type: float
+  default: 0
+- variable: defaultSettings.defaultLonghornStaticStorageClass
+  label: Default Longhorn Static StorageClass Name
+  description: "The 'storageClassName' is for PV/PVC when creating PV/PVC for an existing Longhorn volume. Notice that it's unnecessary for users to create the related StorageClass object in Kubernetes since the StorageClass would only be used as matching labels for PVC binding purposes. By default 'longhorn-static'."
+  group: "Longhorn Default Settings"
+  type: string
+  default: "longhorn-static"
+- variable: defaultSettings.backupstorePollInterval
+  label: Backupstore Poll Interval
+  description: "In seconds. The interval to poll the backup store for updating volumes' Last Backup field. By default 300."
+  group: "Longhorn Default Settings"
+  type: int
+  min: 0
+  default: 300
+- variable: defaultSettings.taintToleration
+  label: Kubernetes Taint Toleration
+  description: "By setting tolerations for Longhorn then adding taints for the nodes, the nodes with large storage can be dedicated to Longhorn only (to store replica data) and reject other general workloads. Multiple tolerations can be set here, and these tolerations are separated by semicolons. For example, \"key1=value1:NoSchedule; key2:NoExecute\". Notice that \"kubernetes.io\" is used as the key of all Kubernetes default tolerations, so please do not include this substring in your toleration setting."
+ group: "Longhorn Default Settings" + type: string + default: "" - variable: ingress.enabled default: "false" description: "Expose app using Layer 7 Load Balancer - ingress" diff --git a/chart/templates/clusterrole.yaml b/chart/templates/clusterrole.yaml index e8244c7..15914e7 100644 --- a/chart/templates/clusterrole.yaml +++ b/chart/templates/clusterrole.yaml @@ -1,4 +1,4 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: longhorn-role @@ -28,5 +28,5 @@ rules: resources: ["csinodeinfos"] verbs: ["get", "list", "watch"] - apiGroups: ["longhorn.rancher.io"] - resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes"] + resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers"] verbs: ["*"] diff --git a/chart/templates/clusterrolebinding.yaml b/chart/templates/clusterrolebinding.yaml index 12bcd53..3a73990 100644 --- a/chart/templates/clusterrolebinding.yaml +++ b/chart/templates/clusterrolebinding.yaml @@ -1,4 +1,4 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: longhorn-bind diff --git a/chart/templates/crds.yaml b/chart/templates/crds.yaml index ef45211..601169c 100644 --- a/chart/templates/crds.yaml +++ b/chart/templates/crds.yaml @@ -105,3 +105,21 @@ spec: singular: node scope: Namespaced version: v1alpha1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + labels: + longhorn-manager: InstanceManager + name: instancemanagers.longhorn.rancher.io +spec: + group: longhorn.rancher.io + names: + kind: InstanceManager + listKind: InstanceManagerList + plural: instancemanagers + shortNames: + - lhim + singular: instancemanager + scope: Namespaced + version: v1alpha1 diff --git a/chart/templates/daemonset-sa.yaml b/chart/templates/daemonset-sa.yaml index f2c07c8..4b5577b 100644 --- a/chart/templates/daemonset-sa.yaml +++ b/chart/templates/daemonset-sa.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: DaemonSet metadata: labels: @@ -43,6 +43,8 @@ spec: - name: longhorn mountPath: /var/lib/rancher/longhorn/ mountPropagation: Bidirectional + - name: longhorn-default-setting + mountPath: /var/lib/longhorn/setting/ env: - name: POD_NAMESPACE valueFrom: @@ -58,6 +60,8 @@ spec: fieldPath: spec.nodeName - name: LONGHORN_BACKEND_SVC value: longhorn-backend + - name: DEFAULT_SETTING_PATH + value: /var/lib/longhorn/setting/default-setting.yaml volumes: - name: dev hostPath: @@ -71,6 +75,9 @@ spec: - name: longhorn hostPath: path: /var/lib/rancher/longhorn/ + - name: longhorn-default-setting + configMap: + name: longhorn-default-setting serviceAccountName: longhorn-service-account --- apiVersion: v1 diff --git a/chart/templates/default-setting.yaml b/chart/templates/default-setting.yaml new file mode 100644 index 0000000..1fc48c4 --- /dev/null +++ b/chart/templates/default-setting.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: longhorn-default-setting + namespace: {{ .Release.Namespace }} +data: + default-setting.yaml: |- + backup-target: {{ .Values.defaultSettings.backupTarget }} + backup-target-credential-secret: {{ .Values.defaultSettings.backupTargetCredentialSecret }} + create-default-disk-labeled-nodes: {{ .Values.defaultSettings.createDefaultDiskLabeledNodes }} + default-data-path: {{ .Values.defaultSettings.defaultDataPath }} + replica-soft-anti-affinity: {{ 
.Values.defaultSettings.replicaSoftAntiAffinity }} + storage-over-provisioning-percentage: {{ .Values.defaultSettings.storageOverProvisioningPercentage }} + storage-minimal-available-percentage: {{ .Values.defaultSettings.storageMinimalAvailablePercentage }} + upgrade-checker: {{ .Values.defaultSettings.upgradeChecker }} + default-replica-count: {{ .Values.defaultSettings.defaultReplicaCount }} + guaranteed-engine-cpu: {{ .Values.defaultSettings.guaranteedEngineCPU }} + default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }} + backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }} + taint-toleration: {{ .Values.defaultSettings.taintToleration }} diff --git a/chart/templates/deployment-driver.yaml b/chart/templates/deployment-driver.yaml index 1d21924..8638a2b 100644 --- a/chart/templates/deployment-driver.yaml +++ b/chart/templates/deployment-driver.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: longhorn-driver-deployer diff --git a/chart/templates/deployment-ui.yaml b/chart/templates/deployment-ui.yaml index b8e641e..694fe19 100644 --- a/chart/templates/deployment-ui.yaml +++ b/chart/templates/deployment-ui.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: labels: diff --git a/chart/values.yaml b/chart/values.yaml index a84ddbb..b257f7b 100644 --- a/chart/values.yaml +++ b/chart/values.yaml @@ -3,12 +3,12 @@ # Declare variables to be passed into your templates. image: longhorn: - engine: rancher/longhorn-engine - engineTag: v0.5.0 - manager: rancher/longhorn-manager - managerTag: v0.5.0 - ui: rancher/longhorn-ui - uiTag: v0.5.0 + engine: longhornio/longhorn-engine + engineTag: v0.6.0 + manager: longhornio/longhorn-manager + managerTag: v0.6.0 + ui: longhornio/longhorn-ui + uiTag: v0.6.0 pullPolicy: IfNotPresent service: @@ -37,6 +37,21 @@ csi: attacherReplicaCount: provisionerReplicaCount: +defaultSettings: + backupTarget: + backupTargetCredentialSecret: + createDefaultDiskLabeledNodes: + defaultDataPath: + replicaSoftAntiAffinity: + storageOverProvisioningPercentage: + storageMinimalAvailablePercentage: + upgradeChecker: + defaultReplicaCount: + guaranteedEngineCPU: + defaultLonghornStaticStorageClass: + backupstorePollInterval: + taintToleration: + resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. 
This also increases chances charts run on environments with little diff --git a/deploy/backupstores/minio-backupstore.yaml b/deploy/backupstores/minio-backupstore.yaml index 23d7867..2aa2c8f 100644 --- a/deploy/backupstores/minio-backupstore.yaml +++ b/deploy/backupstores/minio-backupstore.yaml @@ -2,6 +2,7 @@ apiVersion: v1 kind: Secret metadata: name: minio-secret + namespace: default type: Opaque data: AWS_ACCESS_KEY_ID: bG9uZ2hvcm4tdGVzdC1hY2Nlc3Mta2V5 # longhorn-test-access-key @@ -24,6 +25,7 @@ apiVersion: v1 kind: Pod metadata: name: longhorn-test-minio + namespace: default labels: app: longhorn-test-minio spec: @@ -55,6 +57,7 @@ apiVersion: v1 kind: Service metadata: name: minio-service + namespace: default spec: selector: app: longhorn-test-minio diff --git a/deploy/backupstores/nfs-backupstore.yaml b/deploy/backupstores/nfs-backupstore.yaml index 7f4ce9c..5baef3b 100644 --- a/deploy/backupstores/nfs-backupstore.yaml +++ b/deploy/backupstores/nfs-backupstore.yaml @@ -2,6 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: longhorn-test-nfs + namespace: default labels: app: longhorn-test-nfs spec: @@ -37,6 +38,7 @@ kind: Service apiVersion: v1 metadata: name: longhorn-test-nfs-svc + namespace: default spec: selector: app: longhorn-test-nfs diff --git a/deploy/longhorn.yaml b/deploy/longhorn.yaml index dd7c602..76fd06e 100644 --- a/deploy/longhorn.yaml +++ b/deploy/longhorn.yaml @@ -9,7 +9,7 @@ metadata: name: longhorn-service-account namespace: longhorn-system --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: longhorn-role @@ -39,10 +39,10 @@ rules: resources: ["csinodeinfos"] verbs: ["get", "list", "watch"] - apiGroups: ["longhorn.rancher.io"] - resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes"] + resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers"] verbs: ["*"] --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: longhorn-bind @@ -163,7 +163,46 @@ spec: scope: Namespaced version: v1alpha1 --- -apiVersion: apps/v1beta2 +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + labels: + longhorn-manager: InstanceManager + name: instancemanagers.longhorn.rancher.io +spec: + group: longhorn.rancher.io + names: + kind: InstanceManager + listKind: InstanceManagerList + plural: instancemanagers + shortNames: + - lhim + singular: instancemanager + scope: Namespaced + version: v1alpha1 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: longhorn-default-setting + namespace: longhorn-system +data: + default-setting.yaml: |- + backup-target: + backup-target-credential-secret: + create-default-disk-labeled-nodes: + default-data-path: + replica-soft-anti-affinity: + storage-over-provisioning-percentage: + storage-minimal-available-percentage: + upgrade-checker: + default-replica-count: + guaranteed-engine-cpu: + default-longhorn-static-storage-class: + backupstore-poll-interval: + taint-toleration: +--- +apiVersion: apps/v1 kind: DaemonSet metadata: labels: @@ -181,7 +220,7 @@ spec: spec: containers: - name: longhorn-manager - image: rancher/longhorn-manager:v0.5.0 + image: longhornio/longhorn-manager:v0.6.0 imagePullPolicy: Always securityContext: privileged: true @@ -190,9 +229,9 @@ spec: - -d - daemon - --engine-image - - rancher/longhorn-engine:v0.5.0 + - longhornio/longhorn-engine:v0.6.0 - --manager-image - - 
rancher/longhorn-manager:v0.5.0 + - longhornio/longhorn-manager:v0.6.0 - --service-account - longhorn-service-account ports: @@ -207,6 +246,8 @@ spec: - name: longhorn mountPath: /var/lib/rancher/longhorn/ mountPropagation: Bidirectional + - name: longhorn-default-setting + mountPath: /var/lib/longhorn/setting/ env: - name: POD_NAMESPACE valueFrom: @@ -220,6 +261,9 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + # Should be: mount path of the volume longhorn-default-setting + the key of the configmap data in 04-default-setting.yaml + - name: DEFAULT_SETTING_PATH + value: /var/lib/longhorn/setting/default-setting.yaml volumes: - name: dev hostPath: @@ -233,6 +277,9 @@ spec: - name: longhorn hostPath: path: /var/lib/rancher/longhorn/ + - name: longhorn-default-setting + configMap: + name: longhorn-default-setting serviceAccountName: longhorn-service-account --- kind: Service @@ -250,7 +297,7 @@ spec: targetPort: 9500 sessionAffinity: ClientIP --- -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: labels: @@ -269,12 +316,13 @@ spec: spec: containers: - name: longhorn-ui - image: rancher/longhorn-ui:v0.5.0 + image: longhornio/longhorn-ui:v0.6.0 ports: - containerPort: 8000 env: - name: LONGHORN_MANAGER_IP value: "http://longhorn-backend:9500" + serviceAccountName: longhorn-service-account --- kind: Service apiVersion: v1 @@ -291,7 +339,7 @@ spec: targetPort: 8000 type: LoadBalancer --- -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: longhorn-driver-deployer @@ -308,18 +356,18 @@ spec: spec: initContainers: - name: wait-longhorn-manager - image: rancher/longhorn-manager:v0.5.0 + image: longhornio/longhorn-manager:v0.6.0 command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] containers: - name: longhorn-driver-deployer - image: rancher/longhorn-manager:v0.5.0 + image: longhornio/longhorn-manager:v0.6.0 imagePullPolicy: Always command: - longhorn-manager - -d - deploy-driver - --manager-image - - rancher/longhorn-manager:v0.5.0 + - longhornio/longhorn-manager:v0.6.0 - --manager-url - http://longhorn-backend:9500/v1 # manually choose "flexvolume" or "csi" diff --git a/docs/base-image.md b/docs/base-image.md deleted file mode 100644 index d551eaa..0000000 --- a/docs/base-image.md +++ /dev/null @@ -1,250 +0,0 @@ -# Base Image Support - -Longhorn supports creation of block devices backed by a base image. Longhorn -base images are packaged as Docker images. Public or private registries may -be used as a distribution mechanism for your Docker base images. - -## Usage - -Volumes backed by a base image can be created in three ways. - -1. [UI](#ui) - Create Longhorn volumes exposed as block device or iSCSI target -2. [Flexvolume Driver](#flexvolume-driver) - Create Longhorn block devices and consume in Kubernetes pods -3. [CSI Driver](#csi-driver) - (Newer) Create Longhorn block devices and consume in Kubernetes pods - -### UI - -On the `Volume` tab, click the `Create Volume` button. The `Base Image` field -expects a Docker image name such as `rancher/vm-ubuntu:16.04.4-server-amd64`. - -### Flexvolume Driver - -The flexvolume driver supports volumes backed by base image. Below is a sample -Flexvolume definition including `baseImage` option. 
- -``` -name: flexvol -flexVolume: - driver: "rancher.io/longhorn" - fsType: "ext4" - options: - size: "32Mi" - numberOfReplicas: "3" - staleReplicaTimeout: "20" - fromBackup: "" - baseImage: "rancher/longhorn-test:baseimage-ext4" -``` - -You do not need to (and probably shouldn't) explicitly set filesystem type -`fsType` when base image is present. If you do, it must match the base image's -filesystem or the flexvolume driver will return an error. - -Try it out for yourself. Make sure the Longhorn driver deployer specifies flag -`--driver flexvolume`, otherwise a different driver may be deployed. The -following example creates an nginx pod serving content from a flexvolume with -a base image and is accessible from a service. - -``` -kubectl create -f https://raw.githubusercontent.com/rancher/longhorn-manager/master/examples/flexvolume/example_baseimage.yaml -``` - -Wait until the pod is running. - -``` -kubectl get po/flexvol-baseimage -w -``` - -Query for the service you created. - -``` -kubectl get svc/flexvol-baseimage -``` - -Your service should look similar. - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/flexvol-baseimage LoadBalancer 10.43.153.186 80:31028/TCP 2m -``` - -Now let's access something packaged inside the base image through the Nginx -webserver, exposed by the `LoadBalancer` service. If you have LoadBalancer -support and `EXTERNAL-IP` is set, navigate to the following URL. - -``` -http:///guests/hd/party-wizard.gif -``` - -Otherwise, navigate to the following URL where `NODE-IP` is the external IP -address of any Kubernetes node and `NODE-PORT` is the second port in the -service (`31028` in the example service above). - -``` -http://:/guests/hd/party-wizard.gif -``` - -Finally, tear down the pod and service. - -``` -kubectl delete -f https://raw.githubusercontent.com/rancher/longhorn-manager/master/examples/flexvolume/example_baseimage.yaml -``` - -### CSI Driver - -The CSI driver supports volumes backed by base image. Below is a sample -StorageClass definition including `baseImage` option. - -``` -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: example -provisioner: rancher.io/longhorn -parameters: - numberOfReplicas: '3' - staleReplicaTimeout: '30' - fromBackup: '' - baseImage: rancher/longhorn-test:baseimage-ext4 -``` - -Let's walk through an example. First, ensure the CSI Plugin is deployed. - -``` -kubectl -n longhorn-system get daemonset.apps/longhorn-csi-plugin -``` - -The following example creates an nginx statefulset with two replicas serving -content from two csi-provisioned volumes backed by a base image. The -statefulset is accessible from a service. - -``` -kubectl create -f https://raw.githubusercontent.com/rancher/longhorn-manager/master/examples/provisioner_with_baseimage.yaml -``` - -Wait until both pods are running. - -``` -kubectl -l app=provisioner-baseimage get po -w -``` - -Query for the service you created. - -``` -kubectl get svc/csi-baseimage -``` - -Your service should look similar. - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -csi-baseimage LoadBalancer 10.43.47.129 80:32768/TCP 4m -``` - -Now let's access something packaged inside the base image through the Nginx -webserver, exposed by the `LoadBalancer` service. If you have LoadBalancer -support and `EXTERNAL-IP` is set, navigate to the following URL. 
- -``` -http:///guests/hd/party-wizard.gif -``` - -Otherwise, navigate to the following URL where `NODE-IP` is the external IP -address of any Kubernetes node and `NODE-PORT` is the second port in the -service (`32768` in the example service above). - -``` -http://:/guests/hd/party-wizard.gif -``` - -Finally, tear down the pod and service. - -``` -kubectl delete -f https://raw.githubusercontent.com/rancher/longhorn-manager/master/examples/provisioner_with_baseimage.yaml -``` - -## Building - -Creating and packaging an empty base image is a very simple process. - -1. [Install QEMU](https://en.wikibooks.org/wiki/QEMU/Installing_QEMU). -2. Create a qcow2 image. - -``` -qemu-img create -f qcow2 example.qcow2 4G -``` - -3. Create the `Dockerfile` file with the following contents: - -``` -FROM busybox -COPY example.qcow2 /base_image/example.qcow2 -``` - -4. Build and publish the image: - -``` -DOCKERHUB_ACCT=rancher -docker build -t ${DOCKERHUB_ACCT}/longhorn-example:baseimage . -docker push ${DOCKERHUB_ACCT}/longhorn-example:baseimage -``` - -That's it! Your (empty) base image is ready for (no) use. Let's now explore -some use cases for a base image and what we should do to our `example.qcow2` -before building and publishing. - -### Simple Filesystem - -Suppose we want to store some static web assets in a volume. We have our qcow2 -image and the web assets, but how to put the assets in the image? - -On a Linux machine, load the network block device module. - -``` -sudo modprobe nbd -``` - -Use `qemu-nbd` to expose the image as a network block device. - -``` -sudo qemu-nbd -f qcow2 -c /dev/nbd0 example.qcow2 -``` - -The raw block device needs a filesystem. Consider your infrastructure and -choose an appropriate filesystem. We will use EXT4 filesystem. - -``` -sudo mkfs -t ext4 /dev/nbd0 -``` - -Mount the filesystem. - -``` -mkdir -p example -sudo mount /dev/nbd0 example -``` - -Copy web assets to filesystem. - -``` -cp /web/assets/* example/ -``` - -Unmount the filesystem, shutdown `qemu-nbd`, cleanup. - -``` -sudo umount example -sudo killall qemu-nbd -rmdir example -``` - -Optionally, compress the image. - -``` -qemu-img convert -c -O qcow2 example.qcow2 example.compressed.qcow2 -``` - -Follow the build and publish image steps and you are done. [Example script](https://raw.githubusercontent.com/rancher/longhorn-tests/master/manager/test_containers/baseimage/generate.sh). - -### Virtual Machine - -See [this document](https://github.com/rancher/vm/blob/master/docs/images.md) for the basic procedure of preparing Virtual Machine images. diff --git a/docs/customized-default-setting.md b/docs/customized-default-setting.md new file mode 100644 index 0000000..cf06146 --- /dev/null +++ b/docs/customized-default-setting.md @@ -0,0 +1,87 @@ +# Customized Default Setting + +## Overview +During Longhorn system deployment, users can customize the default settings for Longhorn. e.g. specify `Create Default Disk With Node Labeled` and `Default Data Path` before starting the Longhorn system. + +## Usage +### Via Rancher UI +[Cluster] -> System -> Apps -> Launch -> longhorn -> LONGHORN DEFAULT SETTINGS + + +### Via Longhorn deployment yaml file +1. Download the longhorn repo: +``` +git clone https://github.com/longhorn/longhorn.git +``` + +2. Modify the config map named `longhorn-default-setting` in the yaml file `longhorn/deploy/longhorn.yaml`. 
For example:
+```
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: longhorn-default-setting
+  namespace: longhorn-system
+data:
+  default-setting.yaml: |-
+    backup-target: s3://backupbucket@us-east-1/backupstore
+    backup-target-credential-secret: minio-secret
+    create-default-disk-labeled-nodes: true
+    default-data-path: /var/lib/rancher/longhorn-example/
+    replica-soft-anti-affinity: false
+    storage-over-provisioning-percentage: 600
+    storage-minimal-available-percentage: 15
+    upgrade-checker: false
+    default-replica-count: 2
+    guaranteed-engine-cpu:
+    default-longhorn-static-storage-class: longhorn-static-example
+    backupstore-poll-interval: 500
+    taint-toleration: key1=value1:NoSchedule; key2:NoExecute
+---
+```
+
+### Via helm
+1. Download the chart in the longhorn repo:
+```
+git clone https://github.com/longhorn/longhorn.git
+```
+
+2.1. Use the helm command with the `--set` flag to modify the default settings.
+For example:
+```
+helm install ./longhorn/chart --name longhorn --namespace longhorn-system --set defaultSettings.taintToleration="key1=value1:NoSchedule; key2:NoExecute"
+```
+
+2.2. Or modify the default settings directly in the yaml file `longhorn/chart/values.yaml`, then use the helm command without `--set` to deploy Longhorn.
+For example:
+
+In `longhorn/chart/values.yaml`:
+```
+defaultSettings:
+  backupTarget: s3://backupbucket@us-east-1/backupstore
+  backupTargetCredentialSecret: minio-secret
+  createDefaultDiskLabeledNodes: true
+  defaultDataPath: /var/lib/rancher/longhorn-example/
+  replicaSoftAntiAffinity: false
+  storageOverProvisioningPercentage: 600
+  storageMinimalAvailablePercentage: 15
+  upgradeChecker: false
+  defaultReplicaCount: 2
+  guaranteedEngineCPU:
+  defaultLonghornStaticStorageClass: longhorn-static-example
+  backupstorePollInterval: 500
+  taintToleration: key1=value1:NoSchedule; key2:NoExecute
+```
+
+Then use the helm command:
+```
+helm install ./longhorn/chart --name longhorn --namespace longhorn-system
+```
+
+For more info about using helm, see:
+[Install-Longhorn-with-helm](../README.md#install-longhorn-with-helm)
+
+## History
+[Original feature request](https://github.com/longhorn/longhorn/issues/623)
+
+Available since v0.6.0
diff --git a/docs/settings.md b/docs/settings.md
new file mode 100644
index 0000000..c39c491
--- /dev/null
+++ b/docs/settings.md
@@ -0,0 +1,79 @@
+# Settings
+
+## Customized Default Setting
+
+To set up settings before installing Longhorn, see [Customized Default Setting](./customized-default-setting.md) for details.
+
+## General
+#### Backup Target
+* Example: `s3://backupbucket@us-east-1/backupstore`
+* Description: The target used for backup. Supports NFS and S3. See [Snapshot and Backup](./snapshot-backup.md) for details.
+
+#### Backup Target Credential Secret
+* Example: `s3-secret`
+* Description: The Kubernetes secret associated with the backup target. See [Snapshot and Backup](./snapshot-backup.md) for details.
+
+#### Backupstore Poll Interval
+* Example: `300`
+* Description: In seconds. The interval to poll the backup store for updating volumes' Last Backup field. Set to 0 to disable the polling. See [Disaster Recovery Volume](./dr-volume.md) for details.
+
+#### Create Default Disk on Labeled Nodes
+* Example: `false`
+* Description: Create default Disk automatically only on Nodes with the Kubernetes label `node.longhorn.io/create-default-disk=true` if no other Disks exist. If disabled, the default Disk will be created on all new Nodes when the node is detected for the first time.
+* Note: It's useful if the user wants to scale the cluster but doesn't want to use the storage on the new nodes.
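As a quick sketch of how this setting is used (standard kubectl; the label key comes from the description above, and the node name is a placeholder):

```
kubectl label nodes <node-name> node.longhorn.io/create-default-disk=true
```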
+
+#### Default Data Path
+* Example: `/var/lib/rancher/longhorn`
+* Description: Default path to use for storing data on a host
+* Note: Can be used with the `Create Default Disk on Labeled Nodes` option to make Longhorn use only the nodes with specific storage mounted at e.g. the `/opt/longhorn` directory when scaling the cluster.
+
+#### Default Engine Image
+* Example: `longhornio/longhorn-engine:v0.6.0`
+* Description: The default engine image used by the manager. Can only be changed on the manager's starting command line
+* Note: Every Longhorn release will ship with a new Longhorn engine image. If the current Longhorn volumes are not using the default engine, a green arrow will show up, indicating that the volume needs to be upgraded to use the default engine.
+
+#### Enable Upgrade Checker
+* Example: `true`
+* Description: Upgrade Checker will check for new Longhorn version periodically. When there is a new version available, it will notify the user in the UI
+
+#### Latest Longhorn Version
+* Example: `v0.6.0`
+* Description: The latest version of Longhorn available. Updated automatically by the Upgrade Checker
+* Note: Only available if `Upgrade Checker` is enabled.
+
+#### Default Replica Count
+* Example: `3`
+* Description: The default number of replicas when creating the volume from Longhorn UI. For Kubernetes, update the `numberOfReplicas` in the StorageClass
+* Note: The recommended way of choosing the default replica count is: if you have more than three nodes for storage, use 3; otherwise use 2. Using a single replica on a single-node cluster is also OK, but the HA functionality wouldn't be available. You can still take snapshots/backups of the volume.
+
+#### Guaranteed Engine CPU
+* Example: `0.2`
+* Description: (EXPERIMENTAL FEATURE) Allow Longhorn Engine to have guaranteed CPU allocation. The value is how many CPUs should be reserved for each Engine/Replica Manager Pod created by Longhorn. For example, 0.1 means one-tenth of a CPU. This will help maintain engine stability during high node workload. It only applies to the Instance Manager Pods created after the setting took effect. WARNING: Starting the system may fail or get stuck while using this feature due to the resource constraint. Disabled (\"0\") by default.
+* Note: Please set this to **no more than a quarter** of the node's available CPU resources, since the setting will be applied to the two instance managers on the node (engine and replica), as well as the future upgraded instance managers (another two for engine and replica).
+
+#### Default Longhorn Static StorageClass Name
+* Example: `longhorn-static`
+* Description: The `storageClassName` is for PV/PVC when creating PV/PVC for an existing Longhorn volume. Notice that it's unnecessary for users to create the related StorageClass object in Kubernetes since the StorageClass would only be used as matching labels for PVC binding purposes. By default 'longhorn-static'.
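To illustrate the entry above, a minimal PVC sketch that binds an existing Longhorn volume's PV via the static class name (the claim name and size are hypothetical; no actual StorageClass object needs to exist):

```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: existing-vol-pvc              # hypothetical name
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-static   # used only as a matching label for the PV
  resources:
    requests:
      storage: 2Gi                    # hypothetical; should match the PV capacity
```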
+
+#### Kubernetes Taint Toleration
+* Example: `nodetype=storage:NoSchedule`
+* Description: By setting tolerations for Longhorn then adding taints for the nodes, the nodes with large storage can be dedicated to Longhorn only (to store replica data) and reject other general workloads.
+Before modifying the toleration setting, all Longhorn volumes should be detached; the Longhorn components will then be restarted to apply the new tolerations, and the toleration update will take a while. Users cannot operate the Longhorn system during the update, so it's recommended to set tolerations during Longhorn deployment.
+Multiple tolerations can be set here, and these tolerations are separated by semicolons. For example, "key1=value1:NoSchedule; key2:NoExecute"
+* Note: See [Taint Toleration](./taint-toleration.md) for details.
+
+## Scheduling
+#### Replica Soft Anti-Affinity
+* Example: `true`
+* Description: Allow scheduling on nodes with existing healthy replicas of the same volume
+* Note: If users want to avoid replica rebuilds caused by temporary node outages, they can set this option to `false`. The volume may be kept in `Degraded` state until another node that doesn't already have a replica scheduled comes online.
+
+#### Storage Over Provisioning Percentage
+* Example: `500`
+* Description: The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity.
+* Note: Users can set this to a lower value if they don't want to over-provision storage. See [Multiple Disks Support](./multidisk.md#configuration) for details. Also, a replica of a volume may take more space than the volume's size, since the snapshots need space to be stored as well. Users can delete snapshots to reclaim space.
+
+#### Storage Minimal Available Percentage
+* Example: `10`
+* Description: If the ratio of one disk's available capacity to its maximum capacity in % is less than the minimal available percentage, the disk becomes unschedulable until more space is freed up.
+* Note: See [Multiple Disks Support](./multidisk.md#configuration) for details.
diff --git a/docs/storage-tags.md b/docs/storage-tags.md
new file mode 100644
index 0000000..5fce30e
--- /dev/null
+++ b/docs/storage-tags.md
@@ -0,0 +1,46 @@
+# Storage Tags
+
+## Overview
+
+The storage tag feature enables the user to use only certain nodes or disks for storing Longhorn volume data. For example, performance-sensitive data can use only the high-performance disks which can be tagged as `fast`, `ssd` or `nvme`, or only the high-performance nodes tagged as `baremetal`.
+
+This feature supports both disks and nodes.
+
+## Setup
+
+The tag setup can be found in the Longhorn UI:
+
+1. *Node -> Select one node -> Edit Node and Disks*
+2. Click `+New Node Tag` or `+New Disk Tag` to add new tags.
+
+Existing replicas already scheduled to the node or disk won't be affected by the new tags.
+
+## Usage
+
+When multiple tags are specified for a volume, the disk and the node (the disk belongs to) must have all the specified tags to become usable.
+
+### UI
+
+When creating a volume, specify the disk tag and node tag in the UI.
+
+### Kubernetes
+
+Use the Kubernetes StorageClass setting to specify tags.
+
+For example:
+```
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: longhorn-fast
+provisioner: rancher.io/longhorn
+parameters:
+  numberOfReplicas: "3"
+  staleReplicaTimeout: "480"
+  diskSelector: "ssd"
+  nodeSelector: "storage,fast"
+```
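A PVC consuming the tagged class above would look like this (a usage sketch; the claim name and size are illustrative):

```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: fast-vol
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-fast   # replicas land only on "ssd" disks of "storage,fast" nodes
  resources:
    requests:
      storage: 2Gi
```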
+
+## History
+* [Original feature request](https://github.com/longhorn/longhorn/issues/311)
+* Available since v0.6.0
diff --git a/docs/taint-toleration.md b/docs/taint-toleration.md
new file mode 100644
index 0000000..296b04c
--- /dev/null
+++ b/docs/taint-toleration.md
@@ -0,0 +1,32 @@
+# Taint Toleration
+
+## Overview
+If users want to create nodes with large storage spaces and/or CPU resources for Longhorn only (to store replica data) and reject other general workloads, they can taint those nodes and add tolerations for Longhorn components. Then Longhorn can be deployed on those nodes.
+
+For more Kubernetes taint and toleration info, see:
+[Kubernetes Taint & Tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)
+
+## Setup
+### During Longhorn installation
+Follow the instructions to set initial taint tolerations: [Customize default settings](https://github.com/longhorn/longhorn/wiki/Feature:-Customized-Default-Setting#usage)
+
+### After Longhorn has been installed
+The taint toleration setting can be found in the Longhorn UI:
+
+Setting -> General -> Kubernetes Taint Toleration
+
+Users can modify the existing tolerations or add more tolerations here, but note that doing so will cause all the Longhorn system components to be recreated.
+
+## Usage
+1. Before modifying the toleration setting, users should make sure all Longhorn volumes are `detached`, since all Longhorn components will be restarted and the Longhorn system will be temporarily unavailable. If there are running Longhorn volumes in the system, the Longhorn system cannot restart its components and the request will be rejected.
+
+2. While the Longhorn system is updating the toleration setting and restarting its components, users shouldn't operate the Longhorn system.
+
+3. When users set tolerations, the substring `kubernetes.io` shouldn't be contained in the setting, since it is used as the key of the Kubernetes default tolerations.
+
+4. Multiple tolerations can be set here, and these tolerations are separated by semicolons. For example: `key1=value1:NoSchedule; key2:NoExecute`.
+
+## History
+[Original feature request](https://github.com/longhorn/longhorn/issues/584)
+
+Available since v0.6.0
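Putting the pieces together, dedicating a node to Longhorn looks roughly like this (standard kubectl taint syntax; the key/value pair is the illustrative one used above):

```
# Taint the storage node so that general workloads are rejected
kubectl taint nodes <storage-node> key1=value1:NoSchedule

# Then set the matching toleration in Longhorn:
#   Setting -> General -> Kubernetes Taint Toleration: "key1=value1:NoSchedule"
```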
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index 2389dc6..d43f314 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -26,7 +26,7 @@ Make use of the Longhorn UI is a good start for the troubleshooting. For example
 Also, the event logs in the UI dashboard provides some information of probably issues. Check for the event logs in `Warning` level.
 
 ### Manager and engines
-You can get the log from Longhorn Manager and Engines to help with the troubleshooting. The most useful logs are from `longhorn-manager-xxx`, and the log inside Longhorn Engine, e.g. `-e-xxxx` and `-r-xxxx`.
+You can get the log from Longhorn Manager and Engines to help with the troubleshooting. The most useful logs are from `longhorn-manager-xxx`, and the log inside Longhorn instance managers, e.g. `instance-manager-e-xxxx` and `instance-manager-r-xxxx`.
 
 Since normally there are multiple Longhorn Manager running at the same time, we recommend using [kubetail](https://github.com/johanhaleby/kubetail) which is a great tool to keep track of the logs of multiple pods. You can use:
 ```
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 8eb310a..849b611 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -27,7 +27,7 @@ Otherwise you will need to download the yaml file from [the official Longhorn De
 
 **ALWAYS MAKE BACKUPS BEFORE UPGRADE THE ENGINE IMAGES.**
 
 ### Offline upgrade
-If live upgrade is not available (e.g. before v0.3.3), or the volume stuck in degraded state:
+If live upgrade is not available (e.g. before v0.3.3 or from v0.5.0 to v0.6.0), or the volume is stuck in a degraded state:
 
 1. Follow [the detach procedure for relevant workloads](upgrade.md#detach-volumes).
 2. Select all the volumes using batch selection. Click batch operation button `Upgrade Engine`, choose the engine image available in the list. It's
@@ -37,7 +37,7 @@ Any volume not part of a Kubernetes workload must be attached from Longhorn UI.
 
 ### Live upgrade
 
-Live upgrade is available since v0.3.3.
+Live upgrade is available since v0.3.3, with the exception of upgrading from v0.5.0 to v0.6.0.
 
 Live upgrade should only be done with healthy volumes.
 
diff --git a/docs/volume.md b/docs/volume.md
index 0a5ae06..8cd5526 100644
--- a/docs/volume.md
+++ b/docs/volume.md
@@ -6,8 +6,27 @@ The default replica count can be changed in the setting.
 
 Also, when a volume is attached, the user can change the replica count for the volume in the UI.
 
-Longhorn will always try to maintain at least given number of healthy replicas for each volume. If the current healthy
-replica count is less than specified replica count, Longhorn will start rebuilding new replicas. If the current healthy
-replica count is more than specified replica count, Longhorn will do nothing. In the later situation, if user delete one
-or more healthy replicas, or there are healthy replicas failed, as long as the total healthy replica count doesn't dip
-below the specified replica count, Longhorn won't start rebuilding new replicas.
+Longhorn will always try to maintain at least the given number of healthy replicas for each volume.
+1. If the current healthy replica count is less than the specified replica count, Longhorn will start rebuilding new replicas.
+2. If the current healthy replica count is more than the specified replica count, Longhorn will do nothing. In this situation, if the user deletes one or more healthy replicas, or some healthy replicas fail, Longhorn won't start rebuilding new replicas as long as the total healthy replica count doesn't dip below the specified replica count.
+
+### Volume size
+
+Longhorn is a thin-provisioned storage system. That means a Longhorn volume will only take the space it needs at the moment. For example, if you allocated a 20GB volume but only use 1GB of it, the actual data size on your disk would be 1GB. You can see the actual data size in the volume details in the UI.
+
+The Longhorn volume itself cannot shrink in size if you've removed content from your volume. For example, if you create a volume of 20GB, use 10GB, then remove 9GB of content, the actual size on the disk would still be 10GB instead of 1GB. That's because Longhorn currently operates on the block level, not the filesystem level, so it doesn't know whether the user has removed content or not. That information is mostly kept at the filesystem level.
+
+#### Space taken by the snapshots
+
+Some users may find that a Longhorn volume's actual size is bigger than its nominal size. That's because in Longhorn, snapshots store the history data of the volume, which also takes some space, depending on how much data was in each snapshot. The snapshot feature enables the user to revert back to a certain point in history and to create a backup to secondary storage. The snapshot feature is also a part of Longhorn's rebuilding process. Every time Longhorn detects a replica is down, it will automatically take a (system) snapshot and start rebuilding on another node.
+
+To reduce the space taken by snapshots, the user can schedule a recurring snapshot or backup with a retain number, which will
+automatically create a new snapshot/backup on schedule, then clean up any excessive snapshots/backups.
+
+The user can also delete unwanted snapshots manually through the UI. Any system-generated snapshots will automatically be marked for deletion when the deletion of any snapshot is triggered.
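The retain-number mechanism described above can also be wired in at provisioning time; the (commented-out) `recurringJobs` parameter in `examples/storageclass.yaml`, updated elsewhere in this patch, shows the syntax:

```
parameters:
  recurringJobs: '[{"name":"snap", "task":"snapshot", "cron":"*/1 * * * *", "retain":1},
                   {"name":"backup", "task":"backup", "cron":"*/2 * * * *", "retain":1,
                    "labels": {"interval":"2m"}}]'
```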
+
+#### The latest snapshot
+
+In Longhorn, the latest snapshot cannot be deleted. That's because whenever a snapshot is deleted, Longhorn will coalesce its content with the next snapshot, making sure the next and later snapshots still have the correct content. But Longhorn cannot do that for the latest snapshot, since there is no next snapshot to it. The next "snapshot" of the latest snapshot is the live volume (`volume-head`), which is being read/written by the user at the moment, so the coalescing process cannot happen. Instead, the latest snapshot will be marked as `removed`, and it will be cleaned up later when possible.
+
+If users want to clean up the latest snapshot, they can create a new snapshot, then remove the previous "latest" snapshot.
diff --git a/examples/csi/example_pv.yaml b/examples/csi/example_pv.yaml
index 6943462..b059eba 100644
--- a/examples/csi/example_pv.yaml
+++ b/examples/csi/example_pv.yaml
@@ -14,7 +14,7 @@ spec:
     fsType: ext4
     volumeAttributes:
       numberOfReplicas: '3'
-      staleReplicaTimeout: '30'
+      staleReplicaTimeout: '2880'
     volumeHandle: existing-longhorn-volume
 ---
 apiVersion: v1
diff --git a/examples/deployment.yaml b/examples/deployment.yaml
index e00563d..bb20480 100644
--- a/examples/deployment.yaml
+++ b/examples/deployment.yaml
@@ -23,13 +23,16 @@ spec:
       requests:
         storage: 2Gi
 ---
-apiVersion: apps/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: mysql
   labels:
     app: mysql
 spec:
+  selector:
+    matchLabels:
+      app: mysql # has to match .spec.template.metadata.labels
   strategy:
     type: Recreate
   template:
diff --git a/examples/example_storageclass.yaml b/examples/example_storageclass.yaml
index 0f4e9bd..0f60338 100644
--- a/examples/example_storageclass.yaml
+++ b/examples/example_storageclass.yaml
@@ -5,7 +5,7 @@ metadata:
 provisioner: rancher.io/longhorn
 parameters:
   numberOfReplicas: '3'
-  staleReplicaTimeout: '30'
+  staleReplicaTimeout: '2880'
 reclaimPolicy: Delete
 ---
 apiVersion: v1
diff --git a/examples/flexvolume/example_baseimage.yaml b/examples/flexvolume/example_baseimage.yaml
deleted file mode 100644
index 640d561..0000000
--- a/examples/flexvolume/example_baseimage.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  labels:
-    app: flexvol-baseimage
-  name: flexvol-baseimage
-  namespace: default
-spec:
-  containers:
-  - name: flexvol-baseimage
-    image: nginx:stable-alpine
-    imagePullPolicy: IfNotPresent
-    volumeMounts:
-    - name: flexvol
-      mountPath: /usr/share/nginx/html
-    ports:
-    - containerPort: 80
-  volumes:
-  - name: flexvol
-    flexVolume:
-      driver: rancher.io/longhorn
-      options:
-        size: 32Mi
-        numberOfReplicas: "3"
-        staleReplicaTimeout: "20"
-        fromBackup: ""
-        baseImage: rancher/longhorn-test:baseimage-ext4
----
-apiVersion: v1
-kind: Service
-metadata:
-  labels:
-    app: flexvol-baseimage
-  name: flexvol-baseimage
-  namespace: default
-spec:
-  ports:
-  - name: web
-    port: 80
-    targetPort: 80
-  selector:
-    app: flexvol-baseimage
-  type: LoadBalancer
diff --git a/examples/provisioner_with_baseimage.yaml b/examples/provisioner_with_baseimage.yaml
deleted file mode 100644
index e3ab42b..0000000
--- a/examples/provisioner_with_baseimage.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
-  labels:
-    app: provisioner-baseimage
-  name: baseimage-storageclass
-provisioner: rancher.io/longhorn
-parameters:
-  numberOfReplicas: '3'
-  staleReplicaTimeout: '30'
-  fromBackup: ''
-  baseImage: rancher/longhorn-test:baseimage-ext4
----
-apiVersion: v1
-kind: Service
-metadata:
-
labels: - app: provisioner-baseimage - name: provisioner-baseimage-service -spec: - ports: - - port: 80 - name: web - selector: - app: provisioner-baseimage - type: LoadBalancer ---- -apiVersion: apps/v1beta2 -kind: StatefulSet -metadata: - labels: - app: provisioner-baseimage - name: provisioner-baseimage-statefulset -spec: - selector: - matchLabels: - app: provisioner-baseimage - serviceName: provisioner-baseimage - replicas: 2 - template: - metadata: - labels: - app: provisioner-baseimage - spec: - terminationGracePeriodSeconds: 10 - containers: - - name: nginx - image: nginx:stable-alpine - imagePullPolicy: IfNotPresent - volumeMounts: - - name: baseimage-vol - mountPath: /usr/share/nginx/html - ports: - - containerPort: 80 - volumeClaimTemplates: - - metadata: - name: baseimage-vol - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: baseimage-storageclass - resources: - requests: - storage: 32Mi diff --git a/examples/restore_to_file_with_base_image.yaml.template b/examples/restore_to_file_with_base_image.yaml.template deleted file mode 100644 index 9a7e154..0000000 --- a/examples/restore_to_file_with_base_image.yaml.template +++ /dev/null @@ -1,94 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: restore-to-file - namespace: longhorn-system -spec: - nodeName: - initContainers: - - name: prime-base-image - # set base image here - command: - - /bin/sh - - -c - - echo primed-base-image - # set base image here - image: - imagePullPolicy: Always - containers: - - name: base-image - command: - - /bin/sh - - -c - - mkdir -p /share/base_image && - mount --bind /base_image/ /share/base_image && - echo base image mounted at /share/base_image && - trap 'umount /share/base_image && echo unmounted' TERM && - while true; do $(ls /talk/done 2>&1); if [ $? -eq 0 ]; then break; - fi; echo waiting; sleep 1; done; - umount /share/base_image && echo unmounted - # set base image here - image: - imagePullPolicy: IfNotPresent - securityContext: - privileged: true - volumeMounts: - - name: share - mountPath: /share - mountPropagation: Bidirectional - - name: talk - mountPath: /talk - - name: restore-to-file - command: - # set restore-to-file arguments here - - /bin/sh - - -c - - while true; do list=$(ls /share/base_image/* 2>&1); if [ $? -eq 0 ]; then break; - fi; echo waiting; sleep 1; done; echo Directory found $list; - longhorn backup restore-to-file - '' - --backing-file $list - --output-file '/tmp/restore/' - --output-format - && touch /talk/done && chmod 777 /talk/done && echo created /share/done - # the version of longhorn engine should be v0.4.1 or higher - image: rancher/longhorn-engine:v0.4.1 - imagePullPolicy: IfNotPresent - securityContext: - privileged: true - volumeMounts: - - name: share - mountPath: /share - mountPropagation: HostToContainer - readOnly: true - - name: talk - mountPath: /talk - - name: disk-directory - mountPath: /tmp/restore # the argument should be in this directory - env: - # set Backup Target Credential Secret here. 
- - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: - key: AWS_ACCESS_KEY_ID - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: - key: AWS_SECRET_ACCESS_KEY - - name: AWS_ENDPOINTS - valueFrom: - secretKeyRef: - name: - key: AWS_ENDPOINTS - volumes: - - name: share - emptyDir: {} - - name: talk - emptyDir: {} - # the output file can be found on this host path - - name: disk-directory - hostPath: - path: /tmp/restore - restartPolicy: Never diff --git a/examples/statefulset.yaml b/examples/statefulset.yaml index ffa38f3..5fdfcec 100644 --- a/examples/statefulset.yaml +++ b/examples/statefulset.yaml @@ -12,7 +12,7 @@ spec: app: nginx type: NodePort --- -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: StatefulSet metadata: name: web diff --git a/examples/storageclass.yaml b/examples/storageclass.yaml index 3d5e5bb..8e74227 100644 --- a/examples/storageclass.yaml +++ b/examples/storageclass.yaml @@ -5,7 +5,10 @@ metadata: provisioner: rancher.io/longhorn parameters: numberOfReplicas: "3" - staleReplicaTimeout: "30" + staleReplicaTimeout: "2880" fromBackup: "" +# diskSelector: "ssd,fast" +# nodeSelector: "storage,fast" # recurringJobs: '[{"name":"snap", "task":"snapshot", "cron":"*/1 * * * *", "retain":1}, -# {"name":"backup", "task":"backup", "cron":"*/2 * * * *", "retain":1}]' \ No newline at end of file +# {"name":"backup", "task":"backup", "cron":"*/2 * * * *", "retain":1, +# "labels": {"interval":"2m"}}]' diff --git a/scripts/lhexec b/scripts/lhexec new file mode 100755 index 0000000..538bdb9 --- /dev/null +++ b/scripts/lhexec @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +NS="longhorn-system" + +print_usage() { + echo "Usage: ${0} [|-h|--help] volume_name longhorn_commands_arguments" + echo "" + echo "Examples:" + echo " ${0} test-vol snapshot ls" + echo " ${0} test-vol info" + echo "" + echo "Note: Must have Longhorn installed in "longhorn-system" namespace and have access to "kubectl" and the namespace" + echo "" + exit 0 +} + +check_volume_exist(){ + VOLUME_NAME=${1} + kubectl -n ${NS} get lhv ${VOLUME_NAME} > /dev/null 2>&1 + if [[ ${?} -ne 0 ]]; then + echo "Err: Volume ${VOLUME_NAME} not found" + exit 1 + fi +} + +check_engine_state(){ + VOLUME_NAME=${1} + LHE_STATE_FILTER="{.items[?(@.spec.volumeName==\"${VOLUME_NAME}\")].status.currentState}" + LHE_STATE=`kubectl -n ${NS} get lhe --output=jsonpath="${LHE_STATE_FILTER}"` + + if [[ ${LHE_STATE} != "running" ]]; then + echo "Err: Longhorn engine for volume ${VOLUME_NAME} is not running" + exit 1 + fi + +} + +exec_command() { + VOLUME_NAME=${1} + COMMAND_ARGS="${@:2}" + + INSTANCE_MANAGER_NAME_FILTER="{.items[?(@.spec.volumeName==\"${VOLUME_NAME}\")].status.instanceManagerName}" + INSTANCE_MANAGER_NAME=`kubectl -n ${NS} get lhe --output=jsonpath="${INSTANCE_MANAGER_NAME_FILTER}"` + + ENGINE_PORT_FILTER="{.items[?(@.spec.volumeName==\"${VOLUME_NAME}\")].status.port}" + ENGINE_PORT=`kubectl -n ${NS} get lhe --output=jsonpath="${ENGINE_PORT_FILTER}"` + + kubectl -n ${NS} exec -it ${INSTANCE_MANAGER_NAME} -- bash -c "longhorn --url localhost:${ENGINE_PORT} ${COMMAND_ARGS}" + +} + + +ARG=$1 +case $ARG in + "" | "-h" | "--help") + print_usage + ;; + *) + VOLUME_NAME=${ARG} + shift + COMMAND_ARGS="${@}" + if [[ ${COMMAND_ARGS} == "" ]]; then + COMMAND_ARGS="help" + fi + check_volume_exist ${VOLUME_NAME} + check_engine_state ${VOLUME_NAME} + exec_command ${VOLUME_NAME} ${COMMAND_ARGS} + ;; +esac diff --git a/uninstall/uninstall.yaml b/uninstall/uninstall.yaml index 8812090..19d060b 100644 --- 
a/uninstall/uninstall.yaml +++ b/uninstall/uninstall.yaml @@ -2,8 +2,9 @@ apiVersion: v1 kind: ServiceAccount metadata: name: longhorn-uninstall-service-account + namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: longhorn-uninstall-role @@ -24,10 +25,10 @@ rules: resources: ["jobs", "cronjobs"] verbs: ["*"] - apiGroups: ["longhorn.rancher.io"] - resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes"] + resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers"] verbs: ["*"] --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: longhorn-uninstall-bind @@ -44,6 +45,7 @@ apiVersion: batch/v1 kind: Job metadata: name: longhorn-uninstall + namespace: default spec: activeDeadlineSeconds: 900 backoffLimit: 1 @@ -53,7 +55,7 @@ spec: spec: containers: - name: longhorn-uninstall - image: rancher/longhorn-manager:v0.5.0 + image: longhornio/longhorn-manager:v0.6.0 imagePullPolicy: Always command: - longhorn-manager
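For completeness, the usual way to run this uninstaller (the file path and job name come from this patch; `-w` is kubectl's standard watch flag):

```
kubectl create -f ./uninstall/uninstall.yaml
kubectl get job/longhorn-uninstall -n default -w   # wait for the job to complete
kubectl delete -f ./uninstall/uninstall.yaml       # clean up the job and RBAC objects
```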