Sync with manager:

commit 2ed3c8c76053c8df3fd418f6076c80c1ca49bf5b
Author: Sheng Yang <sheng.yang@rancher.com>
Date:   Mon Sep 16 18:47:54 2019 -0700

    Longhorn v0.6.0-rc1 release

Signed-off-by: Sheng Yang <sheng.yang@rancher.com>
Sheng Yang 2019-09-16 19:28:31 -07:00
parent fcdc3114c5
commit c8d39afb58
6 changed files with 135 additions and 11 deletions

View File

@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Secret
 metadata:
   name: minio-secret
+  namespace: default
 type: Opaque
 data:
   AWS_ACCESS_KEY_ID: bG9uZ2hvcm4tdGVzdC1hY2Nlc3Mta2V5 # longhorn-test-access-key
@@ -24,6 +25,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: longhorn-test-minio
+  namespace: default
   labels:
     app: longhorn-test-minio
 spec:
@@ -55,6 +57,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: minio-service
+  namespace: default
 spec:
   selector:
     app: longhorn-test-minio
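The Secret values in this file are base64-encoded copies of the plaintext credentials noted in the trailing comments; the encoding can be reproduced (and reversed) with:

  echo -n longhorn-test-access-key | base64          # bG9uZ2hvcm4tdGVzdC1hY2Nlc3Mta2V5
  echo bG9uZ2hvcm4tdGVzdC1hY2Nlc3Mta2V5 | base64 -d  # longhorn-test-access-key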

View File

@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: longhorn-test-nfs
+  namespace: default
   labels:
     app: longhorn-test-nfs
 spec:
@@ -37,6 +38,7 @@ kind: Service
 apiVersion: v1
 metadata:
   name: longhorn-test-nfs-svc
+  namespace: default
 spec:
   selector:
     app: longhorn-test-nfs
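These test backupstore resources are now pinned to the default namespace explicitly; a quick post-apply sanity check, using only names and labels from the diff above:

  kubectl -n default get pod -l app=longhorn-test-nfs
  kubectl -n default get svc longhorn-test-nfs-svc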

View File

@@ -39,7 +39,7 @@ rules:
   resources: ["csinodeinfos"]
   verbs: ["get", "list", "watch"]
 - apiGroups: ["longhorn.rancher.io"]
-  resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes"]
+  resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers"]
   verbs: ["*"]
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -163,6 +163,45 @@ spec:
   scope: Namespaced
   version: v1alpha1
 ---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  labels:
+    longhorn-manager: InstanceManager
+  name: instancemanagers.longhorn.rancher.io
+spec:
+  group: longhorn.rancher.io
+  names:
+    kind: InstanceManager
+    listKind: InstanceManagerList
+    plural: instancemanagers
+    shortNames:
+    - lhim
+    singular: instancemanager
+  scope: Namespaced
+  version: v1alpha1
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: longhorn-default-setting
+  namespace: longhorn-system
+data:
+  default-setting.yaml: |-
+    backup-target:
+    backup-target-credential-secret:
+    create-default-disk-labeled-nodes:
+    default-data-path:
+    replica-soft-anti-affinity:
+    storage-over-provisioning-percentage:
+    storage-minimal-available-percentage:
+    upgrade-checker:
+    default-replica-count:
+    guaranteed-engine-cpu:
+    default-longhorn-static-storage-class:
+    backupstore-poll-interval:
+    taint-toleration:
+---
 apiVersion: apps/v1beta2
 kind: DaemonSet
 metadata:
@@ -181,7 +220,7 @@ spec:
     spec:
       containers:
       - name: longhorn-manager
-        image: rancher/longhorn-manager:v0.5.0
+        image: longhornio/longhorn-manager:v0.6.0-rc1
         imagePullPolicy: Always
         securityContext:
           privileged: true
@@ -190,9 +229,9 @@ spec:
         - -d
         - daemon
         - --engine-image
-        - rancher/longhorn-engine:v0.5.0
+        - longhornio/longhorn-engine:v0.6.0-rc1
         - --manager-image
-        - rancher/longhorn-manager:v0.5.0
+        - longhornio/longhorn-manager:v0.6.0-rc1
         - --service-account
         - longhorn-service-account
         ports:
@@ -207,6 +246,8 @@ spec:
         - name: longhorn
           mountPath: /var/lib/rancher/longhorn/
           mountPropagation: Bidirectional
+        - name: longhorn-default-setting
+          mountPath: /var/lib/longhorn/setting/
         env:
         - name: POD_NAMESPACE
           valueFrom:
@@ -220,6 +261,9 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: spec.nodeName
+        # Should be: mount path of the volume longhorn-default-setting + the key of the configmap data in 04-default-setting.yaml
+        - name: DEFAULT_SETTING_PATH
+          value: /var/lib/longhorn/setting/default-setting.yaml
       volumes:
       - name: dev
         hostPath:
@@ -233,6 +277,9 @@ spec:
       - name: longhorn
         hostPath:
           path: /var/lib/rancher/longhorn/
+      - name: longhorn-default-setting
+        configMap:
+          name: longhorn-default-setting
       serviceAccountName: longhorn-service-account
 ---
 kind: Service
@@ -269,12 +316,13 @@ spec:
     spec:
       containers:
       - name: longhorn-ui
-        image: rancher/longhorn-ui:v0.5.0
+        image: longhornio/longhorn-ui:v0.6.0-rc1
         ports:
         - containerPort: 8000
         env:
         - name: LONGHORN_MANAGER_IP
           value: "http://longhorn-backend:9500"
+      serviceAccountName: longhorn-service-account
 ---
 kind: Service
 apiVersion: v1
@@ -308,18 +356,18 @@ spec:
     spec:
       initContainers:
      - name: wait-longhorn-manager
-        image: rancher/longhorn-manager:v0.5.0
+        image: longhornio/longhorn-manager:v0.6.0-rc1
         command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
       containers:
       - name: longhorn-driver-deployer
-        image: rancher/longhorn-manager:v0.5.0
+        image: longhornio/longhorn-manager:v0.6.0-rc1
         imagePullPolicy: Always
         command:
         - longhorn-manager
         - -d
         - deploy-driver
         - --manager-image
-        - rancher/longhorn-manager:v0.5.0
+        - longhornio/longhorn-manager:v0.6.0-rc1
         - --manager-url
         - http://longhorn-backend:9500/v1
         # manually choose "flexvolume" or "csi"
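A sketch of verifying the new objects after applying this manifest; <manager-pod> is a placeholder for any longhorn-manager pod name, and the paths come from the volume mount and DEFAULT_SETTING_PATH shown above:

  kubectl -n longhorn-system get instancemanagers.longhorn.rancher.io   # full CRD resource name
  kubectl -n longhorn-system get lhim                                   # shortName declared in the CRD
  kubectl -n longhorn-system exec <manager-pod> -- cat /var/lib/longhorn/setting/default-setting.yaml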

View File

@@ -23,7 +23,7 @@ spec:
     requests:
       storage: 2Gi
 ---
-apiVersion: apps/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: mysql
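The example moves from apps/v1beta1 to apps/v1; apps/v1beta1 was removed in Kubernetes v1.16, so older manifests fail there. Which apps API versions a given cluster still serves can be checked with:

  kubectl api-versions | grep '^apps/'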

scripts/lhexec Executable file
View File

@@ -0,0 +1,69 @@
+#!/usr/bin/env bash
+
+NS="longhorn-system"
+
+print_usage() {
+    echo "Usage: ${0} [|-h|--help] volume_name longhorn_commands_arguments"
+    echo ""
+    echo "Examples:"
+    echo "  ${0} test-vol snapshot ls"
+    echo "  ${0} test-vol info"
+    echo ""
+    echo "Note: Must have Longhorn installed in \"longhorn-system\" namespace and have access to \"kubectl\" and the namespace"
+    echo ""
+    exit 0
+}
+
+check_volume_exist(){
+    VOLUME_NAME=${1}
+    kubectl -n ${NS} get lhv ${VOLUME_NAME} > /dev/null 2>&1
+    if [[ ${?} -ne 0 ]]; then
+        echo "Err: Volume ${VOLUME_NAME} not found"
+        exit 1
+    fi
+}
+
+check_engine_state(){
+    VOLUME_NAME=${1}
+    LHE_STATE_FILTER="{.items[?(@.spec.volumeName==\"${VOLUME_NAME}\")].status.currentState}"
+    LHE_STATE=`kubectl -n ${NS} get lhe --output=jsonpath="${LHE_STATE_FILTER}"`
+    if [[ ${LHE_STATE} != "running" ]]; then
+        echo "Err: Longhorn engine for volume ${VOLUME_NAME} is not running"
+        exit 1
+    fi
+}
+
+exec_command() {
+    VOLUME_NAME=${1}
+    COMMAND_ARGS="${@:2}"
+    INSTANCE_MANAGER_NAME_FILTER="{.items[?(@.spec.volumeName==\"${VOLUME_NAME}\")].status.instanceManagerName}"
+    INSTANCE_MANAGER_NAME=`kubectl -n ${NS} get lhe --output=jsonpath="${INSTANCE_MANAGER_NAME_FILTER}"`
+    ENGINE_PORT_FILTER="{.items[?(@.spec.volumeName==\"${VOLUME_NAME}\")].status.port}"
+    ENGINE_PORT=`kubectl -n ${NS} get lhe --output=jsonpath="${ENGINE_PORT_FILTER}"`
+    kubectl -n ${NS} exec -it ${INSTANCE_MANAGER_NAME} -- bash -c "longhorn --url localhost:${ENGINE_PORT} ${COMMAND_ARGS}"
+}
+
+ARG=$1
+case $ARG in
+    "" | "-h" | "--help")
+        print_usage
+        ;;
+    *)
+        VOLUME_NAME=${ARG}
+        shift
+        COMMAND_ARGS="${@}"
+        if [[ ${COMMAND_ARGS} == "" ]]; then
+            COMMAND_ARGS="help"
+        fi
+        check_volume_exist ${VOLUME_NAME}
+        check_engine_state ${VOLUME_NAME}
+        exec_command ${VOLUME_NAME} ${COMMAND_ARGS}
+        ;;
+esac
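Per the script's own usage text, an invocation looks like the first line below; the second line runs the same jsonpath filter the script builds, by hand, against the engine CRs (test-vol is the example volume name from the usage message):

  ./scripts/lhexec test-vol snapshot ls
  kubectl -n longhorn-system get lhe -o jsonpath='{.items[?(@.spec.volumeName=="test-vol")].status.currentState}'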

View File

@@ -2,6 +2,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: longhorn-uninstall-service-account
+  namespace: default
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
@@ -24,7 +25,7 @@ rules:
   resources: ["jobs", "cronjobs"]
   verbs: ["*"]
 - apiGroups: ["longhorn.rancher.io"]
-  resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes"]
+  resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers"]
   verbs: ["*"]
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -44,6 +45,7 @@ apiVersion: batch/v1
 kind: Job
 metadata:
   name: longhorn-uninstall
+  namespace: default
 spec:
   activeDeadlineSeconds: 900
   backoffLimit: 1
@@ -53,7 +55,7 @@ spec:
     spec:
       containers:
       - name: longhorn-uninstall
-        image: rancher/longhorn-manager:v0.5.0
+        image: longhornio/longhorn-manager:v0.6.0-rc1
         imagePullPolicy: Always
         command:
         - longhorn-manager
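A minimal sketch of running the uninstaller, assuming this manifest is saved as uninstall.yaml (the file name is not shown in this view):

  kubectl create -f uninstall.yaml
  kubectl -n default get job longhorn-uninstall -w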