longhorn/examples/rwx/02-longhorn-nfs-provisioner.yaml
Joshua Moody 23f9cc18b3 Hardcode nfs service cluster ip
So that on a delete & recreate of the service the previous PVs still
point to this nfs-provisioner. We cannot use the hostname since the actual
host doesn't know how to resolve service addresses inside of the cluster.

To support this would require the installation of kube-dns and
modification to the /etc/resolv.conf file on each host.

Signed-off-by: Joshua Moody <joshua.moody@rancher.com>
2020-07-01 12:16:34 -07:00

175 lines
4.4 KiB
YAML

---
# ServiceAccount the provisioner pod runs as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: longhorn-nfs-provisioner
---
# Service exposing every NFS-related port (TCP and UDP) of the provisioner.
kind: Service
apiVersion: v1
metadata:
  name: longhorn-nfs-provisioner
  labels:
    app: longhorn-nfs-provisioner
spec:
  # Hardcode a cluster IP for the service so that on delete & recreate of the
  # service the previous PVs still point to this nfs-provisioner.
  # Pick a new IP for each new nfs-provisioner.
  clusterIP: 10.43.111.111
  ports:
    - name: nfs
      port: 2049
    - name: nfs-udp
      port: 2049
      protocol: UDP
    - name: nlockmgr
      port: 32803
    - name: nlockmgr-udp
      port: 32803
      protocol: UDP
    - name: mountd
      port: 20048
    - name: mountd-udp
      port: 20048
      protocol: UDP
    - name: rquotad
      port: 875
    - name: rquotad-udp
      port: 875
      protocol: UDP
    - name: rpcbind
      port: 111
    - name: rpcbind-udp
      port: 111
      protocol: UDP
    - name: statd
      port: 662
    - name: statd-udp
      port: 662
      protocol: UDP
  selector:
    app: longhorn-nfs-provisioner
---
# Single-replica NFS provisioner deployment. Recreate strategy is required:
# only one instance may serve the export volume at a time.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: longhorn-nfs-provisioner
spec:
  selector:
    matchLabels:
      app: longhorn-nfs-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: longhorn-nfs-provisioner
    spec:
      # serviceAccountName is the supported field; the legacy `serviceAccount`
      # alias is deprecated in the Kubernetes PodSpec API.
      serviceAccountName: longhorn-nfs-provisioner
      containers:
        - name: longhorn-nfs-provisioner
          # NOTE(review): `latest` is not reproducible — consider pinning a
          # released tag; confirm against upstream nfs-provisioner releases.
          image: quay.io/kubernetes_incubator/nfs-provisioner:latest
          ports:
            - name: nfs
              containerPort: 2049
            - name: nfs-udp
              containerPort: 2049
              protocol: UDP
            - name: nlockmgr
              containerPort: 32803
            - name: nlockmgr-udp
              containerPort: 32803
              protocol: UDP
            - name: mountd
              containerPort: 20048
            - name: mountd-udp
              containerPort: 20048
              protocol: UDP
            - name: rquotad
              containerPort: 875
            - name: rquotad-udp
              containerPort: 875
              protocol: UDP
            - name: rpcbind
              containerPort: 111
            - name: rpcbind-udp
              containerPort: 111
              protocol: UDP
            - name: statd
              containerPort: 662
            - name: statd-udp
              containerPort: 662
              protocol: UDP
          securityContext:
            capabilities:
              add:
                # DAC_READ_SEARCH / SYS_RESOURCE are needed by the NFS server
                # to read exported files and raise resource limits.
                - DAC_READ_SEARCH
                - SYS_RESOURCE
          args:
            - "-provisioner=nfs.longhorn.io"
            - "-device-based-fsids=false"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: SERVICE_NAME
              value: longhorn-nfs-provisioner
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          imagePullPolicy: "IfNotPresent"
          # Probe the export directory; if it cannot be listed the NFS share
          # is not usable.
          readinessProbe:
            exec:
              command:
                - ls
                - /export
            initialDelaySeconds: 5
            periodSeconds: 5
          livenessProbe:
            exec:
              command:
                - ls
                - /export
            initialDelaySeconds: 5
            periodSeconds: 5
          volumeMounts:
            - name: export-volume
              mountPath: /export
      volumes:
        - name: export-volume
          persistentVolumeClaim:
            claimName: longhorn-nfs-provisioner
      # we want really quick failover
      terminationGracePeriodSeconds: 30
      tolerations:
        - effect: NoExecute
          key: node.kubernetes.io/not-ready
          operator: Exists
          tolerationSeconds: 60
        - effect: NoExecute
          key: node.kubernetes.io/unreachable
          operator: Exists
          tolerationSeconds: 60
---
# Longhorn-backed PVC that holds the NFS export data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: longhorn-nfs-provisioner # longhorn backing pvc
spec:
  storageClassName: longhorn
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: "20G" # make this 10% bigger than the workload pvc
---
# StorageClass that workloads use to request RWX volumes served by this
# provisioner.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: longhorn-nfs # workload storage class
provisioner: nfs.longhorn.io
mountOptions:
  - "vers=4.1"
  - "noresvport"