Managed to fix this with the following configuration. Key points:

- I had forgotten to add the node in nodeAffinity
- it works without setting volumeBindingMode
apiVersion: v1
kind: PersistentVolume
metadata:
  name: standard
spec:
  capacity:
    storage: 2Gi
  # volumeMode field requires BlockVolume Alpha feature gate to be enabled.
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /temp
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - INSERT_NODE_NAME_HERE
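If you're unsure what to put in place of INSERT_NODE_NAME_HERE, the node names and their kubernetes.io/hostname labels can be listed with plain kubectl (nothing assumed beyond a working kubeconfig):

kubectl get nodes --show-labels

Use the value of the kubernetes.io/hostname label for the node that actually holds /temp.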
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
  name: local-storage
provisioner: kubernetes.io/no-provisioner
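A minimal way to apply both manifests, assuming they are saved as local-storage-class.yaml and local-pv.yaml (the file names are mine, pick whatever you like):

kubectl apply -f local-storage-class.yaml
kubectl apply -f local-pv.yaml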
config.yaml:

proxy:
  secretToken: "token"
singleuser:
  storage:
    dynamic:
      storageClass: local-storage
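For completeness, a sketch of how this config.yaml is typically passed to the JupyterHub Helm chart; the release name jhub and chart version 0.8.0 are inferred from the pod output below, so adjust them to your setup:

helm upgrade --install jhub jupyterhub/jupyterhub \
  --namespace jhub \
  --version=0.8.0 \
  --values config.yaml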
Use kubectl describe to make sure your storage / PV looks like this:
root@asdf:~# kubectl --namespace=kube-system describe pv
Name:              standard
Labels:            <none>
Annotations:       kubectl.kubernetes.io/last-applied-configuration:
                     {"apiVersion":"v1","kind":"PersistentVolume","metadata":{"annotations":{},"name":"standard"},"spec":{"accessModes":["ReadWriteOnce"],"capa...
                   pv.kubernetes.io/bound-by-controller: yes
Finalizers:        [kubernetes.io/pv-protection]
StorageClass:      local-storage
Status:            Bound
Claim:             jhub/hub-db-dir
Reclaim Policy:    Retain
Access Modes:      RWO
VolumeMode:        Filesystem
Capacity:          2Gi
Node Affinity:
  Required Terms:
    Term 0:        kubernetes.io/hostname in [asdf]
Message:
Source:
    Type:  LocalVolume (a persistent volume backed by local storage on a node)
    Path:  /temp
Events:    <none>
root@asdf:~# kubectl --namespace=kube-system describe storageclass
Name:                  local-storage
IsDefaultClass:        Yes
Annotations:           storageclass.kubernetes.io/is-default-class=true
Provisioner:           kubernetes.io/no-provisioner
Parameters:            <none>
AllowVolumeExpansion:  <unset>
MountOptions:          <none>
ReclaimPolicy:         Delete
VolumeBindingMode:     Immediate
Events:                <none>
Now the hub pod looks something like this:
root@asdf:~# kubectl --namespace=jhub describe pod hub
Name:               hub-5d4fcd8fd9-p6crs
Namespace:          jhub
Priority:           0
PriorityClassName:  <none>
Node:               asdf/192.168.0.87
Start Time:         Sat, 23 Feb 2019 14:29:51 +0800
Labels:             app=jupyterhub
                    component=hub
                    hub.jupyter.org/network-access-proxy-api=true
                    hub.jupyter.org/network-access-proxy-http=true
                    hub.jupyter.org/network-access-singleuser=true
                    pod-template-hash=5d4fcd8fd9
                    release=jhub
Annotations:        checksum/config-map: --omitted--
                    checksum/secret: --omitted--
Status:             Running
IP:                 10.244.0.55
Controlled By:      ReplicaSet/hub-5d4fcd8fd9
Containers:
  hub:
    Container ID:  docker://d2d4dec8cc16fe21589e67f1c0c6c6114b59b01c67a9f06391830a1ea711879d
    Image:         jupyterhub/k8s-hub:0.8.0
    Image ID:      docker-pullable://jupyterhub/k8s-hub@sha256:e40cfda4f305af1a2fdf759cd0dcda834944bef0095c8b5ecb7734d19f58b512
    Port:          8081/TCP
    Host Port:     0/TCP
    Command:
      jupyterhub
      --config
      /srv/jupyterhub_config.py
      --upgrade-db
    State:          Running
      Started:      Sat, 23 Feb 2019 14:30:28 +0800
    Ready:          True
    Restart Count:  0
    Requests:
      cpu:     200m
      memory:  512Mi
    Environment:
      PYTHONUNBUFFERED:        1
      HELM_RELEASE_NAME:       jhub
      POD_NAMESPACE:           jhub (v1:metadata.namespace)
      CONFIGPROXY_AUTH_TOKEN:  <set to the key 'proxy.token' in secret 'hub-secret'>  Optional: false
    Mounts:
      /etc/jupyterhub/config/ from config (rw)
      /etc/jupyterhub/secret/ from secret (rw)
      /srv/jupyterhub from hub-db-dir (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from hub-token-bxzl7 (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      hub-config
    Optional:  false
  secret:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  hub-secret
    Optional:    false
  hub-db-dir:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  hub-db-dir
    ReadOnly:   false
  hub-token-bxzl7:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  hub-token-bxzl7
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:          <none>
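As an optional final sanity check, the claim bound to this PV can be inspected too; hub-db-dir is the ClaimName shown above:

kubectl --namespace=jhub get pvc hub-db-dir

It should report the PVC as Bound to the standard PV.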