Проблема с кластером хранилища rook - PullRequest
0 голосов
/ 16 мая 2019

У меня есть двухузловой кластер kubernetes, в котором был развернут кластер хранилища rook согласно инструкциям https://github.com/rook/rook/blob/master/Documentation/ceph-quickstart.md

Мне удалось развернуть кластер.

master $ kubectl create -f common.yaml
namespace/rook-ceph created
customresourcedefinition.apiextensions.k8s.io/cephclusters.ceph.rook.io created
customresourcedefinition.apiextensions.k8s.io/cephfilesystems.ceph.rook.io created
customresourcedefinition.apiextensions.k8s.io/cephnfses.ceph.rook.io created
customresourcedefinition.apiextensions.k8s.io/cephobjectstores.ceph.rook.io created
customresourcedefinition.apiextensions.k8s.io/cephobjectstoreusers.ceph.rook.io created
customresourcedefinition.apiextensions.k8s.io/cephblockpools.ceph.rook.io created
customresourcedefinition.apiextensions.k8s.io/volumes.rook.io created
clusterrole.rbac.authorization.k8s.io/rook-ceph-cluster-mgmt created
clusterrole.rbac.authorization.k8s.io/rook-ceph-cluster-mgmt-rules created
role.rbac.authorization.k8s.io/rook-ceph-system created
clusterrole.rbac.authorization.k8s.io/rook-ceph-global created
clusterrole.rbac.authorization.k8s.io/rook-ceph-global-rules created
clusterrole.rbac.authorization.k8s.io/rook-ceph-mgr-cluster created
clusterrole.rbac.authorization.k8s.io/rook-ceph-mgr-cluster-rules created
serviceaccount/rook-ceph-system created
rolebinding.rbac.authorization.k8s.io/rook-ceph-system created
clusterrolebinding.rbac.authorization.k8s.io/rook-ceph-global created
serviceaccount/rook-ceph-osd created
serviceaccount/rook-ceph-mgr created
role.rbac.authorization.k8s.io/rook-ceph-osd created
clusterrole.rbac.authorization.k8s.io/rook-ceph-mgr-system created
clusterrole.rbac.authorization.k8s.io/rook-ceph-mgr-system-rules created
role.rbac.authorization.k8s.io/rook-ceph-mgr created
rolebinding.rbac.authorization.k8s.io/rook-ceph-cluster-mgmt created
rolebinding.rbac.authorization.k8s.io/rook-ceph-osd created
rolebinding.rbac.authorization.k8s.io/rook-ceph-mgr created
rolebinding.rbac.authorization.k8s.io/rook-ceph-mgr-system created
clusterrolebinding.rbac.authorization.k8s.io/rook-ceph-mgr-cluster created
.
master $ kubectl create -f operator.yaml
deployment.apps/rook-ceph-operator created

master $ kubectl create -f cluster.yaml
cephcluster.ceph.rook.io/rook-ceph created

master $ kubectl get po -n rook-ceph
NAME                                  READY     STATUS      RESTARTS   AGE
rook-ceph-agent-5kl8r                 1/1       Running     0          7m
rook-ceph-agent-jhgjt                 1/1       Running     0          7m
rook-ceph-mgr-a-75c7db76d5-5shwf      1/1       Running     0          2m
rook-ceph-mon-a-5d5d596ff-wqvg5       1/1       Running     0          5m
rook-ceph-mon-d-6fbdd6b4b4-4nm9b      1/1       Running     0          3m
rook-ceph-mon-e-cf647dcf4-v5rgz       1/1       Running     0          3m
rook-ceph-operator-5c75765cdc-2kzcc   1/1       Running     2          9m
rook-ceph-osd-0-67fd55555b-th9kj      1/1       Running     0          26s
rook-ceph-osd-prepare-master-t97sk    0/2       Completed   1          52s
rook-ceph-osd-prepare-node01-pzbv5    1/2       Running     0          52s
rook-discover-28cx9                   1/1       Running     0          7m
rook-discover-qndns                   1/1       Running     0          7m

master $ vi filesystem.yaml
master $ kubectl create -f filesystem.yaml
cephfilesystem.ceph.rook.io/myfs created

master $ vi registry.yaml
master $ kubectl create -f registry.yaml
deployment.apps/kube-registry created

master $ kubectl get deploy -n rook-ceph
NAME                 DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
rook-ceph-mgr-a      1         1         1            0           8m
rook-ceph-mon-a      1         1         1            1           11m
rook-ceph-mon-d      1         1         1            0           9m
rook-ceph-mon-e      1         1         1            0           9m
rook-ceph-operator   1         1         1            0           15m
rook-ceph-osd-0      1         1         1            1           6m

Затем я развернул файловую систему и попытался развернуть Kube-Registry, используя хранилище файловой системы. При монтировании возникает проблема, как показано ниже:

  Type     Reason       Age   From               Message
  ----     ------       ----  ----               -------
  Normal   Scheduled    6m    default-scheduler  Successfully assigned kube-system/kube-registry-58db869599-fxcmw to master
  Warning  FailedMount  3m    kubelet, master    MountVolume.SetUp failed for volume "image-store" : mount command failed, status: Failure, reason: failed to mount filesystem myfs to /var/lib/kubelet/pods/f66fe5fe-77ed-11e9-a7e7-0242ac110082/volumes/ceph.rook.io~rook/image-store with monitor 10.100.26.43:6789,10.104.161.253:6789,10.103.117.82:6789:/ and options [name=admin secret=AQD6fN1cbv0TBBAAvHJjvGRcTGN6mxcErO8UiA==]: mount failed: exit status 32
Mounting command: systemd-run
Mounting arguments: --description=Kubernetes transient mount for /var/lib/kubelet/pods/f66fe5fe-77ed-11e9-a7e7-0242ac110082/volumes/ceph.rook.io~rook/image-store --scope -- mount -t ceph -o name=admin,secret=AQD6fN1cbv0TBBAAvHJjvGRcTGN6mxcErO8UiA== 10.100.26.43:6789,10.104.161.253:6789,10.103.117.82:6789:/ /var/lib/kubelet/pods/f66fe5fe-77ed-11e9-a7e7-0242ac110082/volumes/ceph.rook.io~rook/image-store
Output: Running scope as unit run-rddc4a13838c24ee58112f543ee5db041.scope.
mount: mount 10.100.26.43:6789,10.104.161.253:6789,10.103.117.82:6789:/ on /var/lib/kubelet/pods/f66fe5fe-77ed-11e9-a7e7-0242ac110082/volumes/ceph.rook.io~rook/image-store failed: Connection timed out
  Warning  FailedMount  2m (x2 over 4m)  kubelet, master  Unable to mount volumes for pod "kube-registry-58db869599-fxcmw_kube-system(f66fe5fe-77ed-11e9-a7e7-0242ac110082)": timeout expired waiting for volumes to attach or mount for pod "kube-system"/"kube-registry-58db869599-fxcmw". list of unmounted volumes=[image-store]. list of unattached volumes=[image-store default-token-2g4pf]
  Warning  FailedMount  22s              kubelet, master  MountVolume.SetUp failed for volume "image-store" : mount command failed, status: Failure, reason: failed to mount filesystem myfs to /var/lib/kubelet/pods/f66fe5fe-77ed-11e9-a7e7-0242ac110082/volumes/ceph.rook.io~rook/image-store with monitor 10.104.161.253:6789,10.103.117.82:6789,10.100.26.43:6789:/ and options [name=admin secret=AQD6fN1cbv0TBBAAvHJjvGRcTGN6mxcErO8UiA==]: mount failed: exit status 32
Mounting command: systemd-run
Mounting arguments: --description=Kubernetes transient mount for /var/lib/kubelet/pods/f66fe5fe-77ed-11e9-a7e7-0242ac110082/volumes/ceph.rook.io~rook/image-store --scope -- mount -t ceph -o name=admin,secret=AQD6fN1cbv0TBBAAvHJjvGRcTGN6mxcErO8UiA== 10.104.161.253:6789,10.103.117.82:6789,10.100.26.43:6789:/ /var/lib/kubelet/pods/f66fe5fe-77ed-11e9-a7e7-0242ac110082/volumes/ceph.rook.io~rook/image-store
Output: Running scope as unit run-r72b0887c8cd1427d9733071104bd8cc6.scope.
mount: mount 10.104.161.253:6789,10.103.117.82:6789,10.100.26.43:6789:/ on /var/lib/kubelet/pods/f66fe5fe-77ed-11e9-a7e7-0242ac110082/volumes/ceph.rook.io~rook/image-store failed: Connection timed out

Как развернуть реестр (Kube-Registry) без этой проблемы с монтированием?

...