I am using the following YAML to deploy KeyDB in my cluster:
---
# Source: keydb/templates/cm-utils.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: keydb-utils
  labels:
    helm.sh/chart: keydb-0.8.0
    app.kubernetes.io/name: keydb
    app.kubernetes.io/instance: keydb
    app.kubernetes.io/version: "5.3.3"
    app.kubernetes.io/managed-by: Helm
data:
  server.sh: |
    #!/bin/bash
    set -euxo pipefail
    host="$(hostname)"
    port="6379"
    replicas=()
    for node in {0..2}; do
      if [ "$host" != "keydb-${node}" ]; then
        replicas+=("--replicaof keydb-${node}.keydb ${port}")
      fi
    done
    keydb-server /etc/keydb/redis.conf \
      --active-replica yes \
      --multi-master yes \
      --appendonly no \
      --bind 0.0.0.0 \
      --port "$port" \
      --protected-mode no \
      --server-threads 2 \
      "${replicas[@]}"
---
# Source: keydb/templates/svc.yaml
# Headless service for proper name resolution
apiVersion: v1
kind: Service
metadata:
  name: keydb
  labels:
    helm.sh/chart: keydb-0.8.0
    app.kubernetes.io/name: keydb
    app.kubernetes.io/instance: keydb
    app.kubernetes.io/version: "5.3.3"
    app.kubernetes.io/managed-by: Helm
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: server
      port: 6379
      protocol: TCP
      targetPort: keydb
  selector:
    app.kubernetes.io/name: keydb
    app.kubernetes.io/instance: keydb
---
# Source: keydb/templates/sts.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: keydb
  labels:
    helm.sh/chart: keydb-0.8.0
    app.kubernetes.io/name: keydb
    app.kubernetes.io/instance: keydb
    app.kubernetes.io/version: "5.3.3"
    app.kubernetes.io/managed-by: Helm
spec:
  replicas: 3
  serviceName: keydb
  selector:
    matchLabels:
      app.kubernetes.io/name: keydb
      app.kubernetes.io/instance: keydb
  template:
    metadata:
      annotations:
        checksum/cm-utils: e0806d2d0698a10e54131bde1119e44c51842191a777c154c308eab52ebb2ec7
      labels:
        helm.sh/chart: keydb-0.8.0
        app.kubernetes.io/name: keydb
        app.kubernetes.io/instance: keydb
        app.kubernetes.io/version: "5.3.3"
        app.kubernetes.io/managed-by: Helm
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - keydb
              topologyKey: kubernetes.io/hostname
      containers:
        - name: keydb
          image: eqalpha/keydb:x86_64_v5.3.3
          imagePullPolicy: IfNotPresent
          command:
            - /utils/server.sh
          ports:
            - name: keydb
              containerPort: 6379
              protocol: TCP
          livenessProbe:
            tcpSocket:
              port: keydb
          readinessProbe:
            tcpSocket:
              port: keydb
          resources:
            limits:
              cpu: 200m
              memory: 2Gi
            requests:
              cpu: 100m
              memory: 1Gi
          volumeMounts:
            - name: keydb-data
              mountPath: /data
            - name: utils
              mountPath: /utils
              readOnly: true
      volumes:
        - name: utils
          configMap:
            name: keydb-utils
            defaultMode: 0700
            items:
              - key: server.sh
                path: server.sh
  volumeClaimTemplates:
    - metadata:
        name: keydb-data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 2Gi
        storageClassName: "gp2"
using the command
kubectl apply -f deploy.yaml
On creation
$ kubectl apply -f deploy.yaml
configmap/keydb-utils created
service/keydb created
statefulset.apps/keydb created
there is no error, but the pod is not scheduled, with the following error:
status:
  conditions:
    - lastProbeTime: null
      lastTransitionTime: "2020-04-24T15:44:39Z"
      message: pod has unbound immediate PersistentVolumeClaims (repeated 3 times)
      reason: Unschedulable
      status: "False"
      type: PodScheduled
  phase: Pending
  qosClass: Burstable
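For reference, the full scheduling events can be pulled with a standard kubectl describe (the pod name keydb-0 follows from the StatefulSet naming convention):

$ kubectl describe pod keydb-0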
When I check the PVC, it has been created without access modes or a storage class.
$ kubectl get pvc
NAME                 STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
keydb-data-keydb-0   Pending                                                     28m
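To dig into why the claim stays Pending, the events recorded on the PVC itself can be inspected; a minimal example, using the claim name from the output above:

$ kubectl describe pvc keydb-data-keydb-0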
Please help.
Edit: added the storage class output.
$ kubectl get sc
NAME            PROVISIONER                    AGE
gp2 (default)   kubernetes.io/aws-ebs          32d
local-storage   kubernetes.io/no-provisioner   10h
No PVs have been created for it.
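For completeness, whether the gp2 class provisions volumes immediately or waits for the first consumer can be read from its volumeBindingMode (a standard storage.k8s.io/v1 StorageClass field), and the absence of PVs can be confirmed directly:

$ kubectl get sc gp2 -o jsonpath='{.volumeBindingMode}'
$ kubectl get pv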