У меня проблема с моим сервером Python Flask, развернутым в облачном движке Google Kubernetes. Код ниже представляет собой простой флеш-сервер, который поддерживает поток текста / событий. Проблема заключается в том, что ровно через 60 секунд бездействия сервера (нет сообщений из потока) клиент показывает ошибку 502, неверный шлюз .
Ошибка: ошибка сервера Обнаружен сервервременная ошибка и не удалось выполнить ваш запрос. Пожалуйста, повторите попытку через 30 секунд.
Клиент больше не будет получать данные с сервера, когда это происходит. Я уже пытался добавить таймауты, как вы можете видеть в конфигурационном файле kubernetes.
Я пытался раскрутить движок облачных вычислений Google без использования kubernetes. Развернул в нем тот же код и добавил домен. К моему удивлению, это работает, оно не показало ошибку 502 неверного запроса, даже если я оставил браузер открытым.
Возможно, это как-то связано с конфигом kubernetes, который я запускаю. Буду признателен за любую помощь или идею, которые я смогу получить.
Обновление 1
Я попытался изменить тип службы kube на LoadBalancer вместо NodePort .
Доступ к сгенерированной конечной точке IP работает без ошибок 502 даже после 60 секунд бездействия.
Обновление 2
Вот ошибки, генерируемые журналами стекового драйвера LoadBalancer
{
httpRequest: {
referer: "http://sse-dev.[REDACTED]/test"
remoteIp: "[REDACTED]"
requestMethod: "GET"
requestSize: "345"
requestUrl: "http://sse-dev.[REDACTED]/stream"
responseSize: "488"
serverIp: "[REDACTED]"
status: 502
userAgent: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36"
}
insertId: "ptb7kfg2w2zz01"
jsonPayload: {
@type: "type.googleapis.com/google.cloud.loadbalancing.type.LoadBalancerLogEntry"
statusDetails: "backend_timeout"
}
logName: "projects/[REDACTED]-assist-dev/logs/requests"
receiveTimestamp: "2020-01-03T06:27:44.361706996Z"
resource: {
labels: {
backend_service_name: "k8s-be-30808--17630a0e8199e99b"
forwarding_rule_name: "k8s-fw-default-[REDACTED]-dev-ingress--17630a0e8199e99b"
project_id: "[REDACTED]-assist-dev"
target_proxy_name: "k8s-tp-default-[REDACTED]-dev-ingress--17630a0e8199e99b"
url_map_name: "k8s-um-default-[REDACTED]-dev-ingress--17630a0e8199e99b"
zone: "global"
}
type: "http_load_balancer"
}
severity: "WARNING"
spanId: "4b0767cace9b9500"
timestamp: "2020-01-03T06:26:43.381613Z"
trace: "projects/[REDACTED]-assist-dev/traces/d467f39f76b94c02d9a8e6998fdca17b"
}
sse.py
from typing import Iterator
import random
import string
from collections import deque
from flask import Response, request
from gevent.queue import Queue
import gevent
def generate_id(size=6, chars=string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
class ServerSentEvent(object):
"""Class to handle server-sent events."""
def __init__(self, data, event):
self.data = data
self.event = event
self.event_id = generate_id(),
self.retry = 5000
self.desc_map = {
self.data: "data",
self.event: "event",
self.event_id: "id",
self.retry: 5000
}
def encode(self) -> str:
"""Encodes events as a string."""
if not self.data:
return ""
lines = ["{}: {}".format(name, key)
for key, name in self.desc_map.items() if key]
return "{}\n\n".format("\n".join(lines))
class Channel(object):
def __init__(self, history_size=32):
self.subscriptions = []
self.history = deque(maxlen=history_size)
self.history.append(ServerSentEvent('start_of_history', None))
def notify(self, message):
"""Notify all subscribers with message."""
for sub in self.subscriptions[:]:
sub.put(message)
def event_generator(self, last_id) -> Iterator[ServerSentEvent]:
"""Yields encoded ServerSentEvents."""
q = Queue()
self._add_history(q, last_id)
self.subscriptions.append(q)
try:
while True:
yield q.get()
except GeneratorExit:
self.subscriptions.remove(q)
def subscribe(self):
def gen(last_id) -> Iterator[str]:
for sse in self.event_generator(last_id):
yield sse.encode()
return Response(
gen(request.headers.get('Last-Event-ID')),
mimetype="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Content-Type": "text/event-stream"
})
def _add_history(self, q, last_id):
add = False
for sse in self.history:
if add:
q.put(sse)
if sse.event_id == last_id:
add = True
def publish(self, message, event=None):
sse = ServerSentEvent(str(message), event)
self.history.append(sse)
gevent.spawn(self.notify, sse)
def get_last_id(self) -> str:
return self.history[-1].event_id
service.py
import json
import os
import requests
from app.controllers.sse import Channel
from flask import send_file, \
jsonify, request, Blueprint, Response
from typing import Iterator
blueprint = Blueprint(__name__, __name__, url_prefix='')
flask_channel = Channel()
@blueprint.route("/stream")
def stream():
return flask_channel.subscribe()
@blueprint.route('/sample/create', methods=['GET'])
def sample_create():
branch_id = request.args.get('branch_id', None)
params = request.get_json()
if not params:
params = {
'id': 'sample_id',
'description': 'sample_description'
}
flask_channel.publish(json.dumps(params), event=branch_id)
return jsonify({'success': True}), 200
kubernetes-config.yaml
---
apiVersion: v1
kind: Service
metadata:
name: sse-service
labels:
app: sse-service
spec:
ports:
- port: 80
targetPort: 5000
protocol: TCP
name: http
selector:
app: sse-service
sessionAffinity: ClientIP
type: NodePort
---
apiVersion: "extensions/v1beta1"
kind: "Deployment"
metadata:
name: "sse-service"
namespace: "default"
labels:
app: "sse-service"
spec:
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 25%
selector:
matchLabels:
app: "sse-service"
template:
metadata:
labels:
app: "sse-service"
spec:
containers:
- name: "sse-service"
image: "{{IMAGE_NAME}}"
imagePullPolicy: Always
ports:
- containerPort: 5000
livenessProbe:
httpGet:
path: /health/check
port: 5000
initialDelaySeconds: 25
periodSeconds: 15
readinessProbe:
httpGet:
path: /health/check
port: 5000
initialDelaySeconds: 25
periodSeconds: 15
---
apiVersion: "autoscaling/v2beta1"
kind: "HorizontalPodAutoscaler"
metadata:
name: "sse-service-hpa"
namespace: "default"
labels:
app: "sse-service"
spec:
scaleTargetRef:
kind: "Deployment"
name: "sse-service"
apiVersion: "apps/v1beta1"
minReplicas: 1
maxReplicas: 7
metrics:
- type: "Resource"
resource:
name: "cpu"
targetAverageUtilization: 80
---
apiVersion: cloud.google.com/v1beta1
kind: BackendConfig
metadata:
name: sse-service
spec:
timeoutSec: 120
connectionDraining:
drainingTimeoutSec: 3600
Dockerfile
FROM python:3.6.5-jessie
ENV GUNICORN_PORT=5000
ENV PYTHONUNBUFFERED=TRUE
ENV GOOGLE_APPLICATION_CREDENTIALS=/opt/creds/account.json
COPY requirements.txt /opt/app/requirements.txt
COPY app /opt/app
COPY creds/account.json /opt/creds/account.json
WORKDIR /opt/app
RUN pip install -r requirements.txt
EXPOSE ${GUNICORN_PORT}
CMD gunicorn -b :${GUNICORN_PORT} wsgi:create_app\(\) --reload --timeout=300000 --config=config.py
Base.py
from flask import jsonify, Blueprint
blueprint = Blueprint(__name__, __name__)
@blueprint.route('/health/check', methods=['GET'])
def check_health():
response = {
'message': 'pong!',
'status': 'success'
}
return jsonify(response), 200
bitbucket-pipelines.yml
options:
docker: true
pipelines:
branches:
dev:
- step:
name: Build - Push - Deploy to Dev environment
image: google/cloud-sdk:latest
caches:
- docker
- pip
deployment: development
script:
# Export all bitbucket credentials to the environment
- echo $GOOGLE_APPLICATION_CREDENTIALS | base64 -di > ./creds/account.json
- echo $CONTAINER_CREDENTIALS | base64 -di > ./creds/gcr.json
- export CLOUDSDK_CONFIG='pwd'/creds/account.json
- export GOOGLE_APPLICATION_CREDENTIALS='pwd'/creds/account.json
# Configure docker to use gcp service account
- gcloud auth activate-service-account $KUBERNETES_SERVICE_ACCOUNT --key-file=creds/gcr.json
- gcloud config list
- gcloud auth configure-docker -q
# # Build docker image with name and tag
- export IMAGE_NAME=$HOSTNAME/$PROJECT_ID/$IMAGE:v0.1.$BITBUCKET_BUILD_NUMBER
- docker build -t $IMAGE_NAME .
# # Push image to Google Container Repository
- docker push $IMAGE_NAME
# Initialize configs for kubernetes
- gcloud config set project $PROJECT_ID
- gcloud config set compute/zone $PROJECT_ZONE
- gcloud container clusters get-credentials $PROJECT_CLUSTER
# Run kubernetes configs
- cat kubernetes-config.yaml | sed "s#{{IMAGE_NAME}}#$IMAGE_NAME#g" | kubectl apply -f -
ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
ingress.kubernetes.io/backends: '{"k8s-be-30359--17630a0e8199e99b":"HEALTHY","k8s-be-30599--17630a0e8199e99b":"HEALTHY","k8s-be-30808--17630a0e8199e99b":"HEALTHY","k8s-be-30991--17630a0e8199e99b":"HEALTHY","k8s-be-31055--17630a0e8199e99b":"HEALTHY","k8s-be-31467--17630a0e8199e99b":"HEALTHY","k8s-be-31596--17630a0e8199e99b":"HEALTHY","k8s-be-31948--17630a0e8199e99b":"HEALTHY","k8s-be-32702--17630a0e8199e99b":"HEALTHY"}'
ingress.kubernetes.io/forwarding-rule: k8s-fw-default-[REDACTED]-dev-ingress--17630a0e8199e99b
ingress.kubernetes.io/https-forwarding-rule: k8s-fws-default-[REDACTED]-dev-ingress--17630a0e8199e99b
ingress.kubernetes.io/https-target-proxy: k8s-tps-default-[REDACTED]-dev-ingress--17630a0e8199e99b
ingress.kubernetes.io/ssl-cert: k8s-ssl-d6db2a7a17456a7b-64a79e74837f68e3--17630a0e8199e99b
ingress.kubernetes.io/static-ip: k8s-fw-default-[REDACTED]-dev-ingress--17630a0e8199e99b
ingress.kubernetes.io/target-proxy: k8s-tp-default-[REDACTED]-dev-ingress--17630a0e8199e99b
ingress.kubernetes.io/url-map: k8s-um-default-[REDACTED]-dev-ingress--17630a0e8199e99b
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"extensions/v1beta1","kind":"Ingress","metadata":{"annotations":{},"name":"[REDACTED]-dev-ingress","namespace":"default"},"spec":{"rules":[{"host":"bot-dev.[REDACTED]","http":{"paths":[{"backend":{"serviceName":"bot-service","servicePort":80}}]}},{"host":"client-dev.[REDACTED]","http":{"paths":[{"backend":{"serviceName":"client-service","servicePort":80}}]}},{"host":"team-dev.[REDACTED]","http":{"paths":[{"backend":{"serviceName":"team-service","servicePort":80}}]}},{"host":"chat-dev.[REDACTED]","http":{"paths":[{"backend":{"serviceName":"chat-service","servicePort":80}}]}},{"host":"chatb-dev.[REDACTED]","http":{"paths":[{"backend":{"serviceName":"chat-builder-service","servicePort":80}}]}},{"host":"action-dev.[REDACTED]","http":{"paths":[{"backend":{"serviceName":"action-service","servicePort":80}}]}},{"host":"message-dev.[REDACTED]","http":{"paths":[{"backend":{"serviceName":"message-service","servicePort":80}}]}}],"tls":[{"hosts":["bots-dev.[REDACTED]","client-dev.[REDACTED]","team-dev.[REDACTED]","chat-dev.[REDACTED]","chatb-dev.[REDACTED]","message-dev.[REDACTED]"],"secretName":"[REDACTED]-ssl"}]}}
creationTimestamp: "2019-08-09T09:19:14Z"
generation: 7
name: [REDACTED]-dev-ingress
namespace: default
resourceVersion: "73975381"
selfLink: /apis/extensions/v1beta1/namespaces/default/ingresses/[REDACTED]-dev-ingress
uid: c176cc8c-ba86-11e9-89d6-42010a940181
spec:
rules:
- host: bot-dev.[REDACTED]
http:
paths:
- backend:
serviceName: bot-service
servicePort: 80
- host: client-dev.[REDACTED]
http:
paths:
- backend:
serviceName: client-service
servicePort: 80
- host: team-dev.[REDACTED]
http:
paths:
- backend:
serviceName: team-service
servicePort: 80
- host: chat-dev.[REDACTED]
http:
paths:
- backend:
serviceName: chat-service
servicePort: 80
- host: chatb-dev.[REDACTED]
http:
paths:
- backend:
serviceName: chat-builder-service
servicePort: 80
- host: action-dev.[REDACTED]
http:
paths:
- backend:
serviceName: action-service
servicePort: 80
- host: message-dev.[REDACTED]
http:
paths:
- backend:
serviceName: message-service
servicePort: 80
- host: sse-dev.[REDACTED]
http:
paths:
- backend:
serviceName: sse-service
servicePort: 80
tls:
- hosts:
- bots-dev.[REDACTED]
- client-dev.[REDACTED]
- team-dev.[REDACTED]
- chat-dev.[REDACTED]
- chatb-dev.[REDACTED]
- message-dev.[REDACTED]
- sse-dev.[REDACTED]
secretName: [REDACTED]-ssl
status:
loadBalancer:
ingress:
- ip: [REDACTED]