Error: "no such host" in Heartbeat logs in a Kubernetes cluster
0 votes
/ 30 October 2019

I have set up the ELK stack in a Kubernetes cluster and am currently running ELK stack services such as elasticsearch:v6.2.4, logstash:6.3.0, logspout, metricbeat, and heartbeat.

All the other services are running fine except heartbeat.

The problem with heartbeat is that I am getting error logs ("no such host") instead of valid monitor results.

------------------------------------------------------------------------------------------------------

Here is my Kubernetes configuration file; you can see heartbeat.yml inside it.

apiVersion: v1
kind: ConfigMap
metadata:
  name: heartbeat-config
  namespace: kube-system
  labels:
    k8s-app: heartbeat
data:
  heartbeat.yml: |
    heartbeat.monitors:
    - type: http
      schedule: '@every 5s'
      urls: ["http://elasticsearch-logging:9200","http://kibana-logging:5601","http://cfo:3003/v1","http://front-end:5000","http://crm-proxy:3002/v1","http://mongoclient:3000","http://cron-scheduler:3007/v1","http://cso:3005","http://database:27017","http://direct-debits:3009/v1","http://loan-management:3008/v1","http://settings:4001/core"]
      check.response.status: 200
    - type: icmp
      schedule: '@every 5s'
      hosts:
        - elasticsearch-logging
        - kibana-logging
        - cfo
        - front-end
        - crm-proxy
        - cso
        - mongoclient
        - cron-scheduler
        - database
        - direct-debits
        - loan-management
    processors:
    - add_cloud_metadata:
    output.elasticsearch:
      hosts: ['elasticsearch-logging:9200']
      username: elastic
      password: changeme

---

apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: heartbeat
  namespace: kube-system
  labels:
    k8s-app: heartbeat
spec:
  template:
    metadata:
      labels:
        k8s-app: heartbeat
    spec:
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: heartbeat
        image: docker.elastic.co/beats/heartbeat:6.3.0
        args: [
          "-c", "/usr/share/heartbeat/heartbeat.yml",
          "-e",
          "-system.hostfs=/hostfs",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: elasticsearch-logging
        - name: ELASTICSEARCH_PORT
          value: "9200"
        - name: ELASTICSEARCH_USERNAME
          value: elastic
        - name: ELASTICSEARCH_PASSWORD
          value: changeme
        - name: ELASTIC_CLOUD_ID
          value:
        - name: ELASTIC_CLOUD_AUTH
          value:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        securityContext:
          runAsUser: 0
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /usr/share/heartbeat/heartbeat.yml
          readOnly: true
          subPath: heartbeat.yml
        - name: dockersock
          mountPath: /var/run/docker.sock
        - name: proc
          mountPath: /hostfs/proc
          readOnly: true
        - name: cgroup
          mountPath: /hostfs/sys/fs/cgroup
          readOnly: true
      volumes:
      - name: proc
        hostPath:
          path: /proc
      - name: cgroup
        hostPath:
          path: /sys/fs/cgroup
      - name: dockersock
        hostPath:
          path: /var/run/docker.sock
      - name: config
        configMap:
          defaultMode: 0600
          name: heartbeat-config
      # We set an `emptyDir` here to ensure the manifest will deploy correctly.
      # It's recommended to change this to a `hostPath` folder, to ensure internal data
      # files survive pod changes (ie: version upgrade)
      - name: data
        emptyDir: {}
---

# Deploy singleton instance in the whole cluster for some unique data sources, like kube-state-metrics
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  namespace: kube-system
  name: heartbeat
  labels:
    k8s-app: heartbeat
spec:
  template:
    metadata:
      labels:
        k8s-app: heartbeat
    spec:
      containers:
      - name: heartbeat
        image: docker.elastic.co/beats/heartbeat:6.3.0
        args: [
          "-c", "/usr/share/heartbeat/heartbeat.yml",
          "-e",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: elasticsearch-logging
        - name: ELASTICSEARCH_PORT
          value: "9200"
        - name: ELASTICSEARCH_USERNAME
          value: elastic
        - name: ELASTICSEARCH_PASSWORD
          value: changeme
        - name: ELASTIC_CLOUD_ID
          value:
        - name: ELASTIC_CLOUD_AUTH
          value:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        securityContext:
          runAsUser: 0
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /usr/share/heartbeat/heartbeat.yml
          readOnly: true
          subPath: heartbeat.yml
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: heartbeat-config
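
For what it's worth, a quick way to confirm that the ConfigMap above actually lands in the container as /usr/share/heartbeat/heartbeat.yml (heartbeat-xxxx is a placeholder pod name, not a real one from my cluster):

# Hypothetical pod name; take a real one from "kubectl get pods -n kube-system".
# Prints heartbeat.yml as mounted into the container via the ConfigMap subPath.
kubectl exec -n kube-system heartbeat-xxxx -- cat /usr/share/heartbeat/heartbeat.yml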

------------------------------------------------------------------------------------------------------

Logs

ICMP logs - a single entry

{
  "_index": "heartbeat-6.3.0-2019.10.30",
  "_type": "doc",
  "_id": "DA8GG24BaP0t7Q7zj5w-",
  "_score": 1,
  "_source": {
    "@timestamp": "2019-10-30T04:57:24.052Z",
    "beat": {
      "name": "heartbeat-64c4bfc49f-xgx2d",
      "hostname": "heartbeat-64c4bfc49f-xgx2d",
      "version": "6.3.0"
    },
    "meta": {
      "cloud": {
        "region": "eu-central-1",
        "availability_zone": "eu-central-1a",
        "provider": "ec2",
        "instance_id": "i-02f044f80723acc15",
        "machine_type": "t2.medium"
      }
    },
    "resolve": {
      "host": "crm-proxy"
    },
    "error": {
      "type": "io",
      "message": "lookup crm-proxy on 10.100.0.10:53: no such host"
    },
    "monitor": {
      "duration": {
        "us": 13120
      },
      "status": "down",
      "id": "icmp-icmp-host-ip@crm-proxy",
      "name": "icmp",
      "type": "icmp",
      "host": "crm-proxy"
    },
    "type": "monitor",
    "host": {
      "name": "heartbeat-64c4bfc49f-xgx2d"
    }
  },
  "fields": {
    "@timestamp": [
      "2019-10-30T04:57:24.052Z"
    ]
  }
}

HTTP logs - a single entry

{
  "_index": "heartbeat-6.3.0-2019.10.30",
  "_type": "doc",
  "_id": "axozHG4BaP0t7Q7zAcFW",
  "_score": 1,
  "_source": {
    "@timestamp": "2019-10-30T10:25:34.051Z",
    "resolve": {
      "host": "crm-proxy"
    },
    "tcp": {
      "port": 3002
    },
    "type": "monitor",
    "host": {
      "name": "heartbeat-64c4bfc49f-xgx2d"
    },
    "beat": {
      "name": "heartbeat-64c4bfc49f-xgx2d",
      "hostname": "heartbeat-64c4bfc49f-xgx2d",
      "version": "6.3.0"
    },
    "meta": {
      "cloud": {
        "instance_id": "i-02f044f80723acc15",
        "machine_type": "t2.medium",
        "region": "eu-central-1",
        "availability_zone": "eu-central-1a",
        "provider": "ec2"
      }
    },
    "error": {
      "message": "lookup crm-proxy on 10.100.0.10:53: no such host",
      "type": "io"
    },
    "monitor": {
      "type": "http",
      "host": "crm-proxy",
      "duration": {
        "us": 34904
      },
      "status": "down",
      "id": "http@http://crm-proxy:3002/v1",
      "scheme": "http",
      "name": "http"
    },
    "http": {
      "url": "http://crm-proxy:3002/v1"
    }
  },
  "fields": {
    "@timestamp": [
      "2019-10-30T10:25:34.051Z"
    ]
  }
}

For example: if I exec into one of the services and curl http://crm-proxy:3002/v1, I get a response (see the sketch below).
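
Roughly this, where front-end-xxxx is a placeholder for any running pod in the default namespace (assuming the pod image ships curl):

# Hypothetical pod name; any pod in the default namespace behaves the same here.
# The short service name resolves because the pod is in the same namespace.
kubectl exec -it front-end-xxxx -n default -- curl -s http://crm-proxy:3002/v1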

Note: I ran the same services on Docker Swarm with the same heartbeat.yml configuration for Elasticsearch, and there I get the correct results.

I cannot figure out why the heartbeat service throws the "no such host" error.

This is the output of kubectl get svc (default namespace):

NAME                    TYPE        PORT(S)                   AGE
cfo                     ClusterIP   443/TCP,80/TCP,3003/TCP   14d
crm-proxy               ClusterIP   443/TCP,80/TCP,3002/TCP   78d
cron-scheduler          ClusterIP   443/TCP,80/TCP,3007/TCP   78d
cso                     ClusterIP   443/TCP,80/TCP,3005/TCP   6d5h
database                ClusterIP   27017/TCP                 82d
direct-debits           ClusterIP   443/TCP,80/TCP,3009/TCP   14d
ex-proxy                ClusterIP   443/TCP,80/TCP,3001/TCP   20d
front-end               ClusterIP   443/TCP                   21d
jenkins                 ClusterIP   443/TCP,80/TCP            78d
kubernetes              ClusterIP   443/TCP                   84d
loan-management         ClusterIP   443/TCP,80/TCP,3008/TCP   21d
mock-twinfield          ClusterIP   443/TCP,80/TCP,5678/TCP   41d
mongoclient             ClusterIP   80/TCP                    78d
privatenpm-acceptance   ClusterIP   4873/TCP,443/TCP,80/TCP   69d
settings                ClusterIP   443/TCP,80/TCP,4001/TCP   3h40m
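
One detail that may matter: the heartbeat pods run in the kube-system namespace, while all of these services live in default, so a short name such as crm-proxy would normally resolve only via the pod's own namespace search domain. A minimal sketch of the DNS check I have in mind, assuming heartbeat-xxxx stands in for a real pod name and that getent is available in the heartbeat image:

# Hypothetical pod name; take a real one from "kubectl get pods -n kube-system".
# Short name - expected to fail from kube-system if the namespace is the issue:
kubectl exec -n kube-system heartbeat-xxxx -- getent hosts crm-proxy
# Fully qualified name - should resolve from any namespace:
kubectl exec -n kube-system heartbeat-xxxx -- getent hosts crm-proxy.default.svc.cluster.local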

Any help is appreciated!
