Spring Boot Logback logger output does not reach Elasticsearch
0 votes
/ April 6, 2020

I have a Spring Boot application configured like this in logback.xml:

<property resource="application.properties" />
<property name="LOG_FILE" value="${LOG_FILE:-${LOG_PATH:-${LOG_TEMP:-${java.io.tmpdir:-/tmp}}}/spring.log}"/>
<conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter" />
<conversionRule conversionWord="wex" converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter" />
<conversionRule conversionWord="wEx" converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter" />
<property name="CONSOLE_LOG_PATTERN" value="${CONSOLE_LOG_PATTERN:-%clr(%d{${LOG_DATEFORMAT_PATTERN:-yyyy-MM-dd HH:mm:ss.SSS}}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>
<property name="FILE_LOG_PATTERN" value="${FILE_LOG_PATTERN:-%d{${LOG_DATEFORMAT_PATTERN:-yyyy-MM-dd HH:mm:ss.SSS}} ${LOG_LEVEL_PATTERN:-%5p} ${PID:- } --- [%t] %-40.40logger{39} : %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>

<logger name="org.apache.catalina.startup.DigesterFactory" level="ERROR"/>
<logger name="org.apache.catalina.util.LifecycleBase" level="ERROR"/>
<logger name="org.apache.coyote.http11.Http11NioProtocol" level="WARN"/>
<logger name="org.apache.sshd.common.util.SecurityUtils" level="WARN"/>
<logger name="org.apache.tomcat.util.net.NioSelectorPool" level="WARN"/>
<logger name="org.eclipse.jetty.util.component.AbstractLifeCycle" level="ERROR"/>
<logger name="org.hibernate.validator.internal.util.Version" level="WARN"/>
<logger name="org.springframework.data.elasticsearch.client.WIRE" level="trace"/>

<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
    <encoder>
        <pattern>${CONSOLE_LOG_PATTERN}</pattern>
    </encoder>
</appender>
<appender name="loggingLogback" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <destination>logstash.myDomain.com:80</destination>
        <!-- encoder is required -->
        <encoder class="net.logstash.logback.encoder.LogstashEncoder" />
    </appender>
    <appender name="accessLogback" class="net.logstash.logback.appender.LogstashAccessTcpSocketAppender">
        <destination>logstash.myDomain.com:80</destination>
        <!-- encoder is required -->
        <encoder class="net.logstash.logback.encoder.LogstashAccessEncoder" />
    </appender>
    <root level="INFO">
        <appender-ref ref="CONSOLE" />
        <appender-ref ref="loggingLogback" />
        <appender-ref ref="accessLogback" />
    </root>

I then start my application from my IDE and run the rest of the environment via this docker-compose setup:

version: "3.0"

services:
  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    restart: always
    env_file:
      - 'env.grafana'
    user: "0"
    volumes:
      - ./volumes/grafana/data:/var/lib/grafana

  elasticsearch:
    container_name: elasticsearch
    restart: always
    image: elastic/elasticsearch:7.6.2
    environment:
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - ELASTIC_PASSWORD=elasticPAss
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ports:
    - 9200:9200
    volumes:
      - ./volumes/elasticsearch/data:/usr/share/elasticsearch/data
      - ./volumes/elasticsearch/logs:/usr/share/elasticsearch/logs
    ulimits:
      memlock:
        soft: -1
        hard: -1

  logstash:
    container_name: logstash
    restart: always
    image: elastic/logstash:7.6.2
    volumes:
      - ./volumes/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - ./volumes/logstash/pipeline/jiraElastic.conf:/usr/share/logstash/pipeline/jiraElastic.conf
    depends_on:
      - elasticsearch

  nginx:
    image: nginx:latest
    container_name: nginx
    restart: always
    volumes:
      - ./volumes/nginx/nginx.conf:/etc/nginx/nginx.conf
      - ./volumes/nginx/proxy.conf:/etc/nginx/proxy.conf
      - ./volumes/nginx/sites-enabled-dev:/etc/nginx/sites-enabled
      - ./volumes/nginx/logs/:/var/log/nginx
    depends_on:
      - grafana
    ports:
      - 80:80
      - 443:443
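
The stack itself is presumably brought up in the usual way, something like:

docker-compose up -d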

The configuration files of note:

nginx.conf

worker_processes auto;

events {
    worker_connections 20000;
}

http {

        ##
        # Basic Settings
        ##

        sendfile on;
        tcp_nopush on;
        tcp_nodelay on;
        keepalive_timeout 65;
        types_hash_max_size 2048;
        server_names_hash_bucket_size 64;

        include /etc/nginx/mime.types;
        include /etc/nginx/proxy.conf;

        default_type application/octet-stream;

        ##
        # SSL Settings
        ##

        ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
        ssl_prefer_server_ciphers on;
        ssl_session_cache   shared:SSL:10m;
        ssl_session_timeout 10m;

        ##
        # Logging Settings
        ##

        access_log /var/log/nginx/access.log;
        error_log /var/log/nginx/error.log;

        ##
        # Gzip Settings
        ##

        gzip on;

        ##
        # Virtual Host Configs
        ##

        include /etc/nginx/conf.d/*.conf;
        include /etc/nginx/sites-enabled/*;
}

stream {
    include /etc/nginx/streams-enabled/*;
}

proxy.conf

proxy_redirect          off;
proxy_set_header        Host            $host;
proxy_set_header        X-Real-IP       $remote_addr;
proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
client_body_buffer_size 128k;
proxy_buffers           32 4k;

sites-enabled-dev/logstash

server {
    listen 80;
    server_name logstash.myDomain.com;
    keepalive_timeout 70;
    allow all;

    location / {
      proxy_pass     http://logstash:5000;
    }
  }

pipeline/jiraElastic.conf

input {
    tcp {
      port => 5000
      codec => "json_lines"
    }
}
output {
  elasticsearch {
    hosts => "elasticsearch:9200"
    index => "myIndexName"
  }

  stdout {
    codec => rubydebug
  }
}

In my /etc/hosts file I have logstash.myDomain.com pointing to 127.0.0.1.
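
That hosts entry is just a single line of the usual form (a sketch; exact spacing does not matter):

127.0.0.1    logstash.myDomain.com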

However, when I query Elasticsearch's /_cat/indices I get an empty result, and /_search returns:

{
    "took": 31,
    "timed_out": false,
    "_shards": {
        "total": 0,
        "successful": 0,
        "skipped": 0,
        "failed": 0
    },
    "hits": {
        "total": {
            "value": 0,
            "relation": "eq"
        },
        "max_score": 0.0,
        "hits": []
    }
}
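
For reference, the checks above can be reproduced with plain curl against the port published in the compose file (add -u elastic:<password> only if X-Pack security is actually enabled):

curl "http://localhost:9200/_cat/indices?v"
curl "http://localhost:9200/_search?pretty"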

I can see entries that look like my application's log messages in the nginx access logs, so I assume they get that far. For example:

192.168.176.1 - - [06/Apr/2020:14:29:29 +0000] "{\x22@timestamp\x22:\x222020-04-06T16:29:28.657+02:00\x22,\x22@version\x22:\x221\x22,\x22message\x22:\x22System Starting up!\x22,\x22logger_name\x22:\x22com.my.project.utils.SystemUptimeLogger\x22,\x22thread_name\x22:\x22main\x22,\x22level\x22:\x22INFO\x22,\x22level_value\x22:20000}" 400 157 "-" "-"
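
Since the compose file mounts ./volumes/nginx/logs/ into the container, those entries can also be followed live from the host, e.g.:

tail -f ./volumes/nginx/logs/access.log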

However, I don't see any output in the Logstash console, and I was under the impression that with the rubydebug codec I should. In the Logstash logs I can see that it seemingly reads my configuration correctly, though:

Sending Logstash logs to /usr/share/logstash/logs which is now configured via log4j2.properties
[2020-04-06T14:28:05,468][INFO ][logstash.setting.writabledirectory] Creating directory {:setting=>"path.queue", :path=>"/usr/share/logstash/data/queue"}
[2020-04-06T14:28:05,686][INFO ][logstash.setting.writabledirectory] Creating directory {:setting=>"path.dead_letter_queue", :path=>"/usr/share/logstash/data/dead_letter_queue"}
[2020-04-06T14:28:09,587][INFO ][logstash.runner          ] Starting Logstash {"logstash.version"=>"7.6.2"}
[2020-04-06T14:28:09,874][INFO ][logstash.agent           ] No persistent UUID file found. Generating new UUID {:uuid=>"44f03b15-fa7a-497b-81ab-9253333df805", :path=>"/usr/share/logstash/data/uuid"}
[2020-04-06T14:28:33,860][INFO ][org.reflections.Reflections] Reflections took 82 ms to scan 1 urls, producing 20 keys and 40 values 
[2020-04-06T14:28:47,341][INFO ][logstash.outputs.elasticsearch][main] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://elasticsearch:9200/]}}
[2020-04-06T14:28:48,901][WARN ][logstash.outputs.elasticsearch][main] Restored connection to ES instance {:url=>"http://elasticsearch:9200/"}
[2020-04-06T14:28:49,462][INFO ][logstash.outputs.elasticsearch][main] ES Output version determined {:es_version=>7}
[2020-04-06T14:28:49,466][WARN ][logstash.outputs.elasticsearch][main] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>7}
[2020-04-06T14:28:49,862][INFO ][logstash.outputs.elasticsearch][main] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["//elasticsearch:9200"]}
[2020-04-06T14:28:50,236][INFO ][logstash.outputs.elasticsearch][main] Using default mapping template
[2020-04-06T14:28:50,632][INFO ][logstash.outputs.elasticsearch][main] Attempting to install template {:manage_template=>{"index_patterns"=>"logstash-*", "version"=>60001, "settings"=>{"index.refresh_interval"=>"5s", "number_of_shards"=>1}, "mappings"=>{"dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date"}, "@version"=>{"type"=>"keyword"}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}
[2020-04-06T14:28:50,684][WARN ][org.logstash.instrument.metrics.gauge.LazyDelegatingGauge][main] A gauge metric of an unknown type (org.jruby.specialized.RubyArrayOneObject) has been created for key: cluster_uuids. This may result in invalid serialization.  It is recommended to log an issue to the responsible developer/development team.
[2020-04-06T14:28:50,708][INFO ][logstash.javapipeline    ][main] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>2, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>250, "pipeline.sources"=>["/usr/share/logstash/pipeline/jiraElastic.conf", "/usr/share/logstash/pipeline/logstash.conf"], :thread=>"#<Thread:0x5f9bdc74 run>"}
[2020-04-06T14:28:56,466][INFO ][logstash.inputs.beats    ][main] Beats inputs: Starting input listener {:address=>"0.0.0.0:5044"}
[2020-04-06T14:28:58,415][INFO ][logstash.javapipeline    ][main] Pipeline started {"pipeline.id"=>"main"}
[2020-04-06T14:28:58,593][INFO ][logstash.inputs.tcp      ][main] Starting tcp input listener {:address=>"0.0.0.0:5000", :ssl_enable=>"false"}
[2020-04-06T14:28:58,878][INFO ][org.logstash.beats.Server][main] Starting server on port: 5044
[2020-04-06T14:28:59,851][INFO ][logstash.agent           ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2020-04-06T14:29:02,755][INFO ][logstash.agent           ] Successfully started Logstash API endpoint {:port=>9600}
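
For comparison, when an event does make it through the pipeline, the rubydebug codec prints a hash-style dump of each event to stdout, roughly of this shape (an illustration, not actual output from this setup):

{
       "message" => "System Starting up!",
    "@timestamp" => 2020-04-06T14:29:28.657Z,
      "@version" => "1",
   "logger_name" => "com.my.project.utils.SystemUptimeLogger",
         "level" => "INFO"
}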

Any idea what the missing piece is? (It feels like the main problem is somewhere between nginx and Logstash, but it could also be the result of a misconfiguration in logback.xml.)
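
One way to narrow this down would be to push a single JSON line through the same path by hand and then re-check the indices (a hypothetical test, assuming nc is available on the host):

echo '{"message":"manual test"}' | nc logstash.myDomain.com 80
curl "http://localhost:9200/_cat/indices?v"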

1 Answer

0 votes
/ April 6, 2020

So the main problem seems to have been that in my Logstash configuration I used myIndexName as the Elasticsearch index name.

It has to be entirely lowercase.

I also removed the port from <destination>logstash.myDomain.com:80</destination>, but I doubt that made a difference.
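
With the index name lowercased, the output section of jiraElastic.conf becomes (a sketch of the corrected block, everything else unchanged):

output {
  elasticsearch {
    hosts => "elasticsearch:9200"
    index => "myindexname"
  }

  stdout {
    codec => rubydebug
  }
}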

...