Google App Engine Docker Контейнер 502 Bad Gateway - PullRequest
0 голосов
/ 04 мая 2020

Я пытаюсь развернуть свой docker-образ в Google App Engine; мне успешно удалось собрать образ и отправить (push) его в GCR. Затем я развернул его командой gcloud app deploy --image 'link-to-image-on-gcr'

Но при доступе к приложению я получаю ошибку 502 Bad Gateway. Я зашел на сервер по ssh, проверил журналы контейнера nginx в docker и обнаружил приведенную ниже запись в журнале

2020/05/04 00:52:50 [error] 33#33: *127 connect() failed (111: Connection refused) while connecting to upstream, client: 74.125.24.153, server: , request: "GET /wp-login.php HTTP/1.1", upstream: "http://172.17.0.1:8080/wp-login.php", host: "myappengineservice-myrepo.ue.r.appspot.com"

Мой docker-образ содержит только один контейнер (это образ Wordpress). Насколько я понимаю, при развертывании App Engine запускает мой docker-контейнер и открывает к нему доступ через прокси Nginx, поэтому все запросы перенаправляются через прокси Nginx.

Поработав некоторое время, я отредактировал файл конфигурации Nginx и наткнулся на эту строку

location / {
      proxy_pass http://app_server;

Я отредактировал это и заменил его внутренним IP-адресом моего docker-контейнера Wordpress (proxy_pass http://172.17.0.6;) — и, похоже, это сработало: теперь запросы перенаправляются в мой docker-контейнер. Очевидно, это лишь временное исправление. Как сделать его постоянным, и почему вообще это происходит?

app.yaml

runtime: custom
service: my-wordpress
env: flex

nginx.conf (внутри контейнера Nginx)

    # NOTE(review): this nginx.conf is generated by the App Engine Flex nginx
    # sidecar container on each deploy; manual edits made inside the running
    # container are lost on restart/redeploy, which is why patching proxy_pass
    # by hand is only a temporary fix.
    daemon off;

worker_processes auto;

events {
  worker_connections 4096;
  multi_accept on;
}

http {
  include mime.types;
  server_tokens off;

  variables_hash_max_size 2048;

  # set max body size to 32m as appengine supports.
  client_max_body_size 32m;

  tcp_nodelay on;
  tcp_nopush on;

  underscores_in_headers on;

  # GCLB uses a 10 minutes keep-alive timeout. Setting it to a bit more here
  # to avoid a race condition between the two timeouts.
  keepalive_timeout 650;
  # Effectively unlimited number of keepalive requests in the case of GAE flex.
  keepalive_requests 4294967295;

  # Backend the proxy forwards to. Per /etc/hosts in this container, "gaeapp"
  # resolves to 172.17.0.1 (the docker bridge gateway), so the app container
  # must publish port 8080 on the host for this upstream to be reachable; an
  # app listening only on port 80 produces "connection refused" upstream
  # errors and the 502s seen in the question.
  upstream app_server {
    keepalive 192;
    server gaeapp:8080;
  }




  # Classify the client source address; used below to decide whether the
  # X-Forwarded-For header should be inspected.
  # NOTE(review): label meanings inferred from the CIDRs (lo=loopback,
  # sb=link-local, lb=Google load balancer ranges, do=docker/private,
  # ext=everything else) -- confirm against the sidecar source.
  geo $source_type {
     default ext;
     127.0.0.0/8 lo;
     169.254.0.0/16 sb;

     35.191.0.0/16 lb;

     130.211.0.0/22 lb;

     172.16.0.0/12 do;
  }




  # Emit "Connection: upgrade" only when the client asked for a WebSocket
  # upgrade; for ordinary requests the Connection header stays empty so the
  # upstream keep-alive pool (see "keepalive 192" above) can be used.
  map $http_upgrade $ws_connection_header_value {
    default "";
    websocket upgrade;
  }
  # ngx_http_realip_module gets the second IP address from the last of the X-Forwarded-For header
  # X-Forwarded-For: [USER REQUEST PROVIDED X-F-F.]USER-IP.GCLB_IP
  set_real_ip_from  0.0.0.0/0;
  set_real_ip_from  0::/0;
  real_ip_header    X-Forwarded-For;

  # Identity-Aware Proxy JWT verification (GAE-specific nginx module, not part
  # of stock nginx). Disabled globally here and enabled per-server below;
  # "logs_only on" means failures are logged but requests are not rejected.
  iap_jwt_verify off;
  iap_jwt_verify_project_number 96882395728;
  iap_jwt_verify_app_id my-project-id;
  iap_jwt_verify_key_file /iap_watcher/iap_verify_keys.txt;
  iap_jwt_verify_iap_state_file /iap_watcher/iap_state;
  iap_jwt_verify_state_cache_time_sec 300;
  iap_jwt_verify_key_cache_time_sec 43200;
  iap_jwt_verify_logs_only on;

  # Main proxy server: terminates TLS from the Google load balancer on 8443
  # and forwards all application traffic to the app_server upstream.
  server {
    iap_jwt_verify on;

    # self signed ssl for load balancer traffic
    listen 8443 default_server ssl;
    ssl_certificate /etc/ssl/localcerts/lb.crt;
    ssl_certificate_key /etc/ssl/localcerts/lb.key;
    ssl_protocols TLSv1.2;
    ssl_ciphers EECDH+AES256:!SHA1;
    ssl_prefer_server_ciphers on;
    ssl_session_timeout 3h;

    proxy_pass_header Server;

    gzip on;
    gzip_proxied any;
    gzip_types text/html text/plain text/css text/xml text/javascript application/json application/javascript application/xml application/xml+rss application/protobuf application/x-protobuf;
    gzip_vary on;

    # Allow more space for request headers.
    large_client_header_buffers 4 32k;

    # Allow more space for response headers. These settings apply for response
    # only, not requests which buffering is disabled below.
    proxy_buffer_size 64k;
    proxy_buffers 32 4k;
    proxy_busy_buffers_size 72k;

    # Explicitly set client buffer size matching nginx default.
    client_body_buffer_size 16k;




    # If version header present, make sure it's correct.
    # (444 closes the connection without a response; the regex also accepts an
    # absent/empty header.)

    if ($http_x_appengine_version !~ '(?:^$)|(?:^my-wordpress:20200504t053100(?:\..*)?$)') {
      return 444;
    }

    set $x_forwarded_for_test "";

    # If request comes from sb, lo, or do, do not care about x-forwarded-for header.
    if ($source_type !~ sb|lo|do) {
      set $x_forwarded_for_test $http_x_forwarded_for;
    }

    # For local health checks only.
    if ($http_x_google_vme_health_check = 1) {
      set $x_forwarded_for_test "";
    }



    # Proxy all application traffic to the upstream, passing the GAE-specific
    # X-AppEngine-* request headers through to the app unchanged.
    location / {



      proxy_pass http://app_server;
  proxy_redirect off;
  proxy_http_version 1.1;
  proxy_set_header Connection "";
  proxy_set_header Host $host;

  proxy_set_header X-Forwarded-Host $server_name;

  proxy_send_timeout 3600s;
  proxy_read_timeout 3600s;

  # NOTE: this second "Connection" directive overrides the empty one above;
  # it sends "upgrade" only for WebSocket requests (see the map block).
  proxy_set_header Upgrade $http_upgrade;
  proxy_set_header Connection $ws_connection_header_value;



      proxy_set_header X-AppEngine-Api-Ticket $http_x_appengine_api_ticket;

      proxy_set_header X-AppEngine-Auth-Domain $http_x_appengine_auth_domain;

      proxy_set_header X-AppEngine-BlobChunkSize $http_x_appengine_blobchunksize;

      proxy_set_header X-AppEngine-BlobSize $http_x_appengine_blobsize;

      proxy_set_header X-AppEngine-BlobUpload $http_x_appengine_blobupload;

      proxy_set_header X-AppEngine-Cron $http_x_appengine_cron;

      proxy_set_header X-AppEngine-Current-Namespace $http_x_appengine_current_namespace;

      proxy_set_header X-AppEngine-Datacenter $http_x_appengine_datacenter;

      proxy_set_header X-AppEngine-Default-Namespace $http_x_appengine_default_namespace;

      proxy_set_header X-AppEngine-Default-Version-Hostname $http_x_appengine_default_version_hostname;

      proxy_set_header X-AppEngine-Federated-Identity $http_x_appengine_federated_identity;

      proxy_set_header X-AppEngine-Federated-Provider $http_x_appengine_federated_provider;

      proxy_set_header X-AppEngine-Https $http_x_appengine_https;

      proxy_set_header X-AppEngine-Inbound-AppId $http_x_appengine_inbound_appid;

      proxy_set_header X-AppEngine-Inbound-User-Email $http_x_appengine_inbound_user_email;

      proxy_set_header X-AppEngine-Inbound-User-Id $http_x_appengine_inbound_user_id;

      proxy_set_header X-AppEngine-Inbound-User-Is-Admin $http_x_appengine_inbound_user_is_admin;

      proxy_set_header X-AppEngine-QueueName $http_x_appengine_queuename;

      proxy_set_header X-AppEngine-Request-Id-Hash $http_x_appengine_request_id_hash;

      proxy_set_header X-AppEngine-Request-Log-Id $http_x_appengine_request_log_id;

      proxy_set_header X-AppEngine-TaskETA $http_x_appengine_tasketa;

      proxy_set_header X-AppEngine-TaskExecutionCount $http_x_appengine_taskexecutioncount;

      proxy_set_header X-AppEngine-TaskName $http_x_appengine_taskname;

      proxy_set_header X-AppEngine-TaskRetryCount $http_x_appengine_taskretrycount;

      proxy_set_header X-AppEngine-TaskRetryReason $http_x_appengine_taskretryreason;

      proxy_set_header X-AppEngine-Upload-Creation $http_x_appengine_upload_creation;

      proxy_set_header X-AppEngine-User-Email $http_x_appengine_user_email;

      proxy_set_header X-AppEngine-User-Id $http_x_appengine_user_id;

      proxy_set_header X-AppEngine-User-Is-Admin $http_x_appengine_user_is_admin;

      proxy_set_header X-AppEngine-User-Nickname $http_x_appengine_user_nickname;

      proxy_set_header X-AppEngine-User-Organization $http_x_appengine_user_organization;

      # Strip any client-supplied version header before it reaches the app.
      proxy_set_header X-AppEngine-Version "";


      add_header X-AppEngine-Flex-AppLatency $request_time always;




    }

    include /var/lib/nginx/extra/*.conf;
  }
  server {
    # expose /nginx_status but on a different port (8090) to avoid
    # external visibility / conflicts with the app.
    listen 8090;
    location /nginx_status {
      stub_status on;
      access_log off;
    }
    location / {
      root /dev/null;
    }
  }

  # Health-check endpoints polled by the App Engine health checker.
  server {
    # expose health checks on a different port to avoid
    # external visibility / conflicts with the app.
    listen 10402 ssl;
    ssl_certificate /etc/ssl/localcerts/lb.crt;
    ssl_certificate_key /etc/ssl/localcerts/lb.key;
    ssl_protocols TLSv1.2;
    ssl_ciphers EECDH+AES256:!SHA1;
    ssl_prefer_server_ciphers on;
    ssl_session_timeout 3h;

    # Liveness: fails only on nginx-level problems (lameducked proxy,
    # unhealthy sidecars, full disk); still returns 200 while only the app
    # itself is lameducked.
    location = /liveness_check {
      if ( -f /tmp/nginx/lameducked ) {

    return 503 'lameducked';
  }




    if ( -f /var/lib/google/ae/unhealthy/sidecars ) {

      return 503 'unhealthy sidecars';
    }





    if ( !-f /var/lib/google/ae/disk_not_full ) {

      return 503 'disk full';
    }


      if ( -f /tmp/nginx/app_lameducked ) {

        return 200 'ok';
      }


         return 200 'ok';

    }

    # Readiness: same checks as liveness, but additionally fails while the
    # app is lameducked so traffic is drained from this instance.
    location = /readiness_check {
      if ( -f /tmp/nginx/lameducked ) {

    return 503 'lameducked';
  }




    if ( -f /var/lib/google/ae/unhealthy/sidecars ) {

      return 503 'unhealthy sidecars';
    }





    if ( !-f /var/lib/google/ae/disk_not_full ) {

      return 503 'disk full';
    }



      if ( -f /tmp/nginx/app_lameducked ) {

        return 503 'app lameducked';
      }


         return 200 'ok';

    }
  }


  # Add session affinity entry to log_format line i.i.f. the GCLB cookie
  # is present.
  map $cookie_gclb $session_affinity_log_entry {
    '' '';
    default sessionAffinity=$cookie_gclb;
  }

  # Output nginx access logs in the standard format, plus additional custom
  # fields containing "X-Cloud-Trace-Context" header, the current epoch
  # timestamp, the request latency, and "X-Forwarded-For" at the end.
  # If you make changes to the log format below, you MUST validate this against
  # the parsing regex at:
  # GoogleCloudPlatform/appengine-sidecars-docker/fluentd_logger/managed_vms.conf
  # (In general, adding to the end of the list does not require a change if the
  # field does not need to be logged.)

  log_format custom '$remote_addr - $remote_user [$time_local] '
                    '"$request" $status $body_bytes_sent '
                    '"$http_referer" "$http_user_agent" '
                    'tracecontext="$http_x_cloud_trace_context" '
                    'timestampSeconds="${msec}000000" '
                    'latencySeconds="$request_time" '
                    'x-forwarded-for="$http_x_forwarded_for" '
                    'uri="$uri" '
                    'appLatencySeconds="$upstream_response_time" '
                    'appStatusCode="$upstream_status" '
                    'upgrade="$http_upgrade" '
                    'iap_jwt_action="$iap_jwt_action" '
                    '$session_affinity_log_entry';

  access_log /var/log/nginx/access.log custom;
  error_log /var/log/nginx/error.log warn;
}

/etc/hosts (внутри контейнера Nginx)

root@f9c9cb5df8e2:/etc/nginx# cat /etc/hosts
127.0.0.1       localhost
::1     localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.1      gaeapp
172.17.0.5      f9c9cb5df8e2

Результат docker ps

enter image description here

1 Ответ

1 голос
/ 05 мая 2020

Я смог решить проблему, открыв свой Wordpress-сайт из docker-контейнера через порт 8080 (ранее он был открыт через порт 80). Для меня это не имеет особого смысла, поэтому если кто-то знает причину — пожалуйста, объясните.

...