failed to acquire lease kube-system/kube-controller-manager
0 votes / 07 April 2020

I am trying to deploy a highly available OKD 3.11 cluster on AWS with 3 master nodes. I have tried both an Application Load Balancer and DNS-based load balancing; both attempts failed with the following error.

failed to acquire lease kube-system/kube-controller-manager
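
In case it helps anyone diagnose this, here is roughly how I have been checking which master (if any) currently holds the lease. This assumes the endpoints-based resource lock that Kubernetes 1.11 uses by default, and that oc works on at least one master; the exact commands may differ on other versions.

# The current holder is recorded in the control-plane.alpha.kubernetes.io/leader
# annotation on this endpoints object (the default lock in Kubernetes 1.11).
oc -n kube-system get endpoints kube-controller-manager -o yaml

# Controller logs on an OKD 3.11 master, if I remember the 3.11 tooling correctly:
/usr/local/bin/master-logs controllers controllers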

Here is my inventory file:

# OpenShift Inventory Template.
# Note that when the infrastructure is generated by Terraform, this file is
# expanded into './inventory.cfg', based on the rules in:
#
#   ./modules/openshift/08-inventory.tf

# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
masters
etcd
nodes

# Set variables common for all OSEv3 hosts
[OSEv3:vars]
# SSH user, this user should allow ssh based auth without requiring a password
ansible_ssh_user=maintuser
openshift_disable_check=disk_availability,docker_image_availability

openshift_master_cluster_method=native
openshift_master_cluster_hostname=${public_hostname}
openshift_master_cluster_public_hostname=${public_hostname}

openshift_master_named_certificates=[{"certfile": "/home/maintuser/example.com.crt", "names": ["example.com"], "keyfile": "/home/maintuser/example.com.key", "cafile": "/home/maintuser/example.crt"}]
openshift_master_overwrite_named_certificates=true

debug_level=9

# If ansible_ssh_user is not root, ansible_become must be set to true
ansible_become=true

# Deploy OKD 3.11.
openshift_deployment_type=origin
openshift_release=v3.11


# Use an htpasswd file as the identity provider.
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]

# Uncomment the line below to enable metrics for the cluster.
# openshift_hosted_metrics_deploy=true

# Use API keys rather than instance roles so that tenant containers don't get
# Openshift's EC2/EBS permissions
openshift_cloudprovider_kind=aws
openshift_cloudprovider_aws_access_key=${access_key}
openshift_cloudprovider_aws_secret_key=${secret_key}

# Set the cluster_id.
openshift_clusterid=${cluster_id}

# Define the standard set of node groups, as per:
#   https://github.com/openshift/openshift-ansible#node-group-definition-and-mapping
openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true']}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true']}, {'name': 'node-config-master-infra', 'labels': ['node-role.kubernetes.io/infra=true,node-role.kubernetes.io/master=true']}, {'name': 'node-config-all-in-one', 'labels': ['node-role.kubernetes.io/infra=true,node-role.kubernetes.io/master=true,node-role.kubernetes.io/compute=true']}]
#openshift_node_groups=[{'name': 'node-config-all-in-one', 'labels': ['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true', 'node-role.kubernetes.io/compute=true'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}]

# Create the masters host group. Note that due to:
#   https://github.com/dwmkerr/terraform-aws-openshift/issues/40
# We cannot use the internal DNS names (such as master.openshift.local) as there
# is a bug with the installer when using the AWS cloud provider.
# Note that we use the master node as an infra node as well, which is not recommended for production use.
[masters]
${master1_hostname}
${master2_hostname}
${master3_hostname}


# host group for etcd
[etcd]
${master1_hostname}
${master2_hostname}
${master3_hostname}

# all nodes - along with their openshift_node_groups.
[nodes]
${master1_hostname} openshift_node_group_name='node-config-master-infra' openshift_schedulable=true
${master2_hostname} openshift_node_group_name='node-config-master-infra' openshift_schedulable=true
${master3_hostname} openshift_node_group_name='node-config-master-infra' openshift_schedulable=true
${node1_hostname} openshift_node_group_name='node-config-compute'
${node2_hostname} openshift_node_group_name='node-config-compute'
${node3_hostname} openshift_node_group_name='node-config-compute'
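
One variant I have not tried yet: as far as I can tell from the openshift-ansible docs, with openshift_master_cluster_method=native you can also let the installer provision its own HAProxy instead of an AWS load balancer, by adding an lb host group. The ${lb_hostname} variable below is hypothetical and is not in my template:

# Add "lb" to [OSEv3:children] and point openshift_master_cluster_hostname
# at this host; the installer then sets up HAProxy on it to balance the
# masters' API traffic on port 8443.
[lb]
${lb_hostname}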

The Terraform plan that populates the inventory file:

//  Collect together all of the output variables needed to build the final
//  inventory from the inventory template.
data "template_file" "inventory" {
  template = "${file("${path.cwd}/inventory.template.cfg")}"
  vars = {
    access_key        = "${aws_iam_access_key.openshift-aws-user.id}"
    secret_key        = "${aws_iam_access_key.openshift-aws-user.secret}"
    public_hostname   = "${var.public_hostname}"
    #load_balancer_dns = "${var.load_balancer_dns}"
    master1_hostname  = "${aws_instance.master1.private_dns}"
    master2_hostname  = "${aws_instance.master2.private_dns}"
    master3_hostname  = "${aws_instance.master3.private_dns}"
    node1_hostname    = "${aws_instance.node1.private_dns}"
    node2_hostname    = "${aws_instance.node2.private_dns}"
    node3_hostname    = "${aws_instance.node3.private_dns}"
    cluster_id        = "${var.cluster_id}"
  }
}

//  Create the inventory.
resource "local_file" "inventory" {
  content  = "${data.template_file.inventory.rendered}"
  filename = "${path.cwd}/inventory.cfg"
}
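
Since an Application Load Balancer operates at layer 7 and terminates TLS (which, as far as I understand, breaks the client-certificate authentication the masters use against the API on port 8443), the next thing I plan to try is a plain TCP-passthrough Network Load Balancer in front of the masters. Roughly what I have in mind is sketched below; the resource names and the vpc/subnet references are placeholders rather than parts of my actual plan:

// Hypothetical TCP-passthrough NLB for the master API on 8443.
resource "aws_lb" "master" {
  name               = "${var.cluster_id}-master"
  load_balancer_type = "network"
  internal           = false
  subnets            = ["${aws_subnet.public.id}"]
}

resource "aws_lb_target_group" "master" {
  name     = "${var.cluster_id}-master-8443"
  port     = 8443
  protocol = "TCP"
  vpc_id   = "${aws_vpc.openshift.id}"
}

resource "aws_lb_listener" "master" {
  load_balancer_arn = "${aws_lb.master.arn}"
  port              = 8443
  protocol          = "TCP"

  default_action {
    type             = "forward"
    target_group_arn = "${aws_lb_target_group.master.arn}"
  }
}

// One attachment per master; master2 and master3 are analogous.
resource "aws_lb_target_group_attachment" "master1" {
  target_group_arn = "${aws_lb_target_group.master.arn}"
  target_id        = "${aws_instance.master1.id}"
  port             = 8443
}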

I have spent the past week banging my head against a wall with zero progress. Thanks in advance for your help.
