Browse Source

Merge pull request #5945 from kwoodson/elb_fixes

Automatic merge from submit-queue.

Fixing elb creation for infra and adding to scale group.

ELB creation was missing for the infra nodes.  This was either caused by a refactor or it was simply missed.

The other issue was that after instances were removed and re-added via a scale group, the ELB no longer referenced those hosts.  This PR names the ELBs so they are automatically tied to a scale group instead of an instance.  This allows for instances to come and go but the ELBs will continue to work with all new instances in the ASG.
OpenShift Merge Robot 7 years ago
parent
commit
01e09bac68

+ 30 - 10
roles/openshift_aws/defaults/main.yml

@@ -12,7 +12,6 @@ openshift_aws_clusterid: default
 openshift_aws_region: us-east-1
 openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
 openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
-openshift_aws_kubernetes_cluster_status: "{{ openshift_aws_clusterid }}"
 
 openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
 openshift_aws_iam_cert_path: ''
@@ -48,7 +47,14 @@ openshift_aws_elb_health_check:
   unhealthy_threshold: 2
   healthy_threshold: 2
 
-openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_name_dict:
+  master:
+    external: "{{ openshift_aws_elb_basename }}-external"
+    internal: "{{ openshift_aws_elb_basename }}-internal"
+  infra:
+    external: "{{ openshift_aws_elb_basename }}"
+
 openshift_aws_elb_idle_timout: 400
 openshift_aws_elb_scheme: internet-facing
 openshift_aws_elb_cert_arn: ''
@@ -75,6 +81,18 @@ openshift_aws_elb_listeners:
       load_balancer_port: 443
       instance_protocol: tcp
       instance_port: 443
+  infra:
+    external:
+    - protocol: tcp
+      load_balancer_port: 80
+      instance_protocol: tcp
+      instance_port: 443
+      proxy_protocol: True
+    - protocol: tcp
+      load_balancer_port: 443
+      instance_protocol: tcp
+      instance_port: 443
+      proxy_protocol: True
 
 openshift_aws_node_group_config_master_volumes:
 - device_name: /dev/sdb
@@ -88,7 +106,7 @@ openshift_aws_node_group_config_node_volumes:
   device_type: gp2
   delete_on_termination: True
 
-openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
+openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
 openshift_aws_node_group_termination_policy: Default
 openshift_aws_node_group_replace_instances: []
 openshift_aws_node_group_replace_all_instances: False
@@ -114,6 +132,7 @@ openshift_aws_node_group_config:
     wait_for_instances: True
     termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
     replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+    elbs: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type].keys()| map('extract', openshift_aws_elb_name_dict[openshift_aws_node_group_type]) | list }}"
   compute:
     instance_type: m4.xlarge
     ami: "{{ openshift_aws_ami }}"
@@ -148,21 +167,22 @@ openshift_aws_node_group_config:
       type: infra
     termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
     replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+    elbs: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type].keys()| map('extract', openshift_aws_elb_name_dict[openshift_aws_node_group_type]) | list }}"
+
+openshift_aws_elb_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
+openshift_aws_elb_az_load_balancing: False
 
 openshift_aws_elb_security_groups:
-- "{{ openshift_aws_clusterid }}"
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"
+- "{{ openshift_aws_clusterid }}"  # default sg
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"  # node type sg
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s"  # node type sg k8s
 
 openshift_aws_elb_instance_filter:
   "tag:clusterid": "{{ openshift_aws_clusterid }}"
   "tag:host-type": "{{ openshift_aws_node_group_type }}"
   instance-state-name: running
 
-openshift_aws_launch_config_security_groups:
-- "{{ openshift_aws_clusterid }}"  # default sg
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"  # node type sg
-- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s"  # node type sg k8s
-
+openshift_aws_security_groups_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
 openshift_aws_node_security_groups:
   default:
     name: "{{ openshift_aws_clusterid }}"

+ 3 - 3
roles/openshift_aws/filter_plugins/openshift_aws_filters.py

@@ -9,17 +9,17 @@ class FilterModule(object):
     ''' Custom ansible filters for use by openshift_aws role'''
 
     @staticmethod
-    def build_instance_tags(clusterid, status='owned'):
+    def build_instance_tags(clusterid):
         ''' This function will return a dictionary of the instance tags.
 
             The main desire to have this inside of a filter_plugin is that we
             need to build the following key.
 
-            {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'}
+            {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"}
 
         '''
         tags = {'clusterid': clusterid,
-                'kubernetes.io/cluster/{}'.format(clusterid): status}
+                'kubernetes.io/cluster/{}'.format(clusterid): clusterid}
 
         return tags
 

+ 0 - 4
roles/openshift_aws/tasks/build_node_group.yml

@@ -21,10 +21,6 @@
     - "'results' in amiout"
     - amiout.results|length > 0
 
-- when: openshift_aws_create_security_groups
-  name: "Create {{ openshift_aws_node_group_type }} security groups"
-  include: security_group.yml
-
 - when: openshift_aws_create_launch_config
   name: "Create {{ openshift_aws_node_group_type }} launch config"
   include: launch_config.yml

+ 3 - 24
roles/openshift_aws/tasks/elb.yml

@@ -9,12 +9,6 @@
 - name: debug
   debug: var=vpcout
 
-- name: fetch the remote instances
-  ec2_remote_facts:
-    region: "{{ openshift_aws_region }}"
-    filters: "{{ openshift_aws_elb_instance_filter }}"
-  register: instancesout
-
 - name: fetch the default subnet id
   ec2_vpc_subnet_facts:
     region: "{{ openshift_aws_region }}"
@@ -23,7 +17,7 @@
       vpc-id: "{{ vpcout.vpcs[0].id }}"
   register: subnetout
 
-- name:
+- name: dump the elb listeners
   debug:
     msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
                    if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
@@ -33,6 +27,7 @@
   ec2_elb_lb:
     name: "{{ l_openshift_aws_elb_name }}"
     state: present
+    cross_az_load_balancing: "{{ openshift_aws_elb_az_load_balancing }}"
     security_group_names: "{{ openshift_aws_elb_security_groups }}"
     idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
     region: "{{ openshift_aws_region }}"
@@ -43,25 +38,9 @@
                    if 'master' in openshift_aws_node_group_type  or 'infra' in openshift_aws_node_group_type
                    else openshift_aws_elb_listeners }}"
     scheme: "{{ openshift_aws_elb_scheme }}"
-    tags:
-      KubernetesCluster: "{{ openshift_aws_clusterid }}"
+    tags: "{{ openshift_aws_elb_tags }}"
   register: new_elb
 
-# It is necessary to ignore_errors here because the instances are not in 'ready'
-#  state when first added to ELB
-- name: "Add instances to ELB {{ l_openshift_aws_elb_name }}"
-  ec2_elb:
-    instance_id: "{{ item.id }}"
-    ec2_elbs: "{{ l_openshift_aws_elb_name }}"
-    state: present
-    region: "{{ openshift_aws_region }}"
-    wait: False
-  with_items: "{{ instancesout.instances }}"
-  ignore_errors: True
-  retries: 10
-  register: elb_call
-  until: elb_call|succeeded
-
 - debug:
     msg: "{{ item }}"
   with_items:

+ 1 - 1
roles/openshift_aws/tasks/launch_config.yml

@@ -19,7 +19,7 @@
 - name: fetch the security groups for launch config
   ec2_group_facts:
     filters:
-      group-name: "{{ openshift_aws_launch_config_security_groups }}"
+      group-name: "{{ openshift_aws_elb_security_groups }}"
       vpc-id: "{{ vpcout.vpcs[0].id }}"
     region: "{{ openshift_aws_region }}"
   register: ec2sgs

+ 4 - 6
roles/openshift_aws/tasks/master_facts.yml

@@ -3,20 +3,18 @@
   ec2_elb_facts:
     region: "{{ openshift_aws_region }}"
     names:
-    - "{{ item }}"
-  with_items:
-  - "{{ openshift_aws_elb_name }}-external"
-  - "{{ openshift_aws_elb_name }}-internal"
+    - "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type]['internal'] }}"
   delegate_to: localhost
   register: elbs
 
 - debug: var=elbs
+  run_once: true
 
 - name: set fact
   set_fact:
-    openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
+    openshift_master_cluster_hostname: "{{ elbs.elbs[0].dns_name }}"
     osm_custom_cors_origins:
-    - "{{ elbs.results[1].elbs[0].dns_name }}"
+    - "{{ elbs.elbs[0].dns_name }}"
     - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
     - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
   with_items: "{{ groups['masters'] }}"

+ 32 - 14
roles/openshift_aws/tasks/provision.yml

@@ -7,6 +7,38 @@
   name: create s3 bucket for registry
   include: s3.yml
 
+- when: openshift_aws_create_security_groups
+  block:
+  - name: "Create {{ openshift_aws_node_group_type }} security groups"
+    include: security_group.yml
+
+  - name: "Create {{ openshift_aws_node_group_type }} security groups"
+    include: security_group.yml
+    vars:
+      openshift_aws_node_group_type: infra
+
+- name: create our master internal load balancer
+  include: elb.yml
+  vars:
+    openshift_aws_elb_direction: internal
+    openshift_aws_elb_scheme: internal
+    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type]['internal'] }}"
+
+- name: create our master external load balancer
+  include: elb.yml
+  vars:
+    openshift_aws_elb_direction: external
+    openshift_aws_elb_scheme: internet-facing
+    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name_dict[openshift_aws_node_group_type]['external'] }}"
+
+- name: create our infra node external load balancer
+  include: elb.yml
+  vars:
+    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name_dict['infra']['external'] }}"
+    openshift_aws_elb_direction: external
+    openshift_aws_elb_scheme: internet-facing
+    openshift_aws_node_group_type: infra
+
 - name: include scale group creation for master
   include: build_node_group.yml
 
@@ -22,20 +54,6 @@
   delay: 3
   until: instancesout.instances|length > 0
 
-- name: create our master internal load balancers
-  include: elb.yml
-  vars:
-    openshift_aws_elb_direction: internal
-    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-internal"
-    openshift_aws_elb_scheme: internal
-
-- name: create our master external load balancers
-  include: elb.yml
-  vars:
-    openshift_aws_elb_direction: external
-    l_openshift_aws_elb_name: "{{ openshift_aws_elb_name }}-external"
-    openshift_aws_elb_scheme: internet-facing
-
 - name: wait for ssh to become available
   wait_for:
     port: 22

+ 1 - 2
roles/openshift_aws/tasks/security_group.yml

@@ -38,8 +38,7 @@
 
 - name: tag sg groups with proper tags
   ec2_tag:
-    tags:
-      KubernetesCluster: "{{ openshift_aws_clusterid }}"
+    tags: "{{ openshift_aws_security_groups_tags }}"
     resource: "{{ item.group_id }}"
     region: "{{ openshift_aws_region }}"
   with_items: "{{ k8s_sg_create.results }}"