Ver Fonte

Commit to enable standalone master instances in AWS

Chris Callegari há 6 anos atrás
pai
commit
1f90c05186

+ 1 - 1
playbooks/aws/openshift-cluster/install.yml

@@ -1,5 +1,5 @@
 ---
-- name: Setup the master node group
+- name: Setup master instances
   hosts: localhost
   tasks:
   - import_role:

+ 3 - 3
playbooks/aws/openshift-cluster/provision.yml

@@ -14,12 +14,12 @@
 
 - import_playbook: provision_elb.yml
 
-- name: Create the master node group
+- name: Create the master instances
   hosts: localhost
   tasks:
-  - name: provision cluster
+  - name: provision instances
     import_role:
       name: openshift_aws
-      tasks_from: provision.yml
+      tasks_from: provision_masters.yml
 
 - import_playbook: provision_dns.yml

+ 34 - 1
roles/lib_utils/filter_plugins/openshift_aws_filters.py

@@ -11,6 +11,37 @@ class FilterModule(object):
     ''' Custom ansible filters for use by openshift_aws role'''
 
     @staticmethod
+    def subnet_count_list(size, subnets):
+        """This function will create a dict mapping each subnet_id to an instance count."""
+        items = {}
+        count = 0
+        for _ in range(0, int(size)):
+            if subnets[count]['subnets'][0]['subnet_id'] in items:
+                items[subnets[count]['subnets'][0]['subnet_id']] = \
+                    items[subnets[count]['subnets'][0]['subnet_id']] + 1
+            else:
+                items[subnets[count]['subnets'][0]['subnet_id']] = 1
+            if count < (len(subnets) - 1):
+                count = count + 1
+            else:
+                count = 0
+        return items
+
+    @staticmethod
+    def ec2_to_asg_tag(ec2_tag_info):
+        ''' This function will convert an ec2 tag list to a list of asg tag dictionaries.'''
+        tags = []
+        for tag in ec2_tag_info:
+            for key in tag:
+                if 'deployment_serial' in key:
+                    l_dict = {'tags': []}
+                    l_dict['tags'].append({'key': 'deployment_serial',
+                                           'value': tag[key]})
+                    tags.append(l_dict.copy())
+
+        return tags
+
+    @staticmethod
     def scale_groups_serial(scale_group_info, upgrade=False):
         ''' This function will determine what the deployment serial should be and return it
 
@@ -71,4 +102,6 @@ class FilterModule(object):
         ''' returns a mapping of filters to methods '''
         return {'build_instance_tags': self.build_instance_tags,
                 'scale_groups_match_capacity': self.scale_groups_match_capacity,
-                'scale_groups_serial': self.scale_groups_serial}
+                'scale_groups_serial': self.scale_groups_serial,
+                'ec2_to_asg_tag': self.ec2_to_asg_tag,
+                'subnet_count_list': self.subnet_count_list}

+ 21 - 21
roles/openshift_aws/defaults/main.yml

@@ -56,6 +56,8 @@ openshift_aws_vpc:
     us-east-1:
     - cidr: 172.31.48.0/20
       az: "us-east-1c"
+# Uncomment to enable use of multi availability zone
+# Greenfield installs only!  Single-az to multi-az migration is not supported!
 #    - cidr: 172.31.32.0/20
 #      az: "us-east-1e"
 #    - cidr: 172.31.16.0/20
@@ -195,7 +197,7 @@ openshift_aws_elb_dict:
       scheme: internet-facing
       tags: "{{ openshift_aws_kube_tags }}"
 
-openshift_aws_node_group_config_master_volumes:
+openshift_aws_master_volumes:
 - device_name: /dev/sda1
   volume_size: 100
   volume_type: gp2
@@ -260,22 +262,20 @@ openshift_aws_scale_group_health_check:
   type: EC2
 
 # these will be used during upgrade
-openshift_aws_master_group_config:
-  # The 'master' key is always required here.
-  master:
-    instance_type: "{{ openshift_aws_master_group_instance_type | default(openshift_aws_instance_type) }}"
-    volumes: "{{ openshift_aws_node_group_config_master_volumes }}"
-    health_check: "{{ openshift_aws_scale_group_health_check }}"
-    min_size: "{{ openshift_aws_master_group_min_size | default(3) }}"
-    max_size: "{{ openshift_aws_master_group_max_size | default(3) }}"
-    desired_size: "{{ openshift_aws_master_group_desired_size | default(3) }}"
-    wait_for_instances: True
-    termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
-    replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
-    iam_role: "{{ openshift_aws_iam_master_role_name | default(openshift_aws_iam_role_name) }}"
-    policy_name: "{{ openshift_aws_iam_master_role_policy_name | default(openshift_aws_iam_role_policy_name) }}"
-    policy_json: "{{ openshift_aws_iam_master_role_policy_json | default(openshift_aws_iam_role_policy_json) }}"
-    elbs: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}"
+openshift_aws_master_instance_config:
+  instance_type: "{{ openshift_aws_master_group_instance_type | default(openshift_aws_instance_type) }}"
+  volumes: "{{ openshift_aws_master_volumes }}"
+  health_check: "{{ openshift_aws_scale_group_health_check }}"
+  exact_count: "{{ openshift_aws_master_group_desired_size | default(3) }}"
+  termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
+  iam_role: "{{ openshift_aws_iam_master_role_name | default(openshift_aws_iam_role_name) }}"
+  policy_name: "{{ openshift_aws_iam_master_role_policy_name | default(openshift_aws_iam_role_policy_name) }}"
+  policy_json: "{{ openshift_aws_iam_master_role_policy_json | default(openshift_aws_iam_role_policy_json) }}"
+  elbs: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}"
+  groups:
+  - "{{ openshift_aws_clusterid }}"  # default sg
+  - "{{ openshift_aws_clusterid }}_master"  # node type sg
+  - "{{ openshift_aws_clusterid }}_master_k8s"  # node type sg k8s
 
 openshift_aws_node_group_config:
   # The 'compute' key is always required here.
@@ -312,6 +312,10 @@ openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
 openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
 
 openshift_aws_launch_config_security_groups:
+  master:
+  - "{{ openshift_aws_clusterid }}"  # default sg
+  - "{{ openshift_aws_clusterid }}_master"  # node type sg
+  - "{{ openshift_aws_clusterid }}_master_k8s"  # node type sg k8s
   compute:
   - "{{ openshift_aws_clusterid }}"  # default sg
   - "{{ openshift_aws_clusterid }}_compute"  # node type sg
@@ -320,10 +324,6 @@ openshift_aws_launch_config_security_groups:
   - "{{ openshift_aws_clusterid }}"  # default sg
   - "{{ openshift_aws_clusterid }}_infra"  # node type sg
   - "{{ openshift_aws_clusterid }}_infra_k8s"  # node type sg k8s
-  master:
-  - "{{ openshift_aws_clusterid }}"  # default sg
-  - "{{ openshift_aws_clusterid }}_master"  # node type sg
-  - "{{ openshift_aws_clusterid }}_master_k8s"  # node type sg k8s
 
 openshift_aws_security_groups_tags: "{{ openshift_aws_kube_tags }}"
 

+ 11 - 0
roles/openshift_aws/tasks/elb_reg.yml

@@ -0,0 +1,11 @@
+---
+- name: Register EC2 instances to ELB
+  ec2_elb:
+    ec2_elbs: "{{ l_elb.value.name }}"
+    instance_id: "{{ item }}"
+    region: "{{ openshift_aws_region }}"
+    state: present
+    wait: False
+  register: test
+  with_items: "{{ instancesout.instances | list | map(attribute='instance_id') | list }}"
+  failed_when: "'InvalidInstanceID.NotFound' in test"

+ 26 - 0
roles/openshift_aws/tasks/provision_ec2.yml

@@ -0,0 +1,26 @@
+---
+- name: create instance(s)
+  ec2:
+    assign_public_ip: yes
+    count_tag:
+      host-type: "master"
+    ebs_optimized: True
+    exact_count: "{{ l_subnetout_results[l_loop] }}"
+    group_id: "{{ ec2sgs.security_groups | map(attribute='group_id') | list }}"
+    instance_tags: "{{ l_instance_tags }}"
+    instance_type: "{{ openshift_aws_master_instance_config.instance_type }}"
+    image: "{{ l_image }}"
+    key_name: "{{ openshift_aws_ssh_key_name }}"
+    monitoring: False
+    region: "{{ openshift_aws_region }}"
+    termination_protection: False
+    user_data: "{{ lookup('template', 'user_data.j2') }}"
+    volumes: "{{ openshift_aws_master_instance_config.volumes }}"
+    vpc_subnet_id: "{{ l_loop }}"
+    wait: yes
+  loop: "{{ l_subnetout_results | list }}"
+  loop_control:
+    loop_var: l_loop
+  retries: 3
+  delay: 3
+  register: ec2s

+ 87 - 0
roles/openshift_aws/tasks/provision_ec2_facts.yml

@@ -0,0 +1,87 @@
+---
+# When openshift_aws_use_custom_ami is '' then
+# we retrieve the latest build AMI.
+# Then set openshift_aws_ami to the ami.
+- when:
+  - (openshift_aws_ami == '' and 'master' not in openshift_aws_ami_map) or ('master' in openshift_aws_ami_map and openshift_aws_ami_map['master'] == '')
+  block:
+  - name: fetch recently created AMI
+    ec2_ami_facts:
+      region: "{{ openshift_aws_region }}"
+      filters: "{ 'name': '{{ openshift_aws_ami_name }}*',
+             {%- for key in openshift_aws_ami_tags -%}
+                 'tag:{{ key }}': '{{ openshift_aws_ami_tags[key] }}',
+             {%- endfor -%} }"
+    register: amiout
+    failed_when: "amiout.images|length == 0"
+
+  - name: Set the openshift_aws_ami
+    set_fact:
+      openshift_aws_ami: "{{ ( amiout.images | sort(attribute='creation_date') | map(attribute='image_id') | reverse | list )[0] }}"
+    when:
+    - "'images' in amiout"
+    - amiout.images|length > 0
+
+- block:
+  # query instance's and determine if we need to create the others.
+  # if we find more than 1 for each type, and this isn't an upgrade, then exit
+  - name: fetch all master ec2s for this cluster
+    ec2_instance_facts:
+      region: "{{ openshift_aws_region }}"
+      filters:
+        instance-state-name: running
+        vpc-id: "{{ vpcout.vpcs.0.id }}"
+        "tag:clusterid": "{{ openshift_aws_clusterid }}"
+        "tag:host-type": "master"
+    register: ec2s
+
+  - debug:
+      msg: "{{ ec2s.instances }}"
+
+  - fail:
+      msg: "Found more than 1 group that matches the query for group: master"
+    when:
+    - not openshift_aws_node_group_upgrade
+    - ( ec2s.instances | map(attribute='tags.deployment_serial') | list | unique | count ) > 1
+
+  - fail:
+      msg: "Upgrade: Found more than 2 groups that matches the query for group: master"
+    when:
+    - openshift_aws_node_group_upgrade
+    - asgs.results|length > 2
+
+  - name: Modify ec2 tags dictionary
+    set_fact:
+      ec2s: "{{ ec2s.instances | map(attribute='tags') | list | unique | ec2_to_asg_tag }}"
+
+  - name: set the value for the deployment_serial
+    set_fact:
+      # scale_groups_serial is a custom filter in role lib_utils
+      l_deployment_serial: "{{  openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else ec2s | scale_groups_serial(openshift_aws_node_group_upgrade) }}"
+
+  - name: dump deployment serial
+    debug:
+      msg: "Deployment serial: {{ l_deployment_serial }}"
+
+  - set_fact:
+      l_instance_tags: "{{ openshift_aws_node_group_config_tags
+      | combine((openshift_aws_master_group | selectattr('group', 'match', 'master') | list | first).tags)
+      | combine({'deployment_serial': l_deployment_serial, 'ami': openshift_aws_ami_map['master'] | default(openshift_aws_ami)})
+      | combine({'openshift-node-group-config': (openshift_aws_master_group | selectattr('group', 'match', 'master') | list | first).node_group_config | default('unset') }) }}"
+      l_image: "{{ openshift_aws_ami_map['master'] | default(openshift_aws_ami) }}"
+
+- name: subnets
+  set_fact:
+    l_subnetout_results: "{{ openshift_aws_master_instance_config.exact_count | subnet_count_list(subnetout.results) }}"
+
+- name: dump subnet count
+  debug:
+    msg: "subnet count: {{ l_subnetout_results }}"
+
+- name: fetch the security groups
+  ec2_group_facts:
+    filters:
+      group-name: "{{ openshift_aws_master_instance_config.groups }}"
+      vpc-id: "{{ vpcout.vpcs[0].id }}"
+    region: "{{ openshift_aws_region }}"
+  register: ec2sgs

+ 11 - 8
roles/openshift_aws/tasks/provision.yml

@@ -1,13 +1,10 @@
 ---
-- include_tasks: vpc_and_subnet_id.yml
+- import_tasks: vpc_and_subnet_id.yml
 
-- name: include scale group creation for master
-  include_tasks: build_node_group.yml
-  with_items: "{{ openshift_aws_master_group }}"
-  vars:
-    l_node_group_config: "{{ openshift_aws_master_group_config }}"
-  loop_control:
-    loop_var: openshift_aws_node_group
+- import_tasks: provision_ec2_facts.yml
+
+- name: include master instance creation
+  import_tasks: provision_ec2.yml
 
 - name: fetch newly created instances
   ec2_instance_facts:
@@ -21,6 +18,12 @@
   delay: 3
   until: instancesout.instances|length > 0
 
+- name: include ec2 register
+  include_tasks: elb_reg.yml
+  loop: "{{ openshift_aws_elb_dict['master'] | dict2items }}"
+  loop_control:
+    loop_var: l_elb
+
 - name: wait for ssh to become available
   wait_for:
     port: 22

+ 15 - 6
roles/openshift_aws/templates/user_data.j2

@@ -2,29 +2,38 @@
 {{ openshift_aws_node_user_data }}
 {% else %}
 #cloud-config
+
 write_files:
 - path: /root/openshift_bootstrap/openshift_settings.yaml
   owner: 'root:root'
   permissions: '0640'
   content: |
-    openshift_node_config_name: {{ openshift_aws_node_group.node_group_config | default('unset') }}
-{%   if openshift_aws_node_group.group != 'master' %}
+    openshift_node_config_name: {%
+  if l_instance_tags['host-type'] == 'master' %}
+node-config-master
+{%   elif l_instance_tags['host-type'] == 'node' %}
+{{ openshift_aws_node_group.node_group_config }}
+{%   else %}
+unset
+{%   endif %}
+{%   if l_instance_tags['host-type'] == 'node' %}
 - path: /etc/origin/node/bootstrap.kubeconfig
   owner: 'root:root'
   permissions: '0640'
   encoding: b64
   content: {{ openshift_aws_launch_config_bootstrap_token | b64encode }}
 {%   endif %}
+
 runcmd:
-{%     if openshift_aws_node_run_bootstrap_startup %}
+{%   if openshift_aws_node_run_bootstrap_startup %}
 - [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
-{%     endif %}
-{%     if openshift_aws_node_group.group != 'master' %}
+{%   endif %}
+{%   if l_instance_tags['host-type'] == 'node' %}
 {# Restarting systemd-hostnamed ensures that instances will have FQDN
 hostnames following network restart. #}
 - [ systemctl, restart, systemd-hostnamed]
 - [ systemctl, restart, NetworkManager]
 - [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
 - [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
-{%     endif %}
+{%   endif %}
 {% endif %}