Browse Source

Merge pull request #608 from abutcher/native-ha

Native Support for Multi-Master HA
Brenton Leanhardt 9 years ago
parent
commit
c73ec7b6b2
29 changed files with 559 additions and 53 deletions
  1. 17 1
      filter_plugins/oo_filters.py
  2. 37 9
      inventory/byo/hosts.example
  3. 1 0
      playbooks/byo/openshift-cluster/config.yml
  4. 1 1
      playbooks/common/openshift-cluster/config.yml
  5. 8 0
      playbooks/common/openshift-cluster/evaluate_groups.yml
  6. 85 3
      playbooks/common/openshift-master/config.yml
  7. 1 1
      playbooks/gce/openshift-cluster/join_node.yml
  8. 34 0
      roles/haproxy/README.md
  9. 21 0
      roles/haproxy/defaults/main.yml
  10. 5 0
      roles/haproxy/handlers/main.yml
  11. 14 0
      roles/haproxy/meta/main.yml
  12. 25 0
      roles/haproxy/tasks/main.yml
  13. 76 0
      roles/haproxy/templates/haproxy.cfg.j2
  14. 24 13
      roles/openshift_facts/library/openshift_facts.py
  15. 10 0
      roles/openshift_master/handlers/main.yml
  16. 103 5
      roles/openshift_master/tasks/main.yml
  17. 9 0
      roles/openshift_master/templates/atomic-openshift-master-api.j2
  18. 21 0
      roles/openshift_master/templates/atomic-openshift-master-api.service.j2
  19. 9 0
      roles/openshift_master/templates/atomic-openshift-master-controllers.j2
  20. 22 0
      roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2
  21. 13 4
      roles/openshift_master/templates/master.yaml.v1.j2
  22. 7 0
      roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2
  23. 1 0
      roles/openshift_master/vars/main.yml
  24. 2 2
      roles/openshift_master_ca/tasks/main.yml
  25. 0 8
      roles/openshift_master_cluster/tasks/configure_deferred.yml
  26. 1 4
      roles/openshift_master_cluster/tasks/main.yml
  27. 1 0
      roles/openshift_node/meta/main.yml
  28. 10 1
      roles/openshift_node/tasks/main.yml
  29. 1 1
      roles/openshift_repos/tasks/main.yaml

+ 17 - 1
filter_plugins/oo_filters.py

@@ -243,6 +243,21 @@ class FilterModule(object):
         return string.split(separator)
 
     @staticmethod
+    def oo_haproxy_backend_masters(hosts):
+        ''' This takes an array of dicts and returns an array of dicts
+            to be used as a backend for the haproxy role
+        '''
+        servers = []
+        for idx, host_info in enumerate(hosts):
+            server = dict(name="master%s" % idx)
+            server_ip = host_info['openshift']['common']['ip']
+            server_port = host_info['openshift']['master']['api_port']
+            server['address'] = "%s:%s" % (server_ip, server_port)
+            server['opts'] = 'check'
+            servers.append(server)
+        return servers
+
+    @staticmethod
     def oo_filter_list(data, filter_attr=None):
         ''' This returns a list, which contains all items where filter_attr
             evaluates to true
@@ -407,5 +422,6 @@ class FilterModule(object):
             "oo_split": self.oo_split,
             "oo_filter_list": self.oo_filter_list,
             "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
-            "oo_parse_certificate_names": self.oo_parse_certificate_names
+            "oo_parse_certificate_names": self.oo_parse_certificate_names,
+            "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters
         }

+ 37 - 9
inventory/byo/hosts.example

@@ -5,6 +5,7 @@
 masters
 nodes
 etcd
+lb
 
 # Set variables common for all OSEv3 hosts
 [OSEv3:vars]
@@ -57,21 +58,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Set cockpit plugins
 #osm_cockpit_plugins=['cockpit-kubernetes']
 
-# master cluster ha variables using pacemaker or RHEL HA
+# Native high availbility cluster method with optional load balancer.
+# If no lb group is defined, the installer assumes that a load balancer
+# has been preconfigured. For installation the value of
+# openshift_master_cluster_hostname must resolve to the load balancer
+# or to one or all of the masters defined in the inventory if no load
+# balancer is present.
+#openshift_master_cluster_method=native
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Pacemaker high availability cluster method.
+# Pacemaker HA environment must be able to self provision the
+# configured VIP. For installation openshift_master_cluster_hostname
+# must resolve to the configured VIP.
+#openshift_master_cluster_method=pacemaker
 #openshift_master_cluster_password=openshift_cluster
 #openshift_master_cluster_vip=192.168.133.25
 #openshift_master_cluster_public_vip=192.168.133.25
 #openshift_master_cluster_hostname=openshift-ansible.test.example.com
 #openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
 
-# master cluster ha variables when using a different HA solution
-# For installation the value of openshift_master_cluster_hostname must resolve
-# to the first master defined in the inventory.
-# The HA solution must be manually configured after installation and must ensure
-# that the master is running on a single master host.
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_defer_ha=True
+# Override the default controller lease ttl
+#osm_controller_lease_ttl=30
 
 # default subdomain to use for exposed routes
 #osm_default_subdomain=apps.test.example.com
@@ -104,6 +113,22 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Detected names may be overridden by specifying the "names" key
 #openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
 
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_session_encryption_secrets must be equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Recommended to use secrets with 32 or 64 bytes.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+#
+# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
+# characters long, to select AES-128, AES-192, or AES-256.
+#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+
 # host group for masters
 [masters]
 ose3-master[1:3]-ansible.test.example.com
@@ -111,6 +136,9 @@ ose3-master[1:3]-ansible.test.example.com
 [etcd]
 ose3-etcd[1:3]-ansible.test.example.com
 
+[lb]
+ose3-lb-ansible.test.example.com
+
 # NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
 # However, in order to ensure that your masters are not burdened with running pods you should
 # make them unschedulable by adding openshift_schedulable=False to any node that's also a master.

+ 1 - 0
playbooks/byo/openshift-cluster/config.yml

@@ -4,6 +4,7 @@
     g_etcd_group: "{{ 'etcd' }}"
     g_masters_group: "{{ 'masters' }}"
     g_nodes_group: "{{ 'nodes' }}"
+    g_lb_group: "{{ 'lb' }}"
     openshift_cluster_id: "{{ cluster_id | default('default') }}"
     openshift_debug_level: 2
     openshift_deployment_type: "{{ deployment_type }}"

+ 1 - 1
playbooks/common/openshift-cluster/config.yml

@@ -8,4 +8,4 @@
 - include: ../openshift-node/config.yml
   vars:
     osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
-    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}"

+ 8 - 0
playbooks/common/openshift-cluster/evaluate_groups.yml

@@ -62,3 +62,11 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_sudo: "{{ g_sudo | default(omit) }}"
     when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+
+  - name: Evaluate oo_lb_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_lb_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_lb_group] | default([])

+ 85 - 3
playbooks/common/openshift-master/config.yml

@@ -34,7 +34,9 @@
       - role: common
         local_facts:
           hostname: "{{ openshift_hostname | default(None) }}"
+          ip: "{{ openshift_ip | default(None) }}"
           public_hostname: "{{ openshift_public_hostname | default(None) }}"
+          public_ip: "{{ openshift_public_ip | default(None) }}"
           deployment_type: "{{ openshift_deployment_type }}"
       - role: master
         local_facts:
@@ -44,7 +46,6 @@
           public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
           cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
           cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
-          cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"
           console_path: "{{ openshift_master_console_path | default(None) }}"
           console_port: "{{ openshift_master_console_port | default(None) }}"
           console_url: "{{ openshift_master_console_url | default(None) }}"
@@ -168,6 +169,10 @@
     masters_needing_certs: "{{ hostvars
                                | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
                                | oo_filter_list(filter_attr='master_certs_missing') }}"
+    master_hostnames: "{{ hostvars
+                               | oo_select_keys(groups['oo_masters_to_config'])
+                               | oo_collect('openshift.common.all_hostnames')
+                               | oo_flatten | unique }}"
     sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
   roles:
   - openshift_master_certificates
@@ -207,13 +212,76 @@
       parsed_named_certificates: "{{ openshift_master_named_certificates | oo_parse_certificate_names(master_cert_config_dir, openshift.common.internal_hostnames) }}"
     when: openshift_master_named_certificates is defined
 
+- name: Compute haproxy_backend_servers
+  hosts: localhost
+  connection: local
+  sudo: false
+  gather_facts: no
+  tasks:
+  - set_fact:
+      haproxy_backend_servers: "{{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_haproxy_backend_masters }}"
+
+- name: Configure load balancers
+  hosts: oo_lb_to_config
+  vars:
+    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+    haproxy_frontends:
+    - name: atomic-openshift-api
+      mode: tcp
+      options:
+      - tcplog
+      binds:
+      - "*:{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
+      default_backend: atomic-openshift-api
+    haproxy_backends:
+    - name: atomic-openshift-api
+      mode: tcp
+      option: tcplog
+      balance: source
+      servers: "{{ hostvars.localhost.haproxy_backend_servers }}"
+  roles:
+  - role: haproxy
+    when: groups.oo_masters_to_config | length > 1
+
+- name: Generate master session keys
+  hosts: oo_first_master
+  tasks:
+  - fail:
+      msg: "Both openshift_master_session_auth_secrets and openshift_master_session_encryption_secrets must be provided if either variable is set"
+    when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is not defined) or (openshift_master_session_encryption_secrets is defined and openshift_master_session_auth_secrets is not defined)
+  - fail:
+      msg: "openshift_master_session_auth_secrets and openshift_master_encryption_secrets must be equal length"
+    when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length)
+  - name: Generate session authentication key
+    command: /usr/bin/openssl rand -base64 24
+    register: session_auth_output
+    with_sequence: count=1
+    when: openshift_master_session_auth_secrets is undefined
+  - name: Generate session encryption key
+    command: /usr/bin/openssl rand -base64 24
+    register: session_encryption_output
+    with_sequence: count=1
+    when: openshift_master_session_encryption_secrets is undefined
+  - set_fact:
+      session_auth_secret: "{{ openshift_master_session_auth_secrets
+                                | default(session_auth_output.results
+                                | map(attribute='stdout')
+                                | list) }}"
+      session_encryption_secret: "{{ openshift_master_session_encryption_secrets
+                                      | default(session_encryption_output.results
+                                      | map(attribute='stdout')
+                                      | list) }}"
+
 - name: Configure master instances
   hosts: oo_masters_to_config
+  serial: 1
   vars:
     named_certificates: "{{ hostvars[groups['oo_first_master'][0]]['parsed_named_certificates'] | default([])}}"
     sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-    embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+    openshift_master_count: "{{ groups.oo_masters_to_config | length }}"
+    openshift_master_session_auth_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_auth_secret'] }}"
+    openshift_master_session_encryption_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_encryption_secret'] }}"
   pre_tasks:
   - name: Ensure certificate directory exists
     file:
@@ -242,11 +310,25 @@
     omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ')}}"
   roles:
   - role: openshift_master_cluster
-    when: openshift_master_ha | bool
+    when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
   - openshift_examples
   - role: openshift_cluster_metrics
     when: openshift.common.use_cluster_metrics | bool
 
+- name: Determine cluster dns ip
+  hosts: oo_first_master
+  tasks:
+  - name: Get master service ip
+    command: "{{ openshift.common.client_binary }} get -o template svc kubernetes --template=\\{\\{.spec.clusterIP\\}\\}"
+    register: master_service_ip_output
+    when: openshift.common.version_greater_than_3_1_or_1_1 | bool
+  - set_fact:
+      cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+    when: not openshift.common.version_greater_than_3_1_or_1_1 | bool
+  - set_fact:
+      cluster_dns_ip: "{{ master_service_ip_output.stdout }}"
+    when: openshift.common.version_greater_than_3_1_or_1_1 | bool
+
 - name: Enable cockpit
   hosts: oo_first_master
   vars:

+ 1 - 1
playbooks/gce/openshift-cluster/join_node.yml

@@ -46,4 +46,4 @@
     openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} "
     os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
     osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
-    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}"

+ 34 - 0
roles/haproxy/README.md

@@ -0,0 +1,34 @@
+HAProxy
+=======
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+TODO
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)

+ 21 - 0
roles/haproxy/defaults/main.yml

@@ -0,0 +1,21 @@
+---
+haproxy_frontends:
+- name: main
+  binds:
+  - "*:80"
+  default_backend: default
+
+haproxy_backends:
+- name: default
+  balance: roundrobin
+  servers:
+  - name: web01
+    address: 127.0.0.1:9000
+    opts: check
+
+os_firewall_use_firewalld: False
+os_firewall_allow:
+- service: haproxy stats
+  port: "9000/tcp"
+- service: haproxy balance
+  port: "8443/tcp"

+ 5 - 0
roles/haproxy/handlers/main.yml

@@ -0,0 +1,5 @@
+---
+- name: restart haproxy
+  service:
+    name: haproxy
+    state: restarted

+ 14 - 0
roles/haproxy/meta/main.yml

@@ -0,0 +1,14 @@
+---
+galaxy_info:
+  author: Jason DeTiberus
+  description: HAProxy
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.9
+  platforms:
+  - name: EL
+    versions:
+    - 7
+dependencies:
+- { role: os_firewall }
+- { role: openshift_repos }

+ 25 - 0
roles/haproxy/tasks/main.yml

@@ -0,0 +1,25 @@
+---
+- name: Install haproxy
+  yum:
+    pkg: haproxy
+    state: present
+
+- name: Configure haproxy
+  template:
+    src: haproxy.cfg.j2
+    dest: /etc/haproxy/haproxy.cfg
+    owner: root
+    group: root
+    mode: 0644
+  notify: restart haproxy
+
+- name: Enable and start haproxy
+  service:
+    name: haproxy
+    state: started
+    enabled: yes
+  register: start_result
+
+- name: Pause 30 seconds if haproxy was just started
+  pause: seconds=30
+  when: start_result | changed

+ 76 - 0
roles/haproxy/templates/haproxy.cfg.j2

@@ -0,0 +1,76 @@
+# Global settings
+#---------------------------------------------------------------------
+global
+    chroot      /var/lib/haproxy
+    pidfile     /var/run/haproxy.pid
+    maxconn     4000
+    user        haproxy
+    group       haproxy
+    daemon
+
+    # turn on stats unix socket
+    stats socket /var/lib/haproxy/stats
+
+#---------------------------------------------------------------------
+# common defaults that all the 'listen' and 'backend' sections will
+# use if not designated in their block
+#---------------------------------------------------------------------
+defaults
+    mode                    http
+    log                     global
+    option                  httplog
+    option                  dontlognull
+    option http-server-close
+    option forwardfor       except 127.0.0.0/8
+    option                  redispatch
+    retries                 3
+    timeout http-request    10s
+    timeout queue           1m
+    timeout connect         10s
+    timeout client          300s
+    timeout server          300s
+    timeout http-keep-alive 10s
+    timeout check           10s
+    maxconn                 3000
+
+listen stats :9000
+    mode http
+    stats enable
+    stats uri /
+
+{% for frontend in haproxy_frontends %}
+frontend  {{ frontend.name }}
+{% for bind in frontend.binds %}
+    bind {{ bind }}
+{% endfor %}
+    default_backend {{ frontend.default_backend }}
+{% if 'mode' in frontend %}
+    mode {{ frontend.mode }}
+{% endif %}
+{% if 'options' in frontend %}
+{% for option in frontend.options %}
+    option {{ option }}
+{% endfor %}
+{% endif %}
+{% if 'redirects' in frontend %}
+{% for redirect in frontend.redirects %}
+    redirect {{ redirect }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+
+{% for backend in haproxy_backends %}
+backend {{ backend.name }}
+    balance {{ backend.balance }}
+{% if 'mode' in backend %}
+    mode {{ backend.mode }}
+{% endif %}
+{% if 'options' in backend %}
+{% for option in backend.options %}
+    option {{ option }}
+{% endfor %}
+{% endif %}
+{% for server in backend.servers %}
+    server      {{ server.name }} {{ server.address }} {{ server.opts }}
+{% endfor %}
+{% endfor %}

+ 24 - 13
roles/openshift_facts/library/openshift_facts.py

@@ -407,7 +407,7 @@ def set_identity_providers_if_unset(facts):
                 name='allow_all', challenge=True, login=True,
                 kind='AllowAllPasswordIdentityProvider'
             )
-            if deployment_type == 'enterprise':
+            if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
                 identity_provider = dict(
                     name='deny_all', challenge=True, login=True,
                     kind='DenyAllPasswordIdentityProvider'
@@ -551,18 +551,7 @@ def set_deployment_facts_if_unset(facts):
             facts['common']['config_base'] = config_base
         if 'data_dir' not in facts['common']:
             data_dir = '/var/lib/origin'
-            if deployment_type in ['enterprise', 'online']:
-                data_dir = '/var/lib/openshift'
             facts['common']['data_dir'] = data_dir
-        facts['common']['version'] = version = get_openshift_version()
-        if version is not None:
-            if deployment_type == 'origin':
-                version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6')
-            else:
-                version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900')
-        else:
-            version_gt_3_1_or_1_1 = True
-        facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1
 
     for role in ('master', 'node'):
         if role in facts:
@@ -596,6 +585,27 @@ def set_deployment_facts_if_unset(facts):
 
     return facts
 
+def set_version_facts_if_unset(facts):
+    """ Set version facts. This currently includes common.version and
+        common.version_greater_than_3_1_or_1_1.
+
+        Args:
+            facts (dict): existing facts
+        Returns:
+            dict: the facts dict updated with version facts.
+    """
+    if 'common' in facts:
+        deployment_type = facts['common']['deployment_type']
+        facts['common']['version'] = version = get_openshift_version()
+        if version is not None:
+            if deployment_type == 'origin':
+                version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6')
+            else:
+                version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900')
+        else:
+            version_gt_3_1_or_1_1 = True
+        facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1
+    return facts
 
 def set_sdn_facts_if_unset(facts, system_facts):
     """ Set sdn facts if not already present in facts dict
@@ -905,6 +915,7 @@ class OpenShiftFacts(object):
         facts = set_identity_providers_if_unset(facts)
         facts = set_sdn_facts_if_unset(facts, self.system_facts)
         facts = set_deployment_facts_if_unset(facts)
+        facts = set_version_facts_if_unset(facts)
         facts = set_aggregate_facts(facts)
         return dict(openshift=facts)
 
@@ -944,7 +955,7 @@ class OpenShiftFacts(object):
                           session_name='ssn', session_secrets_file='',
                           access_token_max_seconds=86400,
                           auth_token_max_seconds=500,
-                          oauth_grant_method='auto', cluster_defer_ha=False)
+                          oauth_grant_method='auto')
             defaults['master'] = master
 
         if 'node' in roles:

+ 10 - 0
roles/openshift_master/handlers/main.yml

@@ -2,3 +2,13 @@
 - name: restart master
   service: name={{ openshift.common.service_type }}-master state=restarted
   when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false))
+
+- name: restart master api
+  service: name={{ openshift.common.service_type }}-master-api state=restarted
+  when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+
+# TODO: need to fix up ignore_errors here
+- name: restart master controllers
+  service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+  when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+  ignore_errors: yes

+ 103 - 5
roles/openshift_master/tasks/main.yml

@@ -9,16 +9,22 @@
   when: openshift_master_oauth_grant_method is defined
 
 - fail:
+    msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
+  when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method not in ["native", "pacemaker"]))
+- fail:
+    msg: "'native' high availability is not supported for the requested OpenShift version"
+  when: openshift_master_ha | bool and openshift_master_cluster_method == "native" and not openshift.common.version_greater_than_3_1_or_1_1 | bool
+- fail:
     msg: "openshift_master_cluster_password must be set for multi-master installations"
-  when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined
+  when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password)
 
 - name: Set master facts
   openshift_facts:
     role: master
     local_facts:
+      cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
       cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
       cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
-      cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"
       debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
       api_port: "{{ openshift_master_api_port | default(None) }}"
       api_url: "{{ openshift_master_api_url | default(None) }}"
@@ -41,6 +47,8 @@
       portal_net: "{{ openshift_master_portal_net | default(None) }}"
       session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"
       session_name: "{{ openshift_master_session_name | default(None) }}"
+      session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(None) }}"
+      session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(None) }}"
       session_secrets_file: "{{ openshift_master_session_secrets_file | default(None) }}"
       access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
       auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
@@ -63,6 +71,8 @@
       controller_args: "{{ osm_controller_args | default(None) }}"
       infra_nodes: "{{ num_infra | default(None) }}"
       disabled_features: "{{ osm_disabled_features | default(None) }}"
+      master_count: "{{ openshift_master_count | default(None) }}"
+      controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
 
 - name: Install Master package
   yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version  }} state=present
@@ -77,7 +87,7 @@
       domain: cluster.local
   when: openshift.master.embedded_dns
 
-- name: Create config parent directory if it doesn't exist
+- name: Create config parent directory if it does not exist
   file:
     path: "{{ openshift_master_config_dir }}"
     state: directory
@@ -90,6 +100,8 @@
     creates: "{{ openshift_master_policy }}"
   notify:
   - restart master
+  - restart master api
+  - restart master controllers
 
 - name: Create the scheduler config
   template:
@@ -98,6 +110,8 @@
     backup: true
   notify:
   - restart master
+  - restart master api
+  - restart master controllers
 
 - name: Install httpd-tools if needed
   yum: pkg=httpd-tools state=present
@@ -120,6 +134,39 @@
   when: item.kind == 'HTPasswdPasswordIdentityProvider'
   with_items: openshift.master.identity_providers
 
+# workaround for missing systemd unit files for controllers/api
+- name: Create the api service file
+  template:
+    src: atomic-openshift-master-api.service.j2
+    dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-api.service
+    force: no
+- name: Create the controllers service file
+  template:
+    src: atomic-openshift-master-controllers.service.j2
+    dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-controllers.service
+    force: no
+- name: Create the api env file
+  template:
+    src: atomic-openshift-master-api.j2
+    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+    force: no
+- name: Create the controllers env file
+  template:
+    src: atomic-openshift-master-controllers.j2
+    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+    force: no
+- command: systemctl daemon-reload
+# end workaround for missing systemd unit files
+
+- name: Create session secrets file
+  template:
+    dest: "{{ openshift.master.session_secrets_file }}"
+    src: sessionSecretsFile.yaml.v1.j2
+    force: no
+  notify:
+  - restart master
+  - restart master api
+
 # TODO: add the validate parameter when there is a validation command to run
 - name: Create master config
   template:
@@ -128,12 +175,15 @@
     backup: true
   notify:
   - restart master
+  - restart master api
+  - restart master controllers
 
 - name: Configure master settings
   lineinfile:
     dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
     regexp: "{{ item.regex }}"
     line: "{{ item.line }}"
+    create: yes
   with_items:
     - regex: '^OPTIONS='
       line: "OPTIONS=--loglevel={{ openshift.master.debug_level }}"
@@ -142,6 +192,32 @@
   notify:
   - restart master
 
+- name: Configure master api settings
+  lineinfile:
+    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+    regexp: "{{ item.regex }}"
+    line: "{{ item.line }}"
+  with_items:
+    - regex: '^OPTIONS='
+      line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8443 --master=https://{{ openshift.common.ip }}:8443"
+    - regex: '^CONFIG_FILE='
+      line: "CONFIG_FILE={{ openshift_master_config_file }}"
+  notify:
+  - restart master api
+
+- name: Configure master controller settings
+  lineinfile:
+    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+    regexp: "{{ item.regex }}"
+    line: "{{ item.line }}"
+  with_items:
+    - regex: '^OPTIONS='
+      line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8444"
+    - regex: '^CONFIG_FILE='
+      line: "CONFIG_FILE={{ openshift_master_config_file }}"
+  notify:
+  - restart master controllers
+
 - name: Start and enable master
   service: name={{ openshift.common.service_type }}-master enabled=yes state=started
   when: not openshift_master_ha | bool
@@ -149,15 +225,37 @@
 
 - set_fact:
     master_service_status_changed = start_result | changed
+  when: not openshift_master_ha | bool
+
+- name: Start and enable master api
+  service: name={{ openshift.common.service_type }}-master-api enabled=yes state=started
+  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+  register: start_result
+
+- set_fact:
+    master_api_service_status_changed = start_result | changed
+  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+
+# TODO: fix the ugly workaround of setting ignore_errors
+#       the controllers service tries to start even if it is already started
+- name: Start and enable master controller
+  service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started
+  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+  register: start_result
+  ignore_errors: yes
+
+- set_fact:
+    master_controllers_service_status_changed = start_result | changed
+  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
 
 - name: Install cluster packages
   yum: pkg=pcs state=present
-  when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool
+  when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
   register: install_result
 
 - name: Start and enable cluster service
   service: name=pcsd enabled=yes state=started
-  when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool
+  when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
 
 - name: Set the cluster user password
   shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster

+ 9 - 0
roles/openshift_master/templates/atomic-openshift-master-api.j2

@@ -0,0 +1,9 @@
+OPTIONS=
+CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml
+
+# Proxy configuration
+# Origin uses standard HTTP_PROXY environment variables. Be sure to set
+# NO_PROXY for your master
+#NO_PROXY=master.example.com
+#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
+#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT

+ 21 - 0
roles/openshift_master/templates/atomic-openshift-master-api.service.j2

@@ -0,0 +1,21 @@
+[Unit]
+Description=Atomic OpenShift Master API
+Documentation=https://github.com/openshift/origin
+After=network.target
+After=etcd.service
+Before={{ openshift.common.service_type }}-node.service
+Requires=network.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory={{ openshift.common.data_dir }}
+SyslogIdentifier=atomic-openshift-master-api
+
+[Install]
+WantedBy=multi-user.target
+WantedBy={{ openshift.common.service_type }}-node.service

+ 9 - 0
roles/openshift_master/templates/atomic-openshift-master-controllers.j2

@@ -0,0 +1,9 @@
+OPTIONS=
+CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml
+
+# Proxy configuration
+# Origin uses standard HTTP_PROXY environment variables. Be sure to set
+# NO_PROXY for your master
+#NO_PROXY=master.example.com
+#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
+#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT

+ 22 - 0
roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2

@@ -0,0 +1,22 @@
+[Unit]
+Description=Atomic OpenShift Master Controllers
+Documentation=https://github.com/openshift/origin
+After=network.target
+After={{ openshift.common.service_type }}-master-api.service
+Before={{ openshift.common.service_type }}-node.service
+Requires=network.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory={{ openshift.common.data_dir }}
+SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+WantedBy={{ openshift.common.service_type }}-node.service

+ 13 - 4
roles/openshift_master/templates/master.yaml.v1.j2

@@ -10,13 +10,18 @@ assetConfig:
   publicURL: {{ openshift.master.public_console_url }}/
   servingInfo:
     bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }}
+    bindNetwork: tcp4
     certFile: master.server.crt
     clientCA: ""
     keyFile: master.server.key
     maxRequestsInFlight: 0
     requestTimeoutSeconds: 0
+{% if openshift_master_ha | bool %}
+controllerLeaseTTL: {{ openshift.master.controller_lease_ttl | default('30') }}
+{% endif %}
+controllers: '*'
 corsAllowedOrigins:
-{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] | unique %}
+{% for origin in ['127.0.0.1', 'localhost', openshift.common.ip, openshift.common.public_ip] | union(openshift.common.all_hostnames) | unique %}
   - {{ origin }}
 {% endfor %}
 {% for custom_origin in openshift.master.custom_cors_origins | default("") %}
@@ -29,8 +34,10 @@ corsAllowedOrigins:
 disabledFeatures: {{ openshift.master.disabled_features | to_json }}
 {% endif %}
 {% if openshift.master.embedded_dns | bool %}
+disabledFeatures: null
 dnsConfig:
   bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}
+  bindNetwork: tcp4
 {% endif %}
 etcdClientInfo:
   ca: {{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
@@ -80,9 +87,8 @@ kubernetesMasterConfig:
   - v1
   apiServerArguments: {{ api_server_args if api_server_args is defined else 'null' }}
   controllerArguments: {{ controller_args if controller_args is defined else 'null' }}
-{# TODO: support overriding masterCount #}
-  masterCount: 1
-  masterIP: ""
+  masterCount: {{ openshift.master.master_count }}
+  masterIP: {{ openshift.common.ip }}
   podEvictionTimeout: ""
   proxyClientInfo:
     certFile: master.proxy-client.crt
@@ -106,6 +112,7 @@ networkConfig:
 # serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
   serviceNetworkCIDR: {{ openshift.master.portal_net }}
 {% include 'v1_partials/oauthConfig.j2' %}
+pauseControllers: false
 policyConfig:
   bootstrapPolicyFile: {{ openshift_master_policy }}
   openshiftInfrastructureNamespace: openshift-infra
@@ -121,6 +128,7 @@ projectConfig:
 routingConfig:
   subdomain:  "{{ openshift.master.default_subdomain | default("") }}"
 serviceAccountConfig:
+  limitSecretReferences: false
   managedNames:
   - default
   - builder
@@ -131,6 +139,7 @@ serviceAccountConfig:
   - serviceaccounts.public.key
 servingInfo:
   bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }}
+  bindNetwork: tcp4
   certFile: master.server.crt
   clientCA: ca.crt
   keyFile: master.server.key

+ 7 - 0
roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2

@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: SessionSecrets
+secrets:
+{% for secret in openshift_master_session_auth_secrets %}
+- authentication: "{{ openshift_master_session_auth_secrets[loop.index0] }}"
+  encryption: "{{ openshift_master_session_encryption_secrets[loop.index0] }}"
+{% endfor %}

+ 1 - 0
roles/openshift_master/vars/main.yml

@@ -2,6 +2,7 @@
 openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
 openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
 openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
+openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
 openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
 openshift_version: "{{ openshift_pkg_version | default('') }}"
 

+ 2 - 2
roles/openshift_master_ca/tasks/main.yml

@@ -1,6 +1,6 @@
 ---
 - name: Install the base package for admin tooling
-  yum: pkg={{ openshift.common.service_type }}{{ openshift_version  }} state=present
+  yum: pkg={{ openshift.common.service_type }} state=present
   register: install_result
 
 - name: Reload generated facts
@@ -14,7 +14,7 @@
 - name: Create the master certificates if they do not already exist
   command: >
     {{ openshift.common.admin_binary }} create-master-certs
-      --hostnames={{ openshift.common.all_hostnames | join(',') }}
+      --hostnames={{ master_hostnames | join(',') }}
       --master={{ openshift.master.api_url }}
       --public-master={{ openshift.master.public_api_url }}
       --cert-dir={{ openshift_master_config_dir }} --overwrite=false

+ 0 - 8
roles/openshift_master_cluster/tasks/configure_deferred.yml

@@ -1,8 +0,0 @@
----
-- debug: msg="Deferring config"
-
-- name: Start and enable the master
-  service:
-    name: "{{ openshift.common.service_type }}-master"
-    state: started
-    enabled: yes

+ 1 - 4
roles/openshift_master_cluster/tasks/main.yml

@@ -4,10 +4,7 @@
   register: pcs_status
   changed_when: false
   failed_when: false
-  when: not openshift.master.cluster_defer_ha | bool
+  when: openshift.master.cluster_method == "pacemaker"
 
 - include: configure.yml
   when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr"
-
-- include: configure_deferred.yml
-  when: openshift.master.cluster_defer_ha | bool

+ 1 - 0
roles/openshift_node/meta/main.yml

@@ -13,3 +13,4 @@ galaxy_info:
   - cloud
 dependencies:
 - { role: openshift_common }
+- { role: docker }

+ 10 - 1
roles/openshift_node/tasks/main.yml

@@ -8,7 +8,7 @@
   when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip
 - fail:
     msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
-  when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online']
+  when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
 
 - name: Set node facts
   openshift_facts:
@@ -45,6 +45,15 @@
   register: sdn_install_result
   when: openshift.common.use_openshift_sdn
 
+- name: Install Node package
+  yum: pkg={{ openshift.common.service_type }}-node state=present
+  register: node_install_result
+
+- name: Install sdn-ovs package
+  yum: pkg={{ openshift.common.service_type }}-sdn-ovs state=present
+  register: sdn_install_result
+  when: openshift.common.use_openshift_sdn
+
 # TODO: add the validate parameter when there is a validation command to run
 - name: Create the Node config
   template:

+ 1 - 1
roles/openshift_repos/tasks/main.yaml

@@ -8,7 +8,7 @@
 #       proper repos correctly.
 
 - assert:
-    that: openshift_deployment_type in known_openshift_deployment_types
+    that: openshift.common.deployment_type in known_openshift_deployment_types
 
 - name: Ensure libselinux-python is installed
   yum: