
Merge pull request #6155 from abutcher/remove-pacemaker

Remove all references to pacemaker (pcs, pcsd) and openshift.master.cluster_method.
Scott Dodson 7 years ago
parent
commit
b879f67881

+ 1 - 13
inventory/byo/hosts.example

@@ -298,24 +298,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Set cockpit plugins
 #osm_cockpit_plugins=['cockpit-kubernetes']
 
-# Native high availability cluster method with optional load balancer.
+# Native high availability (default cluster method)
 # If no lb group is defined, the installer assumes that a load balancer has
 # been preconfigured. For installation the value of
 # openshift_master_cluster_hostname must resolve to the load balancer
 # or to one or all of the masters defined in the inventory if no load
 # balancer is present.
-#openshift_master_cluster_method=native
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-
-# Pacemaker high availability cluster method.
-# Pacemaker HA environment must be able to self provision the
-# configured VIP. For installation openshift_master_cluster_hostname
-# must resolve to the configured VIP.
-#openshift_master_cluster_method=pacemaker
-#openshift_master_cluster_password=openshift_cluster
-#openshift_master_cluster_vip=192.168.133.25
-#openshift_master_cluster_public_vip=192.168.133.25
 #openshift_master_cluster_hostname=openshift-ansible.test.example.com
 #openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
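
With the pacemaker block gone, a native-HA inventory only needs the cluster hostnames. For orientation, a minimal group_vars equivalent of the surviving inventory comments might look like the sketch below; the hostnames are illustrative assumptions, not values from this PR, and it assumes a preconfigured load balancer that resolves both names:

    # Hypothetical group_vars/OSEv3.yml sketch of a native-HA setup behind a
    # preconfigured load balancer (hostnames are illustrative).
    openshift_master_cluster_hostname: openshift-internal.example.com
    openshift_master_cluster_public_hostname: openshift.example.com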
 

+ 0 - 5
playbooks/adhoc/uninstall.yml

@@ -62,7 +62,6 @@
     - origin-master
     - origin-master-api
     - origin-master-controllers
-    - pcsd
     failed_when: false
 
 - hosts: etcd
@@ -384,8 +383,6 @@
     - origin-excluder
     - origin-docker-excluder
     - origin-master
-    - pacemaker
-    - pcs
     register: result
     until: result | success
 
@@ -456,8 +453,6 @@
     - /etc/sysconfig/origin-master-api
     - /etc/sysconfig/origin-master-controllers
     - /usr/share/openshift/examples
-    - /var/lib/pacemaker
-    - /var/lib/pcsd
     - /usr/lib/systemd/system/atomic-openshift-master-api.service
     - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
     - /usr/lib/systemd/system/origin-master-api.service
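
Hosts deployed before this change may still carry pacemaker artifacts that the trimmed uninstall playbook no longer touches. A hypothetical one-off cleanup (not part of this PR) could mirror the entries removed above:

    # Hypothetical ad-hoc cleanup for pre-existing installs; package and
    # service names match the entries removed from uninstall.yml above.
    - hosts: masters
      tasks:
      - name: Stop legacy pcsd service if present
        service:
          name: pcsd
          state: stopped
        failed_when: false
      - name: Remove legacy pacemaker packages
        package:
          name: "{{ item }}"
          state: absent
        with_items:
        - pacemaker
        - pcs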

+ 0 - 2
playbooks/openshift-master/private/additional_config.yml

@@ -19,8 +19,6 @@
     openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
     omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
   roles:
-  - role: openshift_master_cluster
-    when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
   - role: openshift_project_request_template
     when: openshift_project_request_template_manage
   - role: openshift_examples

+ 0 - 2
playbooks/openshift-master/private/tasks/wire_aggregator.yml

@@ -183,7 +183,6 @@
   systemd: name={{ openshift.common.service_type }}-master-api state=restarted
   when:
   - yedit_output.changed
-  - openshift.master.cluster_method == 'native'
 
 # We retry the controllers because the API may not be 100% initialized yet.
 - name: restart master controllers
@@ -194,7 +193,6 @@
   until: result.rc == 0
   when:
   - yedit_output.changed
-  - openshift.master.cluster_method == 'native'
 
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and

+ 0 - 3
playbooks/openshift-master/private/validate_restart.yml

@@ -14,9 +14,6 @@
     - role: common
       local_facts:
         rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
-    - role: master
-      local_facts:
-        cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
 
 # Creating a temp file on localhost, we then check each system that will
 # be rebooted to see if that file exists, if so we know we're running

+ 0 - 1
playbooks/openstack/advanced-configuration.md

@@ -337,7 +337,6 @@ variables](https://docs.openshift.com/container-platform/3.6/install_config/inst
 in `inventory/group_vars/OSEv3.yml`. For example, given a load balancer node
 under the ansible group named `ext_lb`:
 
-    openshift_master_cluster_method: native
     openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}"
     openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}"
 

+ 0 - 1
playbooks/openstack/sample-inventory/group_vars/OSEv3.yml

@@ -6,7 +6,6 @@ openshift_deployment_type: origin
 #openshift_release: v3.5
 openshift_master_default_subdomain: "apps.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
 
-openshift_master_cluster_method: native
 openshift_master_cluster_public_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
 
 osm_default_node_selector: 'region=primary'

+ 2 - 4
roles/nuage_master/handlers/main.yaml

@@ -3,8 +3,7 @@
   systemd: name={{ openshift.common.service_type }}-master-api state=restarted
   when: >
     (openshift_master_ha | bool) and
-    (not master_api_service_status_changed | default(false)) and
-    openshift.master.cluster_method == 'native'
+    (not master_api_service_status_changed | default(false))
 
 # TODO: need to fix up ignore_errors here
 # We retry the controllers because the API may not be 100% initialized yet.
@@ -16,6 +15,5 @@
   until: result.rc == 0
   when: >
     (openshift_master_ha | bool) and
-    (not master_controllers_service_status_changed | default(false)) and
-    openshift.master.cluster_method == 'native'
+    (not master_controllers_service_status_changed | default(false))
   ignore_errors: yes
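
A side note on the surviving condition: with the cluster_method test dropped, the two remaining clauses could equally be written in Ansible's list form, which other files in this PR already use (a style observation, not part of the patch):

    # Equivalent list-form condition; list items are implicitly AND-ed.
    when:
    - openshift_master_ha | bool
    - not (master_api_service_status_changed | default(false))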

+ 2 - 2
roles/openshift_logging/handlers/main.yml

@@ -1,7 +1,7 @@
 ---
 - name: restart master api
   systemd: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (not (master_api_service_status_changed | default(false) | bool))
   notify: Verify API Server
 
 # We retry the controllers because the API may not be 100% initialized yet.
@@ -11,7 +11,7 @@
   delay: 5
   register: result
   until: result.rc == 0
-  when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (not (master_controllers_service_status_changed | default(false) | bool))
 
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and

+ 0 - 2
roles/openshift_master/handlers/main.yml

@@ -5,7 +5,6 @@
     state: restarted
   when:
   - not (master_api_service_status_changed | default(false) | bool)
-  - openshift.master.cluster_method == 'native'
   notify:
   - Verify API Server
 
@@ -18,7 +17,6 @@
   until: result.rc == 0
   when:
   - not (master_controllers_service_status_changed | default(false) | bool)
-  - openshift.master.cluster_method == 'native'
 
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
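
Several of the touched handler files end with the same "Verify API Server" handler, whose body is cut off at the hunk boundaries above. For readers unfamiliar with the pattern, a hedged sketch of such a curl-based readiness probe follows; the CA path, URL variable, and retry budget are assumptions, not the repository's exact values:

    # Sketch of a curl-based API readiness handler; paths and retry counts
    # are illustrative assumptions.
    - name: Verify API Server
      command: >
        curl --silent --cacert {{ openshift.common.config_base }}/master/ca.crt
        {{ openshift.master.api_url }}/healthz/ready
      register: api_health
      until: api_health.stdout == 'ok'
      retries: 120
      delay: 1
      changed_when: false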

+ 0 - 51
roles/openshift_master/tasks/main.yml

@@ -11,25 +11,6 @@
   - openshift_master_oauth_grant_method is defined
   - openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
 
-# HA Variable Validation
-- fail:
-    msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
-  when:
-  - openshift.master.ha | bool
-  - (openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"])
-- fail:
-    msg: "openshift_master_cluster_password must be set for multi-master installations"
-  when:
-  - openshift.master.ha | bool
-  - openshift.master.cluster_method == "pacemaker"
-  - openshift_master_cluster_password is not defined or not openshift_master_cluster_password
-- fail:
-    msg: "Pacemaker based HA is not supported at this time when used with containerized installs"
-  when:
-  - openshift.master.ha | bool
-  - openshift.master.cluster_method == "pacemaker"
-  - openshift.common.is_containerized | bool
-
 - name: Open up firewall ports
   import_tasks: firewall.yml
 
@@ -226,7 +207,6 @@
     enabled: yes
     state: started
   when:
-  - openshift.master.cluster_method == 'native'
   - inventory_hostname == openshift_master_hosts[0]
   register: l_start_result
   until: not l_start_result | failed
@@ -241,14 +221,12 @@
 - set_fact:
     master_api_service_status_changed: "{{ l_start_result | changed }}"
   when:
-  - openshift.master.cluster_method == 'native'
   - inventory_hostname == openshift_master_hosts[0]
 
 - pause:
     seconds: 15
   when:
   - openshift.master.ha | bool
-  - openshift.master.cluster_method == 'native'
 
 - name: Start and enable master api all masters
   systemd:
@@ -256,7 +234,6 @@
     enabled: yes
     state: started
   when:
-  - openshift.master.cluster_method == 'native'
   - inventory_hostname != openshift_master_hosts[0]
   register: l_start_result
   until: not l_start_result | failed
@@ -271,14 +248,12 @@
 - set_fact:
     master_api_service_status_changed: "{{ l_start_result | changed }}"
   when:
-  - openshift.master.cluster_method == 'native'
   - inventory_hostname != openshift_master_hosts[0]
 
 # A separate wait is required here for native HA since notifies will
 # be resolved after all tasks in the role.
 - include_tasks: check_master_api_is_ready.yml
   when:
-  - openshift.master.cluster_method == 'native'
   - master_api_service_status_changed | bool
 
 - name: Start and enable master controller service
@@ -286,8 +261,6 @@
     name: "{{ openshift.common.service_type }}-master-controllers"
     enabled: yes
     state: started
-  when:
-  - openshift.master.cluster_method == 'native'
   register: l_start_result
   until: not l_start_result | failed
   retries: 1
@@ -301,30 +274,6 @@
 - name: Set fact master_controllers_service_status_changed
   set_fact:
     master_controllers_service_status_changed: "{{ l_start_result | changed }}"
-  when:
-  - openshift.master.cluster_method == 'native'
-
-- name: Install cluster packages
-  package: name=pcs state=present
-  when:
-  - openshift.master.cluster_method == 'pacemaker'
-  - not openshift.common.is_containerized | bool
-  register: l_install_result
-  until: l_install_result | success
-
-- name: Start and enable cluster service
-  systemd:
-    name: pcsd
-    enabled: yes
-    state: started
-  when:
-  - openshift.master.cluster_method == 'pacemaker'
-  - not openshift.common.is_containerized | bool
-
-- name: Set the cluster user password
-  shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
-  when:
-  - l_install_result | changed
 
 - name: node bootstrap settings
   include_tasks: bootstrap.yml
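
The hunk above also keeps the comment explaining why native HA needs an explicit wait: handler notifications only fire after the role finishes, so the role polls the API inline via check_master_api_is_ready.yml instead. A minimal sketch of what such a check could contain (an assumption about its shape, not its actual contents):

    # Hypothetical inline readiness wait; the timeout is an illustrative value.
    - name: Wait for master API to listen
      wait_for:
        host: "{{ openshift.common.hostname }}"
        port: "{{ openshift.master.api_port }}"
        timeout: 180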

+ 0 - 19
roles/openshift_master/tasks/systemd_units.yml

@@ -25,7 +25,6 @@
     state: absent
   ignore_errors: true
   when:
-  - openshift.master.cluster_method == "native"
   - not l_is_master_system_container | bool
 
 # This is the image used for both HA and non-HA clusters:
@@ -43,7 +42,6 @@
     src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"
     dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
   when:
-  - openshift.master.cluster_method == "native"
   - not l_is_master_system_container | bool
   with_items:
   - api
@@ -63,22 +61,17 @@
   - api
   - controllers
   when:
-  - openshift.master.cluster_method == "native"
   - not l_is_master_system_container | bool
 
 - name: Preserve Master API Proxy Config options
   command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
   register: l_master_api_proxy
-  when:
-  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false
 
 - name: Preserve Master API AWS options
   command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api
   register: master_api_aws
-  when:
-  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false
 
@@ -87,14 +80,11 @@
     src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
     dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
     backup: true
-  when:
-  - openshift.master.cluster_method == "native"
   notify:
   - restart master api
 
 - name: Restore Master API Proxy Config Options
   when:
-  - openshift.master.cluster_method == "native"
   - l_master_api_proxy.rc == 0
   - "'http_proxy' not in openshift.common"
   - "'https_proxy' not in openshift.common"
@@ -105,7 +95,6 @@
 
 - name: Restore Master API AWS Options
   when:
-  - openshift.master.cluster_method == "native"
   - master_api_aws.rc == 0
   - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
   lineinfile:
@@ -117,16 +106,12 @@
 - name: Preserve Master Controllers Proxy Config options
   command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
   register: master_controllers_proxy
-  when:
-  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false
 
 - name: Preserve Master Controllers AWS options
   command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
   register: master_controllers_aws
-  when:
-  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false
 
@@ -135,8 +120,6 @@
     src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
     dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
     backup: true
-  when:
-  - openshift.master.cluster_method == "native"
   notify:
   - restart master controllers
 
@@ -146,7 +129,6 @@
     line: "{{ item }}"
   with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}"
   when:
-  - openshift.master.cluster_method == "native"
   - master_controllers_proxy.rc == 0
   - "'http_proxy' not in openshift.common"
   - "'https_proxy' not in openshift.common"
@@ -157,6 +139,5 @@
     line: "{{ item }}"
   with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}"
   when:
-  - openshift.master.cluster_method == "native"
   - master_controllers_aws.rc == 0
   - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)

+ 1 - 1
roles/openshift_master/templates/master.yaml.v1.j2

@@ -120,7 +120,7 @@ kubernetesMasterConfig:
     - application/vnd.kubernetes.protobuf
 {% endif %}
   controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
-  masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
+  masterCount: {{ openshift.master.master_count }}
   masterIP: {{ openshift.common.ip }}
   podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }}
   proxyClientInfo:
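
The template change means masterCount now always reflects the real number of masters instead of being forced to 1 whenever cluster_method was not 'native'. For example, with three masters the rendered fragment would read (IP illustrative):

    # Rendered output sketch for a three-master inventory.
    kubernetesMasterConfig:
      masterCount: 3
      masterIP: 192.0.2.10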

+ 0 - 34
roles/openshift_master_cluster/README.md

@@ -1,34 +0,0 @@
-OpenShift Master Cluster
-========================
-
-TODO
-
-Requirements
-------------
-
-* Ansible 2.2
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Jason DeTiberus (jdetiber@redhat.com)

+ 0 - 15
roles/openshift_master_cluster/meta/main.yml

@@ -1,15 +0,0 @@
----
-galaxy_info:
-  author: Jason DeTiberus
-  description:
-  company: Red Hat, Inc.
-  license: Apache License, Version 2.0
-  min_ansible_version: 2.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-  - system
-dependencies: []

+ 0 - 43
roles/openshift_master_cluster/tasks/configure.yml

@@ -1,43 +0,0 @@
----
-- fail:
-    msg: This role requires that openshift_master_cluster_vip is set
-  when: openshift_master_cluster_vip is not defined or not openshift_master_cluster_vip
-- fail:
-    msg: This role requires that openshift_master_cluster_public_vip is set
-  when: openshift_master_cluster_public_vip is not defined or not openshift_master_cluster_public_vip
-
-- name: Authenticate to the cluster
-  command: pcs cluster auth -u hacluster -p {{ openshift_master_cluster_password }} {{ omc_cluster_hosts }}
-
-- name: Create the cluster
-  command: pcs cluster setup --name openshift_master {{ omc_cluster_hosts }}
-
-- name: Start the cluster
-  command: pcs cluster start --all
-
-- name: Enable the cluster on all nodes
-  command: pcs cluster enable --all
-
-- name: Set default resource stickiness
-  command: pcs resource defaults resource-stickiness=100
-
-- name: Add the cluster VIP resource
-  command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group {{ openshift.common.service_type }}-master
-
-- name: Add the cluster public VIP resource
-  command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group {{ openshift.common.service_type }}-master
-  when: openshift_master_cluster_public_vip != openshift_master_cluster_vip
-
-- name: Add the cluster master service resource
-  command: pcs resource create master systemd:{{ openshift.common.service_type }}-master op start timeout=90s stop timeout=90s --group {{ openshift.common.service_type }}-master
-
-- name: Disable stonith
-  command: pcs property set stonith-enabled=false
-
-- name: Wait for the clustered master service to be available
-  wait_for:
-    host: "{{ openshift_master_cluster_vip }}"
-    port: "{{ openshift.master.api_port }}"
-    state: started
-    timeout: 180
-    delay: 90

+ 0 - 14
roles/openshift_master_cluster/tasks/main.yml

@@ -1,14 +0,0 @@
----
-- fail:
-    msg: "Not possible on atomic hosts for now"
-  when: openshift.common.is_containerized | bool
-
-- name: Test if cluster is already configured
-  command: pcs status
-  register: pcs_status
-  changed_when: false
-  failed_when: false
-  when: openshift.master.cluster_method == "pacemaker"
-
-- include_tasks: configure.yml
-  when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr"

+ 0 - 26
roles/openshift_master_facts/filter_plugins/openshift_master.py

@@ -485,31 +485,6 @@ class FilterModule(object):
                            Dumper=AnsibleDumper))
 
     @staticmethod
-    def validate_pcs_cluster(data, masters=None):
-        ''' Validates output from "pcs status", ensuring that each master
-            provided is online.
-            Ex: data = ('...',
-                        'PCSD Status:',
-                        'master1.example.com: Online',
-                        'master2.example.com: Online',
-                        'master3.example.com: Online',
-                        '...')
-                masters = ['master1.example.com',
-                           'master2.example.com',
-                           'master3.example.com']
-               returns True
-        '''
-        if not issubclass(type(data), string_types):
-            raise errors.AnsibleFilterError("|failed expects data is a string or unicode")
-        if not issubclass(type(masters), list):
-            raise errors.AnsibleFilterError("|failed expects masters is a list")
-        valid = True
-        for master in masters:
-            if "{0}: Online".format(master) not in data:
-                valid = False
-        return valid
-
-    @staticmethod
     def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):
         ''' Return certificates to synchronize based on facts. '''
         if not issubclass(type(hostvars), dict):
@@ -553,6 +528,5 @@ class FilterModule(object):
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {"translate_idps": self.translate_idps,
-                "validate_pcs_cluster": self.validate_pcs_cluster,
                 "certificates_to_synchronize": self.certificates_to_synchronize,
                 "oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}

+ 0 - 1
roles/openshift_master_facts/tasks/main.yml

@@ -25,7 +25,6 @@
   openshift_facts:
     role: master
     local_facts:
-      cluster_method: "{{ openshift_master_cluster_method | default('native') }}"
       cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
       cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
       api_port: "{{ openshift_master_api_port | default(None) }}"

+ 2 - 2
roles/openshift_metrics/handlers/main.yml

@@ -1,7 +1,7 @@
 ---
 - name: restart master api
   systemd: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (not (master_api_service_status_changed | default(false) | bool))
   notify: Verify API Server
 
 # We retry the controllers because the API may not be 100% initialized yet.
@@ -11,7 +11,7 @@
   delay: 5
   register: result
   until: result.rc == 0
-  when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (not (master_controllers_service_status_changed | default(false) | bool))
 
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and

+ 0 - 2
utils/src/ooinstall/openshift_ansible.py

@@ -125,7 +125,6 @@ def write_inventory_vars(base_inventory, lb):
     base_inventory.write('openshift_override_hostname_check=true\n')
 
     if lb is not None:
-        base_inventory.write('openshift_master_cluster_method=native\n')
         base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname))
         base_inventory.write(
             "openshift_master_cluster_public_hostname={}\n".format(lb.public_hostname))
@@ -266,7 +265,6 @@ def default_facts(hosts, verbose=False):
     facts_env = os.environ.copy()
     facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
     facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
-    facts_env["OPENSHIFT_MASTER_CLUSTER_METHOD"] = 'native'
     if 'ansible_log_path' in CFG.settings:
         facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
     if 'ansible_config' in CFG.settings: