Merge branch 'master' into pre-post-node-hook

Scott Dodson, 7 years ago
Parent commit: 54beb36851
100 files changed, 2059 additions and 299 deletions
  1. .dockerignore (+1 -1)
  2. .papr.inventory (+1 -1)
  3. .tito/packages/openshift-ansible (+1 -1)
  4. CONTRIBUTING.md (+21 -0)
  5. README.md (+21 -1)
  6. images/installer/Dockerfile (+5 -3)
  7. images/installer/Dockerfile.rhel7 (+1 -1)
  8. images/installer/root/usr/local/bin/entrypoint-gcp (+51 -0)
  9. images/installer/root/usr/local/bin/user_setup (+2 -0)
  10. inventory/.gitignore (+1 -0)
  11. inventory/dynamic/gcp/README.md (+1 -0)
  12. inventory/dynamic/gcp/ansible.cfg (+45 -0)
  13. inventory/dynamic/gcp/group_vars/all/00_defaults.yml (+42 -0)
  14. inventory/dynamic/gcp/hosts.py (+408 -0)
  15. inventory/dynamic/gcp/hosts.sh (+15 -0)
  16. inventory/dynamic/gcp/none (+1 -0)
  17. inventory/dynamic/injected/README.md (+3 -0)
  18. inventory/hosts.example (+12 -7)
  19. inventory/hosts.glusterfs.external.example (+2 -1)
  20. inventory/hosts.glusterfs.mixed.example (+2 -1)
  21. inventory/hosts.glusterfs.native.example (+2 -1)
  22. inventory/hosts.glusterfs.registry-only.example (+2 -1)
  23. inventory/hosts.glusterfs.storage-and-registry.example (+2 -1)
  24. inventory/hosts.localhost (+26 -0)
  25. openshift-ansible.spec (+367 -7)
  26. playbooks/adhoc/openshift_hosted_logging_efk.yaml (+0 -16)
  27. playbooks/aws/README.md (+18 -0)
  28. playbooks/aws/openshift-cluster/hosted.yml (+0 -25)
  29. playbooks/aws/openshift-cluster/install.yml (+3 -24)
  30. playbooks/aws/openshift-cluster/provision_install.yml (+2 -2)
  31. playbooks/aws/openshift-cluster/uninstall_elb.yml (+9 -0)
  32. playbooks/aws/openshift-cluster/uninstall_prerequisites.yml (+6 -0)
  33. playbooks/aws/openshift-cluster/uninstall_s3.yml (+10 -0)
  34. playbooks/aws/openshift-cluster/uninstall_sec_group.yml (+10 -0)
  35. playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml (+10 -0)
  36. playbooks/aws/openshift-cluster/uninstall_vpc.yml (+10 -0)
  37. playbooks/aws/provisioning_vars.yml.example (+23 -1)
  38. playbooks/byo/openshift-cluster/upgrades/v3_10/README.md (+20 -0)
  39. playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml (+5 -0)
  40. playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml (+16 -0)
  41. playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml (+7 -0)
  42. playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml (+2 -0)
  43. playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml (+0 -0)
  44. playbooks/cluster-operator/aws/components.yml (+24 -0)
  45. playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml (+1 -1)
  46. playbooks/common/openshift-cluster/upgrades/init.yml (+3 -1)
  47. playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml (+1 -1)
  48. playbooks/common/openshift-cluster/upgrades/post_control_plane.yml (+33 -1)
  49. playbooks/common/openshift-cluster/upgrades/pre/config.yml (+9 -4)
  50. playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml (+2 -0)
  51. playbooks/common/openshift-cluster/upgrades/pre/version_override.yml (+29 -0)
  52. playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml (+16 -15)
  53. playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml (+6 -0)
  54. playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml (+1 -0)
  55. playbooks/common/openshift-cluster/upgrades/v3_10/roles (+1 -0)
  56. playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml (+7 -0)
  57. playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml (+58 -0)
  58. playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml (+35 -0)
  59. playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml (+7 -0)
  60. playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml (+1 -0)
  61. playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml (+1 -0)
  62. playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml (+1 -0)
  63. playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml (+1 -0)
  64. playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml (+1 -0)
  65. playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml (+1 -0)
  66. playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml (+2 -49)
  67. playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml (+58 -26)
  68. playbooks/common/private/components.yml (+40 -0)
  69. playbooks/common/private/control_plane.yml (+34 -0)
  70. playbooks/container-runtime/config.yml (+3 -1)
  71. playbooks/container-runtime/private/build_container_groups.yml (+3 -1)
  72. playbooks/container-runtime/private/config.yml (+9 -1)
  73. playbooks/container-runtime/private/setup_storage.yml (+3 -1)
  74. playbooks/container-runtime/setup_storage.yml (+3 -1)
  75. playbooks/deploy_cluster.yml (+2 -43)
  76. playbooks/gcp/openshift-cluster/build_base_image.yml (+163 -0)
  77. playbooks/gcp/openshift-cluster/build_image.yml (+112 -0)
  78. playbooks/gcp/openshift-cluster/deprovision.yml (+10 -0)
  79. playbooks/gcp/openshift-cluster/install.yml (+33 -0)
  80. playbooks/gcp/openshift-cluster/install_gcp.yml (+21 -0)
  81. playbooks/gcp/openshift-cluster/inventory.yml (+10 -0)
  82. playbooks/gcp/openshift-cluster/launch.yml (+12 -0)
  83. playbooks/gcp/provision.yml (+4 -5)
  84. playbooks/gcp/openshift-cluster/publish_image.yml (+9 -0)
  85. playbooks/gcp/openshift-cluster/roles (+1 -0)
  86. playbooks/init/base_packages.yml (+5 -2)
  87. playbooks/init/facts.yml (+11 -40)
  88. playbooks/init/cluster_facts.yml (+42 -0)
  89. playbooks/init/evaluate_groups.yml (+6 -2)
  90. playbooks/init/main.yml (+11 -2)
  91. playbooks/init/validate_hostnames.yml (+2 -2)
  92. playbooks/openshift-etcd/certificates.yml (+6 -0)
  93. playbooks/openshift-etcd/config.yml (+6 -0)
  94. playbooks/openshift-etcd/embedded2external.yml (+6 -0)
  95. playbooks/openshift-etcd/migrate.yml (+6 -0)
  96. playbooks/openshift-etcd/private/ca.yml (+0 -1)
  97. playbooks/openshift-etcd/private/certificates-backup.yml (+1 -1)
  98. playbooks/openshift-etcd/private/config.yml (+0 -1)
  99. playbooks/openshift-etcd/private/master_etcd_certificates.yml (+0 -2)
  100. playbooks/openshift-etcd/private/redeploy-ca.yml (+0 -0)

+ 1 - 1
.dockerignore

@@ -2,7 +2,7 @@
 bin
 docs
 hack
-inventory
+inventory/hosts.*
 test
 utils
 **/*.md

+ 1 - 1
.papr.inventory

@@ -22,6 +22,6 @@ ocp-master
 ocp-master

 [nodes]
-ocp-master openshift_schedulable=false
+ocp-master openshift_schedulable=true
 ocp-node1  openshift_node_labels="{'region':'infra'}"
 ocp-node2  openshift_node_labels="{'region':'infra'}"

+ 1 - 1
.tito/packages/openshift-ansible

@@ -1 +1 @@
-3.9.0-0.22.0 ./
+3.9.0-0.45.0 ./

+ 21 - 0
CONTRIBUTING.md

@@ -74,6 +74,27 @@ If you are new to Git, these links might help:

 ---

+## Simple all-in-one localhost installation
+```
+git clone https://github.com/openshift/openshift-ansible
+cd openshift-ansible
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/prerequisites.yml
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/deploy_cluster.yml
+```
+
+## Development process
+Most changes can be applied by re-running the config playbook. However, while
+the config playbook will run faster the second time through it's still going to
+take a very long time. As such, you may wish to run a smaller subsection of the
+installation playbooks. You can for instance run the node, master, or hosted
+playbooks in playbooks/openshift-node/config.yml,
+playbooks/openshift-master/config.yml, playbooks/openshift-hosted/config.yml
+respectively.
+
+We're actively working to refactor the playbooks into smaller discrete
+components and we'll be documenting that structure shortly, for now those are
+the most sensible logical units of work.
+
 ## Running tests and other verification tasks

 We use [`tox`](http://readthedocs.org/docs/tox/) to manage virtualenvs where

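The development-process note added to CONTRIBUTING.md above mentions re-running individual component playbooks instead of the full config playbook; a minimal sketch of that workflow (not part of this commit, using the localhost inventory the commit introduces):

```sh
# Re-run only the node configuration against the all-in-one localhost install;
# swap in playbooks/openshift-master/config.yml or
# playbooks/openshift-hosted/config.yml for those components instead.
sudo ansible-playbook -i inventory/hosts.localhost playbooks/openshift-node/config.yml
```
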
+ 21 - 1
README.md

@@ -74,7 +74,27 @@ Fedora:
 dnf install -y ansible pyOpenSSL python-cryptography python-lxml
 ```

-## OpenShift Installation Documentation:
+Additional requirements:
+
+Logging:
+
+- java-1.8.0-openjdk-headless
+
+Metrics:
+
+- httpd-tools
+
+## Simple all-in-one localhost Installation
+This assumes that you've installed the base dependencies and you're running on
+Fedora or RHEL
+```
+git clone https://github.com/openshift/openshift-ansible
+cd openshift-ansible
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/prerequisites.yml
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/deploy_cluster.yml
+```
+
+## Complete Production Installation Documentation:

 - [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
 - [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)

+ 5 - 3
images/installer/Dockerfile

@@ -8,12 +8,14 @@ USER root
 COPY images/installer/origin-extra-root /

 # install ansible and deps
-RUN INSTALL_PKGS="python-lxml pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
+RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
  && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
- && EPEL_PKGS="ansible python2-boto python2-boto3 google-cloud-sdk-183.0.0 which" \
+ && EPEL_PKGS="ansible python2-boto python2-boto3 python2-crypto google-cloud-sdk-183.0.0 which" \
  && yum install -y epel-release \
  && yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
- && rpm -V $INSTALL_PKGS $EPEL_PKGS \
+ && EPEL_TESTING_PKGS="python2-libcloud" \
+ && yum install -y --enablerepo=epel-testing --setopt=tsflags=nodocs $EPEL_TESTING_PKGS \
+ && rpm -V $INSTALL_PKGS $EPEL_PKGS $EPEL_TESTING_PKGS \
  && yum clean all

 LABEL name="openshift/origin-ansible" \

+ 1 - 1
images/installer/Dockerfile.rhel7

@@ -5,7 +5,7 @@ MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
 USER root

 # Playbooks, roles, and their dependencies are installed from packages.
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
+RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto python2-boto3 python2-crypto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
  && yum repolist > /dev/null \
  && yum-config-manager --enable rhel-7-server-ose-3.7-rpms \
  && yum-config-manager --enable rhel-7-server-rh-common-rpms \

+ 51 - 0
images/installer/root/usr/local/bin/entrypoint-gcp

@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# This file sets up the user to run in the GCP environment.
+# It provides dynamic inventory that works well when run in
+# a container environment by setting up a default inventory.
+# It assumes the user has provided a GCP service account token
+# and ssh-privatekey file at "$(pwd)/inventory/dynamic/injected"
+# and automatically links any YAML files found into the group
+# vars directory, which allows the playbook to more easily be
+# run in containerized contexts.
+
+WORK=$(pwd)
+FILES="${WORK}/inventory/dynamic/injected"
+
+# Patch /etc/passwd file with the current user info.
+# The current user's entry must be correctly defined in this file in order for
+# the `ssh` command to work within the created container.
+
+if ! whoami &>/dev/null; then
+  echo "${USER:-default}:x:$(id -u):$(id -g):Default User:$HOME:/sbin/nologin" >> /etc/passwd
+fi
+
+# Provide a "files_dir" variable that points to inventory/dynamic/injected
+echo "files_dir: \"${FILES}\"" > "${WORK}/inventory/dynamic/gcp/group_vars/all/00_default_files_dir.yml"
+# Add any injected variable files into the group vars directory
+find "${FILES}" -name '*.yml' -or -name '*.yaml' -or -name vars | xargs -L1 -I {} ln -fs {} "${WORK}/inventory/dynamic/gcp/group_vars/all"
+# Avoid sudo when running locally - nothing in the image requires it.
+mkdir -p "${WORK}/inventory/dynamic/gcp/host_vars/localhost"
+echo "ansible_become: no" > "${WORK}/inventory/dynamic/gcp/host_vars/localhost/00_skip_root.yaml"
+
+if [[ -z "${ANSIBLE_CONFIG-}" ]]; then
+  export ANSIBLE_CONFIG="${WORK}/inventory/dynamic/gcp/ansible.cfg"
+fi
+
+# SSH requires the file to be owned by the current user, but Docker copies
+# files in as root. Put the file into the ssh dir with the right permissions
+if [[ -f "${FILES}/ssh-privatekey" ]]; then
+  keyfile="${HOME}/.ssh/google_compute_engine"
+  mkdir "${HOME}/.ssh"
+  rm -f "${keyfile}"
+  cat "${FILES}/ssh-privatekey" > "${keyfile}"
+  chmod 0600 "${keyfile}"
+  ssh-keygen -y -f "${keyfile}" >  "${keyfile}.pub"
+fi
+if [[ -f "${FILES}/gce.json" ]]; then
+  gcloud auth activate-service-account --key-file="${FILES}/gce.json"
+else
+  echo "No service account file found at ${FILES}/gce.json, bypassing login"
+fi
+
+exec "$@"

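For context (not part of the commit), a hedged sketch of how this entrypoint might be invoked from the origin-ansible image; it assumes the image's working directory is the openshift-ansible install location from the spec file (/usr/share/ansible/openshift-ansible), and the image tag, host paths, and playbook choice are placeholders:

```sh
# Mount the GCP service account key, ssh-privatekey, and any extra vars files
# where entrypoint-gcp expects them ($(pwd)/inventory/dynamic/injected inside
# the container), then run a playbook through the entrypoint.
docker run -u "$(id -u)" \
  -v "$PWD/gcp-secrets:/usr/share/ansible/openshift-ansible/inventory/dynamic/injected:z" \
  openshift/origin-ansible \
  entrypoint-gcp ansible-playbook playbooks/gcp/openshift-cluster/install.yml
```
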
+ 2 - 0
images/installer/root/usr/local/bin/user_setup

@@ -12,6 +12,8 @@ chmod g+rw /etc/passwd
 # ensure that the ansible content is accessible
 chmod -R g+r ${WORK_DIR}
 find ${WORK_DIR} -type d -exec chmod g+x {} +
+# ensure that the dynamic inventory dir can have content created
+find ${WORK_DIR} -type d -exec chmod g+wx {} +

 # no need for this script to remain in the image after running
 rm $0

+ 1 - 0
inventory/.gitignore

@@ -1 +1,2 @@
 hosts
+/dynamic/gcp/group_vars/all/00_default_files_dir.yml

+ 1 - 0
inventory/dynamic/gcp/README.md

@@ -0,0 +1 @@
+This directory provides dynamic inventory for a GCP cluster configured via the GCP provisioning playbook. Set inventory to `inventory/dynamic/gcp/hosts.sh` to calculate the appropriate host set.

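As a usage note (not part of the commit), the README above implies invocations along these lines; the playbook path is one of those added in this commit:

```sh
# Point Ansible at the dynamic inventory wrapper; hosts.sh first runs the
# inventory.yml playbook to compute cluster variables, then delegates to
# hosts.py for the actual host list.
ansible-playbook -i inventory/dynamic/gcp/hosts.sh playbooks/gcp/openshift-cluster/install.yml
```
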
+ 45 - 0
inventory/dynamic/gcp/ansible.cfg

@@ -0,0 +1,45 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts.
+
+[defaults]
+# Set the log_path
+#log_path = /tmp/ansible.log
+
+private_key_file = $HOME/.ssh/google_compute_engine
+
+# Additional default options for OpenShift Ansible
+forks = 50
+host_key_checking = False
+retry_files_enabled = False
+retry_files_save_path = ~/ansible-installer-retries
+nocows = True
+remote_user = cloud-user
+roles_path = ../../../roles/
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = $HOME/ansible/facts
+fact_caching_timeout = 600
+callback_whitelist = profile_tasks
+inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
+# work around privilege escalation timeouts in ansible:
+timeout = 30
+
+# Uncomment to use the provided example inventory
+inventory = hosts.sh
+
+[inventory]
+# fail more helpfully when the inventory file does not parse (Ansible 2.4+)
+unparsed_is_failed=true
+
+# Additional ssh options for OpenShift Ansible
+[ssh_connection]
+pipelining = True
+ssh_args = -o ControlMaster=auto -o ControlPersist=600s
+timeout = 10
+# shorten the ControlPath which is often too long; when it is,
+# ssh connection reuse silently fails, making everything slower.
+control_path = %(directory)s/%%h-%%r

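Equivalently (again, not part of the commit), the whole configuration can be selected via ANSIBLE_CONFIG, which is what entrypoint-gcp above does; because the file sets `inventory = hosts.sh`, no `-i` flag is needed:

```sh
# Use the bundled config (forks, ssh settings, hosts.sh inventory) directly.
ANSIBLE_CONFIG=inventory/dynamic/gcp/ansible.cfg \
  ansible-playbook playbooks/gcp/openshift-cluster/install.yml
```
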
+ 42 - 0
inventory/dynamic/gcp/group_vars/all/00_defaults.yml

@@ -0,0 +1,42 @@
+# GCP uses non-root users by default, so sudo by default
+---
+ansible_become: yes
+
+openshift_deployment_type: origin
+
+# Debugging settings
+debug_level: 2
+openshift_debug_level: "{{ debug_level }}"
+openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}"
+openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
+
+# External API settings
+console_port: 443
+internal_console_port: 8443
+openshift_master_api_port: "8443"
+openshift_master_console_port: "8443"
+openshift_master_cluster_hostname: "internal-openshift-master.{{ public_hosted_zone }}"
+openshift_master_cluster_public_hostname: "openshift-master.{{ public_hosted_zone }}"
+openshift_master_default_subdomain: "{{ wildcard_zone }}"
+
+# Cloud specific settings
+openshift_cloudprovider_kind: gce
+openshift_hosted_registry_storage_provider: gcs
+
+openshift_master_access_token_max_seconds: 2419200
+openshift_master_identity_providers:
+
+# Networking settings
+openshift_node_port_range: 30000-32000
+openshift_node_open_ports: [{"service":"Router stats port", "port":"1936/tcp"}, {"service":"Allowed open host ports", "port":"9000-10000/tcp"}, {"service":"Allowed open host ports", "port":"9000-10000/udp"}]
+openshift_node_sdn_mtu: 1410
+osm_cluster_network_cidr: 172.16.0.0/16
+osm_host_subnet_length: 9
+openshift_portal_net: 172.30.0.0/16
+
+# Default cluster configuration
+openshift_master_cluster_method: native
+openshift_schedulable: true
+# TODO: change to upstream conventions
+openshift_hosted_infra_selector: "role=infra"
+osm_default_node_selector: "role=app"

+ 408 - 0
inventory/dynamic/gcp/hosts.py

@@ -0,0 +1,408 @@
+#!/usr/bin/env python
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+# This is a derivative of gce.py that adds support for filtering
+# the returned inventory to only include instances that have tags
+# as specified by GCE_TAGGED_INSTANCES. This prevents dynamic 
+# inventory for multiple clusters within the same project from
+# accidentally stomping each other.
+
+# pylint: skip-file
+
+'''
+GCE external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests
+Google Compute Engine via the libcloud library.  Full install/configuration
+instructions for the gce* modules can be found in the comments of
+ansible/test/gce_tests.py.
+
+When run against a specific host, this script returns the following variables
+based on the data obtained from the libcloud Node object:
+ - gce_uuid
+ - gce_id
+ - gce_image
+ - gce_machine_type
+ - gce_private_ip
+ - gce_public_ip
+ - gce_name
+ - gce_description
+ - gce_status
+ - gce_zone
+ - gce_tags
+ - gce_metadata
+ - gce_network
+
+When run in --list mode, instances are grouped by the following categories:
+ - zone:
+   zone group name examples are us-central1-b, europe-west1-a, etc.
+ - instance tags:
+   An entry is created for each tag.  For example, if you have two instances
+   with a common tag called 'foo', they will both be grouped together under
+   the 'tag_foo' name.
+ - network name:
+   the name of the network is appended to 'network_' (e.g. the 'default'
+   network will result in a group named 'network_default')
+ - machine type
+   types follow a pattern like n1-standard-4, g1-small, etc.
+ - running status:
+   group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
+ - image:
+   when using an ephemeral/scratch disk, this will be set to the image name
+   used when creating the instance (e.g. debian-7-wheezy-v20130816).  when
+   your instance was created with a root persistent disk it will be set to
+   'persistent_disk' since there is no current way to determine the image.
+
+Examples:
+  Execute uname on all instances in the us-central1-a zone
+  $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
+
+  Use the GCE inventory script to print out instance specific information
+  $ contrib/inventory/gce.py --host my_instance
+
+Author: Eric Johnson <erjohnso@google.com>
+Contributors: Matt Hite <mhite@hotmail.com>
+Version: 0.0.2
+'''
+
+__requires__ = ['pycrypto>=2.6']
+try:
+    import pkg_resources
+except ImportError:
+    # Use pkg_resources to find the correct versions of libraries and set
+    # sys.path appropriately when there are multiversion installs.  We don't
+    # fail here as there is code that better expresses the errors where the
+    # library is used.
+    pass
+
+USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
+USER_AGENT_VERSION="v2"
+
+import sys
+import os
+import time
+import argparse
+import ConfigParser
+
+import logging
+logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+try:
+    from libcloud.compute.types import Provider
+    from libcloud.compute.providers import get_driver
+    from libcloud.common.google import ResourceNotFoundError
+    _ = Provider.GCE
+except:
+    sys.exit("GCE inventory script requires libcloud >= 0.13")
+
+
+class GceInventory(object):
+    def __init__(self):
+        # Read settings and parse CLI arguments
+        self.parse_cli_args()
+        self.config = self.get_config()
+        self.driver = self.get_gce_driver()
+        self.ip_type = self.get_inventory_options()
+        if self.ip_type:
+            self.ip_type = self.ip_type.lower()
+
+        # Just display data for specific host
+        if self.args.host:
+            print(self.json_format_dict(self.node_to_dict(
+                    self.get_instance(self.args.host)),
+                    pretty=self.args.pretty))
+            sys.exit(0)
+
+        zones = self.parse_env_zones()
+
+        # Otherwise, assume user wants all instances grouped
+        print(self.json_format_dict(self.group_instances(zones),
+            pretty=self.args.pretty))
+        sys.exit(0)
+
+    def get_config(self):
+        """
+        Populates a SafeConfigParser object with defaults and
+        attempts to read an .ini-style configuration from the filename
+        specified in GCE_INI_PATH. If the environment variable is
+        not present, the filename defaults to gce.ini in the current
+        working directory.
+        """
+        gce_ini_default_path = os.path.join(
+            os.path.dirname(os.path.realpath(__file__)), "gce.ini")
+        gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
+
+        # Create a ConfigParser.
+        # This provides empty defaults to each key, so that environment
+        # variable configuration (as opposed to INI configuration) is able
+        # to work.
+        config = ConfigParser.SafeConfigParser(defaults={
+            'gce_service_account_email_address': '',
+            'gce_service_account_pem_file_path': '',
+            'gce_project_id': '',
+            'libcloud_secrets': '',
+            'inventory_ip_type': '',
+        })
+        if 'gce' not in config.sections():
+            config.add_section('gce')
+        if 'inventory' not in config.sections():
+            config.add_section('inventory')
+
+        config.read(gce_ini_path)
+
+        #########
+        # Section added for processing ini settings
+        #########
+
+        # Set the instance_states filter based on config file options
+        self.instance_states = []
+        if config.has_option('gce', 'instance_states'):
+            states = config.get('gce', 'instance_states')
+            # Ignore if instance_states is an empty string.
+            if states:
+                self.instance_states = states.split(',')
+
+        return config
+
+    def get_inventory_options(self):
+        """Determine inventory options. Environment variables always
+        take precedence over configuration files."""
+        ip_type = self.config.get('inventory', 'inventory_ip_type')
+        # If the appropriate environment variables are set, they override
+        # other configuration
+        ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
+        return ip_type
+
+    def get_gce_driver(self):
+        """Determine the GCE authorization settings and return a
+        libcloud driver.
+        """
+        # Attempt to get GCE params from a configuration file, if one
+        # exists.
+        secrets_path = self.config.get('gce', 'libcloud_secrets')
+        secrets_found = False
+        try:
+            import secrets
+            args = list(getattr(secrets, 'GCE_PARAMS', []))
+            kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
+            secrets_found = True
+        except:
+            pass
+
+        if not secrets_found and secrets_path:
+            if not secrets_path.endswith('secrets.py'):
+                err = "Must specify libcloud secrets file as "
+                err += "/absolute/path/to/secrets.py"
+                sys.exit(err)
+            sys.path.append(os.path.dirname(secrets_path))
+            try:
+                import secrets
+                args = list(getattr(secrets, 'GCE_PARAMS', []))
+                kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
+                secrets_found = True
+            except:
+                pass
+        if not secrets_found:
+            args = [
+                self.config.get('gce','gce_service_account_email_address'),
+                self.config.get('gce','gce_service_account_pem_file_path')
+            ]
+            kwargs = {'project': self.config.get('gce', 'gce_project_id')}
+
+        # If the appropriate environment variables are set, they override
+        # other configuration; process those into our args and kwargs.
+        args[0] = os.environ.get('GCE_EMAIL', args[0])
+        args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
+        kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
+
+        # Retrieve and return the GCE driver.
+        gce = get_driver(Provider.GCE)(*args, **kwargs)
+        gce.connection.user_agent_append(
+            '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
+        )
+        return gce
+
+    def parse_env_zones(self):
+        '''returns a list of comma seperated zones parsed from the GCE_ZONE environment variable.
+        If provided, this will be used to filter the results of the grouped_instances call'''
+        import csv
+        reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
+        zones = [r for r in reader]
+        return [z for z in zones[0]]
+
+    def parse_cli_args(self):
+        ''' Command line argument processing '''
+
+        parser = argparse.ArgumentParser(
+                description='Produce an Ansible Inventory file based on GCE')
+        parser.add_argument('--list', action='store_true', default=True,
+                           help='List instances (default: True)')
+        parser.add_argument('--host', action='store',
+                           help='Get all information about an instance')
+        parser.add_argument('--tagged', action='store',
+                           help='Only include instances with this tag')
+        parser.add_argument('--pretty', action='store_true', default=False,
+                           help='Pretty format (default: False)')
+        self.args = parser.parse_args()
+
+        tag_env = os.environ.get('GCE_TAGGED_INSTANCES')
+        if not self.args.tagged and tag_env:
+            self.args.tagged = tag_env
+
+    def node_to_dict(self, inst):
+        md = {}
+
+        if inst is None:
+            return {}
+
+        if inst.extra['metadata'].has_key('items'):
+            for entry in inst.extra['metadata']['items']:
+                md[entry['key']] = entry['value']
+
+        net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+        # default to exernal IP unless user has specified they prefer internal
+        if self.ip_type == 'internal':
+            ssh_host = inst.private_ips[0]
+        else:
+            ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+
+        return {
+            'gce_uuid': inst.uuid,
+            'gce_id': inst.id,
+            'gce_image': inst.image,
+            'gce_machine_type': inst.size,
+            'gce_private_ip': inst.private_ips[0],
+            'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
+            'gce_name': inst.name,
+            'gce_description': inst.extra['description'],
+            'gce_status': inst.extra['status'],
+            'gce_zone': inst.extra['zone'].name,
+            'gce_tags': inst.extra['tags'],
+            'gce_metadata': md,
+            'gce_network': net,
+            # Hosts don't have a public name, so we add an IP
+            'ansible_host': ssh_host
+        }
+
+    def get_instance(self, instance_name):
+        '''Gets details about a specific instance '''
+        try:
+            return self.driver.ex_get_node(instance_name)
+        except Exception as e:
+            return None
+
+    def group_instances(self, zones=None):
+        '''Group all instances'''
+        groups = {}
+        meta = {}
+        meta["hostvars"] = {}
+
+        # list_nodes will fail if a disk is in the process of being deleted
+        # from a node, which is not uncommon if other playbooks are managing
+        # the same project. Retry if we receive a not found error.
+        nodes = []
+        tries = 0
+        while True:
+            try:
+                nodes = self.driver.list_nodes()
+                break
+            except ResourceNotFoundError:
+                tries = tries + 1
+                if tries > 15:
+                    raise e
+                time.sleep(1)
+                continue
+
+        for node in nodes:
+
+            # This check filters on the desired instance states defined in the
+            # config file with the instance_states config option.
+            #
+            # If the instance_states list is _empty_ then _ALL_ states are returned.
+            #
+            # If the instance_states list is _populated_ then check the current
+            # state against the instance_states list
+            if self.instance_states and not node.extra['status'] in self.instance_states:
+                continue
+
+            name = node.name
+
+            if self.args.tagged and self.args.tagged not in node.extra['tags']:
+                continue
+
+            meta["hostvars"][name] = self.node_to_dict(node)
+
+            zone = node.extra['zone'].name
+
+            # To avoid making multiple requests per zone
+            # we list all nodes and then filter the results
+            if zones and zone not in zones:
+                continue
+
+            if groups.has_key(zone): groups[zone].append(name)
+            else: groups[zone] = [name]
+
+            tags = node.extra['tags']
+            for t in tags:
+                if t.startswith('group-'):
+                    tag = t[6:]
+                else:
+                    tag = 'tag_%s' % t
+                if groups.has_key(tag): groups[tag].append(name)
+                else: groups[tag] = [name]
+
+            net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
+            net = 'network_%s' % net
+            if groups.has_key(net): groups[net].append(name)
+            else: groups[net] = [name]
+
+            machine_type = node.size
+            if groups.has_key(machine_type): groups[machine_type].append(name)
+            else: groups[machine_type] = [name]
+
+            image = node.image and node.image or 'persistent_disk'
+            if groups.has_key(image): groups[image].append(name)
+            else: groups[image] = [name]
+
+            status = node.extra['status']
+            stat = 'status_%s' % status.lower()
+            if groups.has_key(stat): groups[stat].append(name)
+            else: groups[stat] = [name]
+
+        groups["_meta"] = meta
+
+        return groups
+
+    def json_format_dict(self, data, pretty=False):
+        ''' Converts a dict to a JSON object and dumps it as a formatted
+        string '''
+
+        if pretty:
+            return json.dumps(data, sort_keys=True, indent=2)
+        else:
+            return json.dumps(data)
+
+
+# Run the script
+GceInventory()

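A short illustration (not part of the commit) of the tag filtering this derivative adds over the stock gce.py inventory; the tag value is a placeholder:

```sh
# Restrict the generated inventory to instances carrying a given GCE tag,
# either via the environment variable or the new --tagged flag.
GCE_TAGGED_INSTANCES=my-cluster ./inventory/dynamic/gcp/hosts.py --list --pretty
./inventory/dynamic/gcp/hosts.py --tagged my-cluster --pretty
```
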
+ 15 - 0
inventory/dynamic/gcp/hosts.sh

@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -euo pipefail
+
+# Use a playbook to calculate the inventory dynamically from
+# the provided cluster variables.
+src="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+if ! out="$( ansible-playbook --inventory-file "${src}/none" ${src}/../../../playbooks/gcp/openshift-cluster/inventory.yml 2>&1 )"; then
+  echo "error: Inventory configuration failed" 1>&2
+  echo "$out" 1>&2
+  echo "{}"
+  exit 1
+fi
+source "/tmp/inventory.sh"
+exec ${src}/hosts.py

+ 1 - 0
inventory/dynamic/gcp/none

@@ -0,0 +1 @@
+{}

+ 3 - 0
inventory/dynamic/injected/README.md

@@ -0,0 +1,3 @@
+This directory may be used to inject inventory into openshift-ansible
+when used in a container. Other scripts like the cloud provider entrypoints
+will automatically use the content of this directory as inventory.

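To make the injection mechanism concrete (not part of the commit), these are the file names entrypoint-gcp looks for under this directory; the source paths on the left are placeholders:

```sh
# gce.json is passed to `gcloud auth activate-service-account`,
# ssh-privatekey becomes ~/.ssh/google_compute_engine, and any *.yml/*.yaml
# files are symlinked into inventory/dynamic/gcp/group_vars/all/.
cp ~/service-account.json  inventory/dynamic/injected/gce.json
cp ~/.ssh/gcp_rsa          inventory/dynamic/injected/ssh-privatekey
cp cluster-vars.yml        inventory/dynamic/injected/
```
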
+ 12 - 7
inventory/hosts.example

@@ -325,7 +325,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # or to one or all of the masters defined in the inventory if no load
 # balancer is present.
 #openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# If an external load balancer is used public hostname should resolve to
+# external load balancer address
+#openshift_master_cluster_public_hostname=openshift-ansible.public.example.com

 # Configure controller arguments
 #osm_controller_args={'resource-quota-sync-period': ['10s']}
@@ -845,12 +848,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # See: https://github.com/nickhammond/ansible-logrotate
 #logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]

-# openshift-ansible will wait indefinitely for your input when it detects that the
+# The OpenShift-Ansible installer will fail when it detects that the
 # value of openshift_hostname resolves to an IP address not bound to any local
 # interfaces. This mis-configuration is problematic for any pod leveraging host
 # networking and liveness or readiness probes.
-# Setting this variable to true will override that check.
-#openshift_override_hostname_check=true
+# Setting this variable to false will override that check.
+#openshift_hostname_check=true

 # openshift_use_dnsmasq is deprecated.  This must be true, or installs will fail
 # in versions >= 3.6
@@ -931,6 +934,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Force a specific image version to use when pulling the service catalog image
 #openshift_service_catalog_image_version=v3.7

+# TSB image tag
+#template_service_broker_version='v3.7'
+
 # Configure one of more namespaces whose templates will be served by the TSB
 #openshift_template_service_broker_namespaces=['openshift']

@@ -1114,10 +1120,9 @@ ose3-etcd[1:3]-ansible.test.example.com
 ose3-lb-ansible.test.example.com containerized=false

 # NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
-# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_schedulable=False any node that's also a master.
 [nodes]
-ose3-master[1:3]-ansible.test.example.com
+# masters should be schedulable to run web console pods
+ose3-master[1:3]-ansible.test.example.com openshift_schedulable=True
 ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"

 [nfs]

+ 2 - 1
inventory/hosts.glusterfs.external.example

@@ -35,7 +35,8 @@ openshift_storage_glusterfs_heketi_url=172.0.0.1
 master

 [nodes]
-master  openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master  openshift_schedulable=True
 node0   openshift_schedulable=True
 node1   openshift_schedulable=True
 node2   openshift_schedulable=True

+ 2 - 1
inventory/hosts.glusterfs.mixed.example

@@ -38,7 +38,8 @@ openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa
 master

 [nodes]
-master  openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master  openshift_schedulable=True
 node0   openshift_schedulable=True
 node1   openshift_schedulable=True
 node2   openshift_schedulable=True

+ 2 - 1
inventory/hosts.glusterfs.native.example

@@ -28,7 +28,8 @@ openshift_deployment_type=origin
 master

 [nodes]
-master  openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master  openshift_schedulable=True
 # A hosted registry, by default, will only be deployed on nodes labeled
 # "region=infra".
 node0   openshift_schedulable=True

+ 2 - 1
inventory/hosts.glusterfs.registry-only.example

@@ -34,7 +34,8 @@ openshift_hosted_registry_storage_kind=glusterfs
 master

 [nodes]
-master  openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master  openshift_schedulable=True
 # A hosted registry, by default, will only be deployed on nodes labeled
 # "region=infra".
 node0   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True

+ 2 - 1
inventory/hosts.glusterfs.storage-and-registry.example

@@ -35,7 +35,8 @@ openshift_hosted_registry_storage_kind=glusterfs
 master

 [nodes]
-master  openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master  openshift_schedulable=True
 # It is recommended to not use a single cluster for both general and registry
 # storage, so two three-node clusters will be required.
 node0   openshift_schedulable=True

+ 26 - 0
inventory/hosts.localhost

@@ -0,0 +1,26 @@
+#bare minimum hostfile
+
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+# if your target hosts are Fedora uncomment this
+#ansible_python_interpreter=/usr/bin/python3
+openshift_deployment_type=origin
+openshift_release=3.7
+osm_cluster_network_cidr=10.128.0.0/14
+openshift_portal_net=172.30.0.0/16
+osm_host_subnet_length=9
+# localhost likely doesn't meet the minimum requirements
+openshift_disable_check=disk_availability,memory_availability
+
+[masters]
+localhost ansible_connection=local
+
+[etcd]
+localhost ansible_connection=local
+
+[nodes]
+localhost  ansible_connection=local openshift_schedulable=true openshift_node_labels="{'region': 'infra', 'zone': 'default'}"

+ 367 - 7
openshift-ansible.spec

@@ -10,7 +10,7 @@

 Name:           openshift-ansible
 Version:        3.9.0
-Release:        0.22.0%{?dist}
+Release:        0.45.0%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
 URL:            https://github.com/openshift/openshift-ansible
@@ -28,6 +28,7 @@ Requires:      java-1.8.0-openjdk-headless
 Requires:      httpd-tools
 Requires:      libselinux-python
 Requires:      python-passlib
+Requires:      python2-crypto

 %description
 Openshift and Atomic Enterprise Ansible
@@ -48,7 +49,8 @@ popd
 %install
 # Base openshift-ansible install
 mkdir -p %{buildroot}%{_datadir}/%{name}
-mkdir -p %{buildroot}%{_datadir}/ansible/%{name}
+mkdir -p %{buildroot}%{_datadir}/ansible/%{name}/inventory
+cp -rp inventory/dynamic %{buildroot}%{_datadir}/ansible/%{name}/inventory

 # openshift-ansible-bin install
 mkdir -p %{buildroot}%{_bindir}
@@ -62,10 +64,7 @@ rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
 # openshift-ansible-docs install
 # Install example inventory into docs/examples
 mkdir -p docs/example-inventories
-cp inventory/* docs/example-inventories/
-
-# openshift-ansible-files install
-cp -rp files %{buildroot}%{_datadir}/ansible/%{name}/
+cp inventory/hosts.* inventory/README.md docs/example-inventories/

 # openshift-ansible-playbooks install
 cp -rp playbooks %{buildroot}%{_datadir}/ansible/%{name}/
@@ -100,7 +99,7 @@ popd
 %doc README*
 %license LICENSE
 %dir %{_datadir}/ansible/%{name}
-%{_datadir}/ansible/%{name}/files
+%{_datadir}/ansible/%{name}/inventory/dynamic
 %ghost %{_datadir}/ansible/%{name}/playbooks/common/openshift-master/library.rpmmoved

 # ----------------------------------------------------------------------------------
@@ -202,6 +201,367 @@ Atomic OpenShift Utilities includes


 %changelog
+* Thu Feb 15 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.45.0
+- 
+
+* Thu Feb 15 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.44.0
+- 
+
+* Thu Feb 15 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.43.0
+- Changing conditional_set_fact from module to action_plugin since it does not
+  need to access hosts to be effective and to reduce playbook output
+  (ewolinet@redhat.com)
+- Revert "Bug 1512825 - add mux pod failed for Serial number 02 has already
+  been issued" (mkhan@redhat.com)
+- Fix metadata access in OpenStack inventory (tomas@sedovic.cz)
+- Adding ability to yedit json files. (kwoodson@redhat.com)
+- Simplify double upgrade version logic (mgugino@redhat.com)
+- Whenever we create a new es node ignore health checks, changing prometheus pw
+  gen for increased secret idempotency (ewolinet@redhat.com)
+- oc_adm_csr: Add fail_on_timeout parameter which causes module to fail when
+  timeout was reached. (abutcher@redhat.com)
+- Adding missing template (ewolinet@redhat.com)
+- Move installation of packages before container_runtime to ensure bind mounts
+  are avaialable. (kwoodson@redhat.com)
+- Use curl --noproxy option for internal apiserver access (takayoshi@gmail.com)
+- Revert openshift_version to previous state (mgugino@redhat.com)
+- Add openshift_gcp_multizone bool (mgugino@redhat.com)
+- Invert logic to decide when to re-deploy certs (sdodson@redhat.com)
+- etcd_scaleup: use inventory_hostname when etcd ca host is being picked
+  (vrutkovs@redhat.com)
+- Fix docker_upgrade variable (mgugino@redhat.com)
+- Fix gcp variable warnings (mgugino@redhat.com)
+- Disable console install when not 3.9 or newer (spadgett@redhat.com)
+- Fix etcd scaleup plays (mgugino@redhat.com)
+- Add playbook to install components for cluster operator (cewong@redhat.com)
+- Remove cluster_facts.yml from the install.yml (tomas@sedovic.cz)
+- Allow for blank StorageClass in PVC creation (jarrpa@redhat.com)
+- Add service catalog to be upgraded (jpeeler@redhat.com)
+- Remove node start from bootstrap.yml. (abutcher@redhat.com)
+- Restart systemd-hostnamed before restarting NetworkManager in node user-data.
+  (abutcher@redhat.com)
+- additional mounts: specify 'type' in container_runtime_crio_additional_mounts
+  (vrutkovs@redhat.com)
+- Fix openshift_openstack_provision_user_commands (bdobreli@redhat.com)
+- origin-dns: make sure cluster.local DNS server is listed first
+  (vrutkovs@redhat.com)
+- Fix OpenStack playbooks (tomas@sedovic.cz)
+- Backport changes for glusterfs, heketi, s3 and block templates
+  (sarumuga@redhat.com)
+- Fix indentation to make yamllint happy (vrutkovs@redhat.com)
+- Use r_etcd_common_etcdctl_command instead of hardcoded binary name to support
+  containerized upgrade (vrutkovs@redhat.com)
+- Verify that requested services have schedulable nodes matching the selectors
+  (vrutkovs@redhat.com)
+- Normalize the time we wait for pods to 5s * 60 retries (sdodson@redhat.com)
+- Pause for console rollout (spadgett@redhat.com)
+- Fix wording (bdobreli@redhat.com)
+- Fix cloud init runcmd templating (bdobreli@redhat.com)
+- Note ignored Heat user data changes for openstack (bdobreli@redhat.com)
+- Clarify the ansible playbook vs cloud-init (bdobreli@redhat.com)
+- Fix openstack cloud-init runcmd templating (bdobreli@redhat.com)
+- [openstack] custom user commands for cloud-init (bdobreli@redhat.com)
+- Limit host scope during plays (mgugino@redhat.com)
+- Fix upgrade-control plane post_control_plane.yml (mgugino@redhat.com)
+- erase data only if variable is set. fix block indentatation
+  (sarumuga@redhat.com)
+- uninstall playbook for GlusterFS (sarumuga@redhat.com)
+- Removing prefix and replacing with cidr, pool_start and pool_end variables.
+  (mbruzek@gmail.com)
+- Make node start options configurable (celebdor@gmail.com)
+- Support master node high availability (jihoon.o@samsung.com)
+
+* Fri Feb 09 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.42.0
+- xPaaS v1.4.8 for v3.7 (sdodson@redhat.com)
+- xPaaS v1.4.8-1 for v3.8 (sdodson@redhat.com)
+- xPaaS v1.4.8-1 for v3.9 (sdodson@redhat.com)
+- Bump xpaas version (sdodson@redhat.com)
+- Bug 1524805- CFME example now works disconnected (fabian@fabianism.us)
+- Only try to yaml.load a file if it ends in .yml or .yaml in logging facts
+  (ewolinet@redhat.com)
+- Set default image tag to openshift_image_tag for services
+  (vrutkovs@redhat.com)
+- Redeploy router certificates during upgrade only when secure.
+  (kwoodson@redhat.com)
+- GlusterFS: Fix block StorageClass heketi route (jarrpa@redhat.com)
+- changed oc to {{ openshift_client_binary }} (datarace101@gmail.com)
+- Use v3.9 web-console image for now (sdodson@redhat.com)
+- Adding ability to provide additional mounts to crio system container.
+  (kwoodson@redhat.com)
+- Remove spaces introduced at the start of the line
+  (geoff.newson@googlemail.com)
+- Changing the check for the number of etcd nodes (geoff.newson@gmail.com)
+- aws ami: make it so the tags from the orinal AMI are used with the newly
+  created AMI (mwoodson@redhat.com)
+- Setup docker excluder if requested before container_runtime is installed
+  (vrutkovs@redhat.com)
+- openshift_node: Remove master from aws node building (smilner@redhat.com)
+- Use wait_for_connection to validate ssh transport is alive
+  (sdodson@redhat.com)
+- Bug 1541625- properly cast provided ip address to unicode
+  (fabian@fabianism.us)
+- Add base package installation to upgrade playbooks (rteague@redhat.com)
+- 3.9 upgrade: fix typos in restart masters procedure (vrutkovs@redhat.com)
+- quick installer: disable broken test_get_hosts_to_run_on6 test
+  (vrutkovs@redhat.com)
+- Quick installer: run prerequistes first and update path to main playbook
+  (vrutkovs@redhat.com)
+- Fix uninstall using openshift_prometheus_state=absent (zgalor@redhat.com)
+- Detect config changes in console liveness probe (spadgett@redhat.com)
+- Fix master and node system container variables (mgugino@redhat.com)
+- Correct the list of certificates checked in openshift_master_certificates
+  s.t. masters do not incorrectly report that master certs are missing.
+  (abutcher@redhat.com)
+- tag fix without ose- (rcook@redhat.com)
+- lib_utils_oo_collect: Allow filtering on dot separated keys.
+  (abutcher@redhat.com)
+- Determine which etcd host is the etcd_ca_host rather than assume it is the
+  first host in the etcd host group. (abutcher@redhat.com)
+- Attempt to back up generated certificates on every etcd host.
+  (abutcher@redhat.com)
+- Remove pre upgrade verification step re: etcd ca host. (abutcher@redhat.com)
+- Revert "GlusterFS: Remove image option from heketi command" (hansmi@vshn.ch)
+
+* Wed Feb 07 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.41.0
+- Allow OVS 2.7 in OCP 3.10 (sdodson@redhat.com)
+- GlusterFS: Minor documentation update (jarrpa@redhat.com)
+- Make sure to include upgrade_pre when upgrading master nodes
+  (sdodson@redhat.com)
+
+* Wed Feb 07 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.40.0
+- health checks: tolerate ovs 2.9 (lmeyer@redhat.com)
+- Fix docker rpm upgrade install task wording (mgugino@redhat.com)
+- Initial support for 3.10 (sdodson@redhat.com)
+- add deprovisioning for ELB (and IAM certs) (jdiaz@redhat.com)
+- [6632] fix indentation of terminationGracePeriodSeconds var
+  (jsanda@redhat.com)
+
+* Tue Feb 06 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.39.0
+- Update code to not fail when rc != 0 (kwoodson@redhat.com)
+- Upgrades: pass openshift_manage_node_is_master to master nodes during upgrade
+  (vrutkovs@redhat.com)
+- Updates to configure monitoring container. (kwoodson@redhat.com)
+- Move cert SAN update logic to openshift-etcd (rteague@redhat.com)
+- Swapping container order for es pod (ewolinet@redhat.com)
+- Adding support for ES 5.x tech preview opt in (ewolinet@redhat.com)
+- bug 1540799: openshift_prometheus: update alertmanager config file flag
+  (pgier@redhat.com)
+- parameterize various master scale group bits (jdiaz@redhat.com)
+- Use rollout instead of deploy (deprecated) (rteague@redhat.com)
+- cri-o: export variables defined in crio-network (gscrivan@redhat.com)
+
+* Mon Feb 05 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.38.0
+- Moving upgrade sg playbook to 3.9 (kwoodson@redhat.com)
+- remove openshift_upgrade_{pre,post}_storage_migration_enabled from
+  failed_when (nakayamakenjiro@gmail.com)
+- Fix version handling in 3.8/3.9 control plane upgrades (rteague@redhat.com)
+- add S3 bucket cleanup (jdiaz@redhat.com)
+- dynamic inventory bug when group exists but its empty (m.judeikis@gmail.com)
+- dynamic inventory bug when group exists but its empty (m.judeikis@gmail.com)
+- Parameterize user and disable_root options in cloud config
+  (nelluri@redhat.com)
+- Fix softlinks broken by d3fefc32a727fe3c13159c4e9fe4399f35b487a8
+  (Klaas-@users.noreply.github.com)
+
+* Fri Feb 02 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.37.0
+- Don't use 'omit' for package module (vrutkovs@redhat.com)
+- Adding requirements for logging and metrics (ewolinet@redhat.com)
+- Disable master controllers before upgrade and re-enable those when restart
+  mode is system (vrutkovs@redhat.com)
+- upgrade: run upgrade_control_plane and upgrade_nodes playbooks during full
+  upgrade (vrutkovs@redhat.com)
+
+* Fri Feb 02 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.36.0
+- Add missing tasks file (sdodson@redhat.com)
+- Upgrade to migrate to using push to DNS for registries. (kwoodson@redhat.com)
+- Adding defaults for the gcp variables to fix an undefined ansible exception.
+  (kwoodson@redhat.com)
+- Fix vsphere sanitization (sdodson@redhat.com)
+- Set a default for required vsphere variable (sdodson@redhat.com)
+- Add python2-crypto package (ccoleman@redhat.com)
+- hosts.example: clarify usage of openshift_master_cluster_public_hostname
+  (vrutkovs@redhat.com)
+- Conditionally create pvcs for metrics depending on whether or not it already
+  exists (ewolinet@redhat.com)
+- Update hosts examples with a note about scheduling on masters
+  (vrutkovs@redhat.com)
+- Fixing file write issue. (kwoodson@redhat.com)
+- Only perform console configmap ops when >= 3.9 (sdodson@redhat.com)
+- Remove playbooks/adhoc/openshift_hosted_logging_efk.yaml (sdodson@redhat.com)
+- upgrades: use openshift_version as a regexp when checking
+  openshift.common.version (vrutkovs@redhat.com)
+- Don't update master-config.yaml with logging/metrics urls >= 3.9
+  (sdodson@redhat.com)
+- Make master schedulable (vrutkovs@redhat.com)
+- Re-add openshift_aws_elb_cert_arn. (abutcher@redhat.com)
+- Ignore openshift_pkg_version during 3.8 upgrade (rteague@redhat.com)
+- bug 1537857. Fix retrieving prometheus metrics (jcantril@redhat.com)
+- Remove master_ha bool checks (mgugino@redhat.com)
+- Don't restart docker when re-deploying node certificates (sdodson@redhat.com)
+- vsphere storage default add (davis.phillips@gmail.com)
+
+* Wed Jan 31 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.35.0
+- add glusterblock support for ansible (m.judeikis@gmail.com)
+- Add a bare minimum localhost hosts file (sdodson@redhat.com)
+- copy etcd client certificates for nuage openshift monitor
+  (siva_teja.areti@nokia.com)
+- fix hostvars parameter name (tzumainn@redhat.com)
+- remove mountpoint parameter (tzumainn@redhat.com)
+- flake cleanup (tzumainn@redhat.com)
+- code simplification and lint cleanup (tzumainn@redhat.com)
+- Symlink kubectl to oc instead of openshift (mfojtik@redhat.com)
+- Rework provisioners vars to support different prefix/version for Origin/OSE
+  (vrutkovs@redhat.com)
+- add cinder mountpoint to inventory (tzumainn@redhat.com)
+- allow setting of kibana env vars (jcantril@redhat.com)
+- No longer compare with legacy hosted var (ewolinet@redhat.com)
+- Preserving ES dc storage type unless overridden by inventory variable
+  (ewolinet@redhat.com)
+- Fix: e2e tests failing due to :1936/metrics unaccessible.
+  (jmencak@redhat.com)
+
+* Tue Jan 30 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.34.0
+- docker_creds: decode docker_config for py3 only if its a string
+  (vrutkovs@redhat.com)
+- Removing ability to change default cassandra_pvc_prefix based on metrics
+  volume name (ewolinet@redhat.com)
+- Don't deploy the console if disabled or registry subtype (sdodson@redhat.com)
+- [1538960] Correct ability to overried openshift_management_app_template
+  (rteague@redhat.com)
+
+* Tue Jan 30 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.33.0
+- 
+
+* Tue Jan 30 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.32.0
+- Revert "Revert "use non-deprecated REGISTRY_OPENSHIFT_SERVER_ADDR variable to
+  set the registry hostname"" (bparees@users.noreply.github.com)
+- Rebase Prometheus example for new scrape endpoints and expose alert manager
+  (m.judeikis@gmail.com)
+- Revert "use non-deprecated REGISTRY_OPENSHIFT_SERVER_ADDR variable to set the
+  registry hostname" (bparees@users.noreply.github.com)
+- Bug 1539182: Detect if ClusterResourceOverrides enabled during console
+  install (spadgett@redhat.com)
+- Fix container_runtime variable typo (mgugino@redhat.com)
+- Correct 3.7 to 3.9 upgrade openshift_image_tag (mgugino@redhat.com)
+- Fix misaligned ports for sg,elb,api (mazzystr@gmail.com)
+- Add GPG keys in the base image and don't install docker (ccoleman@redhat.com)
+- Change catalog roles install to use aggregation (jpeeler@redhat.com)
+- Make IP object a string (fabian@fabianism.us)
+- Add kube service ipaddress to no_proxy list (sdodson@redhat.com)
+
+* Sat Jan 27 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.31.0
+- removed references to 'files' dir in spec file (dyocum@redhat.com)
+- files in ansible roles do not need to have the path specified to them when
+  referenced by a builtin module, i.e., copy: (dyocum@redhat.com)
+- moving files to their correct <role>/files dir for the openshift_web_console
+  and template_service_broker roles (dyocum@redhat.com)
+
+* Fri Jan 26 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.30.0
+- Removing dependency on the extra stroage device. (kwoodson@redhat.com)
+
+* Fri Jan 26 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.29.0
+- Add prometheus annotations to console service (spadgett@redhat.com)
+- Add resource requests to console template (spadgett@redhat.com)
+- ignore 'users' field in oc_group module (jdiaz@redhat.com)
+
+* Fri Jan 26 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.28.0
+- Updating deprecations to use callback plugin (ewolinet@redhat.com)
+- Run console pods on the master (spadgett@redhat.com)
+
+* Fri Jan 26 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.26.0
+- docker_image_availability: containerized overrides (lmeyer@redhat.com)
+- Remove old assetConfig from master-config.yaml (spadgett@redhat.com)
+- Don't emit assetConfig on 3.9 (sdodson@redhat.com)
+
+* Fri Jan 26 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.25.0
+- [1502838] Correct certificate alt name parsing (rteague@redhat.com)
+- sync imagestreams+templates from origin master for v3.9 (bparees@redhat.com)
+- node: specify bind option to /root/.docker (gscrivan@redhat.com)
+- [1530403] Improve etcd group error message (rteague@redhat.com)
+- Only automatically restart if cluster is in yellow or green state
+  (ewolinet@redhat.com)
+- openshift_manage_node: Label nodes in one pass (vrutkovs@redhat.com)
+- Redeploy etcd certificates during upgrade when etcd hostname not present in
+  etcd serving cert SAN. (abutcher@redhat.com)
+- Create swapoff module (mgugino@redhat.com)
+- Label masters with node-role.kubernetes.io/master. This PR also sets these
+  labels and scheduling status during upgrades (vrutkovs@redhat.com)
+- [1537946] Correct conditional check for GlusterFS IPs (rteague@redhat.com)
+- Remove unused node.lables from openshift_facts (mgugino@redhat.com)
+- Change dnsmasq Requires to Wants.
+  https://bugzilla.redhat.com/show_bug.cgi?id=1532960 (rchopra@redhat.com)
+- Set a default for openshift_hosted_registry_storage_azure_blob_realm
+  (sdodson@redhat.com)
+- openshift_prometheus: remove block duration settings (pgier@redhat.com)
+
+* Wed Jan 24 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.24.0
+- Update CF 4.6 Beta templates in openshift_management directory
+  (simaishi@redhat.com)
+- installer: increase content width for commands, which may output URLs
+  (vrutkovs@redhat.com)
+- Only rollout console if config changed (spadgett@redhat.com)
+- Protect master installed version during node upgrades (mgugino@redhat.com)
+- [1506866] Update haproxy.cfg.j2 (rteague@redhat.com)
+- Split control plane and component install in deploy_cluster
+  (ccoleman@redhat.com)
+- Add clusterResourceOverridesEnabled to console config (spadgett@redhat.com)
+- [1537105] Add openshift_facts to flannel role (rteague@redhat.com)
+- PyYAML is required by openshift_facts on nodes (ccoleman@redhat.com)
+- Move origin-gce roles and playbooks into openshift-ansible
+  (ccoleman@redhat.com)
+- Directly select the ansible version (ccoleman@redhat.com)
+- use non-deprecated REGISTRY_OPENSHIFT_SERVER_ADDR variable to set the
+  registry hostname (bparees@redhat.com)
+- update Dockerfile to add boto3 dependency (jdiaz@redhat.com)
+- Lowercase node names when creating certificates (vrutkovs@redhat.com)
+- NFS Storage: make sure openshift_hosted_*_storage_nfs_directory are quoted
+  (vrutkovs@redhat.com)
+- Fix etcd scaleup playbook (mgugino@redhat.com)
+- Bug 1524805- ServiceCatalog now works disconnected (fabian@fabianism.us)
+- [1506750] Ensure proper hostname check override (rteague@redhat.com)
+- failed_when lists are implicitely ANDs, not ORs (vrutkovs@redhat.com)
+- un-hardcode default subnet az (jdiaz@redhat.com)
+- Ensure that node names are lowerecased before matching (sdodson@redhat.com)
+- Bug 1534020 - Only set logging and metrics URLs if console config map exists
+  (spadgett@redhat.com)
+- Add templates to v3.9 (simaishi@redhat.com)
+- Use Beta repo path (simaishi@redhat.com)
+- CF 4.6 templates (simaishi@redhat.com)
+- Add ability to mount volumes into system container nodes (mgugino@redhat.com)
+- Fix to master-internal elb scheme (mazzystr@gmail.com)
+- Allow 5 etcd hosts (sdodson@redhat.com)
+- Remove unused symlink (sdodson@redhat.com)
+- docker_creds: fix python3 exception (gscrivan@redhat.com)
+- docker_creds: fix python3 exception (gscrivan@redhat.com)
+- docker: use image from CentOS and Fedora registries (gscrivan@redhat.com)
+- crio: use Docker and CentOS registries for the image (gscrivan@redhat.com)
+- The provision_install file ends in yml not yaml! Ansible requirement
+  clarification. (mbruzek@gmail.com)
+
+* Tue Jan 23 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.23.0
+- docker_image_availability: enable skopeo to use proxies (lmeyer@redhat.com)
+- Install base_packages earlier (mgugino@redhat.com)
+- allow uninstalling AWS objects created by prerequisite playbook
+  (jdiaz@redhat.com)
+- Bug 1536262: Default console and TSB node selector to
+  openshift_hosted_infra_selector (spadgett@redhat.com)
+- Migrate master-config.yaml asset config (spadgett@redhat.com)
+- Fix master scaleup play (mgugino@redhat.com)
+- use admin credentials for tsb install operations (bparees@redhat.com)
+- Fix etcd-upgrade sanity checks (mgugino@redhat.com)
+- Bug 1536253: Pass `--config` flag on oc commands when installing console
+  (spadgett@redhat.com)
+- Fix enterprise registry-console prefix (sdodson@redhat.com)
+- [release-3.7] Fix enterprise registry console image prefix
+  (sdodson@redhat.com)
+- [release-3.6] Fix enterprise registry console image prefix
+  (sdodson@redhat.com)
+- Bug 1512825 - add mux pod failed for Serial number 02 has already been issued
+  (nhosoi@redhat.com)
+- Remove old console asset config (spadgett@redhat.com)
+- Add support for Amazon EC2 C5 instance types (rteague@redhat.com)
+- Fix provider network support at openstack playbook (ltomasbo@redhat.com)
+
 * Fri Jan 19 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.22.0
 - Fix OpenStack readme (tomas@sedovic.cz)
 - Quick installer: deprecate upgrades (vrutkovs@redhat.com)

+ 0 - 16
playbooks/adhoc/openshift_hosted_logging_efk.yaml

@@ -1,16 +0,0 @@
----
-- hosts: masters[0]
-  roles:
-  - role: openshift_logging
-    openshift_hosted_logging_cleanup: no
-
-- name: Update master-config for publicLoggingURL
-  hosts: masters:!masters[0]
-  pre_tasks:
-  - set_fact:
-      openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain }}"
-  tasks:
-  - import_role:
-      name: openshift_logging
-      tasks_from: update_master_config
-    when: openshift_hosted_logging_deploy | default(false) | bool

+ 18 - 0
playbooks/aws/README.md

@@ -198,3 +198,21 @@ At this point your cluster should be ready for workloads.  Proceed to deploy app
 ### Still to come
 
 There are more enhancements that are arriving for provisioning.  These will include more playbooks that enhance the provisioning capabilities.
+
+## Uninstall / Deprovisioning
+
+To undo the work done by the prerequisites playbook, call the uninstall_prerequisites.yml playbook. Any other objects (i.e. ELBs, instances, etc.) must be removed before attempting this. Use the same inventory file and provisioning_vars.yml file that were used during provisioning.
+
+```
+ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_prerequisites.yml
+```
+
+This should result in removal of the security groups and VPC that were created.
+
+Cleaning up the S3 bucket contents can be accomplished with:
+
+```
+ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_s3.yml
+```
+
+NOTE: If you also want to remove the ssh keys that were uploaded (**these ssh keys would be shared if you are running multiple clusters in the same AWS account**, so they are not removed by default), add 'openshift_aws_enable_uninstall_shared_objects: True' to your provisioning_vars.yml file.
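
For reference, a minimal sketch (not part of this changeset) of the relevant provisioning_vars.yml addition when opting in to shared-object removal; the variable name is taken from the note above:

```
# provisioning_vars.yml (excerpt)
# Opt in to removing shared objects, such as uploaded ssh keys, on uninstall.
openshift_aws_enable_uninstall_shared_objects: True
```

With this set, re-running uninstall_prerequisites.yml with the same inventory and provisioning_vars.yml should also remove the ssh keypair(s), since that playbook imports uninstall_ssh_keypair.yml.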

+ 0 - 25
playbooks/aws/openshift-cluster/hosted.yml

@@ -1,25 +0,0 @@
----
-- import_playbook: ../../openshift-hosted/private/config.yml
-
-- import_playbook: ../../openshift-metrics/private/config.yml
-  when: openshift_metrics_install_metrics | default(false) | bool
-
-- import_playbook: ../../openshift-logging/private/config.yml
-  when: openshift_logging_install_logging | default(false) | bool
-
-- import_playbook: ../../openshift-prometheus/private/config.yml
-  when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: ../../openshift-service-catalog/private/config.yml
-  when: openshift_enable_service_catalog | default(false) | bool
-
-- import_playbook: ../../openshift-management/private/config.yml
-  when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
-  hosts: oo_first_master
-  gather_facts: no
-  tasks:
-  - debug: msg="{{__deprecation_message}}"
-    when:
-    - __deprecation_message | default ('') | length > 0

+ 3 - 24
playbooks/aws/openshift-cluster/install.yml

@@ -18,29 +18,8 @@
 - name: run the init
   import_playbook: ../../init/main.yml
 
-- name: perform the installer openshift-checks
-  import_playbook: ../../openshift-checks/private/install.yml
+- name: configure the control plane
+  import_playbook: ../../common/private/control_plane.yml
 
 
-- name: etcd install
-  import_playbook: ../../openshift-etcd/private/config.yml
-
-- name: include nfs
-  import_playbook: ../../openshift-nfs/private/config.yml
-  when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- name: include loadbalancer
-  import_playbook: ../../openshift-loadbalancer/private/config.yml
-  when: groups.oo_lb_to_config | default([]) | count > 0
-
-- name: include openshift-master config
-  import_playbook: ../../openshift-master/private/config.yml
-
-- name: include master additional config
-  import_playbook: ../../openshift-master/private/additional_config.yml
-
-- name: include master additional config
+- name: ensure the masters are configured as nodes
   import_playbook: ../../openshift-node/private/config.yml
-
-- name: include openshift-glusterfs
-  import_playbook: ../../openshift-glusterfs/private/config.yml
-  when: groups.oo_glusterfs_to_config | default([]) | count > 0

+ 2 - 2
playbooks/aws/openshift-cluster/provision_install.yml

@@ -15,5 +15,5 @@
 - name: Include the accept.yml playbook to accept nodes into the cluster
   import_playbook: accept.yml
 
-- name: Include the hosted.yml playbook to finish the hosted configuration
-  import_playbook: hosted.yml
+- name: Include the components playbook to finish the hosted configuration
+  import_playbook: ../../common/private/components.yml

+ 9 - 0
playbooks/aws/openshift-cluster/uninstall_elb.yml

@@ -0,0 +1,9 @@
+---
+- name: Delete elb
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: deprovision elb
+    include_role:
+      name: openshift_aws
+      tasks_from: uninstall_elb.yml

+ 6 - 0
playbooks/aws/openshift-cluster/uninstall_prerequisites.yml

@@ -0,0 +1,6 @@
+---
+- import_playbook: uninstall_sec_group.yml
+
+- import_playbook: uninstall_vpc.yml
+
+- import_playbook: uninstall_ssh_keypair.yml

+ 10 - 0
playbooks/aws/openshift-cluster/uninstall_s3.yml

@@ -0,0 +1,10 @@
+---
+- name: Empty/delete s3 bucket
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: empty/delete s3 bucket
+    include_role:
+      name: openshift_aws
+      tasks_from: uninstall_s3.yml
+    when: openshift_aws_create_s3 | default(true) | bool

+ 10 - 0
playbooks/aws/openshift-cluster/uninstall_sec_group.yml

@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: delete security groups
+    include_role:
+      name: openshift_aws
+      tasks_from: uninstall_security_group.yml
+    when: openshift_aws_create_security_groups | default(True) | bool

+ 10 - 0
playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml

@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: remove ssh keypair(s)
+    include_role:
+      name: openshift_aws
+      tasks_from: uninstall_ssh_keys.yml
+    when: openshift_aws_users | default([]) | length  > 0

+ 10 - 0
playbooks/aws/openshift-cluster/uninstall_vpc.yml

@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: delete vpc
+    include_role:
+      name: openshift_aws
+      tasks_from: uninstall_vpc.yml
+    when: openshift_aws_create_vpc | default(True) | bool

+ 23 - 1
playbooks/aws/provisioning_vars.yml.example

@@ -21,6 +21,12 @@ openshift_release: # v3.7
 # This will be dependent on the version provided by the yum repository
 openshift_pkg_version: # -3.7.0
 
+# OpenShift api port
+# Fulfills a chicken/egg scenario with how Ansible treats host inventory file
+# and extra_vars.  This is used for SecurityGroups, ELB Listeners as well as
+# an override to installer inventory openshift_master_api_port key
+# openshift_master_api_port: 8443
+
 # specify a clusterid
 # This value is also used as the default value for many other components.
 #openshift_aws_clusterid: default
@@ -41,11 +47,27 @@ openshift_pkg_version: # -3.7.0
 # a vpc, set this to false.
 #openshift_aws_create_vpc: true
 
+# when openshift_aws_create_vpc is true (the default), the VPC defined in
+# openshift_aws_vpc will be created
+#openshift_aws_vpc:
+#  name: "{{ openshift_aws_vpc_name }}"
+#  cidr: 172.31.0.0/16
+#  subnets:
+#    us-east-1:
+#    - cidr: 172.31.48.0/20
+#      az: "us-east-1c"
+#      default_az: true
+#    - cidr: 172.31.32.0/20
+#      az: "us-east-1e"
+#    - cidr: 172.31.16.0/20
+#      az: "us-east-1a"
+
 # Name of the vpc.  Needs to be set if using a pre-existing vpc.
 #openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
 
 # Name of the subnet in the vpc to use.  Needs to be set if using a pre-existing
-# vpc + subnet.
+# vpc + subnet. Otherwise will use the subnet with 'default_az' set (see above
+# example VPC structure)
 #openshift_aws_subnet_az:
 
 # -------------- #

+ 20 - 0
playbooks/byo/openshift-cluster/upgrades/v3_10/README.md

@@ -0,0 +1,20 @@
+# v3.10 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the following steps.
+
+ * Upgrades and restarts master services
+ * Unschedules the node
+ * Upgrades and restarts docker
+ * Upgrades and restarts node services
+ * Modifies the subset of the configuration necessary
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml
+```
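
If the control plane and nodes are upgraded in separate steps, the split playbooks added alongside this README can be run in sequence; a sketch using the same inventory path as the example above:

```
ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
```

The node upgrade requires the control plane to have already been upgraded, per the header comments in those playbooks.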

+ 5 - 0
playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml

@@ -0,0 +1,5 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade.yml

+ 16 - 0
playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml

@@ -0,0 +1,16 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
+
+- import_playbook: ../../../../openshift-master/private/restart.yml

+ 7 - 0
playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml

@@ -0,0 +1,7 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml

+ 2 - 0
playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml

@@ -12,3 +12,5 @@
 # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
 #
 - import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+
+- import_playbook: ../../../../openshift-master/private/restart.yml

playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml → playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml


+ 24 - 0
playbooks/cluster-operator/aws/components.yml

@@ -0,0 +1,24 @@
+---
+- name: Alert user to variables needed
+  hosts: localhost
+  tasks:
+  - name: Alert user to variables needed - clusterid
+    debug:
+      msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+  - name: Alert user to variables needed - region
+    debug:
+      msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+- name: Setup the master node group
+  hosts: localhost
+  tasks:
+  - import_role:
+      name: openshift_aws
+      tasks_from: setup_master_group.yml
+
+- name: run the init
+  import_playbook: ../../init/main.yml
+
+- name: Include the components playbook to finish the hosted configuration
+  import_playbook: ../../common/private/components.yml

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml

@@ -19,7 +19,7 @@
   - import_role:
       name: container_runtime
       tasks_from: docker_upgrade_check.yml
-    when: docker_upgrade is not defined or docker_upgrade | bool
+    when: docker_upgrade | default(True) | bool
 
 
 
 
 # If a node fails, halt everything, the admin will need to clean up and we

+ 3 - 1
playbooks/common/openshift-cluster/upgrades/init.yml

@@ -5,7 +5,9 @@
     g_new_master_hosts: []
     g_new_node_hosts: []
 
-- import_playbook: ../../../init/facts.yml
+- import_playbook: ../../../init/basic_facts.yml
+- import_playbook: ../../../init/base_packages.yml
+- import_playbook: ../../../init/cluster_facts.yml
 
 
 - name: Ensure firewall is not switched during upgrade
   hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml

@@ -31,7 +31,7 @@
       with_items: " {{ groups['oo_nodes_to_config'] }}"
       when:
       - hostvars[item].openshift is defined
-      - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+      - hostvars[item].openshift.common.hostname | lower in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
       changed_when: false
 
   # Build up the oo_nodes_to_upgrade group, use the list filtered by label if

+ 33 - 1
playbooks/common/openshift-cluster/upgrades/post_control_plane.yml

@@ -6,7 +6,9 @@
   hosts: oo_first_master
   roles:
   - role: openshift_web_console
-    when: openshift_web_console_install | default(true) | bool
+    when:
+    - openshift_web_console_install | default(true) | bool
+    - openshift_upgrade_target is version_compare('3.9','>=')
 
 
 - name: Upgrade default router and default registry
   hosts: oo_first_master
@@ -111,6 +113,29 @@
     registry_url: "{{ openshift.master.registry_url }}"
     openshift_hosted_templates_import_command: replace
 
+  post_tasks:
+  # Do not perform these tasks when the registry is insecure.  The default registry is insecure in openshift_hosted/defaults/main.yml
+  - when: not (openshift_docker_hosted_registry_insecure | default(True))
+    block:
+    # we need to migrate customers to the new pattern of pushing to the registry via dns
+    # Step 1: verify the certificates have the docker registry service name
+    - name: shell command to determine if the docker-registry.default.svc is found in the registry certificate
+      shell: >
+        echo -n | openssl s_client -showcerts -servername docker-registry.default.svc -connect docker-registry.default.svc:5000  | openssl x509 -text |  grep -A1 'X509v3 Subject Alternative Name:' | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)'
+      register: cert_output
+      changed_when: false
+      failed_when:
+      - cert_output.rc not in [0, 1]
+
+    # Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs
+    - name: set a fact to include the registry certs playbook if needed
+      set_fact:
+        openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc != 0  }}"
+
+# Run the redeploy certs based upon the certificates. Defaults to False for insecure registries
+- when: (hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry | default(False)) | bool
+  import_playbook: ../../../openshift-hosted/private/redeploy-registry-certificates.yml
+
 # Check for warnings to be printed at the end of the upgrade:
 - name: Clean up and display warnings
   hosts: oo_masters_to_config
@@ -140,3 +165,10 @@
       msg: "WARNING the shared-resource-viewer role could not be upgraded to 3.6 spec because it's marked protected, please see https://bugzilla.redhat.com/show_bug.cgi?id=1493213"
     when:
     - __shared_resource_viewer_protected | default(false)
+
+- name: Upgrade Service Catalog
+  hosts: oo_first_master
+  roles:
+  - role: openshift_service_catalog
+    when:
+    - openshift_enable_service_catalog | default(true) | bool

+ 9 - 4
playbooks/common/openshift-cluster/upgrades/pre/config.yml

@@ -5,8 +5,6 @@
 # Pre-upgrade
 - import_playbook: ../initialize_nodes_to_upgrade.yml
 
-- import_playbook: verify_cluster.yml
-
 - name: Update repos on upgrade hosts
   hosts: "{{ l_upgrade_repo_hosts }}"
   roles:
@@ -49,10 +47,16 @@
     # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
     # defined, and overriding the normal behavior of protecting the installed version
     openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
+    # openshift_protect_installed_version is passed in via upgrade_control_plane.yml
     # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
     # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
 
+# version_override will set various version-related variables during a double upgrade.
+- import_playbook: version_override.yml
+  when: l_double_upgrade_cp | default(False)
+
+- import_playbook: verify_cluster.yml
+
 # If we're only upgrading nodes, we need to ensure masters are already upgraded
 - name: Verify masters are already upgraded
   hosts: oo_masters_to_config
@@ -60,7 +64,7 @@
   - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
     when:
     - l_upgrade_nodes_only | default(False) | bool
-    - openshift.common.version != openshift_version
+    - not openshift.common.version | match(openshift_version)
 
 
 # If we're only upgrading nodes, skip this.
 - import_playbook: ../../../../openshift-master/private/validate_restart.yml
@@ -79,3 +83,4 @@
   - import_role:
       name: container_runtime
       tasks_from: docker_upgrade_check.yml
+    when: docker_upgrade | default(True) | bool

+ 2 - 0
playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml

@@ -17,6 +17,7 @@
         valid version for a {{ openshift_upgrade_target }} upgrade
     when:
     - openshift_pkg_version is defined
+    - openshift_pkg_version != ""
     - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')
 
   - fail:
@@ -25,6 +26,7 @@
         valid version for a {{ openshift_upgrade_target }} upgrade
     when:
     - openshift_image_tag is defined
+    - openshift_image_tag != ""
     - openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')
 
   - set_fact:

+ 29 - 0
playbooks/common/openshift-cluster/upgrades/pre/version_override.yml

@@ -0,0 +1,29 @@
+---
+# This playbook overrides normal version setting during double upgrades.
+
+- name: Set proper version values for upgrade
+  hosts: "{{ l_version_override_hosts | default('all:!all') }}"
+  tasks:
+    - set_fact:
+        # All of these will either have been set by openshift_version or
+        # provided by the user; we need to save these for later.
+        l_double_upgrade_saved_version: "{{ openshift_version }}"
+        l_double_upgrade_saved_release: "{{ openshift_release | default(openshift_upgrade_target) }}"
+        l_double_upgrade_saved_tag: "{{ openshift_image_tag }}"
+        l_double_upgrade_saved_pkgv: "{{ openshift_pkg_version }}"
+    - set_fact:
+        # We already ran openshift_version for the second of two upgrades;
+        # here we need to set some variables to enable the first upgrade.
+        # openshift_version, openshift_image_tag, and openshift_pkg_version
+        # will be modified by openshift_version; we want to ensure these
+        # are initially set to first versions to ensure no accidental usage of
+        # second versions (eg, 3.8 and 3.9 respectively) are used.
+        l_double_upgrade_cp_reset_version: True
+        openshift_version: "{{ l_double_upgrade_first_version }}"
+        openshift_release: "{{ l_double_upgrade_first_release }}"
+        openshift_upgrade_target: '3.8'
+        openshift_upgrade_min: '3.7'
+
+# Now that we have force-set a different version, we need to update a few things
+# to ensure we have settings that actually match what's in repos/registries.
+- import_playbook: ../../../../init/version.yml

+ 16 - 15
playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml

@@ -2,6 +2,7 @@
 ###############################################################################
 # Upgrade Masters
 ###############################################################################
+
 - name: Backup and upgrade etcd
   import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
 
@@ -32,7 +33,6 @@
     register: l_pb_upgrade_control_plane_pre_upgrade_storage
     when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
     failed_when:
-    - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
     - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
     - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
 
@@ -48,8 +48,6 @@
 # support for optional hooks to be defined.
 - name: Upgrade master
   hosts: oo_masters_to_config
-  vars:
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
   roles:
   - openshift_facts
@@ -72,6 +70,12 @@
   - include_tasks: "{{ openshift_master_upgrade_hook }}"
     when: openshift_master_upgrade_hook is defined
 
+  - name: Disable master controller
+    service:
+      name: "{{ openshift_service_type }}-master-controllers"
+      enabled: false
+    when: openshift.common.rolling_restart_mode == 'system'
+
   - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
     when: openshift.common.rolling_restart_mode == 'system'
 
@@ -94,7 +98,6 @@
     - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     - openshift_version is version_compare('3.7','<')
     failed_when:
-    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
     - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
     run_once: true
@@ -230,7 +233,6 @@
     register: l_pb_upgrade_control_plane_post_upgrade_storage
     when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     failed_when:
-    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
     - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
 
@@ -309,14 +311,13 @@
   post_tasks:
   - import_role:
       name: openshift_node
+      tasks_from: upgrade_pre.yml
+  - import_role:
+      name: openshift_node
       tasks_from: upgrade.yml
-  - name: Set node schedulability
-    oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
-      schedulable: True
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    retries: 10
-    delay: 5
-    register: node_schedulable
-    until: node_schedulable is succeeded
-    when: node_unschedulable is changed
+  - import_role:
+      name: openshift_manage_node
+      tasks_from: config.yml
+    vars:
+      openshift_master_host: "{{ groups.oo_first_master.0 }}"
+      openshift_manage_node_is_master: true

+ 6 - 0
playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml

@@ -76,6 +76,12 @@
     until: node_schedulable is succeeded
     when: node_unschedulable is changed
 
+  - import_role:
+      name: openshift_manage_node
+      tasks_from: config.yml
+    vars:
+      openshift_master_host: "{{ groups.oo_first_master.0 }}"
+
 - name: Re-enable excluders
   hosts: oo_nodes_to_upgrade:!oo_masters_to_config
   tasks:

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml

@@ -0,0 +1 @@
+---

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_10/roles

@@ -0,0 +1 @@
+../../../../../roles/

+ 7 - 0
playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml

@@ -0,0 +1,7 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: upgrade_control_plane.yml
+
+- import_playbook: upgrade_nodes.yml

+ 58 - 0
playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml

@@ -0,0 +1,58 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../init.yml
+  vars:
+    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+
+- name: Configure the upgrade target for the common upgrade tasks 3.10
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  tasks:
+  - meta: clear_facts
+  - set_fact:
+      openshift_upgrade_target: '3.10'
+      openshift_upgrade_min: '3.9'
+      openshift_release: '3.10'
+
+- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
+  vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      pre_upgrade_complete: True
+
+- import_playbook: ../upgrade_control_plane.yml
+  vars:
+    openshift_release: '3.10'
+
+- import_playbook: ../post_control_plane.yml
+
+- hosts: oo_masters
+  tasks:
+  - import_role:
+      name: openshift_web_console
+      tasks_from: remove_old_asset_config

+ 35 - 0
playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml

@@ -0,0 +1,35 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../init.yml
+
+- name: Configure the upgrade target for the common upgrade tasks
+  hosts: oo_all_hosts
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: '3.10'
+      openshift_upgrade_min: '3.9'
+      openshift_release: '3.10'
+
+- import_playbook: ../pre/config.yml
+  vars:
+    l_upgrade_repo_hosts: "oo_nodes_to_config"
+    l_upgrade_no_proxy_hosts: "oo_all_hosts"
+    l_upgrade_health_check_hosts: "oo_nodes_to_config"
+    l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+    l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+    l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_nodes.yml

+ 7 - 0
playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml

@@ -0,0 +1,7 @@
+---
+- name: Verify 3.8 specific upgrade checks
+  hosts: oo_first_master
+  roles:
+  - { role: lib_openshift }
+  tasks:
+  - debug: msg="noop"

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml

@@ -23,6 +23,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    openshift_protect_installed_version: False
 
 
 - import_playbook: validator.yml
 

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml

@@ -35,6 +35,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
 
 
 - import_playbook: validator.yml
 

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml

@@ -23,6 +23,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    openshift_protect_installed_version: False
 
 
 - import_playbook: validator.yml
 

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml

@@ -35,6 +35,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
 
 
 - import_playbook: validator.yml
 

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml

@@ -23,6 +23,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    openshift_protect_installed_version: False
 
 
 - import_playbook: validator.yml
 

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml

@@ -36,6 +36,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
 
 
 - import_playbook: validator.yml
 

+ 2 - 49
playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml

@@ -2,53 +2,6 @@
 #
 # Full Control Plane + Nodes Upgrade
 #
-- import_playbook: ../init.yml
+- import_playbook: upgrade_control_plane.yml
 
 
-- name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_all_hosts
-  tasks:
-  - set_fact:
-      openshift_upgrade_target: '3.9'
-      openshift_upgrade_min: '3.7'
-      openshift_release: '3.9'
-
-- import_playbook: ../pre/config.yml
-  vars:
-    l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
-    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
-    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
-    l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - set_fact:
-      pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_control_plane.yml
-
-# All controllers must be stopped at the same time then restarted
-- name: Cycle all controller services to force new leader election mode
-  hosts: oo_masters_to_config
-  gather_facts: no
-  roles:
-  - role: openshift_facts
-  tasks:
-  - name: Stop {{ openshift_service_type }}-master-controllers
-    systemd:
-      name: "{{ openshift_service_type }}-master-controllers"
-      state: stopped
-  - name: Start {{ openshift_service_type }}-master-controllers
-    systemd:
-      name: "{{ openshift_service_type }}-master-controllers"
-      state: started
-
-- import_playbook: ../upgrade_nodes.yml
-
-- import_playbook: ../post_control_plane.yml
+- import_playbook: upgrade_nodes.yml

+ 58 - 26
playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml

@@ -15,24 +15,34 @@
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_base_packages_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 
-## Check to see if they're running 3.7 and if so upgrade them to 3.8 on control plan
-## If they've specified pkg_version or image_tag preserve that for later use
-- name: Configure the upgrade target for the common upgrade tasks 3.8
+- name: Configure the initial upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
   tasks:
   - set_fact:
-      openshift_upgrade_target: '3.8'
+      # We use 3.9 here so when we run openshift_version we can get
+      # correct values for 3.9, 3.8 we will hard-code the values in
+      # ../pre/version_override.yml, if necessary.
+      openshift_upgrade_target: '3.9'
       openshift_upgrade_min: '3.7'
-      openshift_release: '3.8'
-      _requested_pkg_version: "{{openshift_pkg_version if openshift_pkg_version is defined else omit }}"
-      _requested_image_tag: "{{openshift_image_tag if openshift_image_tag is defined else omit }}"
+
+## Check to see if we need to double upgrade (3.7 -> 3.8 -> 3.9)
+- name: Configure variables for double upgrade
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      l_double_upgrade_cp: True
+      l_version_override_hosts: "oo_masters_to_config:oo_etcd_to_config"
+      l_double_upgrade_first_version: "3.8"
+      l_double_upgrade_first_release: "3.8"
     when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
 
 - import_playbook: ../pre/config.yml
   # These vars are meant to exclude oo_nodes from plays that would otherwise include
   # them by default.
   vars:
+    l_version_override_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
     l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
     l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
@@ -41,34 +51,49 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
-  when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+    openshift_protect_installed_version: False
+  when: l_double_upgrade_cp | default(False)
 
 
 - name: Flag pre-upgrade checks complete for hosts without errors 3.8
   hosts: oo_masters_to_config:oo_etcd_to_config
   tasks:
   - set_fact:
       pre_upgrade_complete: True
-    when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+    when: l_double_upgrade_cp | default(False)
 
 
 # Pre-upgrade completed
 
-- import_playbook: ../upgrade_control_plane.yml
-  vars:
-    openshift_release: '3.8'
-  when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+- name: Intermediate 3.8 Upgrade
+  import_playbook: ../upgrade_control_plane.yml
+  when: l_double_upgrade_cp | default(False)
+
+- name: Restore 3.9 version variables
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      # all:!all == 0 hosts
+      l_version_override_hosts: "all:!all"
+      openshift_version: "{{ l_double_upgrade_saved_version }}"
+      openshift_release: "{{ l_double_upgrade_saved_release }}"
+      openshift_image_tag: "{{ l_double_upgrade_saved_tag }}"
+      openshift_pkg_version: "{{ l_double_upgrade_saved_pkgv }}"
+    when: l_double_upgrade_cp | default(False)
 
 
 ## 3.8 upgrade complete we should now be able to upgrade to 3.9
+- name: Clear some values now that we're done with double upgrades.
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      l_double_upgrade_cp: False
+      l_double_upgrade_cp_reset_version: False
 
 
-- name: Configure the upgrade target for the common upgrade tasks 3.9
+# We should be on 3.8 at this point, need to set upgrade_target to 3.9
+- name: Configure the upgrade target for second upgrade
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
   tasks:
-  - meta: clear_facts
   - set_fact:
       openshift_upgrade_target: '3.9'
       openshift_upgrade_min: '3.8'
-      openshift_release: '3.9'
-      openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}"
-      openshift_image_tag: "{{ _requested_image_tag | default('v3.9') }}"
 
 
 - import_playbook: ../pre/config.yml
   # These vars are meant to exclude oo_nodes from plays that would otherwise include
@@ -82,6 +107,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
 
 
 - name: Flag pre-upgrade checks complete for hosts without errors
   hosts: oo_masters_to_config:oo_etcd_to_config
@@ -90,8 +116,6 @@
       pre_upgrade_complete: True
 
 - import_playbook: ../upgrade_control_plane.yml
-  vars:
-    openshift_release: '3.9'
 
 
 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode
@@ -100,13 +124,21 @@
   roles:
   - role: openshift_facts
   tasks:
-  - name: Stop {{ openshift_service_type }}-master-controllers
-    systemd:
+  - name: Restart master controllers to force new leader election mode
+    service:
       name: "{{ openshift_service_type }}-master-controllers"
-      state: stopped
-  - name: Start {{ openshift_service_type }}-master-controllers
-    systemd:
+      state: restarted
+    when: openshift.common.rolling_restart_mode == 'services'
+  - name: Re-enable master controllers to force new leader election mode
+    service:
       name: "{{ openshift_service_type }}-master-controllers"
-      state: started
+      enabled: true
+    when: openshift.common.rolling_restart_mode == 'system'
 
 
 - import_playbook: ../post_control_plane.yml
+
+- hosts: oo_masters
+  tasks:
+  - import_role:
+      name: openshift_web_console
+      tasks_from: remove_old_asset_config

+ 40 - 0
playbooks/common/private/components.yml

@@ -0,0 +1,40 @@
+---
+# These are the core component plays that configure the layers above the control
+# plane. A component is generally considered any part of OpenShift that runs on
+# top of the cluster and may be considered optional. Over time, much of OpenShift
+# above the Kubernetes apiserver and masters may be considered components.
+#
+# Preconditions:
+#
+# 1. The control plane is configured and reachable from nodes inside the cluster
+# 2. An admin kubeconfig file in /etc/origin/master/admin.kubeconfig that can
+#    perform root level actions against the cluster
+# 3. On cloud providers, persistent volume provisioners are configured
+# 4. A subset of nodes is available to allow components to schedule - this must
+#    include the masters and usually includes infra nodes.
+# 5. The init/main.yml playbook has been invoked
+
+- import_playbook: ../../openshift-glusterfs/private/config.yml
+  when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-hosted/private/config.yml
+
+- import_playbook: ../../openshift-web-console/private/config.yml
+  when:
+    - openshift_web_console_install | default(true) | bool
+    - openshift.common.version_gte_3_9
+
+- import_playbook: ../../openshift-metrics/private/config.yml
+  when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: ../../openshift-logging/private/config.yml
+  when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: ../../openshift-prometheus/private/config.yml
+  when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: ../../openshift-service-catalog/private/config.yml
+  when: openshift_enable_service_catalog | default(true) | bool
+
+- import_playbook: ../../openshift-management/private/config.yml
+  when: openshift_management_install_management | default(false) | bool
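For reference: because these plays are self-contained, a thin wrapper can re-run only the component layer against an already-installed cluster. A minimal sketch, assuming the wrapper lives under playbooks/ and that init/main.yml still populates the oo_* groups (the wrapper itself is not part of this diff):

---
# Hypothetical wrapper: refresh facts and groups, then run only the
# component plays on an existing cluster.
- import_playbook: init/main.yml

- import_playbook: common/private/components.yml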

+ 34 - 0
playbooks/common/private/control_plane.yml

@@ -0,0 +1,34 @@
+---
+# These are the control plane plays that configure a control plane on top of hosts
+# identified as masters. Over time, some of the pieces of the current control plane
+# may be moved to the components list.
+#
+# It is not required for any nodes to be configured, or passed to be configured,
+# when this playbook is invoked.
+#
+# Preconditions:
+#
+# 1. A set of machines have been identified to act as masters
+# 2. On cloud providers, a load balancer has been configured to point to the masters
+#    and that load balancer has a DNS name
+# 3. The init/main.yml playbook has been invoked
+#
+# Postconditions:
+#
+# 1. The control plane is reachable from the outside of the cluster
+# 2. The master has an /etc/origin/master/admin.kubeconfig file that gives cluster-admin
+#    access.
+
+- import_playbook: ../../openshift-checks/private/install.yml
+
+- import_playbook: ../../openshift-etcd/private/config.yml
+
+- import_playbook: ../../openshift-nfs/private/config.yml
+  when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-loadbalancer/private/config.yml
+  when: groups.oo_lb_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-master/private/config.yml
+
+- import_playbook: ../../openshift-master/private/additional_config.yml

+ 3 - 1
playbooks/container-runtime/config.yml

@@ -1,6 +1,8 @@
---
- import_playbook: ../init/main.yml
  vars:
-    skip_verison: True
+    skip_version: True
+    l_openshift_version_set_hosts: "all:!all"
+    l_openshift_version_check_hosts: "all:!all"

- import_playbook: private/config.yml

+ 3 - 1
playbooks/container-runtime/private/build_container_groups.yml

@@ -1,6 +1,8 @@
---
+# l_build_container_groups_hosts is passed in via prerequisites.yml during
+# etcd scaleup plays.
- name: create oo_hosts_containerized_managed_true host group
-  hosts: oo_all_hosts:!oo_nodes_to_config
+  hosts: "{{ l_build_container_groups_hosts | default('oo_all_hosts:!oo_nodes_to_config') }}"
  tasks:
  - group_by:
      key: oo_hosts_containerized_managed_{{ (openshift_is_containerized | default(False)) | ternary('true','false') }}

+ 9 - 1
playbooks/container-runtime/private/config.yml

@@ -1,15 +1,23 @@
---
# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_etcd_scale_up_hosts may be passed in via prerequisites.yml during etcd
+# scaleup plays.

- import_playbook: build_container_groups.yml

-- hosts: "{{ l_scale_up_hosts | default(l_default_container_runtime_hosts) }}"
+- hosts: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_runtime_hosts) }}"
  vars:
    l_default_container_runtime_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
  roles:
    - role: container_runtime
  tasks:
    - import_role:
+        name: openshift_excluder
+        tasks_from: enable.yml
+      vars:
+        r_openshift_excluder_action: enable
+        r_openshift_excluder_enable_openshift_excluder: false
+    - import_role:
        name: container_runtime
        tasks_from: package_docker.yml
      when:

+ 3 - 1
playbooks/container-runtime/private/setup_storage.yml

@@ -1,9 +1,11 @@
---
# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_etcd_scale_up_hosts may be passed in via prerequisites.yml during etcd
+# scaleup plays.

- import_playbook: build_container_groups.yml

-- hosts: "{{ l_scale_up_hosts | default(l_default_container_storage_hosts) }}"
+- hosts: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_storage_hosts) }}"
  vars:
    l_default_container_storage_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true"
    l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"

+ 3 - 1
playbooks/container-runtime/setup_storage.yml

@@ -1,6 +1,8 @@
---
- import_playbook: ../init/main.yml
  vars:
-    skip_verison: True
+    skip_version: True
+    l_openshift_version_set_hosts: "all:!all"
+    l_openshift_version_check_hosts: "all:!all"

- import_playbook: private/setup_storage.yml

+ 2 - 43
playbooks/deploy_cluster.yml

@@ -1,49 +1,8 @@
---
- import_playbook: init/main.yml

-- import_playbook: openshift-checks/private/install.yml
-
-- import_playbook: openshift-etcd/private/config.yml
-
-- import_playbook: openshift-nfs/private/config.yml
-  when: groups.oo_nfs_to_config | default([]) | count > 0
-
-- import_playbook: openshift-loadbalancer/private/config.yml
-  when: groups.oo_lb_to_config | default([]) | count > 0
-
-- import_playbook: openshift-master/private/config.yml
-
-- import_playbook: openshift-master/private/additional_config.yml
+- import_playbook: common/private/control_plane.yml

- import_playbook: openshift-node/private/config.yml

-- import_playbook: openshift-glusterfs/private/config.yml
-  when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- import_playbook: openshift-hosted/private/config.yml
-
-- import_playbook: openshift-web-console/private/config.yml
-  when: openshift_web_console_install | default(true) | bool
-
-- import_playbook: openshift-metrics/private/config.yml
-  when: openshift_metrics_install_metrics | default(false) | bool
-
-- import_playbook: openshift-logging/private/config.yml
-  when: openshift_logging_install_logging | default(false) | bool
-
-- import_playbook: openshift-prometheus/private/config.yml
-  when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: openshift-service-catalog/private/config.yml
-  when: openshift_enable_service_catalog | default(true) | bool
-
-- import_playbook: openshift-management/private/config.yml
-  when: openshift_management_install_management | default(false) | bool
-
-- name: Print deprecated variable warning message if necessary
-  hosts: oo_first_master
-  gather_facts: no
-  tasks:
-  - debug: msg="{{__deprecation_message}}"
-    when:
-    - __deprecation_message | default ('') | length > 0
+- import_playbook: common/private/components.yml
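For reference, reconstructed from the hunk above, the whole deploy_cluster.yml now reduces to four imports, which makes the init/control-plane/node/components layering explicit:

---
- import_playbook: init/main.yml

- import_playbook: common/private/control_plane.yml

- import_playbook: openshift-node/private/config.yml

- import_playbook: common/private/components.yml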

+ 163 - 0
playbooks/gcp/openshift-cluster/build_base_image.yml

@@ -0,0 +1,163 @@
+---
+# This playbook ensures that a base image is up to date with all of the required settings
+- name: Launch image build instance
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: Require openshift_gcp_root_image
+    fail:
+      msg: "A root OS image name or family is required for base image building.  Please ensure `openshift_gcp_root_image` is defined."
+    when: openshift_gcp_root_image is undefined
+
+  - name: Create the image instance disk
+    gce_pd:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      name: "{{ openshift_gcp_prefix }}build-image-instance"
+      disk_type: pd-ssd
+      image: "{{ openshift_gcp_root_image }}"
+      size_gb: 10
+      state: present
+
+  - name: Launch the image build instance
+    gce:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      machine_type: n1-standard-1
+      instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+      state: present
+      tags:
+      - build-image-instance
+      disk_auto_delete: false
+      disks:
+      - "{{ openshift_gcp_prefix }}build-image-instance"
+    register: gce
+
+  - add_host:
+      hostname: "{{ item.public_ip }}"
+      groupname: build_instance_ips
+    with_items: "{{ gce.instance_data }}"
+
+  - name: Wait for instance to respond to SSH
+    wait_for:
+      delay: 1
+      host: "{{ item.public_ip }}"
+      port: 22
+      state: started
+      timeout: 120
+    with_items: "{{ gce.instance_data }}"
+
+- name: Prepare instance content sources
+  pre_tasks:
+  - set_fact:
+      allow_rhel_subscriptions: "{{ rhsub_skip | default('no', True) | lower in ['no', 'false'] }}"
+  - set_fact:
+      using_rhel_subscriptions: "{{ (deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise'] or ansible_distribution == 'RedHat') and allow_rhel_subscriptions }}"
+  hosts: build_instance_ips
+  roles:
+  - role: rhel_subscribe
+    when: using_rhel_subscriptions
+  - role: openshift_repos
+    vars:
+      openshift_additional_repos: []
+  post_tasks:
+  - name: Add custom repositories
+    include_role:
+      name: openshift_gcp
+      tasks_from: add_custom_repositories.yml
+  - name: Add the Google Cloud repo
+    yum_repository:
+      name: google-cloud
+      description: Google Cloud Compute
+      baseurl: https://packages.cloud.google.com/yum/repos/google-cloud-compute-el7-x86_64
+      gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+      gpgcheck: yes
+      repo_gpgcheck: yes
+      state: present
+    when: ansible_os_family == "RedHat"
+  - name: Add the jdetiber-qemu-user-static copr repo
+    yum_repository:
+      name: jdetiber-qemu-user-static
+      description: QEMU user static COPR
+      baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/epel-7-$basearch/
+      gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/pubkey.gpg
+      gpgcheck: yes
+      repo_gpgcheck: no
+      state: present
+    when: ansible_os_family == "RedHat"
+  - name: Accept GPG keys for the repos
+    command: yum -q makecache -y --disablerepo='*' --enablerepo='google-cloud,jdetiber-qemu-user-static'
+  - name: Install qemu-user-static
+    package:
+      name: qemu-user-static
+      state: present
+  - name: Start and enable systemd-binfmt service
+    systemd:
+      name: systemd-binfmt
+      state: started
+      enabled: yes
+
+- name: Build image
+  hosts: build_instance_ips
+  pre_tasks:
+  - name: Set up core host GCP configuration
+    include_role:
+      name: openshift_gcp
+      tasks_from: configure_gcp_base_image.yml
+  roles:
+  - role: os_update_latest
+  post_tasks:
+  - name: Disable all repos on RHEL
+    command: subscription-manager repos --disable="*"
+    when: using_rhel_subscriptions
+  - name: Enable repos for packages on RHEL
+    command: subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-extras-rpms"
+    when: using_rhel_subscriptions
+  - name: Install common image prerequisites
+    package: name={{ item }} state=latest
+    with_items:
+    # required by Ansible
+    - PyYAML
+    - google-compute-engine
+    - google-compute-engine-init
+    - google-config
+    - wget
+    - git
+    - net-tools
+    - bind-utils
+    - iptables-services
+    - bridge-utils
+    - bash-completion
+  - name: Clean yum metadata
+    command: yum clean all
+    args:
+      warn: no
+    when: ansible_os_family == "RedHat"
+
+- name: Commit image
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: Terminate the image build instance
+    gce:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+      state: absent
+  - name: Save the new image
+    command: gcloud --project "{{ openshift_gcp_project}}" compute images create "{{ openshift_gcp_base_image_name | default(openshift_gcp_base_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_base_image }}"
+  - name: Remove the image instance disk
+    gce_pd:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      name: "{{ openshift_gcp_prefix }}build-image-instance"
+      state: absent
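Every GCE task in this playbook reads the same handful of openshift_gcp_* variables. A minimal sketch of the variable set it expects, with placeholder values (the names are taken from the tasks above; none of the values are defaults shipped by this repo):

---
# Hypothetical vars file for build_base_image.yml; adjust to your project.
openshift_gcp_project: my-gcp-project              # GCP project id
openshift_gcp_zone: us-central1-a                  # zone for the build instance
openshift_gcp_prefix: mycluster-                   # prefix for created resources
openshift_gcp_root_image: rhel-7                   # root OS image or family to start from
openshift_gcp_base_image: mycluster-base           # image family written by this play
openshift_gcp_iam_service_account_keyfile: /path/to/service-account.json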

+ 112 - 0
playbooks/gcp/openshift-cluster/build_image.yml

@@ -0,0 +1,112 @@
+---
+- name: Verify prerequisites for image build
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: Require openshift_gcp_base_image
+    fail:
+      msg: "A base image name or family is required for image building.  Please ensure `openshift_gcp_base_image` is defined."
+    when: openshift_gcp_base_image is undefined
+
+- name: Launch image build instance
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: Set facts
+    set_fact:
+      openshift_node_bootstrap: True
+      openshift_master_unsupported_embedded_etcd: True
+
+  - name: Create the image instance disk
+    gce_pd:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      name: "{{ openshift_gcp_prefix }}build-image-instance"
+      disk_type: pd-ssd
+      image: "{{ openshift_gcp_base_image }}"
+      size_gb: 10
+      state: present
+
+  - name: Launch the image build instance
+    gce:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      machine_type: n1-standard-1
+      instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+      state: present
+      tags:
+      - build-image-instance
+      disk_auto_delete: false
+      disks:
+      - "{{ openshift_gcp_prefix }}build-image-instance"
+    register: gce
+
+  - name: add host to nodes
+    add_host:
+      hostname: "{{ item.public_ip }}"
+      groupname: nodes
+    with_items: "{{ gce.instance_data }}"
+
+  - name: Wait for instance to respond to SSH
+    wait_for:
+      delay: 1
+      host: "{{ item.public_ip }}"
+      port: 22
+      state: started
+      timeout: 120
+    with_items: "{{ gce.instance_data }}"
+
+- name: Wait for full SSH connection
+  hosts: nodes
+  gather_facts: no
+  tasks:
+  - wait_for_connection:
+
+- hosts: nodes
+  tasks:
+  - name: Set facts
+    set_fact:
+      openshift_node_bootstrap: True
+
+# This is the part that installs all of the software and configs for the instance
+# to become a node.
+- import_playbook: ../../openshift-node/private/image_prep.yml
+
+# Add additional GCP specific behavior
+- hosts: nodes
+  tasks:
+  - include_role:
+      name: openshift_gcp
+      tasks_from: node_cloud_config.yml
+  - include_role:
+      name: openshift_gcp
+      tasks_from: frequent_log_rotation.yml
+
+- name: Commit image
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: Terminate the image build instance
+    gce:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      instance_names: "{{ openshift_gcp_prefix }}build-image-instance"
+      state: absent
+  - name: Save the new image
+    command: gcloud --project "{{ openshift_gcp_project}}" compute images create "{{ openshift_gcp_image_name | default(openshift_gcp_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_image }}"
+  - name: Remove the image instance disk
+    gce_pd:
+      service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}"
+      credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}"
+      project_id: "{{ openshift_gcp_project }}"
+      zone: "{{ openshift_gcp_zone }}"
+      name: "{{ openshift_gcp_prefix }}build-image-instance"
+      state: absent

+ 10 - 0
playbooks/gcp/openshift-cluster/deprovision.yml

@@ -0,0 +1,10 @@
+# This playbook terminates a running cluster
+---
+- name: Terminate running cluster and remove all supporting resources in GCE
+  hosts: localhost
+  connection: local
+  tasks:
+  - include_role:
+      name: openshift_gcp
+    vars:
+      state: absent

+ 33 - 0
playbooks/gcp/openshift-cluster/install.yml

@@ -0,0 +1,33 @@
+# This playbook installs onto a provisioned cluster
+---
+- hosts: localhost
+  connection: local
+  tasks:
+  - name: place all scale groups into Ansible groups
+    include_role:
+      name: openshift_gcp
+      tasks_from: setup_scale_group_facts.yml
+
+- name: run the init
+  import_playbook: ../../init/main.yml
+
+- name: configure the control plane
+  import_playbook: ../../common/private/control_plane.yml
+
+- name: ensure the masters are configured as nodes
+  import_playbook: ../../openshift-node/private/config.yml
+
+- name: run the GCP specific post steps
+  import_playbook: install_gcp.yml
+
+- name: install components
+  import_playbook: ../../common/private/components.yml
+
+- hosts: primary_master
+  gather_facts: no
+  tasks:
+  - name: Retrieve cluster configuration
+    fetch:
+      src: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
+      dest: "/tmp/"
+      flat: yes

+ 21 - 0
playbooks/gcp/openshift-cluster/install_gcp.yml

@@ -0,0 +1,21 @@
+---
+- hosts: masters
+  gather_facts: no
+  tasks:
+  - name: create master health check service
+    include_role:
+      name: openshift_gcp
+      tasks_from: configure_master_healthcheck.yml
+  - name: configure node bootstrapping
+    include_role:
+      name: openshift_gcp
+      tasks_from: configure_master_bootstrap.yml
+    when:
+    - openshift_master_bootstrap_enabled | default(False)
+  - name: configure node bootstrap autoapprover
+    include_role:
+      name: openshift_bootstrap_autoapprover
+      tasks_from: main
+    when:
+    - openshift_master_bootstrap_enabled | default(False)
+    - openshift_master_bootstrap_auto_approve | default(False) | bool

+ 10 - 0
playbooks/gcp/openshift-cluster/inventory.yml

@@ -0,0 +1,10 @@
+---
+- name: Set up the connection variables for retrieving inventory from GCE
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - name: materialize the inventory
+    include_role:
+      name: openshift_gcp
+      tasks_from: dynamic_inventory.yml

+ 12 - 0
playbooks/gcp/openshift-cluster/launch.yml

@@ -0,0 +1,12 @@
+# This playbook launches a new cluster or converges it if already launched
+---
+- import_playbook: build_image.yml
+  when: openshift_gcp_build_image | default(False) | bool
+
+- import_playbook: provision.yml
+
+- hosts: localhost
+  tasks:
+  - meta: refresh_inventory
+
+- import_playbook: install.yml
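openshift_gcp_build_image is the only switch in this flow. A hedged sketch of a caller, run from the repository root, that forces an image rebuild before provisioning (the wrapper is illustrative and not part of this diff):

---
# Hypothetical wrapper around launch.yml with the image-build step enabled.
- import_playbook: playbooks/gcp/openshift-cluster/launch.yml
  vars:
    openshift_gcp_build_image: True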

+ 4 - 5
playbooks/gcp/provision.yml

@@ -3,11 +3,10 @@
  hosts: localhost
  connection: local
  gather_facts: no
+  roles:
+  - openshift_gcp
  tasks:
-
-  - name: provision a GCP cluster in the specified project
+  - name: recalculate the dynamic inventory
    import_role:
      name: openshift_gcp
-
-- name: run the cluster deploy
-  import_playbook: ../deploy_cluster.yml
+      tasks_from: dynamic_inventory.yml

+ 9 - 0
playbooks/gcp/openshift-cluster/publish_image.yml

@@ -0,0 +1,9 @@
+---
+- name: Publish the most recent image
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  tasks:
+  - import_role:
+      name: openshift_gcp
+      tasks_from: publish_image.yml

+ 1 - 0
playbooks/gcp/openshift-cluster/roles

@@ -0,0 +1 @@
+../../../roles

+ 5 - 2
playbooks/init/base_packages.yml

@@ -1,8 +1,9 @@
---
-# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays.
+# l_base_packages_hosts may be passed in via prerequisites.yml during scaleup plays
+# and upgrade_control_plane.yml upgrade plays.

- name: Install packages necessary for installer
-  hosts: "{{ l_scale_up_hosts | default('oo_all_hosts') }}"
+  hosts: "{{ l_base_packages_hosts | default('oo_all_hosts') }}"
  any_errors_fatal: true
  tasks:
  - when:
@@ -16,7 +17,9 @@
      - iproute
      - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
      - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+      - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else '' }}"
      - yum-utils
+      when: item != ''
      register: result
      until: result is succeeded
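The new python-ipaddress entry leans on a small pattern: on Fedora the list item renders as an empty string, and the per-item when: item != '' skips it. A stripped-down sketch of the same pattern, standalone and not taken from this diff:

---
# Illustration only: the empty-string item is filtered by the per-item when.
- hosts: localhost
  gather_facts: yes
  tasks:
  - package:
      name: "{{ item }}"
      state: present
    with_items:
    - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else '' }}"
    - yum-utils
    when: item != ''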
 
 

+ 11 - 40
playbooks/init/facts.yml

@@ -4,15 +4,13 @@
  any_errors_fatal: true
  tasks:

-- name: Initialize host facts
+- name: Initialize basic host facts
  # l_init_fact_hosts is passed in via play during control-plane-only
  # upgrades and scale-up plays; otherwise oo_all_hosts is used.
  hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
+  roles:
+  - role: openshift_facts
  tasks:
-  - name: load openshift_facts module
-    import_role:
-      name: openshift_facts
-
  # TODO: Should this role be refactored into health_checks??
  - name: Run openshift_sanitize_inventory to set variables
    import_role:
@@ -58,41 +56,6 @@
        - l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=')
        msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.

-  - name: Gather Cluster facts
-    openshift_facts:
-      role: common
-      local_facts:
-        deployment_type: "{{ openshift_deployment_type }}"
-        deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
-        hostname: "{{ openshift_hostname | default(None) }}"
-        ip: "{{ openshift_ip | default(None) }}"
-        public_hostname: "{{ openshift_public_hostname | default(None) }}"
-        public_ip: "{{ openshift_public_ip | default(None) }}"
-        portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
-        http_proxy: "{{ openshift_http_proxy | default(None) }}"
-        https_proxy: "{{ openshift_https_proxy | default(None) }}"
-        no_proxy: "{{ openshift_no_proxy | default(None) }}"
-        generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
-
-  - name: Set fact of no_proxy_internal_hostnames
-    openshift_facts:
-      role: common
-      local_facts:
-        no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
-                                             | union(groups['oo_masters_to_config'])
-                                             | union(groups['oo_etcd_to_config'] | default([])))
-                                         | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                         }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-  - name: Initialize openshift.node.sdn_mtu
-    openshift_facts:
-      role: node
-      local_facts:
-        sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
-
- name: Initialize special first-master variables
  hosts: oo_first_master
  roles:
@@ -104,3 +67,11 @@
      first_master_client_binary: "{{  openshift_client_binary }}"
      #Some roles may require this to be set for first master
      openshift_client_binary: "{{ openshift_client_binary }}"
+
+- name: Disable web console if required
+  hosts: oo_masters_to_config
+  gather_facts: no
+  tasks:
+  - set_fact:
+      openshift_web_console_install: False
+    when: openshift_deployment_subtype == 'registry' or ( osm_disabled_features is defined and 'WebConsole' in osm_disabled_features )
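The new play keys off osm_disabled_features. A hedged sketch of the inventory variable that would trigger it (the list form is an assumption; the diff only shows the 'WebConsole' membership check):

# Hypothetical inventory entry: listing WebConsole here makes the play above
# force openshift_web_console_install to False.
osm_disabled_features:
- WebConsole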

+ 42 - 0
playbooks/init/cluster_facts.yml

@@ -0,0 +1,42 @@
+---
+- name: Initialize cluster facts
+  # l_init_fact_hosts is passed in via play during control-plane-only
+  # upgrades and scale-up plays; otherwise oo_all_hosts is used.
+  hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
+  roles:
+  - role: openshift_facts
+  tasks:
+  - name: Gather Cluster facts
+    openshift_facts:
+      role: common
+      local_facts:
+        deployment_type: "{{ openshift_deployment_type }}"
+        deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+        hostname: "{{ openshift_hostname | default(None) }}"
+        ip: "{{ openshift_ip | default(None) }}"
+        public_hostname: "{{ openshift_public_hostname | default(None) }}"
+        public_ip: "{{ openshift_public_ip | default(None) }}"
+        portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+        http_proxy: "{{ openshift_http_proxy | default(None) }}"
+        https_proxy: "{{ openshift_https_proxy | default(None) }}"
+        no_proxy: "{{ openshift_no_proxy | default(None) }}"
+        generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+
+  - name: Set fact of no_proxy_internal_hostnames
+    openshift_facts:
+      role: common
+      local_facts:
+        no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
+                                             | union(groups['oo_masters_to_config'])
+                                             | union(groups['oo_etcd_to_config'] | default([])))
+                                         | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                         }}"
+    when:
+    - openshift_http_proxy is defined or openshift_https_proxy is defined
+    - openshift_generate_no_proxy_hosts | default(True) | bool
+
+  - name: Initialize openshift.node.sdn_mtu
+    openshift_facts:
+      role: node
+      local_facts:
+        sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"

+ 6 - 2
playbooks/init/evaluate_groups.yml

@@ -45,9 +45,13 @@
  - name: Evaluate groups - Fail if no etcd hosts group is defined
    fail:
      msg: >
-        Running etcd as an embedded service is no longer supported.
+        Running etcd as an embedded service is no longer supported. If this is a
+        new install please define an 'etcd' group with either one, three or five
+        hosts. These hosts may be the same hosts as your masters. If this is an
+        upgrade please see https://docs.openshift.com/container-platform/latest/install_config/upgrading/migrating_embedded_etcd.html
+        for documentation on how to migrate from embedded to external etcd.
    when:
-    - g_etcd_hosts | default([]) | length not in [3,1]
+    - g_etcd_hosts | default([]) | length == 0
    - not (openshift_node_bootstrap | default(False))

  - name: Evaluate oo_all_hosts
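The relaxed check only requires a non-empty etcd group, and colocating it with the masters is explicitly allowed. A minimal sketch in YAML inventory form (hostnames are placeholders, not from this diff):

all:
  children:
    masters:
      hosts:
        master1.example.com:
    etcd:
      hosts:
        master1.example.com:
    nodes:
      hosts:
        master1.example.com:
        node1.example.com: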

+ 11 - 2
playbooks/init/main.yml

@@ -1,4 +1,7 @@
---
+# skip_version and l_install_base_packages are passed in via prerequisites.yml.
+# skip_sanity_checks is passed in via openshift-node/private/image_prep.yml
+
- name: Initialization Checkpoint Start
  hosts: all
  gather_facts: false
@@ -15,10 +18,16 @@

- import_playbook: evaluate_groups.yml

-- import_playbook: facts.yml
+- import_playbook: basic_facts.yml
+
+# base_packages needs to be setup for openshift_facts.py to run correctly.
+- import_playbook: base_packages.yml
+  when: l_install_base_packages | default(False) | bool
+
+- import_playbook: cluster_facts.yml

- import_playbook: version.yml
-  when: not (skip_verison | default(False))
+  when: not (skip_version | default(False))

- import_playbook: sanity_checks.yml
  when: not (skip_sanity_checks | default(False))
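The two flags named in the new header comment are meant to be supplied by a calling playbook. A hedged sketch of such a caller (variable names come from this diff; the wrapper itself is hypothetical):

---
# Hypothetical prerequisites-style caller: install base packages during init
# and skip the version plays, as a scale-up flow might.
- import_playbook: playbooks/init/main.yml
  vars:
    l_install_base_packages: True
    skip_version: True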

+ 2 - 2
playbooks/init/validate_hostnames.yml

@@ -25,7 +25,7 @@
    when:
    - lookupip.stdout != '127.0.0.1'
    - lookupip.stdout not in ansible_all_ipv4_addresses
-    - openshift_hostname_check | default(true)
+    - openshift_hostname_check | default(true) | bool

  - name: Validate openshift_ip exists on node when defined
    fail:
@@ -40,4 +40,4 @@
    when:
    - openshift_ip is defined
    - openshift_ip not in ansible_all_ipv4_addresses
-    - openshift_ip_check | default(true)
+    - openshift_ip_check | default(true) | bool
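The added | bool casts matter because inventory values arrive as strings, so openshift_ip_check=false from an INI inventory would otherwise still evaluate as truthy. A tiny standalone illustration of the difference:

---
# Illustration only: a bare 'false' string is truthy, the bool-cast value is not.
- hosts: localhost
  gather_facts: no
  tasks:
  - debug:
      msg: "raw={{ 'false' | ternary('truthy', 'falsy') }} cast={{ ('false' | bool) | ternary('truthy', 'falsy') }}"

Running it prints raw=truthy cast=falsy, which is exactly the failure mode the cast closes.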

+ 6 - 0
playbooks/openshift-etcd/certificates.yml

@@ -1,5 +1,11 @@
---
- import_playbook: ../init/main.yml
+  vars:
+    skip_version: True
+    l_openshift_version_set_hosts: "all:!all"
+    l_openshift_version_check_hosts: "all:!all"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"

- import_playbook: private/ca.yml
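The pattern all:!all is intentionally empty (every host minus every host), so the plays that consume l_openshift_version_set_hosts and l_openshift_version_check_hosts match no hosts here, which is how these wrappers skip version handling. A minimal sketch of the idiom:

---
# A play targeting "all:!all" resolves to zero hosts and is effectively a no-op.
- hosts: "all:!all"
  gather_facts: no
  tasks:
  - debug:
      msg: never runs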
 
 

+ 6 - 0
playbooks/openshift-etcd/config.yml

@@ -1,4 +1,10 @@
---
- import_playbook: ../init/main.yml
+  vars:
+    skip_version: True
+    l_openshift_version_set_hosts: "all:!all"
+    l_openshift_version_check_hosts: "all:!all"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"

- import_playbook: private/config.yml

+ 6 - 0
playbooks/openshift-etcd/embedded2external.yml

@@ -1,4 +1,10 @@
---
- import_playbook: ../init/main.yml
+  vars:
+    skip_version: True
+    l_openshift_version_set_hosts: "all:!all"
+    l_openshift_version_check_hosts: "all:!all"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"

- import_playbook: private/embedded2external.yml

+ 6 - 0
playbooks/openshift-etcd/migrate.yml

@@ -1,4 +1,10 @@
---
- import_playbook: ../init/main.yml
+  vars:
+    skip_version: True
+    l_openshift_version_set_hosts: "all:!all"
+    l_openshift_version_check_hosts: "all:!all"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"

- import_playbook: private/migrate.yml

+ 0 - 1
playbooks/openshift-etcd/private/ca.yml

@@ -10,7 +10,6 @@
      tasks_from: ca.yml
    vars:
      etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
-      etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
      etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
    when:
    - etcd_ca_setup | default(True) | bool

+ 1 - 1
playbooks/openshift-etcd/private/certificates-backup.yml

@@ -1,6 +1,6 @@
---
- name: Backup and remove generated etcd certificates
-  hosts: oo_first_etcd
+  hosts: oo_etcd_to_config
  any_errors_fatal: true
  tasks:
  - import_role:

+ 0 - 1
playbooks/openshift-etcd/private/config.yml

@@ -22,7 +22,6 @@
  - role: openshift_clock
  - role: openshift_etcd
    etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
-    etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
    etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
  - role: nickhammond.logrotate
 
 

+ 0 - 2
playbooks/openshift-etcd/private/master_etcd_certificates.yml

@@ -5,9 +5,7 @@
  roles:
    - role: openshift_etcd_facts
    - role: openshift_etcd_client_certificates
-      etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
      etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
      etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
      etcd_cert_prefix: "master.etcd-"
-      openshift_ca_host: "{{ groups.oo_first_master.0 }}"
      when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config

+ 0 - 0
playbooks/openshift-etcd/private/redeploy-ca.yml


Some files were not shown because too many files changed in this diff