Browse Source

Merge branch 'master' into mbruzek-openshift-openstack

Matt Bruzek 7 years ago
parent
commit
cb581bfb67
100 changed files with 950 additions and 1556 deletions
  1. 1 1
      .papr.inventory
  2. 1 1
      .tito/packages/openshift-ansible
  3. 1 1
      DEPLOYMENT_TYPES.md
  4. 1 5
      ansible.cfg
  5. 4 4
      docs/proposals/crt_management_proposal.md
  6. 7 7
      docs/proposals/role_decomposition.md
  7. 3 3
      files/origin-components/apiserver-template.yaml
  8. 42 0
      files/origin-components/console-config.yaml
  9. 38 0
      files/origin-components/console-rbac-template.yaml
  10. 121 0
      files/origin-components/console-template.yaml
  11. 0 1001
      filter_plugins/oo_filters.py
  12. 0 69
      filter_plugins/openshift_version.py
  13. 1 1
      images/installer/Dockerfile
  14. 1 1
      images/installer/Dockerfile.rhel7
  15. 8 0
      images/installer/origin-extra-root/etc/yum.repos.d/google-cloud-sdk.repo
  16. 31 2
      inventory/hosts.example
  17. 12 0
      inventory/hosts.grafana.example
  18. 0 1
      lookup_plugins/README.md
  19. 216 91
      openshift-ansible.spec
  20. 1 1
      playbooks/adhoc/openshift_hosted_logging_efk.yaml
  21. 7 8
      playbooks/adhoc/uninstall.yml
  22. 0 1
      playbooks/aws/openshift-cluster/filter_plugins
  23. 2 2
      playbooks/aws/openshift-cluster/install.yml
  24. 0 1
      playbooks/aws/openshift-cluster/lookup_plugins
  25. 9 3
      playbooks/aws/openshift-cluster/provision.yml
  26. 9 0
      playbooks/aws/openshift-cluster/provision_elb.yml
  27. 1 1
      playbooks/aws/openshift-cluster/provision_instance.yml
  28. 1 1
      playbooks/aws/openshift-cluster/provision_nodes.yml
  29. 10 0
      playbooks/aws/openshift-cluster/provision_s3.yml
  30. 1 1
      playbooks/aws/openshift-cluster/provision_sec_group.yml
  31. 1 1
      playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
  32. 1 1
      playbooks/aws/openshift-cluster/provision_vpc.yml
  33. 1 1
      playbooks/aws/openshift-cluster/seal_ami.yml
  34. 6 1
      playbooks/aws/provisioning_vars.yml.example
  35. 0 1
      playbooks/byo/filter_plugins
  36. 0 1
      playbooks/byo/lookup_plugins
  37. 0 1
      playbooks/byo/openshift-cluster/filter_plugins
  38. 0 1
      playbooks/byo/openshift-cluster/lookup_plugins
  39. 3 3
      playbooks/byo/rhel_subscribe.yml
  40. 21 0
      playbooks/cluster-operator/aws/infrastructure.yml
  41. 1 0
      playbooks/cluster-operator/aws/roles
  42. 0 1
      playbooks/common/openshift-cluster/filter_plugins
  43. 0 1
      playbooks/common/openshift-cluster/library
  44. 0 1
      playbooks/common/openshift-cluster/lookup_plugins
  45. 8 3
      playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
  46. 11 8
      playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
  47. 1 1
      playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
  48. 1 1
      playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
  49. 0 1
      playbooks/common/openshift-cluster/upgrades/filter_plugins
  50. 0 1
      playbooks/common/openshift-cluster/upgrades/lookup_plugins
  51. 11 5
      playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
  52. 7 3
      playbooks/common/openshift-cluster/upgrades/pre/config.yml
  53. 1 6
      playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
  54. 5 5
      playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
  55. 28 24
      playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
  56. 12 10
      playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
  57. 5 5
      playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
  58. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
  59. 1 1
      playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
  60. 6 1
      playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
  61. 1 1
      playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
  62. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
  63. 5 0
      playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
  64. 2 1
      playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
  65. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins
  66. 0 2
      playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
  67. 6 2
      playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
  68. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins
  69. 0 19
      playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
  70. 5 6
      playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
  71. 57 10
      playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
  72. 1 0
      playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
  73. 1 1
      playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
  74. 6 0
      playbooks/container-runtime/private/build_container_groups.yml
  75. 6 9
      playbooks/container-runtime/private/config.yml
  76. 18 0
      playbooks/container-runtime/private/setup_storage.yml
  77. 6 0
      playbooks/container-runtime/setup_storage.yml
  78. 3 0
      playbooks/deploy_cluster.yml
  79. 1 1
      playbooks/gcp/provision.yml
  80. 37 0
      playbooks/init/base_packages.yml
  81. 0 1
      playbooks/init/evaluate_groups.yml
  82. 28 69
      playbooks/init/facts.yml
  83. 3 6
      playbooks/init/main.yml
  84. 5 5
      playbooks/init/repos.yml
  85. 12 48
      playbooks/init/sanity_checks.yml
  86. 22 10
      playbooks/init/version.yml
  87. 1 0
      playbooks/openshift-checks/adhoc.yml
  88. 1 1
      playbooks/openshift-etcd/private/ca.yml
  89. 3 3
      playbooks/openshift-etcd/private/certificates-backup.yml
  90. 16 14
      playbooks/openshift-etcd/private/embedded2external.yml
  91. 0 1
      playbooks/openshift-etcd/private/filter_plugins
  92. 0 1
      playbooks/openshift-etcd/private/lookup_plugins
  93. 11 13
      playbooks/openshift-etcd/private/migrate.yml
  94. 20 18
      playbooks/openshift-etcd/private/redeploy-ca.yml
  95. 2 2
      playbooks/openshift-etcd/private/restart.yml
  96. 4 4
      playbooks/openshift-etcd/private/scaleup.yml
  97. 1 1
      playbooks/openshift-etcd/private/server_certificates.yml
  98. 3 4
      playbooks/openshift-etcd/private/upgrade_backup.yml
  99. 3 3
      playbooks/openshift-etcd/private/upgrade_image_members.yml
  100. 0 0
      playbooks/openshift-etcd/private/upgrade_main.yml

+ 1 - 1
.papr.inventory

@@ -6,7 +6,7 @@ etcd
 [OSEv3:vars]
 ansible_ssh_user=root
 ansible_python_interpreter=/usr/bin/python3
-deployment_type=origin
+openshift_deployment_type=origin
 openshift_image_tag="{{ lookup('env', 'OPENSHIFT_IMAGE_TAG') }}"
 openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_node1_IP') }}.xip.io"
 openshift_check_min_host_disk_gb=1.5

+ 1 - 1
.tito/packages/openshift-ansible

@@ -1 +1 @@
-3.9.0-0.10.0 ./
+3.9.0-0.21.0 ./

+ 1 - 1
DEPLOYMENT_TYPES.md

@@ -13,5 +13,5 @@ The table below outlines the defaults per `openshift_deployment_type`:
 | **openshift_service_type** (also used for package names)        | origin                                   | atomic-openshift                       |
 | **openshift.common.config_base**                                | /etc/origin                              | /etc/origin                            |
 | **openshift_data_dir**                                          | /var/lib/origin                          | /var/lib/origin                        |
-| **openshift.master.registry_url openshift.node.registry_url**   | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} |
+| **openshift.master.registry_url oreg_url_node**                 | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} |
 | **Image Streams**                                               | centos                                   | rhel                                   |

+ 1 - 5
ansible.cfg

@@ -10,10 +10,6 @@
 #log_path = /tmp/ansible.log
 
 # Additional default options for OpenShift Ansible
-callback_plugins = callback_plugins/
-filter_plugins = filter_plugins/
-lookup_plugins = lookup_plugins/
-library = library/
 forks = 20
 host_key_checking = False
 retry_files_enabled = False
@@ -26,7 +22,7 @@ fact_caching = jsonfile
 fact_caching_connection = $HOME/ansible/facts
 fact_caching_timeout = 600
 callback_whitelist = profile_tasks
-inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
+inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt, .ini
 # work around privilege escalation timeouts in ansible:
 timeout = 30
 

+ 4 - 4
docs/proposals/crt_management_proposal.md

@@ -30,7 +30,7 @@ configure, restart, or change the container runtime as much as feasible.
 ## Design
 
 The container_runtime role should be comprised of 3 'pseudo-roles' which will be
-consumed using include_role; each component area should be enabled/disabled with
+consumed using import_role; each component area should be enabled/disabled with
 a boolean value, defaulting to true.
 
 I call them 'pseudo-roles' because they are more or less independent functional
@@ -46,15 +46,15 @@ an abundance of roles), and make things as modular as possible.
 # container_runtime_setup.yml
 - hosts: "{{ openshift_runtime_manage_hosts | default('oo_nodes_to_config') }}"
   tasks:
-    - include_role:
+    - import_role:
         name: container_runtime
         tasks_from: install.yml
       when: openshift_container_runtime_install | default(True) | bool
-    - include_role:
+    - import_role:
         name: container_runtime
         tasks_from: storage.yml
       when: openshift_container_runtime_storage | default(True) | bool
-    - include_role:
+    - import_role:
         name: container_runtime
         tasks_from: configure.yml
       when: openshift_container_runtime_configure | default(True) | bool

+ 7 - 7
docs/proposals/role_decomposition.md

@@ -115,12 +115,12 @@ providing the location of the generated certificates to the individual roles.
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
 
 ## Elasticsearch
-- include_role:
+- import_role:
     name: openshift_logging_elasticsearch
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
 
-- include_role:
+- import_role:
     name: openshift_logging_elasticsearch
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -130,7 +130,7 @@ providing the location of the generated certificates to the individual roles.
 
 
 ## Kibana
-- include_role:
+- import_role:
     name: openshift_logging_kibana
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -144,7 +144,7 @@ providing the location of the generated certificates to the individual roles.
     openshift_logging_kibana_es_port: "{{ openshift_logging_es_port }}"
     openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
 
-- include_role:
+- import_role:
     name: openshift_logging_kibana
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -173,7 +173,7 @@ providing the location of the generated certificates to the individual roles.
 
 
 ## Curator
-- include_role:
+- import_role:
     name: openshift_logging_curator
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -183,7 +183,7 @@ providing the location of the generated certificates to the individual roles.
     openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
     openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
 
-- include_role:
+- import_role:
     name: openshift_logging_curator
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -201,7 +201,7 @@ providing the location of the generated certificates to the individual roles.
 
 
 ## Fluentd
-- include_role:
+- import_role:
     name: openshift_logging_fluentd
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"

+ 3 - 3
files/origin-components/apiserver-template.yaml

@@ -4,7 +4,7 @@ metadata:
   name: template-service-broker-apiserver
 parameters:
 - name: IMAGE
-  value: openshift/origin:latest
+  value: openshift/origin-template-service-broker:latest
 - name: NAMESPACE
   value: openshift-template-service-broker
 - name: LOGLEVEL
@@ -40,14 +40,14 @@ objects:
           image: ${IMAGE}
           imagePullPolicy: IfNotPresent
           command:
-          - "/usr/bin/openshift"
+          - "/usr/bin/template-service-broker"
           - "start"
           - "template-service-broker"
           - "--secure-port=8443"
           - "--audit-log-path=-"
           - "--tls-cert-file=/var/serving-cert/tls.crt"
           - "--tls-private-key-file=/var/serving-cert/tls.key"
-          - "--loglevel=${LOGLEVEL}"
+          - "--v=${LOGLEVEL}"
           - "--config=/var/apiserver-config/apiserver-config.yaml"
           ports:
           - containerPort: 8443

+ 42 - 0
files/origin-components/console-config.yaml

@@ -0,0 +1,42 @@
+apiVersion: webconsole.config.openshift.io/v1
+kind: WebConsoleConfiguration
+clusterInfo:
+  consolePublicURL: https://127.0.0.1:8443/console/
+  loggingPublicURL: ""
+  logoutPublicURL: ""
+  masterPublicURL: https://127.0.0.1:8443
+  metricsPublicURL: ""
+# TODO: The new extensions properties cannot be set until
+# origin-web-console-server has been updated with the API changes since
+# `extensions` in the old asset config was an array.
+#extensions:
+#  scriptURLs: []
+#  stylesheetURLs: []
+#  properties: null
+features:
+  inactivityTimeoutMinutes: 0
+servingInfo:
+  bindAddress: 0.0.0.0:8443
+  bindNetwork: tcp4
+  certFile: /var/serving-cert/tls.crt
+  clientCA: ""
+  keyFile: /var/serving-cert/tls.key
+  maxRequestsInFlight: 0
+  namedCertificates: null
+  requestTimeoutSeconds: 0
+
+# START deprecated properties
+# These properties have been renamed and will be removed from the install
+# in a future pull. Keep both the old and new properties for now so that
+# the install is not broken while the origin-web-console image is updated.
+extensionDevelopment: false
+extensionProperties: null
+extensionScripts: null
+extensionStylesheets: null
+extensions: null
+loggingPublicURL: ""
+logoutURL: ""
+masterPublicURL: https://127.0.0.1:8443
+metricsPublicURL: ""
+publicURL: https://127.0.0.1:8443/console/
+# END deprecated properties

+ 38 - 0
files/origin-components/console-rbac-template.yaml

@@ -0,0 +1,38 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+  name: web-console-server-rbac
+parameters:
+- name: NAMESPACE
+  # This namespace cannot be changed. Only `openshift-web-console` is supported.
+  value: openshift-web-console
+objects:
+
+
+# grant powers to the webconsole server for cluster inspection
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+  kind: ClusterRole
+  metadata:
+    name: system:openshift:web-console-server
+  rules:
+  - apiGroups:
+    - "servicecatalog.k8s.io"
+    resources:
+    - clusterservicebrokers
+    verbs:
+    - get
+    - list
+    - watch
+
+# Grant the service account for the web console
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+  kind: ClusterRoleBinding
+  metadata:
+    name: system:openshift:web-console-server
+  roleRef:
+    kind: ClusterRole
+    name: system:openshift:web-console-server
+  subjects:
+  - kind: ServiceAccount
+    namespace: ${NAMESPACE}
+    name: webconsole

+ 121 - 0
files/origin-components/console-template.yaml

@@ -0,0 +1,121 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+  name: openshift-web-console
+  annotations:
+    openshift.io/display-name: OpenShift Web Console
+    description: The server for the OpenShift web console.
+    iconClass: icon-openshift
+    tags: openshift,infra
+    openshift.io/documentation-url: https://github.com/openshift/origin-web-console-server
+    openshift.io/support-url: https://access.redhat.com
+    openshift.io/provider-display-name: Red Hat, Inc.
+parameters:
+- name: IMAGE
+  value: openshift/origin-web-console:latest
+- name: NAMESPACE
+  # This namespace cannot be changed. Only `openshift-web-console` is supported.
+  value: openshift-web-console
+- name: LOGLEVEL
+  value: "0"
+- name: API_SERVER_CONFIG
+- name: NODE_SELECTOR
+  value: "{}"
+- name: REPLICA_COUNT
+  value: "1"
+objects:
+
+# to create the web console server
+- apiVersion: apps/v1beta1
+  kind: Deployment
+  metadata:
+    namespace: ${NAMESPACE}
+    name: webconsole
+    labels:
+      app: openshift-web-console
+      webconsole: "true"
+  spec:
+    replicas: "${{REPLICA_COUNT}}"
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: webconsole
+        labels:
+          webconsole: "true"
+      spec:
+        serviceAccountName: webconsole
+        containers:
+        - name: webconsole
+          image: ${IMAGE}
+          imagePullPolicy: IfNotPresent
+          command:
+          - "/usr/bin/origin-web-console"
+          - "--audit-log-path=-"
+          - "-v=${LOGLEVEL}"
+          - "--config=/var/webconsole-config/webconsole-config.yaml"
+          ports:
+          - containerPort: 8443
+          volumeMounts:
+          - mountPath: /var/serving-cert
+            name: serving-cert
+          - mountPath: /var/webconsole-config
+            name: webconsole-config
+          readinessProbe:
+            httpGet:
+              path: /healthz
+              port: 8443
+              scheme: HTTPS
+          livenessProbe:
+            httpGet:
+              path: /
+              port: 8443
+              scheme: HTTPS
+        nodeSelector: "${{NODE_SELECTOR}}"
+        volumes:
+        - name: serving-cert
+          secret:
+            defaultMode: 400
+            secretName: webconsole-serving-cert
+        - name: webconsole-config
+          configMap:
+            defaultMode: 440
+            name: webconsole-config
+
+# to create the config for the web console
+- apiVersion: v1
+  kind: ConfigMap
+  metadata:
+    namespace: ${NAMESPACE}
+    name: webconsole-config
+    labels:
+      app: openshift-web-console
+  data:
+    webconsole-config.yaml: ${API_SERVER_CONFIG}
+
+# to be able to assign powers to the process
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    namespace: ${NAMESPACE}
+    name: webconsole
+    labels:
+      app: openshift-web-console
+
+# to be able to expose web console inside the cluster
+- apiVersion: v1
+  kind: Service
+  metadata:
+    namespace: ${NAMESPACE}
+    name: webconsole
+    labels:
+      app: openshift-web-console
+    annotations:
+      service.alpha.openshift.io/serving-cert-secret-name: webconsole-serving-cert
+  spec:
+    selector:
+      webconsole: "true"
+    ports:
+    - name: https
+      port: 443
+      targetPort: 8443

File diff suppressed because it is too large
+ 0 - 1001
filter_plugins/oo_filters.py


+ 0 - 69
filter_plugins/openshift_version.py

@@ -1,69 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-"""
-Custom version comparison filters for use in openshift-ansible
-"""
-
-# pylint can't locate distutils.version within virtualenv
-# https://github.com/PyCQA/pylint/issues/73
-# pylint: disable=no-name-in-module, import-error
-from distutils.version import LooseVersion
-
-
-def gte_function_builder(name, gte_version):
-    """
-    Build and return a version comparison function.
-
-    Ex: name = 'oo_version_gte_3_6'
-        version = '3.6'
-
-        returns oo_version_gte_3_6, a function which based on the
-        version will return true if the provided version is greater
-        than or equal to the function's version
-    """
-    def _gte_function(version):
-        """
-        Dynamic function created by gte_function_builder.
-
-        Ex: version = '3.1'
-            returns True/False
-        """
-        version_gte = False
-        if str(version) >= LooseVersion(gte_version):
-            version_gte = True
-        return version_gte
-    _gte_function.__name__ = name
-    return _gte_function
-
-
-# pylint: disable=too-few-public-methods
-class FilterModule(object):
-    """
-    Filters for version checking.
-    """
-    # Each element of versions is composed of (major, minor_start, minor_end)
-    # Origin began versioning 3.x with 3.6, so begin 3.x with 3.6.
-    versions = [(3, 6, 10)]
-
-    def __init__(self):
-        """
-        Creates a new FilterModule for ose version checking.
-        """
-        self._filters = {}
-
-        # For each set of (major, minor, minor_iterations)
-        for major, minor_start, minor_end in self.versions:
-            # For each minor version in the range
-            for minor in range(minor_start, minor_end):
-                # Create the function name
-                func_name = 'oo_version_gte_{}_{}'.format(major, minor)
-                # Create the function with the builder
-                func = gte_function_builder(func_name, "{}.{}.0".format(major, minor))
-                # Add the function to the mapping
-                self._filters[func_name] = func
-
-    def filters(self):
-        """
-        Return the filters mapping.
-        """
-        return self._filters

+ 1 - 1
images/installer/Dockerfile

@@ -10,7 +10,7 @@ COPY images/installer/origin-extra-root /
 # install ansible and deps
 RUN INSTALL_PKGS="python-lxml pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
  && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
- && EPEL_PKGS="ansible python2-boto" \
+ && EPEL_PKGS="ansible python2-boto python2-boto3 google-cloud-sdk-183.0.0 which" \
  && yum install -y epel-release \
  && yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
  && rpm -V $INSTALL_PKGS $EPEL_PKGS \

+ 1 - 1
images/installer/Dockerfile.rhel7

@@ -5,7 +5,7 @@ MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
 USER root
 
 # Playbooks, roles, and their dependencies are installed from packages.
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto openssl java-1.8.0-openjdk-headless httpd-tools" \
+RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
  && yum repolist > /dev/null \
  && yum-config-manager --enable rhel-7-server-ose-3.7-rpms \
  && yum-config-manager --enable rhel-7-server-rh-common-rpms \

+ 8 - 0
images/installer/origin-extra-root/etc/yum.repos.d/google-cloud-sdk.repo

@@ -0,0 +1,8 @@
+[google-cloud-sdk]
+name=google-cloud-sdk
+baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el7-x86_64
+enabled=1
+gpgcheck=1
+repo_gpgcheck=1
+gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+       https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg

+ 31 - 2
inventory/hosts.example

@@ -84,6 +84,9 @@ openshift_release=v3.7
 
 # Configure extensions in the master config for console customization
 # See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_oauth_templates:
+#  login: /path/to/login-template.html
+# openshift_master_oauth_template is deprecated. Use openshift_master_oauth_templates instead.
 #openshift_master_oauth_template=/path/to/login-template.html
 
 # Configure imagePolicyConfig in the master config
@@ -125,7 +128,7 @@ openshift_release=v3.7
 #openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
 # NOTE: The following crio docker-gc items are tech preview and likely shouldn't be used
 # unless you know what you are doing!!
-# The following two variables are used when opneshift_use_crio is True
+# The following two variables are used when openshift_use_crio is True
 # and cleans up after builds that pass through docker.
 # Enable docker garbage collection when using cri-o
 #openshift_crio_enable_docker_gc=false
@@ -194,6 +197,10 @@ openshift_release=v3.7
 #openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
 #openshift_repos_enable_testing=false
 
+# If the image for etcd needs to be pulled from anywhere else than registry.access.redhat.com, e.g. in
+# a disconnected and containerized installation, use osm_etcd_image to specify the image to use:
+#osm_etcd_image=rhel7/etcd
+
 # htpasswd auth
 openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
 # Defining htpasswd users
@@ -279,8 +286,21 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_cloudprovider_openstack_region=region
 #openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
 #
+# Note: If you're getting a "BS API version autodetection failed" when provisioning cinder volumes you may need this setting
+#openshift_cloudprovider_openstack_blockstorage_version=v2
+#
 # GCE
 #openshift_cloudprovider_kind=gce
+#
+# vSphere
+#openshift_cloudprovider_kind=vsphere
+#openshift_cloudprovider_vsphere_username=username
+#openshift_cloudprovider_vsphere_password=password
+#openshift_cloudprovider_vsphere_host=vcenter_host or vsphere_host
+#openshift_cloudprovider_vsphere_datacenter=datacenter
+#openshift_cloudprovider_vsphere_datastore=datastore
+#openshift_cloudprovider_vsphere_folder=optional_folder_name
+
 
 # Project Configuration
 #osm_project_request_message=''
@@ -892,6 +912,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
 #openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
 #openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
+#openshift_buildoverrides_tolerations=[{'key':'mykey1','value':'myvalue1','effect':'NoSchedule','operator':'Equal'}]
 
 # Or you may optionally define your own build overrides configuration serialized as json
 #openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
@@ -941,7 +962,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
 
 # Enable origin repos that point at Centos PAAS SIG, defaults to true, only used
-# by deployment_type=origin
+# by openshift_deployment_type=origin
 #openshift_enable_origin_repo=false
 
 # Validity of the auto-generated OpenShift certificates in days.
@@ -988,6 +1009,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # where as this would not
 # openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
 #
+# A timeout to wait for nodes to drain pods can be specified to ensure that the
+# upgrade continues even if nodes fail to drain pods in the allowed time. The
+# default value of 0 will wait indefinitely allowing the admin to investigate
+# the root cause and ensuring that disruption budgets are respected. If
+# a timeout of 0 is used there will also be one attempt to retry draining the
+# node. If a non-zero timeout is specified there will be no attempt to retry.
+#openshift_upgrade_nodes_drain_timeout=0
+#
 # Multiple data migrations take place and if they fail they will fail the upgrade
 # You may wish to disable these or make them non fatal
 #

+ 12 - 0
inventory/hosts.grafana.example

@@ -0,0 +1,12 @@
+[OSEv3:children]
+masters
+nodes
+
+[OSEv3:vars]
+# Grafana Configuration
+#gf_datasource_name="example"
+#gf_prometheus_namespace="openshift-metrics"
+#gf_oauth=true
+
+[masters]
+master

+ 0 - 1
lookup_plugins/README.md

@@ -1 +0,0 @@
-openshift-ansible lookup plugins.

+ 216 - 91
openshift-ansible.spec

@@ -10,7 +10,7 @@
 
 Name:           openshift-ansible
 Version:        3.9.0
-Release:        0.10.0%{?dist}
+Release:        0.21.0%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
 URL:            https://github.com/openshift/openshift-ansible
@@ -24,9 +24,6 @@ Requires:      tar
 Requires:      %{name}-docs = %{version}-%{release}
 Requires:      %{name}-playbooks = %{version}-%{release}
 Requires:      %{name}-roles = %{version}-%{release}
-Requires:      %{name}-filter-plugins = %{version}-%{release}
-Requires:      %{name}-lookup-plugins = %{version}-%{release}
-Requires:      %{name}-callback-plugins = %{version}-%{release}
 Requires:      java-1.8.0-openjdk-headless
 Requires:      httpd-tools
 Requires:      libselinux-python
@@ -52,8 +49,6 @@ popd
 # Base openshift-ansible install
 mkdir -p %{buildroot}%{_datadir}/%{name}
 mkdir -p %{buildroot}%{_datadir}/ansible/%{name}
-mkdir -p %{buildroot}%{_datadir}/ansible_plugins
-cp -rp library %{buildroot}%{_datadir}/ansible/%{name}/
 
 # openshift-ansible-bin install
 mkdir -p %{buildroot}%{_bindir}
@@ -88,31 +83,6 @@ rm -rf %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/*
 # touch a file in contiv so that it can be added to SCM's
 touch %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/.empty_dir
 
-# openshift_master_facts symlinks filter_plugins/oo_filters.py from ansible_plugins/filter_plugins
-pushd %{buildroot}%{_datadir}/ansible/%{name}/roles/openshift_master_facts/filter_plugins
-ln -sf ../../../../../ansible_plugins/filter_plugins/oo_filters.py oo_filters.py
-popd
-
-# openshift-ansible-filter-plugins install
-cp -rp filter_plugins %{buildroot}%{_datadir}/ansible_plugins/
-
-# openshift-ansible-lookup-plugins install
-cp -rp lookup_plugins %{buildroot}%{_datadir}/ansible_plugins/
-
-# openshift-ansible-callback-plugins install
-cp -rp callback_plugins %{buildroot}%{_datadir}/ansible_plugins/
-
-# create symlinks from /usr/share/ansible/plugins/lookup ->
-# /usr/share/ansible_plugins/lookup_plugins
-pushd %{buildroot}%{_datadir}
-mkdir -p ansible/plugins
-pushd ansible/plugins
-ln -s ../../ansible_plugins/lookup_plugins lookup
-ln -s ../../ansible_plugins/filter_plugins filter
-ln -s ../../ansible_plugins/callback_plugins callback
-popd
-popd
-
 # atomic-openshift-utils install
 pushd utils
 %{__python} setup.py install --skip-build --root %{buildroot}
@@ -131,7 +101,6 @@ popd
 %license LICENSE
 %dir %{_datadir}/ansible/%{name}
 %{_datadir}/ansible/%{name}/files
-%{_datadir}/ansible/%{name}/library
 %ghost %{_datadir}/ansible/%{name}/playbooks/common/openshift-master/library.rpmmoved
 
 # ----------------------------------------------------------------------------------
@@ -155,9 +124,6 @@ BuildArch:     noarch
 Summary:       Openshift and Atomic Enterprise Ansible Playbooks
 Requires:      %{name} = %{version}-%{release}
 Requires:      %{name}-roles = %{version}-%{release}
-Requires:      %{name}-lookup-plugins = %{version}-%{release}
-Requires:      %{name}-filter-plugins = %{version}-%{release}
-Requires:      %{name}-callback-plugins = %{version}-%{release}
 BuildArch:     noarch
 
 %description playbooks
@@ -198,9 +164,9 @@ end
 # ----------------------------------------------------------------------------------
 Summary:       Openshift and Atomic Enterprise Ansible roles
 Requires:      %{name} = %{version}-%{release}
-Requires:      %{name}-lookup-plugins = %{version}-%{release}
-Requires:      %{name}-filter-plugins = %{version}-%{release}
-Requires:      %{name}-callback-plugins = %{version}-%{release}
+Obsoletes:      %{name}-lookup-plugins
+Obsoletes:      %{name}-filter-plugins
+Obsoletes:      %{name}-callback-plugins
 BuildArch:     noarch
 
 %description roles
@@ -209,55 +175,6 @@ BuildArch:     noarch
 %files roles
 %{_datadir}/ansible/%{name}/roles
 
-
-# ----------------------------------------------------------------------------------
-# openshift-ansible-filter-plugins subpackage
-# ----------------------------------------------------------------------------------
-%package filter-plugins
-Summary:       Openshift and Atomic Enterprise Ansible filter plugins
-Requires:      %{name} = %{version}-%{release}
-BuildArch:     noarch
-Requires:      pyOpenSSL
-
-%description filter-plugins
-%{summary}.
-
-%files filter-plugins
-%{_datadir}/ansible_plugins/filter_plugins
-%{_datadir}/ansible/plugins/filter
-
-
-# ----------------------------------------------------------------------------------
-# openshift-ansible-lookup-plugins subpackage
-# ----------------------------------------------------------------------------------
-%package lookup-plugins
-Summary:       Openshift and Atomic Enterprise Ansible lookup plugins
-Requires:      %{name} = %{version}-%{release}
-BuildArch:     noarch
-
-%description lookup-plugins
-%{summary}.
-
-%files lookup-plugins
-%{_datadir}/ansible_plugins/lookup_plugins
-%{_datadir}/ansible/plugins/lookup
-
-
-# ----------------------------------------------------------------------------------
-# openshift-ansible-callback-plugins subpackage
-# ----------------------------------------------------------------------------------
-%package callback-plugins
-Summary:       Openshift and Atomic Enterprise Ansible callback plugins
-Requires:      %{name} = %{version}-%{release}
-BuildArch:     noarch
-
-%description callback-plugins
-%{summary}.
-
-%files callback-plugins
-%{_datadir}/ansible_plugins/callback_plugins
-%{_datadir}/ansible/plugins/callback
-
 # ----------------------------------------------------------------------------------
 # atomic-openshift-utils subpackage
 # ----------------------------------------------------------------------------------
@@ -285,6 +202,214 @@ Atomic OpenShift Utilities includes
 
 
 %changelog
+* Wed Jan 17 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.21.0
+- Add call to 3.8 playbook in 3.9 upgrade (sdodson@redhat.com)
+- Remove 3.8 and 3.9 specific steps right now (sdodson@redhat.com)
+- Exclude 3.9 packages during 3.8 upgrade (sdodson@redhat.com)
+- fix typos (sdodson@redhat.com)
+- Ensure openshift_client_binary is set (sdodson@redhat.com)
+- Add init/main.yml to etc-upgrade (mgugino@redhat.com)
+- Fix a typo in "Determine if growpart is installed" (vrutkovs@redhat.com)
+- Check rc for commands with openshift_client_binary and failed_when
+  (vrutkovs@redhat.com)
+- Update console config for API changes (spadgett@redhat.com)
+- include elasticsearch container name (jvallejo@redhat.com)
+- openshift_checks: repair adhoc list-checks mode (lmeyer@redhat.com)
+- Remove tuned-profiles from list of master packages upgraded
+  (sdodson@redhat.com)
+- Add missing task that got dropped in a refactor (sdodson@redhat.com)
+- Web Console: use a different var for asset config (vrutkovs@redhat.com)
+- Document the inventory change (tomas@sedovic.cz)
+- Move the OpenStack dynamic inventory from sample (tomas@sedovic.cz)
+- fix bug 1534271 (wmeng@redhat.com)
+- Don't use from ansible.module_utils.six as its no longer available in Ansible
+  2.4 (vrutkovs@redhat.com)
+- Add console RBAC template (spadgett@redhat.com)
+- Setup master groups in order to use the master group's ansible_ssh_user to
+  pull bootstrap kubeconfig. (abutcher@redhat.com)
+- adding ability to add network policy objects. (shawn.hurley21@gmail.com)
+- add python2-boto3 package for centos-based origin-ansible container image
+  (jdiaz@redhat.com)
+- adding ability to interact with network resources. (shawn.hurley21@gmail.com)
+- Adding .ini to inventory_ignore_extensions (bedin@redhat.com)
+
+* Mon Jan 15 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.20.0
+- Adjust openstack provider dependencies versions (bdobreli@redhat.com)
+- Fix openstack provider playbook name in docs (bdobreli@redhat.com)
+- Install web console on upgrade (spadgett@redhat.com)
+- Add var for controller to enable async bindings (jpeeler@redhat.com)
+- Add cluster-operator playbook directory. (abutcher@redhat.com)
+- Move s3 & elb provisioning into their own playbooks s.t. they are applied
+  outside of the openshift_aws master provisioning tasks. (abutcher@redhat.com)
+- Update to AWS EC2 root vol size so that Health Check tasks pass
+  (mazzystr@gmail.com)
+- Configure Kuryr CNI daemon (mdulko@redhat.com)
+- Clean up host-local IPAM data while nodes are drained (danw@redhat.com)
+
+* Fri Jan 12 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.19.0
+- 
+
+* Fri Jan 12 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.18.0
+- 
+
+* Fri Jan 12 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.17.0
+- Update latest image streams and templates (sdodson@redhat.com)
+- Use webconsole.config.openshift.io/v1 API group (spadgett@redhat.com)
+- Add missing v3.9 gluster templates (sdodson@redhat.com)
+- Spelling and grammar changes to the advanced-configuration.md file.
+  (mbruzek@gmail.com)
+- Fixing openshift_hosted variable. (kwoodson@redhat.com)
+- Update deployment and apiserver with new certs (jpeeler@redhat.com)
+- Move more plugins to lib_utils (mgugino@redhat.com)
+- Add the ability to specify a timeout for node drain operations
+  (sdodson@redhat.com)
+- Add defaults for openshift_pkg_version (mgugino@redhat.com)
+- Fix typo in the advanced config docs (tomas@sedovic.cz)
+- Write guide on setting up PVs with Cinder (tomas@sedovic.cz)
+- Allow using server names in openstack dynamic inv (tomas@sedovic.cz)
+- Specify the Cinder version in the inventory (tomas@sedovic.cz)
+- Add documentation example (joel.pearson@gmail.com)
+- Add blockstorage version for openstack (joel.pearson@gmail.com)
+- logging: fix jinja filters to support py3 (vrutkovs@redhat.com)
+- Ability to specify override tolerations via the buildconfig overrider
+  (cdaley@redhat.com)
+- Chmod temp dirs created on localhost (mgugino@redhat.com)
+- Bug 1532787 - Add empty node selector to openshift-web-console namespace
+  (spadgett@redhat.com)
+- Remove become statements (mgugino@redhat.com)
+- Bug 1527178 - installation of logging stack failed: Invalid version specified
+  for Elasticsearch (nhosoi@redhat.com)
+- Limit host group scope on control-plane upgrades (mgugino@redhat.com)
+- Refactor version and move some checks into sanity_checks.py
+  (mgugino@redhat.com)
+- Updating tsb image names and template (ewolinet@redhat.com)
+- Ensure that openshift_facts role is imported whenever we rely on
+  openshift_client_binary (sdodson@redhat.com)
+- Add key check for facts_for_clusterrolebindings (nakayamakenjiro@gmail.com)
+- Update web console template (spadgett@redhat.com)
+- Use openshift_node_use_openshift_sdn when doing a containerized node upgrade
+  (vrutkovs@redhat.com)
+- Add iptables save handler (ichavero@redhat.com)
+- Fix: change import_role to include_role (mgugino@redhat.com)
+- docker storage setup for ami building (jdiaz@redhat.com)
+- ensure containerized bools are cast (mgugino@redhat.com)
+- Properly cast crio boolean variables to bool (mgugino@redhat.com)
+- Build containerized host group dynamically (mgugino@redhat.com)
+- install base_packages on oo_all_hosts (mgugino@redhat.com)
+- Add key existing check to collect facts for rolebidings
+  (nakayamakenjiro@gmail.com)
+- 3.9 upgrade: remove openshift.common.service_type (vrutkovs@redhat.com)
+- container-engine: move registry_auth.yml before pull (gscrivan@redhat.com)
+- Fix error in variable in comment (mscherer@users.noreply.github.com)
+- Switch back to dynamic include_role in logging loops (sdodson@redhat.com)
+- Use Contiv version 1.2.0 (flamingo@2thebatcave.com)
+- Contiv multi-master and other fixes (flamingo@2thebatcave.com)
+- Add missing dependency on openshift_facts (sdodson@redhat.com)
+- upgrades: set openshift_client_binary fact when running on oo_first_master
+  host (vrutkovs@redhat.com)
+- Install web console server (spadgett@redhat.com)
+- Remove become=no from various roles and tasks (mgugino@redhat.com)
+- Don't overwrite node's systemd units for containerized install
+  (vrutkovs@redhat.com)
+- Migrate to import_role for static role inclusion (sdodson@redhat.com)
+- docker_upgrade_check: skip repoquery calls on containerized setups
+  (vrutkovs@redhat.com)
+- Adding logic to disable and reenable external communication to ES during full
+  restart (ewolinet@redhat.com)
+- Provide example on how to use osm_etcd_image in a disconnected and
+  containerized installation (tkarlsso@redhat.com)
+- crio: create /etc/sysconfig/crio-storage (gscrivan@redhat.com)
+- crio: configure proxy variables (gscrivan@redhat.com)
+- Fix docker_image_availability checks (mgugino@redhat.com)
+- Install node packages in one task instead of 3 (mgugino@redhat.com)
+- Don't hardcode the network interface in the openshift_logging_mux role
+  (nkinder@redhat.com)
+- failure_summary: make sure msg is always a string (vrutkovs@redhat.com)
+- Adding logic to do a full cluster restart if we are incrementing our major
+  versions of ES (ewolinet@redhat.com)
+- test_oc_scale: add more scale test cases (vrutkovs@redhat.com)
+- test_oc_scale: fix test docstrings (vrutkovs@redhat.com)
+- Import prerequisites.yml for OpenStack (tomas@sedovic.cz)
+- Set the correct path to the openstack.conf file (tomas@sedovic.cz)
+- Return a openshift_node_labels as a dict (tomas@sedovic.cz)
+- Remove last of openshift_node role meta-depends (mgugino@redhat.com)
+- OpenStack provisioning -- support cns. (jmencak@redhat.com)
+- Fix yaml syntax error in the sample inventory (tomas@sedovic.cz)
+- Adding ability to update ami drive size. (kwoodson@redhat.com)
+- Add origin- prefix to ASB image (fabian@fabianism.us)
+- lint issues (davis.phillips@gmail.com)
+- add vsphere examples in hosts.example (davis.phillips@gmail.com)
+- add template and vsphere.conf (davis.phillips@gmail.com)
+- add vsphere cloud providers (davis.phillips@gmail.com)
+- Fix wrong indentation (ichavero@redhat.com)
+- Fix yaml indentation (ichavero@redhat.com)
+- Add iptables rules for flannel (ichavero@redhat.com)
+
+* Wed Jan 03 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.16.0
+- Add gluster 3.9 templates (sdodson@redhat.com)
+- Add in-tree CI scripts (mgugino@redhat.com)
+
+* Wed Jan 03 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.15.0
+- 
+
+* Wed Jan 03 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.14.0
+- Cast openshift_docker_use_system_container to bool (mgugino@redhat.com)
+- Correct kublet_args cloud-provider directories (mgugino@redhat.com)
+- Updating logging_facts to be able to pull values from config maps yaml files,
+  use diffs to keep custom changes, white list certain settings when creating
+  diffs (ewolinet@redhat.com)
+- Add docker auth credentials to system container install (mgugino@redhat.com)
+- Move wait_for_pods to it's own play openshift_hosted (mgugino@redhat.com)
+- Remove oauth_template bits from openshift_facts (mgugino@redhat.com)
+
+* Tue Jan 02 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.13.0
+- Bug 1527178 - installation of logging stack failed: Invalid version specified
+  for Elasticsearch (nhosoi@redhat.com)
+- Remove bootstrap.yml from main.yml in openshift_node role
+  (mgugino@redhat.com)
+
+* Tue Jan 02 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.12.0
+- 
+
+* Mon Jan 01 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.11.0
+- aws: Fix misnamed variable in provisioning_vars.yml.example
+  (mbarnes@fedoraproject.org)
+- Fix container_runtime openshift_containerized_host_groups
+  (mgugino@redhat.com)
+- Remove references to deployment_type (mgugino@redhat.com)
+- Must directly specify google-cloud-sdk version (ccoleman@redhat.com)
+- daemonset config role. (kwoodson@redhat.com)
+- Move validate_hosts to prerequisites.yml (mgugino@redhat.com)
+- Move sanity_checks into custom action plugin (mgugino@redhat.com)
+- Remove openshift.common.{is_atomic|is_containerized} (mgugino@redhat.com)
+- Adding support for docker-storage-setup on overlay (kwoodson@redhat.com)
+- Add gcloud to the installer image (ccoleman@redhat.com)
+- Remove some small items from openshift_facts (mgugino@redhat.com)
+- Relocate filter plugins to lib_utils (mgugino@redhat.com)
+- Fix hosted_reg_router selectors (mgugino@redhat.com)
+- set repos after registration: convert to match task -> import_role model.
+  (markllama@gmail.com)
+- Remove openshift_node_facts role (mgugino@redhat.com)
+- Move node group tags to openshift_aws_{master,node}_group.
+  (abutcher@redhat.com)
+- Add CentOS-OpenShift-Origin37 repo template. (abutcher@redhat.com)
+- Adding no_log to registry_auth. (kwoodson@redhat.com)
+- Fix rhel_repos disable command (mazzystr@gmail.com)
+- Fix rhel_subscribe boolean (mgugino@redhat.com)
+- Move repo and subscribe to prerequisites (mgugino@redhat.com)
+- Deprecate using Ansible tests as filters (rteague@redhat.com)
+- Removing config trigger for ES DC, updating to use a handler to rollout ES at
+  the end of a deployment, allowing for override with variable
+  (ewolinet@redhat.com)
+- openshift_logging_{fluentd,mux}_file_buffer_limit mismatch
+  (nhosoi@redhat.com)
+- Update version check to Ansible 2.4.1 (rteague@redhat.com)
+- Remove openshift_node_facts part 1 (mgugino@redhat.com)
+- Validate node hostname and IP address (rteague@redhat.com)
+- Add missing openshift_service_type (mgugino@redhat.com)
+- prevent TSB pods from spinning on inappropriate nodes (jminter@redhat.com)
+- Add readiness probe to kuryr controller pod (ltomasbo@redhat.com)
+
 * Thu Dec 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.10.0
 - Bump requirements.txt to Ansible 2.4.1 (rteague@redhat.com)
 - Commit to stabalize RHSM operations.  This code is derived from contrib
@@ -426,7 +551,7 @@ Atomic OpenShift Utilities includes
 - Update prometheus to 2.0.0 GA (zgalor@redhat.com)
 - remove schedulable from openshift_facts (mgugino@redhat.com)
 - inventory: Add example for service catalog vars (smilner@redhat.com)
-- Correct usage of include_role (rteague@redhat.com)
+- Correct usage of import_role (rteague@redhat.com)
 - Remove openshift.common.cli_image (mgugino@redhat.com)
 - Fix openshift_env fact creation within openshift_facts. (abutcher@redhat.com)
 - Combine openshift_node and openshift_node_dnsmasq (mgugino@redhat.com)
@@ -1019,7 +1144,7 @@ Atomic OpenShift Utilities includes
 - Renaming csr to bootstrap for consistency. (kwoodson@redhat.com)
 - Add master config upgrade hook to upgrade-all plays (mgugino@redhat.com)
 - Remove 'Not Started' status from playbook checkpoint (rteague@redhat.com)
-- Force include_role to static for loading openshift_facts module
+- Force import_role to static for loading openshift_facts module
   (rteague@redhat.com)
 - Make openshift-ansible depend on all subpackages (sdodson@redhat.com)
 - Refactor health check playbooks (rteague@redhat.com)
@@ -3747,9 +3872,9 @@ Atomic OpenShift Utilities includes
 - run node upgrade if master is node as part of the control plan upgrade only
   (jchaloup@redhat.com)
 - Appease yamllint (sdodson@redhat.com)
-- Adding include_role to block to resolve when eval (ewolinet@redhat.com)
+- Adding import_role to block to resolve when eval (ewolinet@redhat.com)
 - Updating oc_apply to use command instead of shell (ewolinet@redhat.com)
-- Wrap openshift_hosted_logging include_role within a block.
+- Wrap openshift_hosted_logging import_role within a block.
   (abutcher@redhat.com)
 - Adding unit test.  Fixed redudant calls to get. (kwoodson@redhat.com)
 - Fixing doc and generating new label with updated base. (kwoodson@redhat.com)

+ 1 - 1
playbooks/adhoc/openshift_hosted_logging_efk.yaml

@@ -10,7 +10,7 @@
   - set_fact:
       openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain }}"
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_logging
       tasks_from: update_master_config
     when: openshift_hosted_logging_deploy | default(false) | bool

+ 7 - 8
playbooks/adhoc/uninstall.yml

@@ -18,9 +18,8 @@
 
   # Since we're not calling openshift_facts we'll do this for now
   - set_fact:
-      is_atomic: "{{ ostree_output.rc == 0 }}"
-  - set_fact:
-      is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
+      openshift_is_atomic: "{{ ostree_output.rc == 0 }}"
+      openshift_is_containerized: "{{ ostree_output.rc == 0 or containerized | default(false) | bool }}"
 
 # Stop services on all hosts prior to removing files.
 - hosts: nodes
@@ -133,7 +132,7 @@
         when: openshift_use_flannel | default(false) | bool
         register: result
         until: result is succeeded
-      when: not is_atomic | bool
+      when: not openshift_is_atomic | bool
 
     - shell: systemctl reset-failed
       changed_when: False
@@ -363,7 +362,7 @@
 
   - name: Remove packages
     package: name={{ item }} state=absent
-    when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
     with_items:
     - atomic-openshift
     - atomic-openshift-clients
@@ -487,14 +486,14 @@
 
   - name: Stop additional atomic services
     service: name={{ item }} state=stopped
-    when: is_containerized | bool
+    when: openshift_is_containerized | bool
     with_items:
     - etcd_container
     failed_when: false
 
   - name: Remove packages
     package: name={{ item }} state=absent
-    when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
     with_items:
     - etcd
     - etcd3
@@ -554,7 +553,7 @@
 
   - name: Remove packages
     package: name={{ item }} state=absent
-    when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
     with_items:
     - haproxy
     register: result

+ 0 - 1
playbooks/aws/openshift-cluster/filter_plugins

@@ -1 +0,0 @@
-../../../filter_plugins

+ 2 - 2
playbooks/aws/openshift-cluster/install.yml

@@ -2,7 +2,7 @@
 - name: Setup the master node group
   hosts: localhost
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_aws
       tasks_from: setup_master_group.yml
 
@@ -11,7 +11,7 @@
   gather_facts: no
   remote_user: root
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_aws
       tasks_from: master_facts.yml
 

+ 0 - 1
playbooks/aws/openshift-cluster/lookup_plugins

@@ -1 +0,0 @@
-../../../lookup_plugins

+ 9 - 3
playbooks/aws/openshift-cluster/provision.yml

@@ -1,8 +1,7 @@
 ---
-- name: Setup the elb and the master node group
+- name: Alert user to variables needed
   hosts: localhost
   tasks:
-
   - name: Alert user to variables needed - clusterid
     debug:
       msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
@@ -11,7 +10,14 @@
     debug:
       msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
 
+- import_playbook: provision_s3.yml
+
+- import_playbook: provision_elb.yml
+
+- name: Create the master node group
+  hosts: localhost
+  tasks:
   - name: provision cluster
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: provision.yml

+ 9 - 0
playbooks/aws/openshift-cluster/provision_elb.yml

@@ -0,0 +1,9 @@
+---
+- name: Create elb
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: provision elb
+    include_role:
+      name: openshift_aws
+      tasks_from: provision_elb.yml

+ 1 - 1
playbooks/aws/openshift-cluster/provision_instance.yml

@@ -7,6 +7,6 @@
   gather_facts: no
   tasks:
   - name: create an instance and prepare for ami
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: provision_instance.yml

+ 1 - 1
playbooks/aws/openshift-cluster/provision_nodes.yml

@@ -13,6 +13,6 @@
       msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
 
   - name: create the node groups
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: provision_nodes.yml

+ 10 - 0
playbooks/aws/openshift-cluster/provision_s3.yml

@@ -0,0 +1,10 @@
+---
+- name: Create s3 bucket
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: create s3 bucket
+    include_role:
+      name: openshift_aws
+      tasks_from: s3.yml
+    when: openshift_aws_create_s3 | default(true) | bool

+ 1 - 1
playbooks/aws/openshift-cluster/provision_sec_group.yml

@@ -7,7 +7,7 @@
   gather_facts: no
   tasks:
   - name: create security groups
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: security_group.yml
     when: openshift_aws_create_security_groups | default(True) | bool

+ 1 - 1
playbooks/aws/openshift-cluster/provision_ssh_keypair.yml

@@ -4,7 +4,7 @@
   gather_facts: no
   tasks:
   - name: create an instance and prepare for ami
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: ssh_keys.yml
     vars:

+ 1 - 1
playbooks/aws/openshift-cluster/provision_vpc.yml

@@ -4,7 +4,7 @@
   gather_facts: no
   tasks:
   - name: create a vpc
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: vpc.yml
     when: openshift_aws_create_vpc | default(True) | bool

+ 1 - 1
playbooks/aws/openshift-cluster/seal_ami.yml

@@ -7,6 +7,6 @@
   become: no
   tasks:
   - name: seal the ami
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: seal_ami.yml

+ 6 - 1
playbooks/aws/provisioning_vars.yml.example

@@ -46,7 +46,7 @@ openshift_pkg_version: # -3.7.0
 
 # Name of the subnet in the vpc to use.  Needs to be set if using a pre-existing
 # vpc + subnet.
-#openshift_aws_subnet_name:
+#openshift_aws_subnet_az:
 
 # -------------- #
 # Security Group #
@@ -93,6 +93,11 @@ openshift_aws_ssh_key_name: # myuser_key
 # --------- #
 # Variables in this section apply to building a node AMI for use in your
 # openshift cluster.
+# openshift-ansible will perform the container runtime storage setup when specified
+# The current storage setup will require a drive if using a separate storage device
+# for the container runtime.
+container_runtime_docker_storage_type: overlay2
+container_runtime_docker_storage_setup_device: /dev/xvdb
 
 # must specify a base_ami when building an AMI
 openshift_aws_base_ami: # ami-12345678

+ 0 - 1
playbooks/byo/filter_plugins

@@ -1 +0,0 @@
-../../filter_plugins

+ 0 - 1
playbooks/byo/lookup_plugins

@@ -1 +0,0 @@
-../../lookup_plugins

+ 0 - 1
playbooks/byo/openshift-cluster/filter_plugins

@@ -1 +0,0 @@
-../../../filter_plugins

+ 0 - 1
playbooks/byo/openshift-cluster/lookup_plugins

@@ -1 +0,0 @@
-../../../lookup_plugins

+ 3 - 3
playbooks/byo/rhel_subscribe.yml

@@ -6,9 +6,9 @@
   roles:
   - role: rhel_subscribe
     when:
-    - deployment_type == 'openshift-enterprise'
+    - openshift_deployment_type == 'openshift-enterprise'
     - ansible_distribution == "RedHat"
-    - rhsub_user | default(False)
-    - rhsub_pass | default(False)
+    - rhsub_user is defined
+    - rhsub_pass is defined
   - role: openshift_repos
   - role: os_update_latest

+ 21 - 0
playbooks/cluster-operator/aws/infrastructure.yml

@@ -0,0 +1,21 @@
+---
+- name: Alert user to variables needed
+  hosts: localhost
+  tasks:
+  - name: Alert user to variables needed - clusterid
+    debug:
+      msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+  - name: Alert user to variables needed - region
+    debug:
+      msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+- import_playbook: ../../aws/openshift-cluster/provision_vpc.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_ssh_keypair.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_sec_group.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_s3.yml
+
+- import_playbook: ../../aws/openshift-cluster/provision_elb.yml

+ 1 - 0
playbooks/cluster-operator/aws/roles

@@ -0,0 +1 @@
+../../../roles

+ 0 - 1
playbooks/common/openshift-cluster/filter_plugins

@@ -1 +0,0 @@
-../../../filter_plugins

+ 0 - 1
playbooks/common/openshift-cluster/library

@@ -1 +0,0 @@
-../../../library/

+ 0 - 1
playbooks/common/openshift-cluster/lookup_plugins

@@ -1 +0,0 @@
-../../../lookup_plugins

+ 8 - 3
playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml

@@ -2,7 +2,6 @@
 - name: Create local temp directory for syncing certs
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Create local temp directory for syncing certs
@@ -11,8 +10,15 @@
     changed_when: false
     when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
 
+  - name: Chmod local temp directory
+    local_action: command chmod 777 "{{ local_cert_sync_tmpdir.stdout }}"
+    changed_when: false
+    when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+
 - name: Create service signer certificate
   hosts: oo_first_master
+  roles:
+  - openshift_facts
   tasks:
   - name: Create remote temp directory for creating certs
     command: mktemp -d /tmp/openshift-ansible-XXXXXXX
@@ -22,7 +28,7 @@
 
   - name: Create service signer certificate
     command: >
-      {{ openshift.common.client_binary }} adm ca create-signer-cert
+      {{ openshift_client_binary }} adm ca create-signer-cert
       --cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt
       --key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key
       --name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer
@@ -65,7 +71,6 @@
 - name: Delete local temp directory
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Delete local temp directory

+ 11 - 8
playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml

@@ -12,14 +12,11 @@
   roles:
   - openshift_facts
   tasks:
-  - set_fact:
-      repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
   - fail:
       msg: Cannot upgrade Docker on Atomic operating systems.
-    when: openshift.common.is_atomic | bool
+    when: openshift_is_atomic | bool
 
-  - include_role:
+  - import_role:
       name: container_runtime
       tasks_from: docker_upgrade_check.yml
     when: docker_upgrade is not defined or docker_upgrade | bool
@@ -54,13 +51,19 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
     register: l_docker_upgrade_drain_result
     until: not (l_docker_upgrade_drain_result is failed)
-    retries: 60
-    delay: 60
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+    delay: 5
+    failed_when:
+    - l_docker_upgrade_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
 
   - include_tasks: tasks/upgrade.yml
     when: l_docker_upgrade is defined and l_docker_upgrade | bool

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml

@@ -15,7 +15,7 @@
     - "{{ openshift_service_type }}-master-controllers"
     - "{{ openshift_service_type }}-node"
   failed_when: false
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
 - name: Wait for master API to come back online
   wait_for:

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml

@@ -10,7 +10,7 @@
     - etcd_container
     - openvswitch
   failed_when: false
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
 - name: Check Docker image count
   shell: "docker images -aq | wc -l"

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/filter_plugins

@@ -1 +0,0 @@
-../../../../filter_plugins

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/lookup_plugins

@@ -1 +0,0 @@
-../../../../lookup_plugins

+ 11 - 5
playbooks/common/openshift-cluster/upgrades/post_control_plane.yml

@@ -1,7 +1,13 @@
 ---
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
+####################################################################################
+# Post upgrade - Upgrade web console, default router, default registry, and examples
+####################################################################################
+- name: Upgrade web console
+  hosts: oo_first_master
+  roles:
+  - role: openshift_web_console
+    when: openshift_web_console_install | default(true) | bool
+
 - name: Upgrade default router and default registry
   hosts: oo_first_master
   vars:
@@ -27,8 +33,8 @@
 
   - set_fact:
       haproxy_routers: "{{ all_routers.results.results[0]['items'] |
-                           oo_pods_match_component(openshift_deployment_type, 'haproxy-router') |
-                           oo_select_keys_from_list(['metadata']) }}"
+                           lib_utils_oo_pods_match_component(openshift_deployment_type, 'haproxy-router') |
+                           lib_utils_oo_select_keys_from_list(['metadata']) }}"
     when:
     - all_routers.results.returncode == 0
 

+ 7 - 3
playbooks/common/openshift-cluster/upgrades/pre/config.yml

@@ -1,4 +1,6 @@
 ---
+# for control-plane upgrade, several variables may be passed in to this play
+# which may affect the tasks here and in imported playbooks.
 
 # Pre-upgrade
 - import_playbook: ../initialize_nodes_to_upgrade.yml
@@ -14,10 +16,10 @@
   hosts: "{{ l_upgrade_no_proxy_hosts }}"
   tasks:
   - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
                                                     | union(groups['oo_masters_to_config'])
                                                     | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                                | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
                                                 }}"
     when:
     - openshift_http_proxy is defined or openshift_https_proxy is defined
@@ -48,6 +50,8 @@
     # defined, and overriding the normal behavior of protecting the installed version
     openshift_release: "{{ openshift_upgrade_target }}"
     openshift_protect_installed_version: False
+    # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
+    # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
 
 # If we're only upgrading nodes, we need to ensure masters are already upgraded
 - name: Verify masters are already upgraded
@@ -72,6 +76,6 @@
 - name: Verify docker upgrade targets
   hosts: "{{ l_upgrade_docker_target_hosts }}"
   tasks:
-  - include_role:
+  - import_role:
       name: container_runtime
       tasks_from: docker_upgrade_check.yml

+ 1 - 6
playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml

@@ -5,11 +5,6 @@
   hosts: oo_first_master
   gather_facts: no
   tasks:
-  - fail:
-      msg: >
-        This upgrade is only supported for origin and openshift-enterprise
-        deployment types
-    when: deployment_type not in ['origin','openshift-enterprise']
 
   # Error out in situations where the user has older versions specified in their
   # inventory in any of the openshift_release, openshift_image_tag, and
@@ -71,7 +66,7 @@
       local_facts:
         ha: "{{ groups.oo_masters_to_config | length > 1 }}"
 
-  - when: openshift.common.is_containerized | bool
+  - when: openshift_is_containerized | bool
     block:
     - set_fact:
         master_services:

+ 5 - 5
playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml

@@ -5,7 +5,7 @@
   when: openshift.common.version is not defined
 
 - name: Update oreg_auth docker login credentials if necessary
-  include_role:
+  import_role:
     name: container_runtime
     tasks_from: registry_auth.yml
   when: oreg_auth_user is defined
@@ -15,13 +15,13 @@
     docker pull {{ openshift_cli_image }}:{{ openshift_image_tag }}
   register: pull_result
   changed_when: "'Downloaded newer image' in pull_result.stdout"
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
-- when: not openshift.common.is_containerized | bool
+- when: not openshift_is_containerized | bool
   block:
   - name: Check latest available OpenShift RPM version
     repoquery:
-      name: "{{ openshift_service_type }}"
+      name: "{{ openshift_service_type }}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
       ignore_excluders: true
     register: repoquery_out
 
@@ -49,5 +49,5 @@
   fail:
     msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
   when:
-  - deployment_type == 'origin'
+  - openshift_deployment_type == 'origin'
   - openshift.common.version is version_compare(openshift_upgrade_min,'<')

+ 28 - 24
playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml

@@ -22,10 +22,12 @@
 # See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
 - name: Pre master upgrade - Upgrade all storage
   hosts: oo_first_master
+  roles:
+  - openshift_facts
   tasks:
   - name: Upgrade all storage
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=* --confirm
     register: l_pb_upgrade_control_plane_pre_upgrade_storage
     when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
@@ -49,10 +51,9 @@
   vars:
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
+  roles:
+  - openshift_facts
   tasks:
-  - include_role:
-      name: openshift_facts
-
   # Run the pre-upgrade hook if defined:
   - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
     when: openshift_master_upgrade_pre_hook is defined
@@ -60,7 +61,7 @@
   - include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
     when: openshift_master_upgrade_pre_hook is defined
 
-  - include_role:
+  - import_role:
       name: openshift_master
       tasks_from: upgrade.yml
 
@@ -86,7 +87,7 @@
 
   - name: Post master upgrade - Upgrade clusterpolicies storage
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=clusterpolicies --confirm
     register: l_pb_upgrade_control_plane_post_upgrade_storage
     when:
@@ -108,12 +109,11 @@
 - name: Gate on master update
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       master_update_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_masters_to_config)
-                                 | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+                                 | lib_utils_oo_select_keys(groups.oo_masters_to_config)
+                                 | lib_utils_oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
   - set_fact:
       master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) | list }}"
   - fail:
@@ -128,12 +128,13 @@
   hosts: oo_masters_to_config
   roles:
   - { role: openshift_cli }
+  - { role: openshift_facts }
   vars:
     __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
   tasks:
   - name: Reconcile Cluster Roles
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-roles --additive-only=true --confirm -o name
     register: reconcile_cluster_role_result
     when: openshift_version is version_compare('3.7','<')
@@ -144,7 +145,7 @@
 
   - name: Reconcile Cluster Role Bindings
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-role-bindings
       --exclude-groups=system:authenticated
       --exclude-groups=system:authenticated:oauth
@@ -160,7 +161,7 @@
 
   - name: Reconcile Jenkins Pipeline Role Bindings
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
     run_once: true
     register: reconcile_jenkins_role_binding_result
     changed_when:
@@ -214,7 +215,7 @@
 
   - name: Reconcile Security Context Constraints
     command: >
-      {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
+      {{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
     register: reconcile_scc_result
     changed_when:
     - reconcile_scc_result.stdout != ''
@@ -223,7 +224,7 @@
 
   - name: Migrate storage post policy reconciliation
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=* --confirm
     run_once: true
     register: l_pb_upgrade_control_plane_post_upgrade_storage
@@ -242,12 +243,11 @@
 - name: Gate on reconcile
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       reconcile_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_masters_to_config)
-                                 | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+                                 | lib_utils_oo_select_keys(groups.oo_masters_to_config)
+                                 | lib_utils_oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
   - set_fact:
       reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) | list }}"
   - fail:
@@ -262,7 +262,7 @@
   - openshift_facts
   tasks:
   - include_tasks: docker/tasks/upgrade.yml
-    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift_is_atomic | bool
 
 - name: Drain and upgrade master nodes
   hosts: oo_masters_to_config:&oo_nodes_to_upgrade
@@ -291,21 +291,25 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_control_plane_drain_result
     until: not (l_upgrade_control_plane_drain_result is failed)
-    retries: 60
-    delay: 60
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+    delay: 5
+    failed_when:
+    - l_upgrade_control_plane_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
 
   roles:
   - openshift_facts
   post_tasks:
-  - include_role:
+  - import_role:
       name: openshift_node
       tasks_from: upgrade.yml
-    vars:
-      openshift_node_upgrade_in_progress: True
   - name: Set node schedulability
     oc_adm_manage_node:
       node: "{{ openshift.node.nodename | lower }}"

+ 12 - 10
playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml

@@ -4,11 +4,9 @@
   roles:
   - role: openshift_facts
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_node
       tasks_from: upgrade_pre.yml
-    vars:
-      openshift_node_upgrade_in_progress: True
 
 - name: Drain and upgrade nodes
   hosts: oo_nodes_to_upgrade:!oo_masters_to_config
@@ -35,19 +33,23 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
     until: not (l_upgrade_nodes_drain_result is failed)
-    retries: 60
-    delay: 60
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+    delay: 5
+    failed_when:
+    - l_upgrade_nodes_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
 
   post_tasks:
-  - include_role:
+  - import_role:
       name: openshift_node
       tasks_from: upgrade.yml
-    vars:
-      openshift_node_upgrade_in_progress: True
   - name: Set node schedulability
     oc_adm_manage_node:
       node: "{{ openshift.node.nodename | lower }}"
@@ -62,7 +64,7 @@
 - name: Re-enable excluders
   hosts: oo_nodes_to_upgrade:!oo_masters_to_config
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_excluder
     vars:
       r_openshift_excluder_action: enable

+ 5 - 5
playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml

@@ -3,7 +3,7 @@
   hosts: localhost
   tasks:
   - name: build upgrade scale groups
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: upgrade_node_group.yml
 
@@ -43,24 +43,24 @@
   tasks:
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }}
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
       --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       --force --delete-local-data --ignore-daemonsets
       --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
     until: not (l_upgrade_nodes_drain_result is failed)
-    retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0  | int }}"
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
     delay: 5
     failed_when:
     - l_upgrade_nodes_drain_result is failed
-    - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
 
 # Alright, let's clean up!
 - name: clean up the old scale group
   hosts: localhost
   tasks:
   - name: clean up scale group
-    include_role:
+    import_role:
       name: openshift_aws
       tasks_from: remove_scale_group.yml

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins

@@ -1 +0,0 @@
-../../../../../filter_plugins/

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml

@@ -13,7 +13,7 @@
   tasks:
   - set_fact:
       openshift_upgrade_target: '3.6'
-      openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+      openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
 
 - import_playbook: ../pre/config.yml
   vars:

+ 6 - 1
playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml

@@ -14,16 +14,21 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
   tasks:
   - set_fact:
       openshift_upgrade_target: '3.6'
-      openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+      openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
 
 - import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
   vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
     l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_upgrade_no_proxy_hosts: "oo_masters_to_config"
     l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml

@@ -15,7 +15,7 @@
   tasks:
   - set_fact:
       openshift_upgrade_target: '3.6'
-      openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+      openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
 
 - import_playbook: ../pre/config.yml
   vars:

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins

@@ -1 +0,0 @@
-../../../../../filter_plugins/

+ 5 - 0
playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml

@@ -14,6 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +24,11 @@
       openshift_upgrade_min: '3.6'
 
 - import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
   vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
     l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_upgrade_no_proxy_hosts: "oo_masters_to_config"
     l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"

+ 2 - 1
playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml

@@ -7,6 +7,7 @@
   hosts: oo_first_master
   roles:
   - { role: lib_openshift }
+  - { role: openshift_facts }
 
   tasks:
   - name: Check for invalid namespaces and SDN errors
@@ -14,7 +15,7 @@
   # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO
   - name: Confirm OpenShift authorization objects are in sync
     command: >
-      {{ openshift.common.client_binary }} adm migrate authorization
+      {{ openshift_client_binary }} adm migrate authorization
     when:
     - openshift_currently_installed_version is version_compare('3.7','<')
     - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins

@@ -1 +0,0 @@
-../../../../../filter_plugins/

+ 0 - 2
playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml

@@ -35,8 +35,6 @@
 # Pre-upgrade completed
 
 - import_playbook: ../upgrade_control_plane.yml
-  vars:
-    master_config_hook: "v3_7/master_config_upgrade.yml"
 
 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode

+ 6 - 2
playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml

@@ -14,6 +14,8 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+  when: not skip_version_info | default(false)
 
 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +25,11 @@
       openshift_upgrade_min: '3.7'
 
 - import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
   vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
     l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_upgrade_no_proxy_hosts: "oo_masters_to_config"
     l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
@@ -42,8 +48,6 @@
 # Pre-upgrade completed
 
 - import_playbook: ../upgrade_control_plane.yml
-  vars:
-    master_config_hook: "v3_7/master_config_upgrade.yml"
 
 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins

@@ -1 +0,0 @@
-../../../../../filter_plugins/

+ 0 - 19
playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml

@@ -1,20 +1 @@
 ---
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'controllerConfig.election.lockName'
-    yaml_value: 'openshift-master-controllers'
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
-    yaml_value: service-signer.crt
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
-    yaml_value: service-signer.key
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
-    yaml_key: servingInfo.clientCA
-    yaml_value: ca.crt

+ 5 - 6
playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml

@@ -10,6 +10,7 @@
   - set_fact:
       openshift_upgrade_target: '3.9'
       openshift_upgrade_min: '3.7'
+      openshift_release: '3.9'
 
 - import_playbook: ../pre/config.yml
   vars:
@@ -31,8 +32,6 @@
 # Pre-upgrade completed
 
 - import_playbook: ../upgrade_control_plane.yml
-  vars:
-    master_config_hook: "v3_7/master_config_upgrade.yml"
 
 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode
@@ -41,13 +40,13 @@
   roles:
   - role: openshift_facts
   tasks:
-  - name: Stop {{ openshift.common.service_type }}-master-controllers
+  - name: Stop {{ openshift_service_type }}-master-controllers
     systemd:
-      name: "{{ openshift.common.service_type }}-master-controllers"
+      name: "{{ openshift_service_type }}-master-controllers"
       state: stopped
-  - name: Start {{ openshift.common.service_type }}-master-controllers
+  - name: Start {{ openshift_service_type }}-master-controllers
     systemd:
-      name: "{{ openshift.common.service_type }}-master-controllers"
+      name: "{{ openshift_service_type }}-master-controllers"
       state: started
 
 - import_playbook: ../upgrade_nodes.yml

+ 57 - 10
playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml

@@ -14,37 +14,84 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
-- name: Configure the upgrade target for the common upgrade tasks
+## Check to see if they're running 3.7 and if so upgrade them to 3.8 on control plane
+## If they've specified pkg_version or image_tag preserve that for later use
+- name: Configure the upgrade target for the common upgrade tasks 3.8
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
   tasks:
   - set_fact:
-      openshift_upgrade_target: '3.9'
+      openshift_upgrade_target: '3.8'
       openshift_upgrade_min: '3.7'
+      openshift_release: '3.8'
+      _requested_pkg_version: "{{openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+      _requested_image_tag: "{{openshift_image_tag if openshift_image_tag is defined else omit }}"
+    when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
 
 - import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
   vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
     l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_upgrade_no_proxy_hosts: "oo_masters_to_config"
     l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_upgrade_verify_targets_hosts: "oo_masters_to_config"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
+  when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
 
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
+- name: Flag pre-upgrade checks complete for hosts without errors 3.8
   hosts: oo_masters_to_config:oo_etcd_to_config
   tasks:
   - set_fact:
       pre_upgrade_complete: True
+    when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
 
 # Pre-upgrade completed
 
+- import_playbook: ../upgrade_control_plane.yml
+  vars:
+    openshift_release: '3.8'
+  when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+
+## 3.8 upgrade complete we should now be able to upgrade to 3.9
+
+- name: Configure the upgrade target for the common upgrade tasks 3.9
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  tasks:
+  - meta: clear_facts
+  - set_fact:
+      openshift_upgrade_target: '3.9'
+      openshift_upgrade_min: '3.8'
+      openshift_release: '3.9'
+      openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}"
+      openshift_image_tag: "{{ _requested_image_tag | default('v3.9') }}"
+
+- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
+  vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_masters_to_config"
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      pre_upgrade_complete: True
 
 - import_playbook: ../upgrade_control_plane.yml
   vars:
-    master_config_hook: "v3_7/master_config_upgrade.yml"
+    openshift_release: '3.9'
 
 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode
@@ -53,13 +100,13 @@
   roles:
   - role: openshift_facts
   tasks:
-  - name: Stop {{ openshift.common.service_type }}-master-controllers
+  - name: Stop {{ openshift_service_type }}-master-controllers
     systemd:
-      name: "{{ openshift.common.service_type }}-master-controllers"
+      name: "{{ openshift_service_type }}-master-controllers"
       state: stopped
-  - name: Start {{ openshift.common.service_type }}-master-controllers
+  - name: Start {{ openshift_service_type }}-master-controllers
     systemd:
-      name: "{{ openshift.common.service_type }}-master-controllers"
+      name: "{{ openshift_service_type }}-master-controllers"
       state: started
 
 - import_playbook: ../post_control_plane.yml

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml

@@ -12,6 +12,7 @@
   - set_fact:
       openshift_upgrade_target: '3.9'
       openshift_upgrade_min: '3.7'
+      openshift_release: '3.9'
 
 - import_playbook: ../pre/config.yml
   vars:

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml

@@ -1,5 +1,5 @@
 ---
-- name: Verify 3.9 specific upgrade checks
+- name: Verify 3.8 specific upgrade checks
   hosts: oo_first_master
   roles:
   - { role: lib_openshift }

+ 6 - 0
playbooks/container-runtime/private/build_container_groups.yml

@@ -0,0 +1,6 @@
+---
+- name: create oo_hosts_containerized_managed_true host group
+  hosts: oo_all_hosts:!oo_nodes_to_config
+  tasks:
+  - group_by:
+      key: oo_hosts_containerized_managed_{{ (containerized | default(False)) | ternary('true','false') }}

+ 6 - 9
playbooks/container-runtime/private/config.yml

@@ -1,26 +1,23 @@
 ---
-- hosts: "{{ l_containerized_host_groups }}"
-  vars:
-    l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}"
-    l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
-  # role: container_runtime is necessary  here to bring role default variables
-  # into the play scope.
+- import_playbook: build_container_groups.yml
+
+- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
   roles:
     - role: container_runtime
   tasks:
-    - include_role:
+    - import_role:
         name: container_runtime
         tasks_from: package_docker.yml
       when:
         - not openshift_docker_use_system_container | bool
         - not openshift_use_crio_only | bool
-    - include_role:
+    - import_role:
         name: container_runtime
         tasks_from: systemcontainer_docker.yml
       when:
         - openshift_docker_use_system_container | bool
         - not openshift_use_crio_only | bool
-    - include_role:
+    - import_role:
         name: container_runtime
         tasks_from: systemcontainer_crio.yml
       when:

+ 18 - 0
playbooks/container-runtime/private/setup_storage.yml

@@ -0,0 +1,18 @@
+---
+- import_playbook: build_container_groups.yml
+
+- hosts: oo_nodes_to_config:oo_hosts_containerized_managed_true
+  vars:
+    l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}"
+    l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
+  # role: container_runtime is necessary here to bring role default variables
+  # into the play scope.
+  roles:
+    - role: container_runtime
+  tasks:
+    - import_role:
+        name: container_runtime
+        tasks_from: docker_storage_setup_overlay.yml
+      when:
+        - container_runtime_docker_storage_type|default('') == "overlay2"
+        - openshift_docker_is_node_or_master | bool

+ 6 - 0
playbooks/container-runtime/setup_storage.yml

@@ -0,0 +1,6 @@
+---
+- import_playbook: ../init/main.yml
+  vars:
+    skip_verison: True
+
+- import_playbook: private/setup_storage.yml

+ 3 - 0
playbooks/deploy_cluster.yml

@@ -22,6 +22,9 @@
 
 - import_playbook: openshift-hosted/private/config.yml
 
+- import_playbook: openshift-web-console/private/config.yml
+  when: openshift_web_console_install | default(true) | bool
+
 - import_playbook: openshift-metrics/private/config.yml
   when: openshift_metrics_install_metrics | default(false) | bool
 

+ 1 - 1
playbooks/gcp/provision.yml

@@ -6,7 +6,7 @@
   tasks:
 
   - name: provision a GCP cluster in the specified project
-    include_role:
+    import_role:
       name: openshift_gcp
 
 - name: run the cluster deploy

+ 37 - 0
playbooks/init/base_packages.yml

@@ -0,0 +1,37 @@
+---
+- name: Install packages necessary for installer
+  hosts: oo_all_hosts
+  any_errors_fatal: true
+  tasks:
+  - when:
+    - not openshift_is_atomic | bool
+    block:
+    - name: Ensure openshift-ansible installer package deps are installed
+      package:
+        name: "{{ item }}"
+        state: present
+      with_items:
+      - iproute
+      - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
+      - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+      - yum-utils
+      register: result
+      until: result is succeeded
+
+    - name: Ensure various deps for running system containers are installed
+      package:
+        name: "{{ item }}"
+        state: present
+      with_items:
+      - atomic
+      - ostree
+      - runc
+      when:
+      - >
+        (openshift_use_system_containers | default(False)) | bool
+        or (openshift_use_etcd_system_container | default(False)) | bool
+        or (openshift_use_openvswitch_system_container | default(False)) | bool
+        or (openshift_use_node_system_container | default(False)) | bool
+        or (openshift_use_master_system_container | default(False)) | bool
+      register: result
+      until: result is succeeded

+ 0 - 1
playbooks/init/evaluate_groups.yml

@@ -2,7 +2,6 @@
 - name: Populate config host groups
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Load group name mapping variables

+ 28 - 69
playbooks/init/facts.yml

@@ -5,7 +5,9 @@
   tasks:
 
 - name: Initialize host facts
-  hosts: oo_all_hosts
+  # l_upgrade_non_node_hosts is passed in via play during control-plane-only
+  # upgrades; otherwise oo_all_hosts is used.
+  hosts: "{{ l_upgrade_non_node_hosts | default('oo_all_hosts') }}"
   tasks:
   - name: load openshift_facts module
     import_role:
@@ -13,7 +15,7 @@
 
   # TODO: Should this role be refactored into health_checks??
   - name: Run openshift_sanitize_inventory to set variables
-    include_role:
+    import_role:
       name: openshift_sanitize_inventory
 
   - name: Detecting Operating System from ostree_booted
@@ -21,40 +23,24 @@
       path: /run/ostree-booted
     register: ostree_booted
 
-  # Locally setup containerized facts for now
-  - name: initialize_facts set fact l_is_atomic
-    set_fact:
-      l_is_atomic: "{{ ostree_booted.stat.exists }}"
-
-  - name: initialize_facts set fact for containerized and l_is_*_system_container
+  # TODO(michaelgugino) remove this line once CI is updated.
+  - name: set openshift_deployment_type if unset
     set_fact:
-      l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
-
-  # TODO: Should this be moved into health checks??
-  # Seems as though any check that happens with a corresponding fail should move into health_checks
-  - name: Validate python version - ans_dist is fedora and python is v3
-    fail:
-      msg: |
-        openshift-ansible requires Python 3 for {{ ansible_distribution }};
-        For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
+      openshift_deployment_type: "{{ deployment_type }}"
     when:
-    - ansible_distribution == 'Fedora'
-    - ansible_python['version']['major'] != 3
+    - openshift_deployment_type is undefined
+    - deployment_type is defined
 
-  # TODO: Should this be moved into health checks??
-  # Seems as though any check that happens with a corresponding fail should move into health_checks
-  - name: Validate python version - ans_dist not Fedora and python must be v2
-    fail:
-      msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
-    when:
-    - ansible_distribution != 'Fedora'
-    - ansible_python['version']['major'] != 2
+  - name: initialize_facts set fact openshift_is_atomic and openshift_is_containerized
+    set_fact:
+      openshift_is_atomic: "{{ ostree_booted.stat.exists }}"
+      openshift_is_containerized: "{{ ostree_booted.stat.exists or (containerized | default(false) | bool) }}"
 
   # TODO: Should this be moved into health checks??
   # Seems as though any check that happens with a corresponding fail should move into health_checks
   # Fail as early as possible if Atomic and old version of Docker
   - when:
-    - l_is_atomic | bool
+    - openshift_is_atomic | bool
     block:
 
     # See https://access.redhat.com/articles/2317361
@@ -72,40 +58,7 @@
         - l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=')
         msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
 
-  - when:
-    - not l_is_atomic | bool
-    block:
-    - name: Ensure openshift-ansible installer package deps are installed
-      package:
-        name: "{{ item }}"
-        state: present
-      with_items:
-      - iproute
-      - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
-      - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
-      - yum-utils
-      register: result
-      until: result is succeeded
-
-    - name: Ensure various deps for running system containers are installed
-      package:
-        name: "{{ item }}"
-        state: present
-      with_items:
-      - atomic
-      - ostree
-      - runc
-      when:
-      - >
-        (openshift_use_system_containers | default(False)) | bool
-        or (openshift_use_etcd_system_container | default(False)) | bool
-        or (openshift_use_openvswitch_system_container | default(False)) | bool
-        or (openshift_use_node_system_container | default(False)) | bool
-        or (openshift_use_master_system_container | default(False)) | bool
-      register: result
-      until: result is succeeded
-
-  - name: Gather Cluster facts and set is_containerized if needed
+  - name: Gather Cluster facts
     openshift_facts:
       role: common
       local_facts:
@@ -113,7 +66,6 @@
         deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
         hostname: "{{ openshift_hostname | default(None) }}"
         ip: "{{ openshift_ip | default(None) }}"
-        is_containerized: "{{ l_is_containerized | default(None) }}"
         public_hostname: "{{ openshift_public_hostname | default(None) }}"
         public_ip: "{{ openshift_public_ip | default(None) }}"
         portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
@@ -126,10 +78,10 @@
     openshift_facts:
       role: common
       local_facts:
-        no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+        no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
                                              | union(groups['oo_masters_to_config'])
                                              | union(groups['oo_etcd_to_config'] | default([])))
-                                         | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                         | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
                                          }}"
     when:
     - openshift_http_proxy is defined or openshift_https_proxy is defined
@@ -141,7 +93,14 @@
       local_facts:
         sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
 
-  - name: initialize_facts set_fact repoquery command
-    set_fact:
-      repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-      repoquery_installed: "{{ 'dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins --installed' }}"
+- name: Initialize special first-master variables
+  hosts: oo_first_master
+  roles:
+  - role: openshift_facts
+  tasks:
+  - set_fact:
+      # We need to setup openshift_client_binary here for special uses of delegate_to in
+      # later roles and plays.
+      first_master_client_binary: "{{  openshift_client_binary }}"
+      # Some roles may require this to be set for first master
+      openshift_client_binary: "{{ openshift_client_binary }}"

+ 3 - 6
playbooks/init/main.yml

@@ -17,15 +17,12 @@
 
 - import_playbook: facts.yml
 
-- import_playbook: sanity_checks.yml
-  when: not (skip_sanity_checks | default(False))
-
-- import_playbook: validate_hostnames.yml
-  when: not (skip_validate_hostnames | default(False))
-
 - import_playbook: version.yml
   when: not (skip_verison | default(False))
 
+- import_playbook: sanity_checks.yml
+  when: not (skip_sanity_checks | default(False))
+
 - name: Initialization Checkpoint End
   hosts: all
   gather_facts: false

+ 5 - 5
playbooks/init/repos.yml

@@ -4,13 +4,13 @@
   gather_facts: no
   tasks:
   - name: subscribe instances to Red Hat Subscription Manager
-    include_role:
+    import_role:
       name: rhel_subscribe
     when:
     - ansible_distribution == 'RedHat'
-    - deployment_type == 'openshift-enterprise'
-    - rhsub_user | default(False)
-    - rhsub_pass | default(False)
+    - openshift_deployment_type == 'openshift-enterprise'
+    - rhsub_user is defined
+    - rhsub_pass is defined
   - name: initialize openshift repos
-    include_role:
+    import_role:
       name: openshift_repos

+ 12 - 48
playbooks/init/sanity_checks.yml

@@ -1,51 +1,15 @@
 ---
 - name: Verify Requirements
-  hosts: oo_all_hosts
+  hosts: oo_first_master
+  roles:
+  - role: lib_utils
   tasks:
-  - fail:
-      msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel
-    when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
-
-  - fail:
-      msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
-    when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
-
-  - fail:
-      msg: Nuage sdn can not be used with flannel
-    when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
-
-  - fail:
-      msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv
-    when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
-
-  - fail:
-      msg: Contiv can not be used with flannel
-    when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
-
-  - fail:
-      msg: Contiv can not be used with nuage
-    when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
-
-  - fail:
-      msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
-    when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
-
-  - fail:
-      msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both.
-    when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
-
-  - fail:
-      msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both
-    when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
-
-  - fail:
-      msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both
-    when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
-
-  - fail:
-      msg: openshift_hostname must be 63 characters or less
-    when: openshift_hostname is defined and openshift_hostname | length > 63
-
-  - fail:
-      msg: openshift_public_hostname must be 63 characters or less
-    when: openshift_public_hostname is defined and openshift_public_hostname | length > 63
+  # sanity_checks is a custom action plugin defined in lib_utils.
+  # This module will loop through all the hostvars for each host
+  # specified in check_hosts.
+  # Since sanity_checks is an action_plugin, it executes on the control host.
+  # Thus, sanity_checks cannot gather new information about any hosts.
+  - name: Run variable sanity checks
+    sanity_checks:
+      check_hosts: "{{ groups['oo_all_hosts'] }}"
+    run_once: True

+ 22 - 10
playbooks/init/version.yml

@@ -2,20 +2,32 @@
 # NOTE: requires openshift_facts be run
 - name: Determine openshift_version to configure on first master
   hosts: oo_first_master
-  roles:
-  - openshift_version
+  tasks:
+  - include_role:
+      name: openshift_version
+      tasks_from: first_master.yml
+  - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version | default('') }}"
 
 # NOTE: We set this even on etcd hosts as they may also later run as masters,
 # and we don't want to install wrong version of docker and have to downgrade
 # later.
 - name: Set openshift_version for etcd, node, and master hosts
-  hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master
+  hosts: "{{ l_openshift_version_set_hosts | default(l_default_version_set_hosts) }}"
   vars:
-    openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
-  pre_tasks:
+    l_default_version_set_hosts: "oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master"
+    l_first_master_openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+    l_first_master_openshift_pkg_version: "{{ hostvars[groups.oo_first_master.0].openshift_pkg_version | default('') }}"
+    l_first_master_openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag}}"
+  tasks:
   - set_fact:
-      openshift_pkg_version: -{{ openshift_version }}
-    when: openshift_pkg_version is not defined
-  - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
-  roles:
-  - openshift_version
+      openshift_version: "{{ l_first_master_openshift_version }}"
+      openshift_pkg_version: "{{ l_first_master_openshift_pkg_version }}"
+      openshift_image_tag: "{{ l_first_master_openshift_image_tag }}"
+
+# NOTE: These steps should only be run against masters and nodes.
+- name: Ensure the requested version packages are available.
+  hosts: "{{ l_openshift_version_check_hosts | default('oo_nodes_to_config:oo_masters_to_config:!oo_first_master') }}"
+  tasks:
+  - include_role:
+      name: openshift_version
+      tasks_from: masters_and_nodes.yml

+ 1 - 0
playbooks/openshift-checks/adhoc.yml

@@ -11,6 +11,7 @@
   # usage. Running this play only in localhost speeds up execution.
   hosts: localhost
   connection: local
+  gather_facts: false
   roles:
   - openshift_health_checker
   vars:

+ 1 - 1
playbooks/openshift-etcd/private/ca.yml

@@ -5,7 +5,7 @@
   - role: openshift_clock
   - role: openshift_etcd_facts
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: ca.yml
     vars:

+ 3 - 3
playbooks/openshift-etcd/private/certificates-backup.yml

@@ -3,10 +3,10 @@
   hosts: oo_first_etcd
   any_errors_fatal: true
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup_generated_certificates.yml
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: remove_generated_certificates.yml
 
@@ -14,6 +14,6 @@
   hosts: oo_etcd_to_config
   any_errors_fatal: true
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup_server_certificates.yml

+ 16 - 14
playbooks/openshift-etcd/private/embedded2external.yml

@@ -18,7 +18,7 @@
   - role: openshift_facts
   tasks:
   - name: Check the master API is ready
-    include_role:
+    import_role:
       name: openshift_master
       tasks_from: check_master_api_is_ready.yml
   - set_fact:
@@ -31,8 +31,8 @@
       name: "{{ master_service }}"
       state: stopped
   # 2. backup embedded etcd
-  # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
-  - include_role:
+  # Can't use with_items with import_role: https://github.com/ansible/ansible/issues/21285
+  - import_role:
       name: etcd
       tasks_from: backup.yml
     vars:
@@ -40,7 +40,7 @@
       r_etcd_common_embedded_etcd: "{{ true }}"
       r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
 
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup.archive.yml
     vars:
@@ -56,7 +56,7 @@
 - name: Backup etcd client certificates for master host
   hosts: oo_first_master
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup_master_etcd_certificates.yml
 
@@ -73,10 +73,10 @@
   hosts: oo_etcd_to_config[0]
   gather_facts: no
   pre_tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: disable_etcd.yml
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: clean_data.yml
 
@@ -89,9 +89,12 @@
     local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX
     register: g_etcd_client_mktemp
     changed_when: False
-    become: no
 
-  - include_role:
+  - name: Chmod local temp directory for syncing etcd backup
+    local_action: command chmod 777 "{{ g_etcd_client_mktemp.stdout }}"
+    changed_when: False
+
+  - import_role:
       name: etcd
       tasks_from: backup.fetch.yml
     vars:
@@ -101,7 +104,7 @@
       r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
     delegate_to: "{{ groups.oo_first_master[0] }}"
 
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup.copy.yml
     vars:
@@ -116,20 +119,19 @@
   - name: Delete temporary directory
     local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
     changed_when: False
-    become: no
 
 # 7. force new cluster from the backup
 - name: Force new etcd cluster
   hosts: oo_etcd_to_config[0]
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup.unarchive.yml
     vars:
       r_etcd_common_backup_tag: pre-migrate
       r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
 
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup.force_new_cluster.yml
     vars:
@@ -143,7 +145,7 @@
 - name: Configure master to use external etcd
   hosts: oo_first_master
   tasks:
-  - include_role:
+  - import_role:
       name: openshift_master
       tasks_from: configure_external_etcd.yml
     vars:

+ 0 - 1
playbooks/openshift-etcd/private/filter_plugins

@@ -1 +0,0 @@
-../../../filter_plugins

+ 0 - 1
playbooks/openshift-etcd/private/lookup_plugins

@@ -1 +0,0 @@
-../../../lookup_plugins

+ 11 - 13
playbooks/openshift-etcd/private/migrate.yml

@@ -2,7 +2,6 @@
 - name: Check if the master has embedded etcd
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tags:
   - always
@@ -15,7 +14,7 @@
 - name: Run pre-checks
   hosts: oo_etcd_to_migrate
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: migrate.pre_check.yml
     vars:
@@ -43,7 +42,7 @@
   roles:
   - role: openshift_facts
   post_tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup.yml
     vars:
@@ -53,12 +52,11 @@
 - name: Gate on etcd backup
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       etcd_backup_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_etcd_to_migrate)
-                                 | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
+                                 | lib_utils_oo_select_keys(groups.oo_etcd_to_migrate)
+                                 | lib_utils_oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
   - set_fact:
       etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) | list }}"
   - fail:
@@ -70,7 +68,7 @@
   hosts: oo_etcd_to_migrate
   gather_facts: no
   pre_tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: disable_etcd.yml
 
@@ -78,7 +76,7 @@
   hosts: oo_etcd_to_migrate[0]
   gather_facts: no
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: migrate.yml
     vars:
@@ -90,7 +88,7 @@
   hosts: oo_etcd_to_migrate[1:]
   gather_facts: no
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: clean_data.yml
     vars:
@@ -118,15 +116,15 @@
   tasks:
   - set_fact:
       etcd_migration_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_etcd_to_migrate)
-                                 | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
+                                 | lib_utils_oo_select_keys(groups.oo_etcd_to_migrate)
+                                 | lib_utils_oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
   - set_fact:
       etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) | list }}"
 
 - name: Add TTLs on the first master
   hosts: oo_first_master[0]
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: migrate.add_ttls.yml
     vars:
@@ -138,7 +136,7 @@
 - name: Configure masters if etcd data migration is successful
   hosts: oo_masters_to_config
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: migrate.configure_master.yml
     when: etcd_migration_failed | length == 0

+ 20 - 18
playbooks/openshift-etcd/private/redeploy-ca.yml

@@ -14,10 +14,10 @@
 - name: Backup existing etcd CA certificate directories
   hosts: oo_etcd_to_config
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup_ca_certificates.yml
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: remove_ca_certificates.yml
 
@@ -26,7 +26,6 @@
 - name: Create temp directory for syncing certs
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Create local temp directory for syncing certs
@@ -34,10 +33,14 @@
     register: g_etcd_mktemp
     changed_when: false
 
+  - name: Chmod local temp directory for syncing certs
+    local_action: command chmod 777 "{{ g_etcd_mktemp.stdout }}"
+    changed_when: false
+
 - name: Distribute etcd CA to etcd hosts
   hosts: oo_etcd_to_config
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: distribute_ca.yml
     vars:
@@ -47,14 +50,14 @@
 - import_playbook: restart.yml
   # Do not restart etcd when etcd certificates were previously expired.
   when: ('expired' not in (hostvars
-                           | oo_select_keys(groups['etcd'])
-                           | oo_collect('check_results.check_results.etcd')
-                           | oo_collect('health')))
+                           | lib_utils_oo_select_keys(groups['etcd'])
+                           | lib_utils_oo_collect('check_results.check_results.etcd')
+                           | lib_utils_oo_collect('health')))
 
 - name: Retrieve etcd CA certificate
   hosts: oo_first_etcd
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: retrieve_ca_certificates.yml
     vars:
@@ -74,7 +77,6 @@
 - name: Delete temporary directory on localhost
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - file:
@@ -87,15 +89,15 @@
   when:
   # masters
   - ('expired' not in hostvars
-      | oo_select_keys(groups['oo_masters_to_config'])
-      | oo_collect('check_results.check_results.ocp_certs')
-      | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+      | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+      | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+      | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
   - ('expired' not in hostvars
-      | oo_select_keys(groups['oo_masters_to_config'])
-      | oo_collect('check_results.check_results.ocp_certs')
-      | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+      | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+      | lib_utils_oo_collect('check_results.check_results.ocp_certs')
+      | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
   # etcd
   - ('expired' not in (hostvars
-      | oo_select_keys(groups['etcd'])
-      | oo_collect('check_results.check_results.etcd')
-      | oo_collect('health')))
+      | lib_utils_oo_select_keys(groups['etcd'])
+      | lib_utils_oo_collect('check_results.check_results.etcd')
+      | lib_utils_oo_collect('health')))

+ 2 - 2
playbooks/openshift-etcd/private/restart.yml

@@ -3,7 +3,7 @@
   hosts: oo_etcd_to_config
   serial: 1
   tasks:
-    - include_role:
+    - import_role:
         name: etcd
         tasks_from: restart.yml
       when:
@@ -12,7 +12,7 @@
 - name: Restart etcd
   hosts: oo_etcd_to_config
   tasks:
-    - include_role:
+    - import_role:
         name: etcd
         tasks_from: restart.yml
       when:

+ 4 - 4
playbooks/openshift-etcd/private/scaleup.yml

@@ -30,7 +30,7 @@
     retries: 3
     delay: 10
     until: etcd_add_check.rc == 0
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: server_certificates.yml
     vars:
@@ -69,13 +69,13 @@
     etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
     openshift_ca_host: "{{ groups.oo_first_master.0 }}"
     openshift_master_etcd_hosts: "{{ hostvars
-                                     | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
-                                     | oo_collect('openshift.common.hostname')
+                                     | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
+                                     | lib_utils_oo_collect('openshift.common.hostname')
                                      | default(none, true) }}"
     openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
   roles:
   - role: openshift_master_facts
   post_tasks:
-  - include_role:
+  - import_role:
       name: openshift_master
       tasks_from: update_etcd_client_urls.yml

+ 1 - 1
playbooks/openshift-etcd/private/server_certificates.yml

@@ -5,7 +5,7 @@
   roles:
     - role: openshift_etcd_facts
   post_tasks:
-    - include_role:
+    - import_role:
         name: etcd
         tasks_from: server_certificates.yml
       vars:

+ 3 - 4
playbooks/openshift-etcd/private/upgrade_backup.yml

@@ -4,7 +4,7 @@
   roles:
   - role: openshift_etcd_facts
   post_tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: backup.yml
     vars:
@@ -14,12 +14,11 @@
 - name: Gate on etcd backup
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       etcd_backup_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_etcd_hosts_to_backup)
-                                 | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
+                                 | lib_utils_oo_select_keys(groups.oo_etcd_hosts_to_backup)
+                                 | lib_utils_oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
   - set_fact:
       etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) | list }}"
   - fail:

+ 3 - 3
playbooks/openshift-etcd/private/upgrade_image_members.yml

@@ -1,12 +1,12 @@
 ---
 # INPUT etcd_upgrade_version
 # INPUT etcd_container_version
-# INPUT openshift.common.is_containerized
+# INPUT openshift_is_containerized
 - name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
   hosts: oo_etcd_hosts_to_upgrade
   serial: 1
   tasks:
-  - include_role:
+  - import_role:
       name: etcd
       tasks_from: upgrade_image.yml
     vars:
@@ -14,4 +14,4 @@
       etcd_peer: "{{ openshift.common.hostname }}"
     when:
     - etcd_container_version | default('99') is version_compare(etcd_upgrade_version,'<')
-    - openshift.common.is_containerized | bool
+    - openshift_is_containerized | bool

+ 0 - 0
playbooks/openshift-etcd/private/upgrade_main.yml


Some files were not shown because too many files changed in this diff