Explorar o código

Remove openshift.common.{is_atomic|is_containerized}

We set these variables using facts in init, no need
to duplicate the logic all around the codebase.
Michael Gugino hai 7 anos
pai
achega
e6c159afb4
Modificáronse 100 ficheiros con 233 adicións e 291 borrados
  1. 7 8
      playbooks/adhoc/uninstall.yml
  2. 1 1
      playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
  3. 2 2
      playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
  4. 1 1
      playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
  5. 1 1
      playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
  6. 1 1
      playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
  7. 2 2
      playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
  8. 9 9
      playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
  9. 1 1
      playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
  10. 1 1
      playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
  11. 1 1
      playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
  12. 16 11
      playbooks/init/facts.yml
  13. 2 2
      playbooks/openshift-etcd/private/upgrade_image_members.yml
  14. 2 2
      playbooks/openshift-etcd/private/upgrade_rpm_members.yml
  15. 1 1
      playbooks/openshift-etcd/private/upgrade_step.yml
  16. 4 4
      playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
  17. 6 6
      playbooks/openshift-hosted/private/redeploy-router-certificates.yml
  18. 1 1
      playbooks/openshift-master/private/additional_config.yml
  19. 2 2
      playbooks/openshift-master/private/tasks/wire_aggregator.yml
  20. 1 1
      playbooks/openshift-node/private/restart.yml
  21. 1 1
      playbooks/openshift-node/private/setup.yml
  22. 1 1
      roles/calico_master/tasks/main.yml
  23. 1 0
      roles/cockpit-ui/meta/main.yml
  24. 1 1
      roles/cockpit-ui/tasks/main.yml
  25. 2 2
      roles/cockpit/tasks/main.yml
  26. 3 3
      roles/container_runtime/tasks/common/syscontainer_packages.yml
  27. 3 3
      roles/container_runtime/tasks/docker_upgrade_check.yml
  28. 2 2
      roles/container_runtime/tasks/package_docker.yml
  29. 1 1
      roles/container_runtime/tasks/systemcontainer_crio.yml
  30. 1 1
      roles/container_runtime/tasks/systemcontainer_docker.yml
  31. 0 1
      roles/contiv/defaults/main.yml
  32. 1 1
      roles/contiv/tasks/packageManagerInstall.yml
  33. 0 15
      roles/contiv_facts/tasks/main.yml
  34. 1 1
      roles/etcd/defaults/main.yaml
  35. 1 1
      roles/etcd/tasks/auxiliary/drop_etcdctl.yml
  36. 1 1
      roles/etcd/tasks/migration/add_ttls.yml
  37. 1 1
      roles/etcd/tasks/migration/migrate.yml
  38. 2 2
      roles/etcd/tasks/version_detect.yml
  39. 1 1
      roles/flannel/tasks/main.yml
  40. 1 1
      roles/nickhammond.logrotate/tasks/main.yml
  41. 1 1
      roles/nuage_ca/tasks/main.yaml
  42. 3 3
      roles/nuage_common/tasks/main.yml
  43. 9 9
      roles/nuage_master/tasks/main.yaml
  44. 1 1
      roles/nuage_master/tasks/serviceaccount.yml
  45. 4 4
      roles/nuage_node/tasks/main.yaml
  46. 4 4
      roles/openshift_ca/tasks/main.yml
  47. 4 4
      roles/openshift_cli/tasks/main.yml
  48. 2 2
      roles/openshift_etcd_facts/vars/main.yml
  49. 1 1
      roles/openshift_examples/defaults/main.yml
  50. 8 8
      roles/openshift_examples/tasks/main.yml
  51. 1 1
      roles/openshift_excluder/tasks/install.yml
  52. 2 2
      roles/openshift_expand_partition/tasks/main.yml
  53. 2 0
      roles/openshift_facts/defaults/main.yml
  54. 1 33
      roles/openshift_facts/library/openshift_facts.py
  55. 1 1
      roles/openshift_health_checker/openshift_checks/docker_image_availability.py
  56. 2 2
      roles/openshift_health_checker/openshift_checks/etcd_traffic.py
  57. 4 4
      roles/openshift_health_checker/openshift_checks/mixins.py
  58. 14 27
      roles/openshift_health_checker/test/docker_image_availability_test.py
  59. 4 4
      roles/openshift_health_checker/test/docker_storage_test.py
  60. 4 8
      roles/openshift_health_checker/test/etcd_traffic_test.py
  61. 3 3
      roles/openshift_health_checker/test/mixins_test.py
  62. 3 3
      roles/openshift_health_checker/test/ovs_version_test.py
  63. 3 3
      roles/openshift_health_checker/test/package_availability_test.py
  64. 3 3
      roles/openshift_health_checker/test/package_version_test.py
  65. 1 1
      roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
  66. 3 3
      roles/openshift_hosted/tasks/wait_for_pod.yml
  67. 1 1
      roles/openshift_hosted_templates/defaults/main.yml
  68. 1 1
      roles/openshift_hosted_templates/tasks/main.yml
  69. 7 7
      roles/openshift_loadbalancer/tasks/main.yml
  70. 1 1
      roles/openshift_loadbalancer/templates/haproxy.cfg.j2
  71. 1 1
      roles/openshift_logging/tasks/annotate_ops_projects.yaml
  72. 2 2
      roles/openshift_logging/tasks/delete_logging.yaml
  73. 1 1
      roles/openshift_logging/tasks/generate_certs.yaml
  74. 1 1
      roles/openshift_logging/tasks/install_logging.yaml
  75. 1 1
      roles/openshift_logging/tasks/procure_server_certs.yaml
  76. 1 1
      roles/openshift_logging_elasticsearch/tasks/main.yaml
  77. 1 1
      roles/openshift_manage_node/tasks/main.yml
  78. 6 6
      roles/openshift_master/tasks/main.yml
  79. 1 1
      roles/openshift_master/tasks/registry_auth.yml
  80. 4 4
      roles/openshift_master/tasks/set_loopback_context.yml
  81. 2 2
      roles/openshift_master/tasks/systemd_units.yml
  82. 1 1
      roles/openshift_master/tasks/upgrade.yml
  83. 1 1
      roles/openshift_master/templates/atomic-openshift-master.j2
  84. 1 1
      roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
  85. 1 1
      roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
  86. 2 2
      roles/openshift_master_certificates/tasks/main.yml
  87. 1 1
      roles/openshift_metrics/tasks/generate_certificates.yaml
  88. 1 1
      roles/openshift_metrics/tasks/install_cassandra.yaml
  89. 1 1
      roles/openshift_metrics/tasks/install_hawkular.yaml
  90. 1 1
      roles/openshift_metrics/tasks/install_heapster.yaml
  91. 1 1
      roles/openshift_metrics/tasks/install_metrics.yaml
  92. 3 3
      roles/openshift_metrics/tasks/oc_apply.yaml
  93. 1 1
      roles/openshift_metrics/tasks/pre_install.yaml
  94. 1 1
      roles/openshift_metrics/tasks/setup_certificate.yaml
  95. 3 3
      roles/openshift_metrics/tasks/start_metrics.yaml
  96. 3 3
      roles/openshift_metrics/tasks/stop_metrics.yaml
  97. 2 2
      roles/openshift_metrics/tasks/uninstall_hosa.yaml
  98. 2 2
      roles/openshift_metrics/tasks/uninstall_metrics.yaml
  99. 1 1
      roles/openshift_node/handlers/main.yml
  100. 0 0
      roles/openshift_node/tasks/config.yml

+ 7 - 8
playbooks/adhoc/uninstall.yml

@@ -18,9 +18,8 @@
 
   # Since we're not calling openshift_facts we'll do this for now
   - set_fact:
-      is_atomic: "{{ ostree_output.rc == 0 }}"
-  - set_fact:
-      is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
+      openshift_is_atomic: "{{ ostree_output.rc == 0 }}"
+      openshift_is_containerized: "{{ ostree_output.rc == 0 or containerized | default(false) | bool }}"
 
 # Stop services on all hosts prior to removing files.
 - hosts: nodes
@@ -133,7 +132,7 @@
         when: openshift_use_flannel | default(false) | bool
         register: result
         until: result is succeeded
-      when: not is_atomic | bool
+      when: not openshift_is_atomic | bool
 
     - shell: systemctl reset-failed
       changed_when: False
@@ -363,7 +362,7 @@
 
   - name: Remove packages
     package: name={{ item }} state=absent
-    when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
     with_items:
     - atomic-openshift
     - atomic-openshift-clients
@@ -487,14 +486,14 @@
 
   - name: Stop additional atomic services
     service: name={{ item }} state=stopped
-    when: is_containerized | bool
+    when: openshift_is_containerized | bool
     with_items:
     - etcd_container
     failed_when: false
 
   - name: Remove packages
     package: name={{ item }} state=absent
-    when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
     with_items:
     - etcd
     - etcd3
@@ -554,7 +553,7 @@
 
   - name: Remove packages
     package: name={{ item }} state=absent
-    when: not is_atomic | bool and openshift_remove_all | default(True) | bool
+    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
     with_items:
     - haproxy
     register: result

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml

@@ -22,7 +22,7 @@
 
   - name: Create service signer certificate
     command: >
-      {{ openshift.common.client_binary }} adm ca create-signer-cert
+      {{ openshift_client_binary }} adm ca create-signer-cert
       --cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt
       --key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key
       --name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer

+ 2 - 2
playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml

@@ -17,7 +17,7 @@
 
   - fail:
       msg: Cannot upgrade Docker on Atomic operating systems.
-    when: openshift.common.is_atomic | bool
+    when: openshift_is_atomic | bool
 
   - include_role:
       name: container_runtime
@@ -54,7 +54,7 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ openshift_client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
     register: l_docker_upgrade_drain_result

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml

@@ -15,7 +15,7 @@
     - "{{ openshift_service_type }}-master-controllers"
     - "{{ openshift_service_type }}-node"
   failed_when: false
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
 - name: Wait for master API to come back online
   wait_for:

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml

@@ -10,7 +10,7 @@
     - etcd_container
     - openvswitch
   failed_when: false
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
 - name: Check Docker image count
   shell: "docker images -aq | wc -l"

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml

@@ -71,7 +71,7 @@
       local_facts:
         ha: "{{ groups.oo_masters_to_config | length > 1 }}"
 
-  - when: openshift.common.is_containerized | bool
+  - when: openshift_is_containerized | bool
     block:
     - set_fact:
         master_services:

+ 2 - 2
playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml

@@ -15,9 +15,9 @@
     docker pull {{ openshift_cli_image }}:{{ openshift_image_tag }}
   register: pull_result
   changed_when: "'Downloaded newer image' in pull_result.stdout"
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
-- when: not openshift.common.is_containerized | bool
+- when: not openshift_is_containerized | bool
   block:
   - name: Check latest available OpenShift RPM version
     repoquery:

+ 9 - 9
playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml

@@ -25,7 +25,7 @@
   tasks:
   - name: Upgrade all storage
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=* --confirm
     register: l_pb_upgrade_control_plane_pre_upgrade_storage
     when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
@@ -86,7 +86,7 @@
 
   - name: Post master upgrade - Upgrade clusterpolicies storage
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=clusterpolicies --confirm
     register: l_pb_upgrade_control_plane_post_upgrade_storage
     when:
@@ -133,7 +133,7 @@
   tasks:
   - name: Reconcile Cluster Roles
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-roles --additive-only=true --confirm -o name
     register: reconcile_cluster_role_result
     when: openshift_version is version_compare('3.7','<')
@@ -144,7 +144,7 @@
 
   - name: Reconcile Cluster Role Bindings
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-role-bindings
       --exclude-groups=system:authenticated
       --exclude-groups=system:authenticated:oauth
@@ -160,7 +160,7 @@
 
   - name: Reconcile Jenkins Pipeline Role Bindings
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
     run_once: true
     register: reconcile_jenkins_role_binding_result
     changed_when:
@@ -214,7 +214,7 @@
 
   - name: Reconcile Security Context Constraints
     command: >
-      {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
+      {{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
     register: reconcile_scc_result
     changed_when:
     - reconcile_scc_result.stdout != ''
@@ -223,7 +223,7 @@
 
   - name: Migrate storage post policy reconciliation
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=* --confirm
     run_once: true
     register: l_pb_upgrade_control_plane_post_upgrade_storage
@@ -262,7 +262,7 @@
   - openshift_facts
   tasks:
   - include_tasks: docker/tasks/upgrade.yml
-    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift_is_atomic | bool
 
 - name: Drain and upgrade master nodes
   hosts: oo_masters_to_config:&oo_nodes_to_upgrade
@@ -291,7 +291,7 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_control_plane_drain_result
     until: not (l_upgrade_control_plane_drain_result is failed)

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml

@@ -35,7 +35,7 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
     until: not (l_upgrade_nodes_drain_result is failed)

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml

@@ -43,7 +43,7 @@
   tasks:
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }}
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
       --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       --force --delete-local-data --ignore-daemonsets
       --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml

@@ -14,7 +14,7 @@
   # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO
   - name: Confirm OpenShift authorization objects are in sync
     command: >
-      {{ openshift.common.client_binary }} adm migrate authorization
+      {{ openshift_client_binary }} adm migrate authorization
     when:
     - openshift_currently_installed_version is version_compare('3.7','<')
     - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool

+ 16 - 11
playbooks/init/facts.yml

@@ -21,14 +21,10 @@
       path: /run/ostree-booted
     register: ostree_booted
 
-  # Locally setup containerized facts for now
-  - name: initialize_facts set fact l_is_atomic
+  - name: initialize_facts set fact openshift_is_atomic and openshift_is_containerized
     set_fact:
-      l_is_atomic: "{{ ostree_booted.stat.exists }}"
-
-  - name: initialize_facts set fact for containerized and l_is_*_system_container
-    set_fact:
-      l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
+      openshift_is_atomic: "{{ ostree_booted.stat.exists }}"
+      openshift_is_containerized: "{{ ostree_booted.stat.exists or (containerized | default(false) | bool) }}"
 
   # TODO: Should this be moved into health checks??
   # Seems as though any check that happens with a corresponding fail should move into health_checks
@@ -54,7 +50,7 @@
   # Seems as though any check that happens with a corresponding fail should move into health_checks
   # Fail as early as possible if Atomic and old version of Docker
   - when:
-    - l_is_atomic | bool
+    - openshift_is_atomic | bool
     block:
 
     # See https://access.redhat.com/articles/2317361
@@ -73,7 +69,7 @@
         msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
 
   - when:
-    - not l_is_atomic | bool
+    - not openshift_is_atomic | bool
     block:
     - name: Ensure openshift-ansible installer package deps are installed
       package:
@@ -105,7 +101,7 @@
       register: result
       until: result is succeeded
 
-  - name: Gather Cluster facts and set is_containerized if needed
+  - name: Gather Cluster facts
     openshift_facts:
       role: common
       local_facts:
@@ -113,7 +109,6 @@
         deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
         hostname: "{{ openshift_hostname | default(None) }}"
         ip: "{{ openshift_ip | default(None) }}"
-        is_containerized: "{{ l_is_containerized | default(None) }}"
         public_hostname: "{{ openshift_public_hostname | default(None) }}"
         public_ip: "{{ openshift_public_ip | default(None) }}"
         portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
@@ -145,3 +140,13 @@
     set_fact:
       repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
       repoquery_installed: "{{ 'dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins --installed' }}"
+
+- name: Initialize special first-master variables
+  hosts: oo_first_master
+  roles:
+  - role: openshift_facts
+  tasks:
+  - set_fact:
+      # We need to setup openshift_client_binary here for special uses of delegate_to in
+      # later roles and plays.
+      first_master_client_binary: "{{  openshift_client_binary }}"

+ 2 - 2
playbooks/openshift-etcd/private/upgrade_image_members.yml

@@ -1,7 +1,7 @@
 ---
 # INPUT etcd_upgrade_version
 # INPUT etcd_container_version
-# INPUT openshift.common.is_containerized
+# INPUT openshift_is_containerized
 - name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
   hosts: oo_etcd_hosts_to_upgrade
   serial: 1
@@ -14,4 +14,4 @@
       etcd_peer: "{{ openshift.common.hostname }}"
     when:
     - etcd_container_version | default('99') is version_compare(etcd_upgrade_version,'<')
-    - openshift.common.is_containerized | bool
+    - openshift_is_containerized | bool

+ 2 - 2
playbooks/openshift-etcd/private/upgrade_rpm_members.yml

@@ -1,7 +1,7 @@
 ---
 # INPUT etcd_upgrade_version
 # INPUT etcd_rpm_version
-# INPUT openshift.common.is_containerized
+# INPUT openshift_is_containerized
 - name: Upgrade to {{ etcd_upgrade_version }}
   hosts: oo_etcd_hosts_to_upgrade
   serial: 1
@@ -15,4 +15,4 @@
     when:
     - etcd_rpm_version.stdout | default('99') is version_compare(etcd_upgrade_version, '<')
     - ansible_distribution == 'RedHat'
-    - not openshift.common.is_containerized | bool
+    - not openshift_is_containerized | bool

+ 1 - 1
playbooks/openshift-etcd/private/upgrade_step.yml

@@ -61,4 +61,4 @@
       etcd_peer: "{{ openshift.common.hostname }}"
     when:
     - ansible_distribution == 'Fedora'
-    - not openshift.common.is_containerized | bool
+    - not openshift_is_containerized | bool

+ 4 - 4
playbooks/openshift-hosted/private/redeploy-registry-certificates.yml

@@ -17,7 +17,7 @@
 
   - name: Determine if docker-registry exists
     command: >
-      {{ openshift.common.client_binary }} get dc/docker-registry -o json
+      {{ openshift_client_binary }} get dc/docker-registry -o json
       --config={{ mktemp.stdout }}/admin.kubeconfig
       -n default
     register: l_docker_registry_dc
@@ -38,7 +38,7 @@
   # Replace dc/docker-registry environment variable certificate data if set.
   - name: Update docker-registry environment variables
     shell: >
-      {{ openshift.common.client_binary }} env dc/docker-registry
+      {{ openshift_client_binary }} env dc/docker-registry
       OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
       OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-registry.crt)"
       OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-registry.key)"
@@ -62,7 +62,7 @@
 
     - name: Generate registry certificate
       command: >
-        {{ openshift.common.client_binary }} adm ca create-server-cert
+        {{ openshift_client_binary }} adm ca create-server-cert
         --signer-cert={{ openshift.common.config_base }}/master/ca.crt
         --signer-key={{ openshift.common.config_base }}/master/ca.key
         --signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
@@ -88,7 +88,7 @@
 
   - name: Redeploy docker registry
     command: >
-      {{ openshift.common.client_binary }} deploy dc/docker-registry
+      {{ openshift_client_binary }} deploy dc/docker-registry
       --latest
       --config={{ mktemp.stdout }}/admin.kubeconfig
       -n default

+ 6 - 6
playbooks/openshift-hosted/private/redeploy-router-certificates.yml

@@ -17,7 +17,7 @@
 
   - name: Determine if router exists
     command: >
-      {{ openshift.common.client_binary }} get dc/router -o json
+      {{ openshift_client_binary }} get dc/router -o json
       --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
       -n default
     register: l_router_dc
@@ -26,7 +26,7 @@
 
   - name: Determine if router service exists
     command: >
-      {{ openshift.common.client_binary }} get svc/router -o json
+      {{ openshift_client_binary }} get svc/router -o json
       --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
       -n default
     register: l_router_svc
@@ -52,7 +52,7 @@
 
   - name: Update router environment variables
     shell: >
-      {{ openshift.common.client_binary }} env dc/router
+      {{ openshift_client_binary }} env dc/router
       OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
       OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-router.crt)"
       OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-router.key)"
@@ -78,7 +78,7 @@
 
     - name: Remove router service annotations
       command: >
-        {{ openshift.common.client_binary }} annotate service/router
+        {{ openshift_client_binary }} annotate service/router
         service.alpha.openshift.io/serving-cert-secret-name-
         service.alpha.openshift.io/serving-cert-signed-by-
         --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
@@ -86,7 +86,7 @@
 
     - name: Add serving-cert-secret annotation to router service
       command: >
-        {{ openshift.common.client_binary }} annotate service/router
+        {{ openshift_client_binary }} annotate service/router
         service.alpha.openshift.io/serving-cert-secret-name=router-certs
         --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
         -n default
@@ -129,7 +129,7 @@
 
   - name: Redeploy router
     command: >
-      {{ openshift.common.client_binary }} deploy dc/router
+      {{ openshift_client_binary }} deploy dc/router
       --latest
       --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
       -n default

+ 1 - 1
playbooks/openshift-master/private/additional_config.yml

@@ -30,7 +30,7 @@
     when: openshift_use_manageiq | default(true) | bool
   - role: cockpit
     when:
-    - not openshift.common.is_atomic | bool
+    - not openshift_is_atomic | bool
     - deployment_type == 'openshift-enterprise'
     - osm_use_cockpit is undefined or osm_use_cockpit | bool
     - openshift.common.deployment_subtype != 'registry'

+ 2 - 2
playbooks/openshift-master/private/tasks/wire_aggregator.yml

@@ -21,7 +21,7 @@
 # TODO: this currently has a bug where hostnames are required
 - name: Creating First Master Aggregator signer certs
   command: >
-    {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert
+    {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm ca create-signer-cert
     --cert=/etc/origin/master/front-proxy-ca.crt
     --key=/etc/origin/master/front-proxy-ca.key
     --serial=/etc/origin/master/ca.serial.txt
@@ -84,7 +84,7 @@
 - block:
   - name: Create first master api-client config for Aggregator
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm create-api-client-config
       --certificate-authority=/etc/origin/master/front-proxy-ca.crt
       --signer-cert=/etc/origin/master/front-proxy-ca.crt
       --signer-key=/etc/origin/master/front-proxy-ca.key

+ 1 - 1
playbooks/openshift-node/private/restart.yml

@@ -28,7 +28,7 @@
     - "{{ openshift_service_type }}-master-controllers"
     - "{{ openshift_service_type }}-node"
     failed_when: false
-    when: openshift.common.is_containerized | bool
+    when: openshift_is_containerized | bool
 
   - name: Wait for master API to come back online
     wait_for:

+ 1 - 1
playbooks/openshift-node/private/setup.yml

@@ -21,6 +21,6 @@
     when:
     - hostvars[item].openshift is defined
     - hostvars[item].openshift.common is defined
-    - hostvars[item].openshift.common.is_containerized | bool
+    - hostvars[item].openshift_is_containerized | bool
     - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
     changed_when: False

+ 1 - 1
roles/calico_master/tasks/main.yml

@@ -19,7 +19,7 @@
 
 - name: Calico Master | Launch Calico Policy Controller
   command: >
-    {{ openshift.common.client_binary }} create
+    {{ openshift_client_binary }} create
     -f {{ mktemp.stdout }}/calico-policy-controller.yml
     --config={{ openshift.common.config_base }}/master/admin.kubeconfig
   register: calico_create_output

+ 1 - 0
roles/cockpit-ui/meta/main.yml

@@ -14,3 +14,4 @@ galaxy_info:
 dependencies:
 - role: lib_utils
 - role: lib_openshift
+- role: openshift_facts

+ 1 - 1
roles/cockpit-ui/tasks/main.yml

@@ -39,7 +39,7 @@
 
   - name: Deploy registry-console
     command: >
-      {{ openshift.common.client_binary }} new-app --template=registry-console
+      {{ openshift_client_binary }} new-app --template=registry-console
       {% if openshift_cockpit_deployer_prefix is defined  %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
       {% if openshift_cockpit_deployer_basename is defined  %}-p IMAGE_BASENAME="{{ openshift_cockpit_deployer_basename }}"{% endif %}
       {% if openshift_cockpit_deployer_version is defined  %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}

+ 2 - 2
roles/cockpit/tasks/main.yml

@@ -10,7 +10,7 @@
     - cockpit-bridge
     - cockpit-docker
     - "{{ cockpit_plugins }}"
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
   register: result
   until: result is succeeded
 
@@ -19,4 +19,4 @@
     name: cockpit.socket
     enabled: true
     state: started
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool

+ 3 - 3
roles/container_runtime/tasks/common/syscontainer_packages.yml

@@ -4,7 +4,7 @@
   package:
     name: container-selinux
     state: present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 
@@ -13,7 +13,7 @@
   package:
     name: atomic
     state: present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 
@@ -23,6 +23,6 @@
   package:
     name: runc
     state: present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded

+ 3 - 3
roles/container_runtime/tasks/docker_upgrade_check.yml

@@ -61,14 +61,14 @@
 - name: Determine available Docker
   shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
   register: g_atomic_docker_version_result
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - set_fact:
     l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - fail:
     msg: This playbook requires access to Docker 1.12 or later
   when:
-  - openshift.common.is_atomic | bool
+  - openshift_is_atomic | bool
   - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<')

+ 2 - 2
roles/container_runtime/tasks/package_docker.yml

@@ -3,7 +3,7 @@
 
 - name: Get current installed Docker version
   command: "{{ repoquery_installed }} --qf '%{version}' docker"
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: curr_docker_version
   retries: 4
   until: curr_docker_version is succeeded
@@ -20,7 +20,7 @@
     name: "docker{{ '-' + docker_version if docker_version is defined else '' }}"
     state: present
   when:
-  - not (openshift.common.is_atomic | bool)
+  - not (openshift_is_atomic | bool)
   - not (curr_docker_version is skipped)
   - not (curr_docker_version.stdout != '')
   register: result

+ 1 - 1
roles/container_runtime/tasks/systemcontainer_crio.yml

@@ -3,7 +3,7 @@
 - name: Check we are not using node as a Docker container with CRI-O
   fail: msg='Cannot use CRI-O with node configured as a Docker container'
   when:
-    - openshift.common.is_containerized | bool
+    - openshift_is_containerized | bool
     - not l_is_node_system_container | bool
 
 - include_tasks: common/pre.yml

+ 1 - 1
roles/container_runtime/tasks/systemcontainer_docker.yml

@@ -18,7 +18,7 @@
 # Make sure Docker is installed so we are able to use the client
 - name: Install Docker so we can use the client
   package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 

+ 0 - 1
roles/contiv/defaults/main.yml

@@ -101,7 +101,6 @@ apic_epg_bridge_domain: not_specified
 apic_configure_default_policy: false
 apic_default_external_contract: "uni/tn-common/brc-default"
 apic_default_app_profile: "contiv-infra-app-profile"
-is_atomic: False
 kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
 master_name: "{{ groups['masters'][0] }}"
 contiv_etcd_port: 22379

+ 1 - 1
roles/contiv/tasks/packageManagerInstall.yml

@@ -5,7 +5,7 @@
 
 - include_tasks: pkgMgrInstallers/centos-install.yml
   when: (ansible_os_family == "RedHat") and
-        not is_atomic
+        not openshift_is_atomic
 
 - name: Package Manager | Set fact saying we did CentOS package install
   set_fact:

+ 0 - 15
roles/contiv_facts/tasks/main.yml

@@ -1,19 +1,4 @@
 ---
-- name: Determine if Atomic
-  stat: path=/run/ostree-booted
-  register: s
-  changed_when: false
-  check_mode: no
-
-- name: Init the is_atomic fact
-  set_fact:
-    is_atomic: false
-
-- name: Set the is_atomic fact
-  set_fact:
-    is_atomic: true
-  when: s.stat.exists
-
 - name: Determine if CoreOS
   raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'"
   register: distro

+ 1 - 1
roles/etcd/defaults/main.yaml

@@ -5,7 +5,7 @@ r_etcd_common_backup_sufix_name: ''
 l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
 
 # runc, docker, host
-r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
+r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if openshift_is_containerized else 'host' }}"
 r_etcd_common_embedded_etcd: false
 
 osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd'

+ 1 - 1
roles/etcd/tasks/auxiliary/drop_etcdctl.yml

@@ -1,7 +1,7 @@
 ---
 - name: Install etcd for etcdctl
   package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 

+ 1 - 1
roles/etcd/tasks/migration/add_ttls.yml

@@ -11,7 +11,7 @@
 
 - name: Re-introduce leases (as a replacement for key TTLs)
   command: >
-    {{ openshift.common.client_binary }} adm migrate etcd-ttl \
+    {{ openshift_client_binary }} adm migrate etcd-ttl \
     --cert {{ r_etcd_common_master_peer_cert_file }} \
     --key {{ r_etcd_common_master_peer_key_file }} \
     --cacert {{ r_etcd_common_master_peer_ca_file }} \

+ 1 - 1
roles/etcd/tasks/migration/migrate.yml

@@ -1,7 +1,7 @@
 ---
 # Should this be run in a serial manner?
 - set_fact:
-    l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
+    l_etcd_service: "{{ 'etcd_container' if openshift_is_containerized else 'etcd' }}"
 
 - name: Migrate etcd data
   command: >

+ 2 - 2
roles/etcd/tasks/version_detect.yml

@@ -12,7 +12,7 @@
   - debug:
       msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
   when:
-  - not openshift.common.is_containerized | bool
+  - not openshift_is_containerized | bool
 
 - block:
   - name: Record containerized etcd version (docker)
@@ -52,4 +52,4 @@
   - debug:
       msg: "Etcd containerized version {{ etcd_container_version }} detected"
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool

+ 1 - 1
roles/flannel/tasks/main.yml

@@ -2,7 +2,7 @@
 - name: Install flannel
   become: yes
   package: name=flannel state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 

+ 1 - 1
roles/nickhammond.logrotate/tasks/main.yml

@@ -1,7 +1,7 @@
 ---
 - name: nickhammond.logrotate | Install logrotate
   package: name=logrotate state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 

+ 1 - 1
roles/nuage_ca/tasks/main.yaml

@@ -1,7 +1,7 @@
 ---
 - name: Install openssl
   package: name=openssl state=present
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
 

+ 3 - 3
roles/nuage_common/tasks/main.yml

@@ -2,17 +2,17 @@
 - name: Set the Nuage plugin openshift directory fact to handle Atomic host install
   set_fact:
     nuage_node_plugin_dir: /var/usr/share/vsp-openshift
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage CNI network config directory fact to handle Atomic host install
   set_fact:
     nuage_node_cni_netconf_dir: /var/etc/cni/net.d/
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage CNI binary directory fact to handle Atomic host install
   set_fact:
     nuage_node_cni_bin_dir: /var/opt/cni/bin/
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Assure CNI plugin config dir exists before daemon set install
   become: yes

+ 9 - 9
roles/nuage_master/tasks/main.yaml

@@ -5,22 +5,22 @@
 - name: Set the Nuage certificate directory fact for Atomic hosts
   set_fact:
     cert_output_dir: /var/usr/share/nuage-openshift-monitor
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage kubeconfig file path fact for Atomic hosts
   set_fact:
     kube_config: /var/usr/share/nuage-openshift-monitor/nuage.kubeconfig
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage monitor yaml location fact for Atomic hosts
   set_fact:
     kubemon_yaml: /var/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage monitor certs location fact for Atomic hosts
   set_fact:
     nuage_master_crt_dir: /var/usr/share/nuage-openshift-monitor/
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage master config directory for daemon sets install
   set_fact:
@@ -35,27 +35,27 @@
 - name: Set the Nuage CNI plugin binary directory for daemon sets install
   set_fact:
     nuage_cni_bin_dsets_mount_dir: /var/opt/cni/bin
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Create directory /usr/share/nuage-openshift-monitor
   become: yes
   file: path=/usr/share/nuage-openshift-monitor state=directory
-  when: not openshift.common.is_atomic | bool
+  when: not openshift_is_atomic | bool
 
 - name: Create directory /var/usr/share/nuage-openshift-monitor
   become: yes
   file: path=/var/usr/share/nuage-openshift-monitor state=directory
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Create directory /var/usr/bin for monitor binary on atomic
   become: yes
   file: path=/var/usr/bin state=directory
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Create CNI bin directory /var/opt/cni/bin
   become: yes
   file: path=/var/opt/cni/bin state=directory
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Create the log directory
   become: yes

+ 1 - 1
roles/nuage_master/tasks/serviceaccount.yml

@@ -19,7 +19,7 @@
 
 - name: Generate the node client config
   command: >
-    {{ openshift.common.client_binary }} adm create-api-client-config
+    {{ openshift_client_binary }} adm create-api-client-config
       --certificate-authority={{ openshift_master_ca_cert }}
       --client-dir={{ cert_output_dir }}
       --master={{ openshift.master.api_url }}

+ 4 - 4
roles/nuage_node/tasks/main.yaml

@@ -2,17 +2,17 @@
 - name: Set the Nuage plugin openshift directory fact for Atomic hosts
   set_fact:
     vsp_openshift_dir: /var/usr/share/vsp-openshift
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage CNI binary directory fact for Atomic hosts
   set_fact:
     cni_bin_dir: /var/opt/cni/bin/
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Set the Nuage plugin certs directory fact for Atomic hosts
   set_fact:
     nuage_plugin_crt_dir: /var/usr/share/vsp-openshift
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Assure CNI conf dir exists
   become: yes
@@ -36,7 +36,7 @@
 - name: Add additional Docker mounts for Nuage for atomic hosts
   become: yes
   lineinfile: dest="{{ openshift_atomic_node_config_file }}" line="{{ nuage_atomic_docker_additional_mounts }}"
-  when: openshift.common.is_atomic | bool
+  when: openshift_is_atomic | bool
 
 - name: Restart node services
   command: /bin/true

+ 4 - 4
roles/openshift_ca/tasks/main.yml

@@ -11,7 +11,7 @@
   package:
     name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
     state: present
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
   register: install_result
   until: install_result is succeeded
   delegate_to: "{{ openshift_ca_host }}"
@@ -87,7 +87,7 @@
 # This should NOT replace the CA due to --overwrite=false when a CA already exists.
 - name: Create the master certificates if they do not already exist
   command: >
-    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-master-certs
+    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-master-certs
     {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
     --certificate-authority {{ named_ca_certificate }}
     {% endfor %}
@@ -137,7 +137,7 @@
 
 - name: Test local loopback context
   command: >
-    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} config view
+    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} config view
     --config={{ openshift_master_loopback_config }}
   changed_when: false
   register: loopback_config
@@ -154,7 +154,7 @@
     register: openshift_ca_loopback_tmpdir
   - name: Generate the loopback master client config
     command: >
-      {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+      {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config
         --certificate-authority={{ openshift_ca_cert }}
         {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
         --certificate-authority {{ named_ca_certificate }}

+ 4 - 4
roles/openshift_cli/tasks/main.yml

@@ -1,7 +1,7 @@
 ---
 - name: Install clients
   package: name={{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }} state=present
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
   register: result
   until: result is succeeded
 
@@ -18,7 +18,7 @@
       tag: "{{ openshift_image_tag }}"
       backend: "docker"
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - not l_use_cli_atomic_image | bool
 
 - block:
@@ -34,7 +34,7 @@
       tag: "{{ openshift_image_tag }}"
       backend: "atomic"
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - l_use_cli_atomic_image | bool
 
 - name: Reload facts to pick up installed OpenShift version
@@ -42,6 +42,6 @@
 
 - name: Install bash completion for oc tools
   package: name=bash-completion state=present
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
   register: result
   until: result is succeeded

+ 2 - 2
roles/openshift_etcd_facts/vars/main.yml

@@ -1,6 +1,6 @@
 ---
-etcd_is_containerized: "{{ openshift.common.is_containerized }}"
-etcd_is_atomic: "{{ openshift.common.is_atomic }}"
+etcd_is_containerized: "{{ openshift_is_containerized }}"
+etcd_is_atomic: "{{ openshift_is_atomic }}"
 etcd_hostname: "{{ openshift.common.hostname }}"
 etcd_ip: "{{ openshift.common.ip }}"
 etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}"

+ 1 - 1
roles/openshift_examples/defaults/main.yml

@@ -8,7 +8,7 @@ openshift_examples_load_quickstarts: true
 
 content_version: "{{ openshift.common.examples_content_version }}"
 
-examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples"
+examples_base: "{{ openshift.common.config_base if openshift_is_containerized | bool else '/usr/share/openshift' }}/examples"
 image_streams_base: "{{ examples_base }}/image-streams"
 centos_image_streams:
   - "{{ image_streams_base }}/image-streams-centos7.json"

+ 8 - 8
roles/openshift_examples/tasks/main.yml

@@ -53,7 +53,7 @@
 # RHEL and Centos image streams are mutually exclusive
 - name: Import RHEL streams
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
+    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
   when: openshift_examples_load_rhel | bool
   with_items:
     - "{{ rhel_image_streams }}"
@@ -63,7 +63,7 @@
 
 - name: Import Centos Image streams
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
+    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
   when: openshift_examples_load_centos | bool
   with_items:
     - "{{ centos_image_streams }}"
@@ -73,7 +73,7 @@
 
 - name: Import db templates
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }}
+    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }}
   when: openshift_examples_load_db_templates | bool
   register: oex_import_db_templates
   failed_when: "'already exists' not in oex_import_db_templates.stderr and oex_import_db_templates.rc != 0"
@@ -90,7 +90,7 @@
     - "{{ quickstarts_base }}/django.json"
 
 - name: Remove defunct quickstart templates from openshift namespace
-  command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
+  command: "{{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
   with_items:
     - nodejs-example
     - cakephp-example
@@ -102,7 +102,7 @@
 
 - name: Import quickstart-templates
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }}
+    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }}
   when: openshift_examples_load_quickstarts | bool
   register: oex_import_quickstarts
   failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0"
@@ -116,7 +116,7 @@
     - "{{ xpaas_templates_base }}/sso70-basic.json"
 
 - name: Remove old xPaas templates from openshift namespace
-  command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
+  command: "{{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
   with_items:
     - sso70-basic
   register: oex_delete_old_xpaas_templates
@@ -125,7 +125,7 @@
 
 - name: Import xPaas image streams
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }}
+    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }}
   when: openshift_examples_load_xpaas | bool
   register: oex_import_xpaas_streams
   failed_when: "'already exists' not in oex_import_xpaas_streams.stderr and oex_import_xpaas_streams.rc != 0"
@@ -133,7 +133,7 @@
 
 - name: Import xPaas templates
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }}
+    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }}
   when: openshift_examples_load_xpaas | bool
   register: oex_import_xpaas_templates
   failed_when: "'already exists' not in oex_import_xpaas_templates.stderr and oex_import_xpaas_templates.rc != 0"

+ 1 - 1
roles/openshift_excluder/tasks/install.yml

@@ -1,7 +1,7 @@
 ---
 
 - when:
-  - not openshift.common.is_atomic | bool
+  - not openshift_is_atomic | bool
   - r_openshift_excluder_install_ran is not defined
 
   block:

+ 2 - 2
roles/openshift_expand_partition/tasks/main.yml

@@ -1,7 +1,7 @@
 ---
 - name: Ensure growpart is installed
   package: name=cloud-utils-growpart state=present
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
   register: result
   until: result is succeeded
 
@@ -10,7 +10,7 @@
   register: has_growpart
   failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
   changed_when: false
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
 - name: Grow the partitions
   command: "growpart {{oep_drive}} {{oep_partition}}"

+ 2 - 0
roles/openshift_facts/defaults/main.yml

@@ -1,4 +1,6 @@
 ---
+openshift_client_binary: "{{ openshift_is_containerized | ternary('/usr/local/bin/oc', 'oc') }}"
+
 openshift_cli_image_dict:
   origin: 'openshift/origin'
   openshift-enterprise: 'openshift3/ose'

+ 1 - 33
roles/openshift_facts/library/openshift_facts.py

@@ -887,7 +887,7 @@ def get_openshift_version(facts):
     if os.path.isfile('/usr/bin/openshift'):
         _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])  # noqa: F405
         version = parse_openshift_version(output)
-    elif 'common' in facts and 'is_containerized' in facts['common']:
+    else:
         version = get_container_openshift_version(facts)
 
     # Handle containerized masters that have not yet been configured as a node.
@@ -1278,36 +1278,7 @@ def set_container_facts_if_unset(facts):
             dict: the facts dict updated with the generated containerization
             facts
     """
-    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
-
-    if 'is_containerized' not in facts['common']:
-        facts['common']['is_containerized'] = facts['common']['is_atomic']
-
-    if safe_get_bool(facts['common']['is_containerized']):
-        facts['common']['client_binary'] = '/usr/local/bin/oc'
-
-    return facts
 
-
-def set_installed_variant_rpm_facts(facts):
-    """ Set RPM facts of installed variant
-        Args:
-            facts (dict): existing facts
-        Returns:
-            dict: the facts dict updated with installed_variant_rpms
-                          """
-    installed_rpms = []
-    for base_rpm in ['openshift', 'atomic-openshift', 'origin']:
-        optional_rpms = ['master', 'node', 'clients', 'sdn-ovs']
-        variant_rpms = [base_rpm] + \
-                       ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
-                       ['tuned-profiles-%s-node' % base_rpm]
-        for rpm in variant_rpms:
-            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])  # noqa: F405
-            if exit_code == 0:
-                installed_rpms.append(rpm)
-
-    facts['common']['installed_variant_rpms'] = installed_rpms
     return facts
 
 
@@ -1430,8 +1401,6 @@ class OpenShiftFacts(object):
         facts = set_proxy_facts(facts)
         facts = set_builddefaults_facts(facts)
         facts = set_buildoverrides_facts(facts)
-        if not safe_get_bool(facts['common']['is_containerized']):
-            facts = set_installed_variant_rpm_facts(facts)
         facts = set_nodename(facts)
         return dict(openshift=facts)
 
@@ -1459,7 +1428,6 @@ class OpenShiftFacts(object):
                                   hostname=hostname,
                                   public_hostname=hostname,
                                   portal_net='172.30.0.0/16',
-                                  client_binary='oc',
                                   dns_domain='cluster.local',
                                   config_base='/etc/origin')
 

+ 1 - 1
roles/openshift_health_checker/openshift_checks/docker_image_availability.py

@@ -160,7 +160,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
                 required.add(self._registry_console_image(image_tag, image_info))
 
         # images for containerized components
-        if self.get_var("openshift", "common", "is_containerized"):
+        if self.get_var("openshift_is_containerized"):
             components = set()
             if 'oo_nodes_to_config' in host_groups:
                 components.update(["node", "openvswitch"])

+ 2 - 2
roles/openshift_health_checker/openshift_checks/etcd_traffic.py

@@ -20,8 +20,8 @@ class EtcdTraffic(OpenShiftCheck):
         return super(EtcdTraffic, self).is_active() and valid_group_names and valid_version
 
     def run(self):
-        is_containerized = self.get_var("openshift", "common", "is_containerized")
-        unit = "etcd_container" if is_containerized else "etcd"
+        openshift_is_containerized = self.get_var("openshift_is_containerized")
+        unit = "etcd_container" if openshift_is_containerized else "etcd"
 
         log_matchers = [{
             "start_regexp": r"Starting Etcd Server",

+ 4 - 4
roles/openshift_health_checker/openshift_checks/mixins.py

@@ -10,8 +10,8 @@ class NotContainerizedMixin(object):
 
     def is_active(self):
         """Only run on non-containerized hosts."""
-        is_containerized = self.get_var("openshift", "common", "is_containerized")
-        return super(NotContainerizedMixin, self).is_active() and not is_containerized
+        openshift_is_containerized = self.get_var("openshift_is_containerized")
+        return super(NotContainerizedMixin, self).is_active() and not openshift_is_containerized
 
 
 class DockerHostMixin(object):
@@ -23,7 +23,7 @@ class DockerHostMixin(object):
         """Only run on hosts that depend on Docker."""
         group_names = set(self.get_var("group_names", default=[]))
         needs_docker = set(["oo_nodes_to_config"])
-        if self.get_var("openshift.common.is_containerized"):
+        if self.get_var("openshift_is_containerized"):
             needs_docker.update(["oo_masters_to_config", "oo_etcd_to_config"])
         return super(DockerHostMixin, self).is_active() and bool(group_names.intersection(needs_docker))
 
@@ -33,7 +33,7 @@ class DockerHostMixin(object):
         (which would not be able to install but should already have them).
         Returns: msg, failed
         """
-        if self.get_var("openshift", "common", "is_atomic"):
+        if self.get_var("openshift_is_atomic"):
             return "", False
 
         # NOTE: we would use the "package" module but it's actually an action plugin

+ 14 - 27
roles/openshift_health_checker/test/docker_image_availability_test.py

@@ -6,13 +6,8 @@ from openshift_checks.docker_image_availability import DockerImageAvailability,
 @pytest.fixture()
 def task_vars():
     return dict(
-        openshift=dict(
-            common=dict(
-                is_containerized=False,
-                is_atomic=False,
-            ),
-            docker=dict(),
-        ),
+        openshift_is_atomic=False,
+        openshift_is_containerized=False,
         openshift_service_type='origin',
         openshift_deployment_type='origin',
         openshift_image_tag='',
@@ -20,7 +15,7 @@ def task_vars():
     )
 
 
-@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
+@pytest.mark.parametrize('deployment_type, openshift_is_containerized, group_names, expect_active', [
     ("invalid", True, [], False),
     ("", True, [], False),
     ("origin", False, [], False),
@@ -30,20 +25,20 @@ def task_vars():
     ("origin", True, ["nfs"], False),
     ("openshift-enterprise", True, ["lb"], False),
 ])
-def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
+def test_is_active(task_vars, deployment_type, openshift_is_containerized, group_names, expect_active):
     task_vars['openshift_deployment_type'] = deployment_type
-    task_vars['openshift']['common']['is_containerized'] = is_containerized
+    task_vars['openshift_is_containerized'] = openshift_is_containerized
     task_vars['group_names'] = group_names
     assert DockerImageAvailability(None, task_vars).is_active() == expect_active
 
 
-@pytest.mark.parametrize("is_containerized,is_atomic", [
+@pytest.mark.parametrize("openshift_is_containerized,openshift_is_atomic", [
     (True, True),
     (False, False),
     (True, False),
     (False, True),
 ])
-def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
+def test_all_images_available_locally(task_vars, openshift_is_containerized, openshift_is_atomic):
     def execute_module(module_name, module_args, *_):
         if module_name == "yum":
             return {}
@@ -55,8 +50,8 @@ def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
             'images': [module_args['name']],
         }
 
-    task_vars['openshift']['common']['is_containerized'] = is_containerized
-    task_vars['openshift']['common']['is_atomic'] = is_atomic
+    task_vars['openshift_is_containerized'] = openshift_is_containerized
+    task_vars['openshift_is_atomic'] = openshift_is_atomic
     result = DockerImageAvailability(execute_module, task_vars).run()
 
     assert not result.get('failed', False)
@@ -172,7 +167,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
     assert expect_registries_reached == check.reachable_registries
 
 
-@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
+@pytest.mark.parametrize("deployment_type, openshift_is_containerized, groups, oreg_url, expected", [
     (  # standard set of stuff required on nodes
         "origin", False, ['oo_nodes_to_config'], "",
         set([
@@ -232,14 +227,10 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
     ),
 
 ])
-def test_required_images(deployment_type, is_containerized, groups, oreg_url, expected):
+def test_required_images(deployment_type, openshift_is_containerized, groups, oreg_url, expected):
     task_vars = dict(
-        openshift=dict(
-            common=dict(
-                is_containerized=is_containerized,
-                is_atomic=False,
-            ),
-        ),
+        openshift_is_containerized=openshift_is_containerized,
+        openshift_is_atomic=False,
         openshift_deployment_type=deployment_type,
         group_names=groups,
         oreg_url=oreg_url,
@@ -287,11 +278,7 @@ def test_registry_console_image(task_vars, expected):
 
 def test_containerized_etcd():
     task_vars = dict(
-        openshift=dict(
-            common=dict(
-                is_containerized=True,
-            ),
-        ),
+        openshift_is_containerized=True,
         openshift_deployment_type="origin",
         group_names=['oo_etcd_to_config'],
     )

+ 4 - 4
roles/openshift_health_checker/test/docker_storage_test.py

@@ -4,21 +4,21 @@ from openshift_checks import OpenShiftCheckException
 from openshift_checks.docker_storage import DockerStorage
 
 
-@pytest.mark.parametrize('is_containerized, group_names, is_active', [
+@pytest.mark.parametrize('openshift_is_containerized, group_names, is_active', [
     (False, ["oo_masters_to_config", "oo_etcd_to_config"], False),
     (False, ["oo_masters_to_config", "oo_nodes_to_config"], True),
     (True, ["oo_etcd_to_config"], True),
 ])
-def test_is_active(is_containerized, group_names, is_active):
+def test_is_active(openshift_is_containerized, group_names, is_active):
     task_vars = dict(
-        openshift=dict(common=dict(is_containerized=is_containerized)),
+        openshift_is_containerized=openshift_is_containerized,
         group_names=group_names,
     )
     assert DockerStorage(None, task_vars).is_active() == is_active
 
 
 def non_atomic_task_vars():
-    return {"openshift": {"common": {"is_atomic": False}}}
+    return {"openshift_is_atomic": False}
 
 
 @pytest.mark.parametrize('docker_info, failed, expect_msg', [

+ 4 - 8
roles/openshift_health_checker/test/etcd_traffic_test.py

@@ -36,9 +36,7 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
 
     task_vars = dict(
         group_names=group_names,
-        openshift=dict(
-            common=dict(is_containerized=False),
-        ),
+        openshift_is_containerized=False,
         openshift_service_type="origin"
     )
 
@@ -50,15 +48,13 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
     assert result.get("failed", False) == failed
 
 
-@pytest.mark.parametrize('is_containerized,expected_unit_value', [
+@pytest.mark.parametrize('openshift_is_containerized,expected_unit_value', [
     (False, "etcd"),
     (True, "etcd_container"),
 ])
-def test_systemd_unit_matches_deployment_type(is_containerized, expected_unit_value):
+def test_systemd_unit_matches_deployment_type(openshift_is_containerized, expected_unit_value):
     task_vars = dict(
-        openshift=dict(
-            common=dict(is_containerized=is_containerized),
-        )
+        openshift_is_containerized=openshift_is_containerized
     )
 
     def execute_module(module_name, args, *_):

+ 3 - 3
roles/openshift_health_checker/test/mixins_test.py

@@ -10,8 +10,8 @@ class NotContainerizedCheck(NotContainerizedMixin, OpenShiftCheck):
 
 
 @pytest.mark.parametrize('task_vars,expected', [
-    (dict(openshift=dict(common=dict(is_containerized=False))), True),
-    (dict(openshift=dict(common=dict(is_containerized=True))), False),
+    (dict(openshift_is_containerized=False), True),
+    (dict(openshift_is_containerized=True), False),
 ])
 def test_is_active(task_vars, expected):
     assert NotContainerizedCheck(None, task_vars).is_active() == expected
@@ -20,4 +20,4 @@ def test_is_active(task_vars, expected):
 def test_is_active_missing_task_vars():
     with pytest.raises(OpenShiftCheckException) as excinfo:
         NotContainerizedCheck().is_active()
-    assert 'is_containerized' in str(excinfo.value)
+    assert 'openshift_is_containerized' in str(excinfo.value)

+ 3 - 3
roles/openshift_health_checker/test/ovs_version_test.py

@@ -70,7 +70,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
     assert result is return_value
 
 
-@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+@pytest.mark.parametrize('group_names,openshift_is_containerized,is_active', [
     (['oo_masters_to_config'], False, True),
     # ensure check is skipped on containerized installs
     (['oo_masters_to_config'], True, False),
@@ -82,9 +82,9 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
     (['lb'], False, False),
     (['nfs'], False, False),
 ])
-def test_ovs_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+def test_ovs_version_skip_when_not_master_nor_node(group_names, openshift_is_containerized, is_active):
     task_vars = dict(
         group_names=group_names,
-        openshift=dict(common=dict(is_containerized=is_containerized)),
+        openshift_is_containerized=openshift_is_containerized,
     )
     assert OvsVersion(None, task_vars).is_active() == is_active

+ 3 - 3
roles/openshift_health_checker/test/package_availability_test.py

@@ -3,16 +3,16 @@ import pytest
 from openshift_checks.package_availability import PackageAvailability
 
 
-@pytest.mark.parametrize('pkg_mgr,is_containerized,is_active', [
+@pytest.mark.parametrize('pkg_mgr,openshift_is_containerized,is_active', [
     ('yum', False, True),
     ('yum', True, False),
     ('dnf', True, False),
     ('dnf', False, False),
 ])
-def test_is_active(pkg_mgr, is_containerized, is_active):
+def test_is_active(pkg_mgr, openshift_is_containerized, is_active):
     task_vars = dict(
         ansible_pkg_mgr=pkg_mgr,
-        openshift=dict(common=dict(is_containerized=is_containerized)),
+        openshift_is_containerized=openshift_is_containerized,
     )
     assert PackageAvailability(None, task_vars).is_active() == is_active
 

+ 3 - 3
roles/openshift_health_checker/test/package_version_test.py

@@ -99,7 +99,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
     assert result == return_value
 
 
-@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+@pytest.mark.parametrize('group_names,openshift_is_containerized,is_active', [
     (['oo_masters_to_config'], False, True),
     # ensure check is skipped on containerized installs
     (['oo_masters_to_config'], True, False),
@@ -111,9 +111,9 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
     (['lb'], False, False),
     (['nfs'], False, False),
 ])
-def test_package_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+def test_package_version_skip_when_not_master_nor_node(group_names, openshift_is_containerized, is_active):
     task_vars = dict(
         group_names=group_names,
-        openshift=dict(common=dict(is_containerized=is_containerized)),
+        openshift_is_containerized=openshift_is_containerized,
     )
     assert PackageVersion(None, task_vars).is_active() == is_active

+ 1 - 1
roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml

@@ -10,7 +10,7 @@
     dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
 
 - name: Create GlusterFS registry service and endpoint
-  command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}"
+  command: "{{ openshift_client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}"
   with_items:
   - "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
   - "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml"

+ 3 - 3
roles/openshift_hosted/tasks/wait_for_pod.yml

@@ -3,7 +3,7 @@
   block:
   - name: Ensure OpenShift pod correctly rolls out (best-effort today)
     command: |
-      {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
+      {{ openshift_client_binary }} rollout status deploymentconfig {{ item.name }} \
                         --namespace {{ item.namespace | default('default') }} \
                         --config {{ openshift_master_config_dir }}/admin.kubeconfig
     async: 600
@@ -13,7 +13,7 @@
 
   - name: Determine the latest version of the OpenShift pod deployment
     command: |
-      {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
+      {{ openshift_client_binary }} get deploymentconfig {{ item.name }} \
              --namespace {{ item.namespace }} \
              --config {{ openshift_master_config_dir }}/admin.kubeconfig \
              -o jsonpath='{ .status.latestVersion }'
@@ -22,7 +22,7 @@
 
   - name: Poll for OpenShift pod deployment success
     command: |
-      {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
+      {{ openshift_client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
              --namespace {{ item.0.namespace }} \
              --config {{ openshift_master_config_dir }}/admin.kubeconfig \
              -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'

+ 1 - 1
roles/openshift_hosted_templates/defaults/main.yml

@@ -1,5 +1,5 @@
 ---
-hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted"
+hosted_base: "{{ openshift.common.config_base if openshift_is_containerized | bool else '/usr/share/openshift' }}/hosted"
 hosted_deployment_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'enterprise' }}"
 
 content_version: "{{ openshift.common.examples_content_version }}"

+ 1 - 1
roles/openshift_hosted_templates/tasks/main.yml

@@ -52,7 +52,7 @@
 
 - name: Create or update hosted templates
   command: >
-    {{ openshift.common.client_binary }} {{ openshift_hosted_templates_import_command }}
+    {{ openshift_client_binary }} {{ openshift_hosted_templates_import_command }}
     -f {{ hosted_base }}
     --config={{ openshift_hosted_templates_kubeconfig }}
     -n openshift

+ 7 - 7
roles/openshift_loadbalancer/tasks/main.yml

@@ -4,33 +4,33 @@
 
 - name: Install haproxy
   package: name=haproxy state=present
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
   register: result
   until: result is succeeded
 
 - name: Pull haproxy image
   command: >
     docker pull {{ openshift_router_image }}:{{ openshift_image_tag }}
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
 - name: Create config directory for haproxy
   file:
     path: /etc/haproxy
     state: directory
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
 - name: Create the systemd unit files
   template:
     src: "haproxy.docker.service.j2"
     dest: "/etc/systemd/system/haproxy.service"
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
   notify: restart haproxy
 
 - name: Configure systemd service directory for haproxy
   file:
     path: /etc/systemd/system/haproxy.service.d
     state: directory
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
 
 # Work around ini_file create option in 2.2 which defaults to no
 - name: Create limits.conf file
@@ -41,7 +41,7 @@
     owner: root
     group: root
   changed_when: false
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
 
 - name: Configure the nofile limits for haproxy
   ini_file:
@@ -50,7 +50,7 @@
     option: LimitNOFILE
     value: "{{ openshift_loadbalancer_limit_nofile | default(100000) }}"
   notify: restart haproxy
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
 
 - name: Configure haproxy
   template:

+ 1 - 1
roles/openshift_loadbalancer/templates/haproxy.cfg.j2

@@ -3,7 +3,7 @@
 global
     maxconn     {{ openshift_loadbalancer_global_maxconn | default(20000) }}
     log         /dev/log local0 info
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
     stats socket /var/lib/haproxy/run/haproxy.sock mode 600 level admin
 {% else %}
     chroot      /var/lib/haproxy

+ 1 - 1
roles/openshift_logging/tasks/annotate_ops_projects.yaml

@@ -1,6 +1,6 @@
 ---
 - command: >
-    {{ openshift.common.client_binary }}
+    {{ openshift_client_binary }}
     --config={{ openshift.common.config_base }}/master/admin.kubeconfig
     get namespaces -o jsonpath={.items[*].metadata.name} {{ __default_logging_ops_projects | join(' ') }}
   register: __logging_ops_projects

+ 2 - 2
roles/openshift_logging/tasks/delete_logging.yaml

@@ -109,14 +109,14 @@
 
 # remove annotations added by logging
 - command: >
-    {{ openshift.common.client_binary }}
+    {{ openshift_client_binary }}
     --config={{ openshift.common.config_base }}/master/admin.kubeconfig
     get namespaces -o name {{ __default_logging_ops_projects | join(' ') }}
   register: __logging_ops_projects
 
 - name: Remove Annotation of Operations Projects
   command: >
-    {{ openshift.common.client_binary }}
+    {{ openshift_client_binary }}
     --config={{ openshift.common.config_base }}/master/admin.kubeconfig
     annotate {{ project }} openshift.io/logging.ui.hostname-
   with_items: "{{ __logging_ops_projects.stdout_lines }}"

+ 1 - 1
roles/openshift_logging/tasks/generate_certs.yaml

@@ -17,7 +17,7 @@
 
 - name: Generate certificates
   command: >
-    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
+    {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
     --key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
     --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
   check_mode: no

+ 1 - 1
roles/openshift_logging/tasks/install_logging.yaml

@@ -1,7 +1,7 @@
 ---
 - name: Gather OpenShift Logging Facts
   openshift_logging_facts:
-    oc_bin: "{{openshift.common.client_binary}}"
+    oc_bin: "{{openshift_client_binary}}"
     openshift_logging_namespace: "{{openshift_logging_namespace}}"
 
 - name: Set logging project

+ 1 - 1
roles/openshift_logging/tasks/procure_server_certs.yaml

@@ -27,7 +27,7 @@
 
 - name: Creating signed server cert and key for {{ cert_info.procure_component }}
   command: >
-     {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
+     {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
      --key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
      --hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
      --signer-serial={{generated_certs_dir}}/ca.serial.txt

+ 1 - 1
roles/openshift_logging_elasticsearch/tasks/main.yaml

@@ -111,7 +111,7 @@
 
 - name: Create logging-metrics-reader-role
   command: >
-    {{ openshift.common.client_binary }}
+    {{ openshift_client_binary }}
     --config={{ openshift.common.config_base }}/master/admin.kubeconfig
     -n "{{ openshift_logging_elasticsearch_namespace }}"
     create -f "{{mktemp.stdout}}/templates/logging-metrics-role.yml"

+ 1 - 1
roles/openshift_manage_node/tasks/main.yml

@@ -18,7 +18,7 @@
   retries: 120
   delay: 1
   changed_when: false
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
   delegate_to: "{{ openshift_master_host }}"
   run_once: true
 

+ 6 - 6
roles/openshift_master/tasks/main.yml

@@ -19,7 +19,7 @@
     name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
     state: present
   when:
-  - not openshift.common.is_containerized | bool
+  - not openshift_is_containerized | bool
   register: result
   until: result is succeeded
 
@@ -31,12 +31,12 @@
     owner: root
     group: root
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
 
 - name: Reload systemd units
   command: systemctl daemon-reload
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
 
 - name: Re-gather package dependent master facts
   openshift_facts:
@@ -48,7 +48,7 @@
 
 - name: Create the policy file if it does not already exist
   command: >
-    {{ openshift.common.client_binary }} adm create-bootstrap-policy-file
+    {{ openshift_client_binary }} adm create-bootstrap-policy-file
       --filename={{ openshift_master_policy }}
   args:
     creates: "{{ openshift_master_policy }}"
@@ -69,7 +69,7 @@
   package: name=httpd-tools state=present
   when:
   - item.kind == 'HTPasswdPasswordIdentityProvider'
-  - not openshift.common.is_atomic | bool
+  - not openshift_is_atomic | bool
   with_items: "{{ openshift.master.identity_providers }}"
   register: result
   until: result is succeeded
@@ -164,7 +164,7 @@
 - name: Install Master system container
   include_tasks: system_container.yml
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - l_is_master_system_container | bool
 
 - name: Create session secrets file

+ 1 - 1
roles/openshift_master/tasks/registry_auth.yml

@@ -43,7 +43,7 @@
   set_fact:
     l_bind_docker_reg_auth: True
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - oreg_auth_user is defined
   - >
       (master_oreg_auth_credentials_stat.stat.exists

+ 4 - 4
roles/openshift_master/tasks/set_loopback_context.yml

@@ -1,13 +1,13 @@
 ---
 - name: Test local loopback context
   command: >
-    {{ openshift.common.client_binary }} config view
+    {{ openshift_client_binary }} config view
     --config={{ openshift_master_loopback_config }}
   changed_when: false
   register: l_loopback_config
 
 - command: >
-    {{ openshift.common.client_binary }} config set-cluster
+    {{ openshift_client_binary }} config set-cluster
     --certificate-authority={{ openshift_master_config_dir }}/ca.crt
     --embed-certs=true --server={{ openshift.master.loopback_api_url }}
     {{ openshift.master.loopback_cluster_name }}
@@ -17,7 +17,7 @@
   register: set_loopback_cluster
 
 - command: >
-    {{ openshift.common.client_binary }} config set-context
+    {{ openshift_client_binary }} config set-context
     --cluster={{ openshift.master.loopback_cluster_name }}
     --namespace=default --user={{ openshift.master.loopback_user }}
     {{ openshift.master.loopback_context_name }}
@@ -27,7 +27,7 @@
   register: l_set_loopback_context
 
 - command: >
-    {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }}
+    {{ openshift_client_binary }} config use-context {{ openshift.master.loopback_context_name }}
     --config={{ openshift_master_loopback_config }}
   when:
   - l_set_loopback_context is changed

+ 2 - 2
roles/openshift_master/tasks/systemd_units.yml

@@ -7,7 +7,7 @@
     containerized_svc_dir: "/etc/systemd/system"
     ha_svc_template_path: "docker-cluster"
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
 
 - include_tasks: registry_auth.yml
 
@@ -34,7 +34,7 @@
   register: l_pull_result
   changed_when: "'Downloaded newer image' in l_pull_result.stdout"
   when:
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
   - not l_is_master_system_container | bool
 
 - name: Create the ha systemd unit files

+ 1 - 1
roles/openshift_master/tasks/upgrade.yml

@@ -1,6 +1,6 @@
 ---
 - include_tasks: upgrade/rpm_upgrade.yml
-  when: not openshift.common.is_containerized | bool
+  when: not openshift_is_containerized | bool
 
 - include_tasks: upgrade/upgrade_scheduler.yml
 

+ 1 - 1
roles/openshift_master/templates/atomic-openshift-master.j2

@@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
 {% elif openshift_push_via_dns | default(false) %}
 OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
 {% endif %}
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
 IMAGE_VERSION={{ openshift_image_tag }}
 {% endif %}
 

+ 1 - 1
roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2

@@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
 {% elif openshift_push_via_dns | default(false) %}
 OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
 {% endif %}
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
 IMAGE_VERSION={{ openshift_image_tag }}
 {% endif %}
 

+ 1 - 1
roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2

@@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
 {% elif openshift_push_via_dns | default(false) %}
 OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
 {% endif %}
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
 IMAGE_VERSION={{ openshift_image_tag }}
 {% endif %}
 

+ 2 - 2
roles/openshift_master_certificates/tasks/main.yml

@@ -47,7 +47,7 @@
 
 - name: Create the master server certificate
   command: >
-    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
+    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert
     {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
     --certificate-authority {{ named_ca_certificate }}
     {% endfor %}
@@ -71,7 +71,7 @@
 
 - name: Generate the loopback master client config
   command: >
-    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config
       --certificate-authority={{ openshift_ca_cert }}
       {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
       --certificate-authority {{ named_ca_certificate }}

+ 1 - 1
roles/openshift_metrics/tasks/generate_certificates.yaml

@@ -1,7 +1,7 @@
 ---
 - name: generate ca certificate chain
   command: >
-    {{ openshift.common.client_binary }} adm ca create-signer-cert
+    {{ openshift_client_binary }} adm ca create-signer-cert
     --config={{ mktemp.stdout }}/admin.kubeconfig
     --key='{{ mktemp.stdout }}/ca.key'
     --cert='{{ mktemp.stdout }}/ca.crt'

+ 1 - 1
roles/openshift_metrics/tasks/install_cassandra.yaml

@@ -1,6 +1,6 @@
 ---
 - shell: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+    {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
     --config={{ mktemp.stdout }}/admin.kubeconfig
     get rc hawkular-cassandra-{{node}} -o jsonpath='{.spec.replicas}' || echo 0
   vars:

+ 1 - 1
roles/openshift_metrics/tasks/install_hawkular.yaml

@@ -1,6 +1,6 @@
 ---
 - command: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+    {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
     --config={{ mktemp.stdout }}/admin.kubeconfig
     get rc hawkular-metrics -o jsonpath='{.spec.replicas}'
   register: hawkular_metrics_replica_count

+ 1 - 1
roles/openshift_metrics/tasks/install_heapster.yaml

@@ -1,6 +1,6 @@
 ---
 - command: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+    {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
     --config={{ mktemp.stdout }}/admin.kubeconfig
     get rc heapster -o jsonpath='{.spec.replicas}'
   register: heapster_replica_count

+ 1 - 1
roles/openshift_metrics/tasks/install_metrics.yaml

@@ -70,7 +70,7 @@
 - include_tasks: update_master_config.yaml
 
 - command: >
-    {{openshift.common.client_binary}}
+    {{openshift_client_binary}}
     --config={{mktemp.stdout}}/admin.kubeconfig
     get rc
     -l metrics-infra

+ 3 - 3
roles/openshift_metrics/tasks/oc_apply.yaml

@@ -1,7 +1,7 @@
 ---
 - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
   command: >
-    {{ openshift.common.client_binary }}
+    {{ openshift_client_binary }}
     --config={{ kubeconfig }}
     get {{file_content.kind}} {{file_content.metadata.name}}
     -o jsonpath='{.metadata.resourceVersion}'
@@ -12,7 +12,7 @@
 
 - name: Applying {{file_name}}
   command: >
-    {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+    {{ openshift_client_binary }} --config={{ kubeconfig }}
     apply -f {{ file_name }}
     -n {{namespace}}
   register: generation_apply
@@ -21,7 +21,7 @@
 
 - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
   command: >
-    {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+    {{ openshift_client_binary }} --config={{ kubeconfig }}
     get {{file_content.kind}} {{file_content.metadata.name}}
     -o jsonpath='{.metadata.resourceVersion}'
     -n {{namespace}}

+ 1 - 1
roles/openshift_metrics/tasks/pre_install.yaml

@@ -14,7 +14,7 @@
 
 - name: list existing secrets
   command: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }}
+    {{ openshift_client_binary }} -n {{ openshift_metrics_project }}
     --config={{ mktemp.stdout }}/admin.kubeconfig
     get secrets -o name
   register: metrics_secrets

+ 1 - 1
roles/openshift_metrics/tasks/setup_certificate.yaml

@@ -1,7 +1,7 @@
 ---
 - name: generate {{ component }} keys
   command: >
-    {{ openshift.common.client_binary }} adm ca create-server-cert
+    {{ openshift_client_binary }} adm ca create-server-cert
     --config={{ mktemp.stdout }}/admin.kubeconfig
     --key='{{ mktemp.stdout }}/{{ component }}.key'
     --cert='{{ mktemp.stdout }}/{{ component }}.crt'

+ 3 - 3
roles/openshift_metrics/tasks/start_metrics.yaml

@@ -1,6 +1,6 @@
 ---
 - command: >
-    {{openshift.common.client_binary}}
+    {{openshift_client_binary}}
     --config={{mktemp.stdout}}/admin.kubeconfig
     get rc
     -l metrics-infra=hawkular-cassandra
@@ -23,7 +23,7 @@
   changed_when: metrics_cassandra_rc | length > 0
 
 - command: >
-    {{openshift.common.client_binary}}
+    {{openshift_client_binary}}
     --config={{mktemp.stdout}}/admin.kubeconfig
     get rc
     -l metrics-infra=hawkular-metrics
@@ -45,7 +45,7 @@
   changed_when: metrics_metrics_rc | length > 0
 
 - command: >
-    {{openshift.common.client_binary}}
+    {{openshift_client_binary}}
     --config={{mktemp.stdout}}/admin.kubeconfig
     get rc
     -l metrics-infra=heapster

+ 3 - 3
roles/openshift_metrics/tasks/stop_metrics.yaml

@@ -1,6 +1,6 @@
 ---
 - command: >
-    {{openshift.common.client_binary}}
+    {{openshift_client_binary}}
     --config={{mktemp.stdout}}/admin.kubeconfig
     get rc
     -l metrics-infra=heapster
@@ -22,7 +22,7 @@
     loop_var: object
 
 - command: >
-    {{openshift.common.client_binary}}
+    {{openshift_client_binary}}
     --config={{mktemp.stdout}}/admin.kubeconfig
     get rc
     -l metrics-infra=hawkular-metrics
@@ -44,7 +44,7 @@
   changed_when: metrics_hawkular_rc | length > 0
 
 - command: >
-    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
+    {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
     get rc
     -o name
     -l metrics-infra=hawkular-cassandra

+ 2 - 2
roles/openshift_metrics/tasks/uninstall_hosa.yaml

@@ -1,7 +1,7 @@
 ---
 - name: remove Hawkular Agent (HOSA) components
   command: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    {{ openshift_client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
     delete --ignore-not-found --selector=metrics-infra=agent
     all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
   register: delete_metrics
@@ -9,7 +9,7 @@
 
 - name: remove rolebindings
   command: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    {{ openshift_client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
     delete --ignore-not-found
     clusterrolebinding/hawkular-openshift-agent-rb
   changed_when: delete_metrics.stdout != 'No resources found'

+ 2 - 2
roles/openshift_metrics/tasks/uninstall_metrics.yaml

@@ -4,7 +4,7 @@
 
 - name: remove metrics components
   command: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
     delete --ignore-not-found --selector=metrics-infra
     all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings,clusterrole
   register: delete_metrics
@@ -12,7 +12,7 @@
 
 - name: remove rolebindings
   command: >
-    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
     delete --ignore-not-found
     rolebinding/hawkular-view
     clusterrolebinding/heapster-cluster-reader

+ 1 - 1
roles/openshift_node/handlers/main.yml

@@ -34,7 +34,7 @@
   pause: seconds=15
   when:
   - (not skip_node_svc_handlers | default(False) | bool)
-  - openshift.common.is_containerized | bool
+  - openshift_is_containerized | bool
 
 - name: restart node
   systemd:

+ 0 - 0
roles/openshift_node/tasks/config.yml


Algúns arquivos non se mostraron porque demasiados arquivos cambiaron neste cambio