Pārlūkot izejas kodu

Switch the master to always run with bootstrapping on

Change the defaults for node bootstrapping to true; all nodes will
bootstrap unless opted out. Remove containerized node artifacts.

Remove the openshift_master role - it is dead.
Clayton Coleman 7 gadi atpakaļ
vecāks
revīzija
ddf1aa2f1a
100 mainītis faili ar 390 papildinājumiem un 1543 dzēšanām
  1. 1 3
      .papr.inventory
  2. 0 3
      .papr.sh
  3. 1 1
      docs/best_practices_guide.adoc
  4. 1 1
      docs/proposals/role_decomposition.md
  5. 6 6
      inventory/hosts.example
  6. 1 1
      inventory/hosts.glusterfs.native.example
  7. 1 1
      inventory/hosts.glusterfs.registry-only.example
  8. 1 1
      inventory/hosts.glusterfs.storage-and-registry.example
  9. 0 1
      playbooks/aws/provisioning-inventory.example.ini
  10. 0 20
      playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
  11. 0 5
      playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
  12. 0 14
      playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
  13. 0 7
      playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
  14. 0 20
      playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
  15. 0 5
      playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
  16. 0 14
      playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
  17. 0 7
      playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
  18. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_6/roles
  19. 0 44
      playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
  20. 0 54
      playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
  21. 0 38
      playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
  22. 0 12
      playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
  23. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_7/roles
  24. 0 70
      playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
  25. 0 80
      playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
  26. 0 38
      playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
  27. 0 26
      playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
  28. 4 0
      playbooks/deploy_cluster.yml
  29. 6 3
      playbooks/gcp/openshift-cluster/install.yml
  30. 1 10
      playbooks/gcp/openshift-cluster/install_gcp.yml
  31. 8 0
      playbooks/init/basic_facts.yml
  32. 1 1
      playbooks/init/evaluate_groups.yml
  33. 1 1
      playbooks/openshift-etcd/private/scaleup.yml
  34. 10 28
      playbooks/openshift-master/private/config.yml
  35. 1 1
      playbooks/openshift-master/private/restart.yml
  36. 4 2
      playbooks/openshift-master/private/tasks/enable_bootstrap.yml
  37. 1 1
      playbooks/openshift-master/private/tasks/restart_services.yml
  38. 2 0
      playbooks/openshift-master/private/tasks/wire_aggregator.yml
  39. 2 11
      playbooks/openshift-master/private/upgrade.yml
  40. 1 1
      playbooks/openshift-node/private/additional_config.yml
  41. 44 0
      playbooks/openshift-node/private/bootstrap.yml
  42. 1 1
      playbooks/openshift-node/private/certificates-backup.yml
  43. 1 1
      playbooks/openshift-node/private/certificates.yml
  44. 1 1
      playbooks/openshift-node/private/clean_image.yml
  45. 1 3
      playbooks/openshift-node/private/config.yml
  46. 1 1
      playbooks/openshift-node/private/configure_nodes.yml
  47. 0 20
      playbooks/openshift-node/private/containerized_nodes.yml
  48. 1 1
      playbooks/openshift-node/private/enable_excluders.yml
  49. 1 1
      playbooks/openshift-node/private/image_prep.yml
  50. 70 0
      playbooks/openshift-node/private/join.yml
  51. 1 1
      playbooks/openshift-node/private/manage_node.yml
  52. 10 11
      playbooks/openshift-node/private/setup.yml
  53. 23 0
      playbooks/openshift-node/private/setup_bootstrap.yml
  54. 0 5
      roles/etcd/tasks/main.yml
  55. 1 1
      roles/lib_utils/filter_plugins/oo_filters.py
  56. 1 1
      roles/lib_utils/filter_plugins/openshift_master.py
  57. 63 0
      roles/openshift_ca/tasks/main.yml
  58. 20 24
      roles/openshift_control_plane/defaults/main.yml
  59. 8 1
      roles/openshift_control_plane/files/apiserver.yaml
  60. 1 1
      roles/openshift_control_plane/files/controller.yaml
  61. 0 1
      roles/openshift_master/tasks/bootstrap_settings.yml
  62. 0 0
      roles/openshift_control_plane/tasks/check_master_api_is_ready.yml
  63. 0 0
      roles/openshift_control_plane/tasks/ensure_nodes_matching_selector.yml
  64. 0 29
      roles/openshift_control_plane/tasks/journald.yml
  65. 17 4
      roles/openshift_control_plane/tasks/main.yml
  66. 9 2
      roles/openshift_control_plane/tasks/upgrade.yml
  67. 6 5
      roles/openshift_control_plane/tasks/upgrade/rpm_upgrade.yml
  68. 0 0
      roles/openshift_control_plane/tasks/upgrade/upgrade_predicates.yml
  69. 0 0
      roles/openshift_control_plane/tasks/upgrade/upgrade_priorities.yml
  70. 12 163
      roles/openshift_control_plane/tasks/upgrade/upgrade_scheduler.yml
  71. 0 0
      roles/openshift_control_plane/tasks/upgrade/v3_6/master_config_upgrade.yml
  72. 0 0
      roles/openshift_control_plane/tasks/upgrade/v3_7/master_config_upgrade.yml
  73. 22 4
      roles/openshift_control_plane/templates/master.yaml.v1.j2
  74. 0 0
      roles/openshift_control_plane/vars/main.yml
  75. 7 1
      roles/openshift_facts/library/openshift_facts.py
  76. 1 1
      roles/openshift_gcp/tasks/configure_master_healthcheck.yml
  77. 1 1
      roles/openshift_gcp/tasks/setup_scale_group_facts.yml
  78. 1 1
      roles/openshift_gcp/templates/openshift-bootstrap-update.j2
  79. 1 1
      roles/openshift_grafana/defaults/main.yaml
  80. 1 1
      roles/openshift_grafana/tasks/install_grafana.yaml
  81. 3 3
      roles/openshift_hosted/defaults/main.yml
  82. 1 1
      roles/openshift_logging_curator/tasks/main.yaml
  83. 1 1
      roles/openshift_logging_elasticsearch/tasks/main.yaml
  84. 1 1
      roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
  85. 1 1
      roles/openshift_logging_kibana/tasks/main.yaml
  86. 1 1
      roles/openshift_logging_mux/tasks/main.yaml
  87. 1 1
      roles/openshift_manage_node/tasks/set_default_node_role.yml
  88. 0 49
      roles/openshift_master/README.md
  89. 0 149
      roles/openshift_master/defaults/main.yml
  90. 0 36
      roles/openshift_master/handlers/main.yml
  91. 0 17
      roles/openshift_master/meta/main.yml
  92. 0 15
      roles/openshift_master/tasks/bootstrap.yml
  93. 0 44
      roles/openshift_master/tasks/firewall.yml
  94. 0 253
      roles/openshift_master/tasks/main.yml
  95. 0 13
      roles/openshift_master/tasks/push_via_dns.yml
  96. 0 52
      roles/openshift_master/tasks/registry_auth.yml
  97. 0 19
      roles/openshift_master/tasks/restart.yml
  98. 0 34
      roles/openshift_master/tasks/set_loopback_context.yml
  99. 0 32
      roles/openshift_master/tasks/system_container.yml
  100. 0 0
      roles/openshift_master/tasks/systemd_units.yml

+ 1 - 3
.papr.inventory

@@ -17,8 +17,6 @@ osm_host_subnet_length=9
 
 [all:vars]
 # bootstrap configs
-openshift_node_groups=[{"name":"node-config-master","labels":["node-role.kubernetes.io/master=true","node-role.kubernetes.io/infra=true"]},{"name":"node-config-node","labels":["node-role.kubernetes.io/compute=true"]}]
-openshift_master_bootstrap_enabled=true
 openshift_master_bootstrap_auto_approve=true
 openshift_master_bootstrap_auto_approver_node_selector={"node-role.kubernetes.io/master":"true"}
 osm_controller_args={"experimental-cluster-signing-duration": ["20m"]}
@@ -33,6 +31,6 @@ ocp-master
 ocp-master
 
 [nodes]
-ocp-master openshift_schedulable=true
+ocp-master openshift_schedulable=true openshift_node_labels="{'node-role.kubernetes.io/infra':'true'}"
 ocp-node1
 ocp-node2

+ 0 - 3
.papr.sh

@@ -32,9 +32,6 @@ upload_journals() {
 
 trap upload_journals ERR
 
-# make all nodes ready for bootstrapping
-ansible-playbook -v -i .papr.inventory playbooks/openshift-node/private/image_prep.yml
-
 # run the actual installer
 ansible-playbook -v -i .papr.inventory playbooks/deploy_cluster.yml
 

+ 1 - 1
docs/best_practices_guide.adoc

@@ -406,7 +406,7 @@ For consistency, role names SHOULD follow the above naming pattern. It is import
 Many times the `technology` portion of the pattern will line up with a package name. It is advised that whenever possible, the package name should be used.
 
 .Examples:
-* The role to configure a master is called `openshift_master`
+* The role to configure a master is called `openshift_control_plane`
 * The role to configure OpenShift specific yum repositories is called `openshift_repos`
 
 === Filters

+ 1 - 1
docs/proposals/role_decomposition.md

@@ -330,7 +330,7 @@ in meta/main.yml without:
 ## Avoiding overly verbose roles
 When we are splitting our roles up into smaller components we want to ensure we
 avoid creating roles that are, for a lack of a better term, overly verbose. What
-do we mean by that? If we have `openshift_master` as an example, and we were to
+do we mean by that? If we have `openshift_control_plane` as an example, and we were to
 split it up, we would have a component for `etcd`, `docker`, and possibly for
 its rpms/configs. We would want to avoid creating a role that would just create
 certificates as those would make sense to be contained with the rpms and configs.

+ 6 - 6
inventory/hosts.example

@@ -411,7 +411,7 @@ debug_level=2
 #
 # An OpenShift router will be created during install if there are
 # nodes present with labels matching the default router selector,
-# "region=infra". Set openshift_node_labels per node as needed in
+# "node-role.kubernetes.io/infra=true". Set openshift_node_labels per node as needed in
 # order to label nodes.
 #
 # Example:
@@ -420,8 +420,8 @@ debug_level=2
 #
 # Router selector (optional)
 # Router will only be created if nodes matching this label are present.
-# Default value: 'region=infra'
-#openshift_hosted_router_selector='region=infra'
+# Default value: 'node-role.kubernetes.io/infra=true'
+#openshift_hosted_router_selector='node-role.kubernetes.io/infra=true'
 #
 # Router replicas (optional)
 # Unless specified, openshift-ansible will calculate the replica count
@@ -462,7 +462,7 @@ debug_level=2
 #
 # An OpenShift registry will be created during install if there are
 # nodes present with labels matching the default registry selector,
-# "region=infra". Set openshift_node_labels per node as needed in
+# "node-role.kubernetes.io/infra=true". Set openshift_node_labels per node as needed in
 # order to label nodes.
 #
 # Example:
@@ -471,8 +471,8 @@ debug_level=2
 #
 # Registry selector (optional)
 # Registry will only be created if nodes matching this label are present.
-# Default value: 'region=infra'
-#openshift_hosted_registry_selector='region=infra'
+# Default value: 'node-role.kubernetes.io/infra=true'
+#openshift_hosted_registry_selector='node-role.kubernetes.io/infra=true'
 #
 # Registry replicas (optional)
 # Unless specified, openshift-ansible will calculate the replica count

+ 1 - 1
inventory/hosts.glusterfs.native.example

@@ -31,7 +31,7 @@ master
 # masters should be schedulable to run web console pods
 master  openshift_schedulable=True
 # A hosted registry, by default, will only be deployed on nodes labeled
-# "region=infra".
+# "node-role.kubernetes.io/infra=true".
 node0   openshift_schedulable=True
 node1   openshift_schedulable=True
 node2   openshift_schedulable=True

+ 1 - 1
inventory/hosts.glusterfs.registry-only.example

@@ -37,7 +37,7 @@ master
 # masters should be schedulable to run web console pods
 master  openshift_schedulable=True
 # A hosted registry, by default, will only be deployed on nodes labeled
-# "region=infra".
+# "node-role.kubernetes.io/infra=true".
 node0   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
 node1   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
 node2   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True

+ 1 - 1
inventory/hosts.glusterfs.storage-and-registry.example

@@ -43,7 +43,7 @@ node0   openshift_schedulable=True
 node1   openshift_schedulable=True
 node2   openshift_schedulable=True
 # A hosted registry, by default, will only be deployed on nodes labeled
-# "region=infra".
+# "node-role.kubernetes.io/infra=true".
 node3   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
 node4   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
 node5   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True

+ 0 - 1
playbooks/aws/provisioning-inventory.example.ini

@@ -11,7 +11,6 @@ etcd
 openshift_deployment_type=origin
 openshift_cloudprovider_kind=aws
 
-openshift_master_bootstrap_enabled=True
 openshift_master_api_port=443
 
 openshift_hosted_router_wait=False

+ 0 - 20
playbooks/byo/openshift-cluster/upgrades/v3_6/README.md

@@ -1,20 +0,0 @@
-# v3.6 Major and Minor Upgrade Playbook
-
-## Overview
-This playbook currently performs the following steps.
-
- * Upgrade and restart master services
- * Unschedule node
- * Upgrade and restart docker
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-
-```
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
-```

+ 0 - 5
playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml

@@ -1,5 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml

+ 0 - 14
playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml

@@ -1,14 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml

+ 0 - 7
playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml

@@ -1,7 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml

+ 0 - 20
playbooks/byo/openshift-cluster/upgrades/v3_7/README.md

@@ -1,20 +0,0 @@
-# v3.7 Major and Minor Upgrade Playbook
-
-## Overview
-This playbook currently performs the following steps.
-
- * Upgrade and restart master services
- * Unschedule node
- * Upgrade and restart docker
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-
-```
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
-```

+ 0 - 5
playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml

@@ -1,5 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml

+ 0 - 14
playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml

@@ -1,14 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml

+ 0 - 7
playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml

@@ -1,7 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_6/roles

@@ -1 +0,0 @@
-../../../../../roles/

+ 0 - 44
playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml

@@ -1,44 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- import_playbook: ../init.yml
-  tags:
-  - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_all_hosts
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_upgrade_target: '3.6'
-      openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
-
-- import_playbook: ../pre/config.yml
-  vars:
-    l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
-    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
-    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
-    l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-    openshift_protect_installed_version: False
-
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - set_fact:
-      pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_control_plane.yml
-  vars:
-    master_config_hook: "v3_6/master_config_upgrade.yml"
-
-- import_playbook: ../upgrade_nodes.yml
-
-- import_playbook: ../post_control_plane.yml

+ 0 - 54
playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml

@@ -1,54 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- import_playbook: ../init.yml
-  vars:
-    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-
-- name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
-  tasks:
-  - set_fact:
-      openshift_upgrade_target: '3.6'
-      openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
-
-- import_playbook: ../pre/config.yml
-  # These vars a meant to exclude oo_nodes from plays that would otherwise include
-  # them by default.
-  vars:
-    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
-    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
-    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
-    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
-    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
-    l_upgrade_excluder_hosts: "oo_masters_to_config"
-    openshift_protect_installed_version: False
-
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
-  hosts: oo_masters_to_config:oo_etcd_to_config
-  tasks:
-  - set_fact:
-      pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_control_plane.yml
-  vars:
-    master_config_hook: "v3_6/master_config_upgrade.yml"
-
-- import_playbook: ../post_control_plane.yml

+ 0 - 38
playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml

@@ -1,38 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- import_playbook: ../init.yml
-  tags:
-  - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_all_hosts
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_upgrade_target: '3.6'
-      openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
-
-- import_playbook: ../pre/config.yml
-  vars:
-    l_upgrade_repo_hosts: "oo_nodes_to_config"
-    l_upgrade_no_proxy_hosts: "oo_all_hosts"
-    l_upgrade_health_check_hosts: "oo_nodes_to_config"
-    l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
-    l_upgrade_docker_target_hosts: "oo_nodes_to_config"
-    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
-    l_upgrade_nodes_only: True
-
-- name: Flag pre-upgrade checks complete for hosts without errors
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - set_fact:
-      pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_nodes.yml

+ 0 - 12
playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml

@@ -1,12 +0,0 @@
----
-###############################################################################
-# Pre upgrade checks for known data problems, if this playbook fails you should
-# contact support. If you're not supported contact users@lists.openshift.com
-###############################################################################
-- name: Verify 3.6 specific upgrade checks
-  hosts: oo_first_master
-  roles:
-  - { role: lib_openshift }
-  tasks:
-  - name: Check for invalid namespaces and SDN errors
-    oc_objectvalidator:

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_7/roles

@@ -1 +0,0 @@
-../../../../../roles/

+ 0 - 70
playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml

@@ -1,70 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- import_playbook: ../init.yml
-  tags:
-  - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_all_hosts
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_upgrade_target: '3.7'
-      openshift_upgrade_min: '3.6'
-
-- import_playbook: ../pre/config.yml
-  vars:
-    l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
-    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
-    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
-    l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-    openshift_protect_installed_version: False
-
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - set_fact:
-      pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_control_plane.yml
-  vars:
-    master_config_hook: "v3_7/master_config_upgrade.yml"
-
-# this must occur after the control plane is upgraded because systemd service
-# names are changed
-- name: Configure API aggregation on masters
-  hosts: oo_masters_to_config
-  serial: 1
-  roles:
-  - role: openshift_facts
-  tasks:
-  - include_tasks: ../../../../openshift-master/private/tasks/wire_aggregator.yml
-
-# All controllers must be stopped at the same time then restarted
-- name: Cycle all controller services to force new leader election mode
-  hosts: oo_masters_to_config
-  gather_facts: no
-  roles:
-  - role: openshift_facts
-  tasks:
-  - name: Stop {{ openshift_service_type }}-master-controllers
-    systemd:
-      name: "{{ openshift_service_type }}-master-controllers"
-      state: stopped
-  - name: Start {{ openshift_service_type }}-master-controllers
-    systemd:
-      name: "{{ openshift_service_type }}-master-controllers"
-      state: started
-
-- import_playbook: ../upgrade_nodes.yml
-
-- import_playbook: ../post_control_plane.yml

+ 0 - 80
playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml

@@ -1,80 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- import_playbook: ../init.yml
-  vars:
-    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-
-- name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
-  tasks:
-  - set_fact:
-      openshift_upgrade_target: '3.7'
-      openshift_upgrade_min: '3.6'
-
-- import_playbook: ../pre/config.yml
-  # These vars a meant to exclude oo_nodes from plays that would otherwise include
-  # them by default.
-  vars:
-    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
-    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
-    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
-    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
-    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
-    l_upgrade_excluder_hosts: "oo_masters_to_config"
-    openshift_protect_installed_version: False
-
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
-  hosts: oo_masters_to_config:oo_etcd_to_config
-  tasks:
-  - set_fact:
-      pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_control_plane.yml
-  vars:
-    master_config_hook: "v3_7/master_config_upgrade.yml"
-
-# this must occur after the control plane is upgraded because systemd service
-# names are changed
-- name: Configure API aggregation on masters
-  hosts: oo_masters_to_config
-  serial: 1
-  roles:
-  - role: openshift_facts
-  tasks:
-  - include_tasks: ../../../../openshift-master/private/tasks/wire_aggregator.yml
-
-# All controllers must be stopped at the same time then restarted
-- name: Cycle all controller services to force new leader election mode
-  hosts: oo_masters_to_config
-  gather_facts: no
-  roles:
-  - role: openshift_facts
-  tasks:
-  - name: Stop {{ openshift_service_type }}-master-controllers
-    systemd:
-      name: "{{ openshift_service_type }}-master-controllers"
-      state: stopped
-  - name: Start {{ openshift_service_type }}-master-controllers
-    systemd:
-      name: "{{ openshift_service_type }}-master-controllers"
-      state: started
-
-- import_playbook: ../post_control_plane.yml

+ 0 - 38
playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml

@@ -1,38 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- import_playbook: ../init.yml
-  tags:
-  - pre_upgrade
-
-- name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_all_hosts
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_upgrade_target: '3.7'
-      openshift_upgrade_min: '3.6'
-
-- import_playbook: ../pre/config.yml
-  vars:
-    l_upgrade_repo_hosts: "oo_nodes_to_config"
-    l_upgrade_no_proxy_hosts: "oo_all_hosts"
-    l_upgrade_health_check_hosts: "oo_nodes_to_config"
-    l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
-    l_upgrade_docker_target_hosts: "oo_nodes_to_config"
-    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
-    l_upgrade_nodes_only: True
-
-- name: Flag pre-upgrade checks complete for hosts without errors
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - set_fact:
-      pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_nodes.yml

+ 0 - 26
playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml

@@ -1,26 +0,0 @@
----
-###############################################################################
-# Pre upgrade checks for known data problems, if this playbook fails you should
-# contact support. If you're not supported contact users@lists.openshift.com
-###############################################################################
-- name: Verify 3.7 specific upgrade checks
-  hosts: oo_first_master
-  roles:
-  - { role: lib_openshift }
-  - { role: openshift_facts }
-
-  tasks:
-  - name: Check for invalid namespaces and SDN errors
-    oc_objectvalidator:
-  # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO
-  - name: Confirm OpenShift authorization objects are in sync
-    command: >
-      {{ openshift_client_binary }} adm migrate authorization
-    when:
-    - openshift_currently_installed_version is version_compare('3.7','<')
-    - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool
-    changed_when: false
-    register: l_oc_result
-    until: l_oc_result.rc == 0
-    retries: 2
-    delay: 15

+ 4 - 0
playbooks/deploy_cluster.yml

@@ -1,8 +1,12 @@
 ---
 - import_playbook: init/main.yml
 
+- import_playbook: openshift-node/private/bootstrap.yml
+
 - import_playbook: common/private/control_plane.yml
 
+- import_playbook: openshift-node/private/join.yml
+
 - import_playbook: openshift-node/private/config.yml
 
 - import_playbook: common/private/components.yml

+ 6 - 3
playbooks/gcp/openshift-cluster/install.yml

@@ -11,15 +11,18 @@
 - name: run the init
   import_playbook: ../../init/main.yml
 
+- name: ensure master nodes are ready for bootstrapping
+  import_playbook: ../../openshift-node/private/bootstrap.yml
+
 - name: configure the control plane
   import_playbook: ../../common/private/control_plane.yml
 
-- name: ensure the masters are configured as nodes
-  import_playbook: ../../openshift-node/private/config.yml
-
 - name: run the GCP specific post steps
   import_playbook: install_gcp.yml
 
+- name: configure any nodes that aren't bootstrapped
+  import_playbook: ../../openshift-node/private/config.yml
+
 - name: install components
   import_playbook: ../../common/private/components.yml
 

+ 1 - 10
playbooks/gcp/openshift-cluster/install_gcp.yml

@@ -6,16 +6,7 @@
     include_role:
       name: openshift_gcp
       tasks_from: configure_master_healthcheck.yml
-  - name: configure node bootstrapping
+  - name: configure master bootstrap distribution
     include_role:
       name: openshift_gcp
       tasks_from: configure_master_bootstrap.yml
-    when:
-    - openshift_master_bootstrap_enabled | default(False)
-  - name: configure node bootstrap autoapprover
-    include_role:
-      name: openshift_bootstrap_autoapprover
-      tasks_from: main
-    when:
-    - openshift_master_bootstrap_enabled | default(False)
-    - openshift_master_bootstrap_auto_approve | default(False) | bool

+ 8 - 0
playbooks/init/basic_facts.yml

@@ -29,6 +29,14 @@
     - openshift_deployment_type is undefined
     - deployment_type is defined
 
+  - name: check for node already bootstrapped
+    stat:
+      path: "/etc/origin/node/bootstrap-node-config.yaml"
+    register: bootstrap_node_config_path_check
+  - name: initialize_facts set fact openshift_is_bootstrapped
+    set_fact:
+      openshift_is_bootstrapped: "{{ bootstrap_node_config_path_check.stat.exists }}"
+
   - name: initialize_facts set fact openshift_is_atomic and openshift_is_containerized
     set_fact:
       openshift_is_atomic: "{{ ostree_booted.stat.exists }}"

+ 1 - 1
playbooks/init/evaluate_groups.yml

@@ -52,7 +52,7 @@
         for documentation on how to migrate from embedded to external etcd.
     when:
     - g_etcd_hosts | default([]) | length == 0
-    - not (openshift_node_bootstrap | default(False))
+    - not (openshift_node_bootstrap | default(True))
 
   - name: Evaluate oo_all_hosts
     add_host:

+ 1 - 1
playbooks/openshift-etcd/private/scaleup.yml

@@ -85,5 +85,5 @@
   - role: openshift_master_facts
   post_tasks:
   - import_role:
-      name: openshift_master
+      name: openshift_control_plane
       tasks_from: update_etcd_client_urls.yml

+ 10 - 28
playbooks/openshift-master/private/config.yml

@@ -163,16 +163,20 @@
     openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
     openshift_ca_host: "{{ groups.oo_first_master.0 }}"
   pre_tasks:
-  # This will be moved into the control plane role once openshift_master is removed
-  - name: Add static pod and systemd shim commands
-    import_role:
-      name: openshift_control_plane
-      tasks_from: static_shim
   - name: Prepare the bootstrap node config on masters for self-hosting
     import_role:
       name: openshift_node_group
       tasks_from: bootstrap
-    when: openshift_master_bootstrap_enabled | default(false) | bool
+  # TODO: move me into a more appropriate location
+  - name: Update the sysconfig for the masters to give them bootstrap config
+    lineinfile:
+      dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
+      line: "{{ item.line | default(omit) }}"
+      regexp: "{{ item.regexp }}"
+      state: "{{ item.state | default('present') }}"
+    with_items:
+    - line: "BOOTSTRAP_CONFIG_NAME={{ openshift_node_group_master | default('node-config-master') }}"
+      regexp: "^BOOTSTRAP_CONFIG_NAME=.*"
 
   roles:
   - role: openshift_master_facts
@@ -183,20 +187,7 @@
   - role: openshift_buildoverrides
   - role: nickhammond.logrotate
 
-  # DEPRECATED: begin moving away from this
-  - role: openshift_master
-    openshift_master_ha: "{{ (groups.oo_masters | length > 1) | bool }}"
-    openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
-    r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
-    r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
-    openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}"
-    openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}"
-    openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
-    openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
-    when: not ( openshift_master_bootstrap_enabled | default(false) | bool )
-
   - role: openshift_control_plane
-    when: openshift_master_bootstrap_enabled | default(false) | bool
   - role: tuned
   - role: nuage_ca
     when: openshift_use_nuage | default(false) | bool
@@ -218,21 +209,12 @@
 
   - name: setup bootstrap settings
     include_tasks: tasks/enable_bootstrap.yml
-    when: openshift_master_bootstrap_enabled | default(false) | bool
 
   post_tasks:
   - name: Create group for deployment type
     group_by: key=oo_masters_deployment_type_{{ openshift_deployment_type }}
     changed_when: False
 
-- name: Configure API Aggregation on masters
-  hosts: oo_masters
-  serial: 1
-  roles:
-  - role: openshift_facts
-  tasks:
-  - include_tasks: tasks/wire_aggregator.yml
-
 - name: Re-enable excluder if it was previously enabled
   hosts: oo_masters_to_config
   gather_facts: no

+ 1 - 1
playbooks/openshift-master/private/restart.yml

@@ -10,6 +10,6 @@
   - include_tasks: tasks/restart_hosts.yml
     when: openshift_rolling_restart_mode | default('services') == 'system'
   - import_role:
-      name: openshift_master
+      name: openshift_control_plane
       tasks_from: restart.yml
     when: openshift_rolling_restart_mode | default('services') == 'services'

+ 4 - 2
playbooks/openshift-master/private/tasks/enable_bootstrap.yml

@@ -1,12 +1,12 @@
 ---
 - name: Setup the master bootstrap settings
   import_role:
-    name: openshift_master
+    name: openshift_control_plane
     tasks_from: bootstrap_settings.yml
 
 - name: Setup the bootstrap kubeconfig
   import_role:
-    name: openshift_master
+    name: openshift_control_plane
     tasks_from: bootstrap.yml
 
 - name: Setup the node group config maps
@@ -18,3 +18,5 @@
   import_role:
     name: openshift_bootstrap_autoapprover
   run_once: True
+  when:
+  - openshift_master_bootstrap_auto_approve | default(False) | bool

+ 1 - 1
playbooks/openshift-master/private/tasks/restart_services.yml

@@ -1,4 +1,4 @@
 ---
 - import_role:
-    name: openshift_master
+    name: openshift_control_plane
     tasks_from: restart.yml

+ 2 - 0
playbooks/openshift-master/private/tasks/wire_aggregator.yml

@@ -1,4 +1,6 @@
 ---
+# DEPRECATED: These tasks will be removed
+
 - name: Make temp cert dir
   command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
   register: certtemp

+ 2 - 11
playbooks/openshift-master/private/upgrade.yml

@@ -68,12 +68,12 @@
     when: openshift_master_upgrade_pre_hook is defined
 
   - import_role:
-      name: openshift_master
+      name: openshift_control_plane
       tasks_from: upgrade.yml
 
   - name: update vsphere provider master config
     include_role:
-      name: openshift_master
+      name: openshift_control_plane
       tasks_from: update-vsphere
     when:
     - openshift_cloudprovider_kind is defined
@@ -82,7 +82,6 @@
 
   - name: Setup and enable bootstrapping options
     include_tasks: tasks/enable_bootstrap.yml
-    when: openshift_master_bootstrap_enabled | default(false) | bool
 
   # Run the upgrade hook prior to restarting services/system if defined:
   - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
@@ -260,14 +259,6 @@
   - set_fact:
       reconcile_complete: True
 
-- name: Configure API aggregation on masters
-  hosts: oo_masters_to_config
-  serial: 1
-  roles:
-  - role: openshift_facts
-  tasks:
-  - include_tasks: tasks/wire_aggregator.yml
-
 ##############################################################################
 # Gate on reconcile
 ##############################################################################

+ 1 - 1
playbooks/openshift-node/private/additional_config.yml

@@ -1,6 +1,6 @@
 ---
 - name: create additional node network plugin groups
-  hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}"
+  hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}:!oo_exclude_nodes_to_config"
   tasks:
   # Creating these node groups will prevent a ton of skipped tasks.
   # Create group for flannel nodes

+ 44 - 0
playbooks/openshift-node/private/bootstrap.yml

@@ -0,0 +1,44 @@
+---
+- name: Node Preparation Checkpoint Start
+  hosts: all
+  gather_facts: false
+  tasks:
+  - name: Set Node preparation 'In Progress'
+    run_once: true
+    set_stats:
+      data:
+        installer_phase_node:
+          status: "In Progress"
+          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- import_playbook: setup_bootstrap.yml
+
+- import_playbook: configure_nodes.yml
+
+- name: node bootstrap config
+  hosts: oo_nodes_to_config:!oo_exclude_nodes_to_config
+  tasks:
+  - import_role:
+      name: openshift_node
+      tasks_from: bootstrap.yml
+  - import_role:
+      name: openshift_node_group
+      tasks_from: bootstrap.yml
+  - set_fact:
+      openshift_is_bootstrapped: True
+
+- import_playbook: enable_excluders.yml
+
+- import_playbook: clean_image.yml
+
+- name: Node Preparation Checkpoint End
+  hosts: all
+  gather_facts: false
+  tasks:
+  - name: Set Node preparation 'Complete'
+    run_once: true
+    set_stats:
+      data:
+        installer_phase_node:
+          status: "Complete"
+          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"

+ 1 - 1
playbooks/openshift-node/private/certificates-backup.yml

@@ -14,7 +14,7 @@
       warn: no
 
 - name: Redeploy node certificates
-  hosts: oo_nodes_to_config
+  hosts: oo_nodes_to_config:!oo_exclude_nodes_to_config
   pre_tasks:
   - name: Remove CA certificate
     file:

+ 1 - 1
playbooks/openshift-node/private/certificates.yml

@@ -1,6 +1,6 @@
 ---
 - name: Create OpenShift certificates for node hosts
-  hosts: oo_nodes_to_config
+  hosts: oo_nodes_to_config:!oo_exclude_nodes_to_config
   gather_facts: no
   roles:
   - role: openshift_node_certificates

+ 1 - 1
playbooks/openshift-node/private/clean_image.yml

@@ -1,6 +1,6 @@
 ---
 - name: Configure nodes
-  hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+  hosts: oo_nodes_to_config:!oo_exclude_nodes_to_config
   tasks:
   - name: Remove any ansible facts created during AMI creation
     file:

+ 1 - 3
playbooks/openshift-node/private/config.yml

@@ -11,11 +11,9 @@
           status: "In Progress"
           start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
-- import_playbook: certificates.yml
-
 - import_playbook: setup.yml
 
-- import_playbook: containerized_nodes.yml
+- import_playbook: certificates.yml
 
 - import_playbook: configure_nodes.yml
 

+ 1 - 1
playbooks/openshift-node/private/configure_nodes.yml

@@ -1,6 +1,6 @@
 ---
 - name: Configure nodes
-  hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+  hosts: oo_nodes_to_config:!oo_exclude_nodes_to_config
   vars:
     openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
     openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"

+ 0 - 20
playbooks/openshift-node/private/containerized_nodes.yml

@@ -1,20 +0,0 @@
----
-- name: Configure containerized nodes
-  hosts: oo_containerized_master_nodes
-  serial: 1
-  vars:
-    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
-    openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
-    openshift_no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-
-  roles:
-  - role: openshift_clock
-  - role: openshift_cloud_provider
-    when: openshift_cloudprovider_kind is defined
-  - role: openshift_node
-    openshift_ca_host: "{{ groups.oo_first_master.0 }}"
-  - role: nickhammond.logrotate

+ 1 - 1
playbooks/openshift-node/private/enable_excluders.yml

@@ -1,6 +1,6 @@
 ---
 - name: Re-enable excluder if it was previously enabled
-  hosts: oo_nodes_to_config
+  hosts: oo_nodes_to_config:!oo_exclude_nodes_to_config
   gather_facts: no
   roles:
   - role: openshift_excluder

+ 1 - 1
playbooks/openshift-node/private/image_prep.yml

@@ -20,7 +20,7 @@
   import_playbook: configure_nodes.yml
 
 - name: node bootstrap config
-  hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+  hosts: oo_nodes_to_config
   tasks:
     - import_role:
         name: openshift_node

+ 70 - 0
playbooks/openshift-node/private/join.yml

@@ -0,0 +1,70 @@
+---
+- name: Evaluate bootstrapped nodes
+  hosts: localhost
+  gather_facts: no
+  connection: local
+  tasks:
+  - name: Add all nodes that are bootstrapped
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_bootstrap
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
+    when:
+    - hostvars[item].openshift is defined
+    - hostvars[item].openshift.common is defined
+    - (hostvars[item].openshift_is_bootstrapped | bool) or (hostvars[item].openshift_node_bootstrap | default(True) | bool)
+    changed_when: False
+
+- name: Distribute bootstrap and start nodes
+  hosts: oo_nodes_to_bootstrap
+  gather_facts: no
+  tasks:
+  - import_role:
+      name: openshift_node
+      tasks_from: distribute_bootstrap.yml
+
+- name: Approve any pending CSR requests from inventory nodes
+  hosts: oo_first_master
+  gather_facts: no
+  tasks:
+  - name: Dump all candidate bootstrap hostnames
+    debug:
+      msg: "{{ groups['oo_nodes_to_bootstrap'] | default([]) }}"
+
+  - name: Find all hostnames for bootstrapping
+    set_fact:
+      l_nodes_to_join: "{{ groups['oo_nodes_to_bootstrap'] | default([]) | map('extract', hostvars) | map(attribute='openshift.node.nodename') | list }}"
+
+  - name: Dump the bootstrap hostnames
+    debug:
+      msg: "{{ l_nodes_to_join }}"
+
+  - name: Approve bootstrap nodes
+    oc_adm_csr:
+      nodes: "{{ l_nodes_to_join }}"
+      timeout: 60
+      fail_on_timeout: true
+    register: approve_out
+    ignore_errors: true
+    when:
+    - l_nodes_to_join|length > 0
+
+  - when: not approve_out|succeeded
+    block:
+    - name: Get CSRs
+      command: >
+        {{ openshift_client_binary }} describe csr --config=/etc/origin/master/admin.kubeconfig
+    - name: Report approval errors
+      fail:
+        msg: Node approval failed
+
+- name: Ensure any inventory labels are applied to the nodes
+  hosts: oo_nodes_to_bootstrap
+  vars:
+    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+  roles:
+  - role: openshift_manage_node
+    openshift_master_host: "{{ groups.oo_first_master.0 }}"
+    openshift_manage_node_is_master: "{{ ('oo_masters_to_config' in group_names) | bool }}"

+ 1 - 1
playbooks/openshift-node/private/manage_node.yml

@@ -1,6 +1,6 @@
 ---
 - name: Additional node config
-  hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}"
+  hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}:!oo_exclude_nodes_to_config"
   vars:
     openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
   roles:

+ 10 - 11
playbooks/openshift-node/private/setup.yml

@@ -1,25 +1,24 @@
 ---
-- name: Disable excluders
-  hosts: oo_nodes_to_config
-  gather_facts: no
-  roles:
-  - role: openshift_excluder
-    r_openshift_excluder_action: disable
-
 - name: Evaluate node groups
   hosts: localhost
   connection: local
   tasks:
-  - name: Evaluate oo_containerized_master_nodes
+  - name: Evaluate oo_exclude_nodes_to_config as all nodes that have already been bootstrapped
     add_host:
       name: "{{ item }}"
-      groups: oo_containerized_master_nodes
+      groups: oo_exclude_nodes_to_config
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
     when:
     - hostvars[item].openshift is defined
     - hostvars[item].openshift.common is defined
-    - hostvars[item].openshift_is_containerized | bool
-    - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+    - hostvars[item].openshift_is_bootstrapped | bool
     changed_when: False
+
+- name: Disable excluders
+  hosts: oo_nodes_to_config:!oo_exclude_nodes_to_config
+  gather_facts: no
+  roles:
+  - role: openshift_excluder
+    r_openshift_excluder_action: disable

+ 23 - 0
playbooks/openshift-node/private/setup_bootstrap.yml

@@ -0,0 +1,23 @@
+---
+# We exclude all nodes that have already been bootstrapped or have requested not to be bootstrapped
+- name: Evaluate node groups
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: Evaluate oo_exclude_nodes_to_config as all nodes that shouldn't be configured for bootstrapping
+    add_host:
+      name: "{{ item }}"
+      groups: oo_exclude_nodes_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
+    when:
+    - (not (hostvars[item].openshift_node_bootstrap | default(True) | bool)) or (hostvars[item].openshift_is_bootstrapped | bool)
+    changed_when: False
+
+- name: Disable excluders
+  hosts: oo_nodes_to_config:!oo_exclude_nodes_to_config
+  gather_facts: no
+  roles:
+  - role: openshift_excluder
+    r_openshift_excluder_action: disable

+ 0 - 5
roles/etcd/tasks/main.yml

@@ -1,8 +1,3 @@
 ---
 - name: Configure etcd with static pods
   import_tasks: static.yml
-  when: openshift_master_bootstrap_enabled | default(False) | bool
-
-- name: Configure etcd with RPMs
-  import_tasks: rpm.yml
-  when: not (openshift_master_bootstrap_enabled | default(False) | bool)

+ 1 - 1
roles/lib_utils/filter_plugins/oo_filters.py

@@ -575,7 +575,7 @@ def lib_utils_oo_selector_to_string_list(user_dict):
     """Convert a dict of selectors to a key=value list of strings
 
 Given input of {'region': 'infra', 'zone': 'primary'} returns a list
-of items as ['region=infra', 'zone=primary']
+of items as ['node-role.kubernetes.io/infra=true', 'zone=primary']
     """
     selectors = []
     for key in user_dict:

+ 1 - 1
roles/lib_utils/filter_plugins/openshift_master.py

@@ -454,7 +454,7 @@ class GitHubIdentityProvider(IdentityProviderOauthBase):
 
 
 class FilterModule(object):
-    ''' Custom ansible filters for use by the openshift_master role'''
+    ''' Custom ansible filters for use by the openshift_control_plane role'''
 
     @staticmethod
     def translate_idps(idps, api_version):

+ 63 - 0
roles/openshift_ca/tasks/main.yml

@@ -40,10 +40,23 @@
   delegate_to: "{{ openshift_ca_host }}"
   run_once: true
 
+- name: Determine if front-proxy CA must be created
+  stat:
+    path: "{{ openshift_ca_config_dir }}/{{ item }}"
+  register: g_master_front_proxy_ca_stat_result
+  with_items:
+  - front-proxy-ca.crt
+  - front-proxy-ca.key
+  delegate_to: "{{ openshift_ca_host }}"
+  run_once: true
+
 - set_fact:
     master_ca_missing: "{{ False in (g_master_ca_stat_result.results
                                      | lib_utils_oo_collect(attribute='stat.exists')
                                      | list) }}"
+    master_front_proxy_ca_missing: "{{ False in (g_master_front_proxy_ca_stat_result.results
+                                     | lib_utils_oo_collect(attribute='stat.exists')
+                                     | list) }}"
   run_once: true
 
 - name: Retain original serviceaccount keys
@@ -86,6 +99,19 @@
   register: g_master_legacy_ca_result
 
 # This should NOT replace the CA due to --overwrite=false when a CA already exists.
+- name: Create the front-proxy CA if it does not already exist
+  command: >
+    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-signer-cert
+    --cert="{{ openshift_ca_config_dir }}/front-proxy-ca.crt"
+    --key="{{ openshift_ca_config_dir }}/front-proxy-ca.key"
+    --serial="{{ openshift_ca_config_dir }}/ca.serial.txt"
+    --expire-days={{ openshift_ca_cert_expire_days }}
+    --overwrite=false
+  when: master_front_proxy_ca_missing | bool or openshift_certificates_redeploy | default(false) | bool
+  delegate_to: "{{ openshift_ca_host }}"
+  run_once: true
+
+# This should NOT replace the CA due to --overwrite=false when a CA already exists.
 - name: Create the master certificates if they do not already exist
   command: >
     {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-master-certs
@@ -187,6 +213,43 @@
   delegate_to: "{{ openshift_ca_host }}"
   run_once: true
 
+# create-api-client-config generates a ca.crt file which will
+# overwrite the OpenShift CA certificate.  Generate the loopback
+# kubeconfig in a temporary directory and then copy files into the
+# master config dir to avoid overwriting ca.crt.
+- block:
+  - name: Create temp directory for loopback master client config
+    command: mktemp -d /tmp/openshift-ansible-XXXXXX
+    register: openshift_ca_loopback_tmpdir
+  - name: Generate the aggregator api-client config
+    command: >
+      {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config
+        --certificate-authority={{ openshift_ca_cert }}
+        {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
+        --certificate-authority {{ named_ca_certificate }}
+        {% endfor %}
+        --client-dir={{ openshift_ca_loopback_tmpdir.stdout }}
+        --user=aggregator-front-proxy
+        --signer-cert="{{ openshift_ca_config_dir }}/front-proxy-ca.crt"
+        --signer-key="{{ openshift_ca_config_dir }}/front-proxy-ca.key"
+        --signer-serial={{ openshift_ca_serial }}
+        --expire-days={{ openshift_master_cert_expire_days }}
+  - name: Copy generated loopback master client config to master config dir
+    copy:
+      src: "{{ openshift_ca_loopback_tmpdir.stdout }}/{{ item }}"
+      dest: "{{ openshift_ca_config_dir }}"
+      remote_src: true
+    with_items:
+    - aggregator-front-proxy.crt
+    - aggregator-front-proxy.key
+    - aggregator-front-proxy.kubeconfig
+  - name: Delete temp directory
+    file:
+      name: "{{ openshift_ca_loopback_tmpdir.stdout }}"
+      state: absent
+  delegate_to: "{{ openshift_ca_host }}"
+  run_once: true
+
 - name: Restore original serviceaccount keys
   copy:
     src: "{{ item }}.keep"

+ 20 - 24
roles/openshift_control_plane/defaults/main.yml

@@ -27,10 +27,28 @@ system_images_registry_dict:
 
 system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
 
-l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+l_osm_id_providers_dict:
+  openshift-enterprise:
+  - name: 'deny_all'
+    challenge: True
+    login: True
+    kind: 'DenyAllPasswordIdentityProvider'
+  origin:
+  - name: 'allow_all'
+    challenge: True
+    login: True
+    kind: 'AllowAllPasswordIdentityProvider'
+
+openshift_master_identity_providers: "{{ l_osm_id_providers_dict[openshift_deployment_type] }}"
+
+l_osm_disabled_features: "{{ openshift_deployment_subtype == 'registry' | bool }}"
+l_osm_disabled_features_list:
+- Builder
+- S2IBuilder
+- WebConsole
+
 
 openshift_master_dns_port: 8053
-osm_default_node_selector: ''
 osm_project_request_template: ''
 osm_mcs_allocator_range: 's0:/2'
 osm_mcs_labels_per_project: 5
@@ -95,26 +113,6 @@ openshift_master_oauth_templates: "{{ openshift_master_oauth_template | ternary(
 # Here we combine openshift_master_oath_template into 'login' key of openshift_master_oath_templates, if not present.
 l_openshift_master_oauth_templates: "{{ openshift_master_oauth_templates | default(openshift_master_oauth_templates_default) }}"
 
-# These defaults assume forcing journald persistence, fsync to disk once
-# a second, rate-limiting to 10,000 logs a second, no forwarding to
-# syslog or wall, using 8GB of disk space maximum, using 10MB journal
-# files, keeping only a days worth of logs per journal file, and
-# retaining journal files no longer than a month.
-journald_vars_to_replace:
-- { var: Storage, val: persistent }
-- { var: Compress, val: yes }
-- { var: SyncIntervalSec, val: 1s }
-- { var: RateLimitInterval, val: 1s }
-- { var: RateLimitBurst, val: 10000 }
-- { var: SystemMaxUse, val: 8G }
-- { var: SystemKeepFree, val: 20% }
-- { var: SystemMaxFileSize, val: 10M }
-- { var: MaxRetentionSec, val: 1month }
-- { var: MaxFileSec, val: 1day }
-- { var: ForwardToSyslog, val: no }
-- { var: ForwardToWall, val: no }
-
-
 # NOTE
 # r_openshift_master_*_default may be defined external to this role.
 # openshift_use_*, if defined, may affect other roles or play behavior.
@@ -142,8 +140,6 @@ openshift_master_image_config_latest: "{{ openshift_master_image_config_latest_d
 openshift_master_config_dir_default: "{{ openshift.common.config_base ~ '/master' if openshift is defined and 'common' in openshift else '/etc/origin/master' }}"
 openshift_master_config_dir: "{{ openshift_master_config_dir_default }}"
 
-openshift_master_bootstrap_enabled: False
-
 openshift_master_csr_sa: node-bootstrapper
 openshift_master_csr_namespace: openshift-infra
 

+ 8 - 1
roles/openshift_control_plane/files/apiserver.yaml

@@ -11,7 +11,7 @@ spec:
   hostNetwork: true
   containers:
   - name: api
-    image: openshift/origin:v3.9.0-alpha.4
+    image: openshift/origin:v3.10.0
     command: ["/bin/bash", "-c"]
     args:
     - |
@@ -36,6 +36,13 @@ spec:
         scheme: HTTPS
         port: 8443
         path: healthz
+      initialDelaySeconds: 45
+    readinessProbe:
+      httpGet:
+        scheme: HTTPS
+        port: 8443
+        path: healthz/ready
+      initialDelaySeconds: 10
   volumes:
   - name: master-config
     hostPath:

+ 1 - 1
roles/openshift_control_plane/files/controller.yaml

@@ -11,7 +11,7 @@ spec:
   hostNetwork: true
   containers:
   - name: controllers
-    image: openshift/origin:v3.9.0-alpha.4
+    image: openshift/origin:v3.10.0
     command: ["/bin/bash", "-c"]
     args:
     - |

+ 0 - 1
roles/openshift_master/tasks/bootstrap_settings.yml

@@ -12,4 +12,3 @@
   notify:
   - restart master controllers
   - restart master api
-  when: openshift_master_bootstrap_enabled | default(False)

roles/openshift_master/tasks/check_master_api_is_ready.yml → roles/openshift_control_plane/tasks/check_master_api_is_ready.yml


roles/openshift_master/tasks/ensure_nodes_matching_selector.yml → roles/openshift_control_plane/tasks/ensure_nodes_matching_selector.yml


+ 0 - 29
roles/openshift_control_plane/tasks/journald.yml

@@ -1,29 +0,0 @@
----
-- name: Checking for journald.conf
-  stat: path=/etc/systemd/journald.conf
-  register: journald_conf_file
-
-- name: Create journald persistence directories
-  file:
-    path: /var/log/journal
-    state: directory
-
-- name: Update journald setup
-  replace:
-    dest: /etc/systemd/journald.conf
-    regexp: '^(\#| )?{{ item.var }}=\s*.*?$'
-    replace: ' {{ item.var }}={{ item.val }}'
-    backup: yes
-  with_items: "{{ journald_vars_to_replace | default([]) }}"
-  when: journald_conf_file.stat.exists
-  register: journald_update
-
-# I need to restart journald immediatelly, otherwise it gets into way during
-# further steps in ansible
-- name: Restart journald
-  command: "systemctl restart systemd-journald"
-  retries: 3
-  delay: 5
-  register: result
-  until: result.rc == 0
-  when: journald_update is changed

+ 17 - 4
roles/openshift_control_plane/tasks/main.yml

@@ -14,6 +14,9 @@
 - name: Open up firewall ports
   import_tasks: firewall.yml
 
+- name: Prepare static pod scripts
+  import_tasks: static_shim.yml
+
 - name: Create r_openshift_master_data_dir
   file:
     path: "{{ r_openshift_master_data_dir }}"
@@ -119,9 +122,6 @@
     local_facts:
       no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
 
-- name: Update journald config
-  include_tasks: journald.yml
-
 - name: Create session secrets file
   template:
     dest: "{{ openshift.master.session_secrets_file }}"
@@ -162,10 +162,23 @@
     name: "{{ openshift_service_type }}-node"
     state: restarted
     enabled: yes
+  register: node_start
+  ignore_errors: yes
+
+- when: node_start is failed
+  block:
+  - name: Get node logs
+    command: journalctl --no-pager -n 300 -u {{ openshift_service_type }}-node
+    register: logs_node
+    ignore_errors: true
+  - debug:
+      msg: "{{ logs_node.stdout_lines }}"
+  - fail:
+      msg: Node start failed.
 
 - name: Verify that the control plane is running
   command: >
-    curl -k {{ openshift.master.api_url }}/healthz
+    curl -k {{ openshift.master.api_url }}/healthz/ready
   args:
     # Disables the following warning:
     # Consider using get_url or uri module rather than running curl

+ 9 - 2
roles/openshift_control_plane/tasks/upgrade.yml

@@ -8,7 +8,9 @@
 - include_tasks: "upgrade/{{ master_config_hook }}"
   when: master_config_hook is defined
 
-- include_tasks: journald.yml
+- include_tasks: systemd_units.yml
+
+- include_tasks: set_loopback_context.yml
 
 - name: Check for ca-bundle.crt
   stat:
@@ -42,4 +44,9 @@
     value: "{{ oreg_url | default(oreg_url_master) }}"
   when: oreg_url is defined or oreg_url_master is defined
 
-- include_tasks: static.yml
+- name: Change default node selector to compute=true
+  yedit:
+    src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+    key: 'projectConfig.defaultNodeSelector'
+    value: '{{ hostvars[groups.oo_first_master.0].l_osm_default_node_selector }}'
+  when: openshift_upgrade_target | version_compare('3.9', '>=')

+ 6 - 5
roles/openshift_control_plane/tasks/upgrade/rpm_upgrade.yml

@@ -8,15 +8,14 @@
 
 # TODO: If the sdn package isn't already installed this will install it, we
 # should fix that
-
-- import_tasks: ../static.yml
-
-- name: Upgrade master packages
+- name: Upgrade master packages - yum
   command:
     yum install -y {{ master_pkgs | join(' ') }} \
-    {{ ' --exclude *' ~ openshift_service_type ~ '*3.9*' if openshift_release | version_compare('3.9','<') else '' }}
+    {{ ' --exclude *' ~ openshift_service_type ~ '*3.9*' if openshift_upgrade_target | version_compare('3.9','<') else '' }}
   vars:
     master_pkgs:
+      - "{{ openshift_service_type }}{{ openshift_pkg_version | default('') }}"
+      - "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') }}"
       - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
       - "{{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }}"
   register: result
@@ -29,6 +28,8 @@
     state: present
   vars:
     master_pkgs:
+      - "{{ openshift_service_type }}{{ openshift_pkg_version }}"
+      - "{{ openshift_service_type }}-master{{ openshift_pkg_version }}"
       - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
       - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}"
   register: result

roles/openshift_master/tasks/upgrade_predicates.yml → roles/openshift_control_plane/tasks/upgrade/upgrade_predicates.yml


roles/openshift_master/tasks/upgrade/upgrade_priorities.yml → roles/openshift_control_plane/tasks/upgrade/upgrade_priorities.yml


+ 12 - 163
roles/openshift_control_plane/tasks/upgrade/upgrade_scheduler.yml

@@ -1,175 +1,24 @@
 ---
 # Upgrade predicates
-- vars:
-    # openshift_master_facts_default_predicates is a custom lookup plugin in
-    # role lib_utils
-    prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
-    prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}"
-    default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}"
-    # older_predicates are the set of predicates that have previously been
-    # hard-coded into openshift_facts
-    older_predicates:
-    - - name: MatchNodeSelector
-      - name: PodFitsResources
-      - name: PodFitsPorts
-      - name: NoDiskConflict
-      - name: NoVolumeZoneConflict
-      - name: MaxEBSVolumeCount
-      - name: MaxGCEPDVolumeCount
-      - name: Region
-        argument:
-          serviceAffinity:
-            labels:
-            - region
-    - - name: MatchNodeSelector
-      - name: PodFitsResources
-      - name: PodFitsPorts
-      - name: NoDiskConflict
-      - name: NoVolumeZoneConflict
-      - name: Region
-        argument:
-          serviceAffinity:
-            labels:
-            - region
-    - - name: MatchNodeSelector
-      - name: PodFitsResources
-      - name: PodFitsPorts
-      - name: NoDiskConflict
-      - name: Region
-        argument:
-          serviceAffinity:
-            labels:
-            - region
-    # older_predicates_no_region are the set of predicates that have previously
-    # been hard-coded into openshift_facts, with the Region predicate removed
-    older_predicates_no_region:
-    - - name: MatchNodeSelector
-      - name: PodFitsResources
-      - name: PodFitsPorts
-      - name: NoDiskConflict
-      - name: NoVolumeZoneConflict
-      - name: MaxEBSVolumeCount
-      - name: MaxGCEPDVolumeCount
-    - - name: MatchNodeSelector
-      - name: PodFitsResources
-      - name: PodFitsPorts
-      - name: NoDiskConflict
-      - name: NoVolumeZoneConflict
-    - - name: MatchNodeSelector
-      - name: PodFitsResources
-      - name: PodFitsPorts
-      - name: NoDiskConflict
-  block:
-
-  # Handle case where openshift_master_predicates is defined
-  - block:
-    - debug:
-        msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
-      when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]
-
-    - debug:
-        msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
-      when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates
-    when: openshift_master_scheduler_predicates | default(none) is not none
-
-  # Handle cases where openshift_master_predicates is not defined
-  - block:
-    - debug:
-        msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
-      when:
-      - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
-      - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates]
-
-    - set_fact:
-        openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
-      when:
-      - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
-      - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates]
-
-    - set_fact:
-        openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
-      when:
-      - openshift_master_scheduler_current_predicates != default_predicates_no_region
-      - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region]
-
-    when: openshift_master_scheduler_predicates | default(none) is none
-
+- include_tasks: upgrade_predicates.yml
 
 # Upgrade priorities
-- vars:
-    prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
-    prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}"
-    default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}"
-    # older_priorities are the set of priorities that have previously been
-    # hard-coded into openshift_facts
-    older_priorities:
-    - - name: LeastRequestedPriority
-        weight: 1
-      - name: SelectorSpreadPriority
-        weight: 1
-      - name: Zone
-        weight: 2
-        argument:
-          serviceAntiAffinity:
-            label: zone
-    # older_priorities_no_region are the set of priorities that have previously
-    # been hard-coded into openshift_facts, with the Zone priority removed
-    older_priorities_no_zone:
-    - - name: LeastRequestedPriority
-        weight: 1
-      - name: SelectorSpreadPriority
-        weight: 1
-  block:
-
-  # Handle case where openshift_master_priorities is defined
-  - block:
-    - debug:
-        msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
-      when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]
-
-    - debug:
-        msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
-      when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities
-    when: openshift_master_scheduler_priorities | default(none) is not none
-
-  # Handle cases where openshift_master_priorities is not defined
-  - block:
-    - debug:
-        msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
-      when:
-      - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
-      - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities]
-
-    - set_fact:
-        openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
-      when:
-      - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
-      - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities]
-
-    - set_fact:
-        openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
-      when:
-      - openshift_master_scheduler_current_priorities != default_priorities_no_zone
-      - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone]
-
-    when: openshift_master_scheduler_priorities | default(none) is none
-
+- include_tasks: upgrade_priorities.yml
 
 # Update scheduler
-- vars:
-    scheduler_config:
+- name: Update scheduler config
+  copy:
+    content: "{{ upgrade_scheduler_config | to_nice_json }}"
+    dest: "{{ openshift_master_scheduler_conf }}"
+    backup: true
+  when: >
+    openshift_upgrade_scheduler_predicates is defined or
+    openshift_upgrade_scheduler_priorities is defined
+  vars:
+    upgrade_scheduler_config:
       kind: Policy
       apiVersion: v1
       predicates: "{{ openshift_upgrade_scheduler_predicates
                       | default(openshift_master_scheduler_current_predicates) }}"
       priorities: "{{ openshift_upgrade_scheduler_priorities
                       | default(openshift_master_scheduler_current_priorities) }}"
-  block:
-  - name: Update scheduler config
-    copy:
-      content: "{{ scheduler_config | to_nice_json }}"
-      dest: "{{ openshift_master_scheduler_conf }}"
-      backup: true
-  when: >
-    openshift_upgrade_scheduler_predicates is defined or
-    openshift_upgrade_scheduler_priorities is defined

roles/openshift_master/tasks/upgrade/v3_6/master_config_upgrade.yml → roles/openshift_control_plane/tasks/upgrade/v3_6/master_config_upgrade.yml


roles/openshift_master/tasks/upgrade/v3_7/master_config_upgrade.yml → roles/openshift_control_plane/tasks/upgrade/v3_7/master_config_upgrade.yml


+ 22 - 4
roles/openshift_control_plane/templates/master.yaml.v1.j2

@@ -1,9 +1,27 @@
 kind: MasterConfig
 apiVersion: v1
 admissionConfig:
-{% if 'admission_plugin_config' in openshift.master %}
-  pluginConfig:{{ openshift.master.admission_plugin_config | lib_utils_to_padded_yaml(level=2) }}
-{% endif %}
+  pluginConfig:{{ openshift.master.admission_plugin_config | default(None) | lib_utils_to_padded_yaml(level=2) }}
+    PodPreset:
+      configuration:
+        kind: DefaultAdmissionConfig
+        apiVersion: v1
+        disable: false
+aggregatorConfig:
+  proxyClientInfo:
+    certFile: aggregator-front-proxy.crt
+    keyFile: aggregator-front-proxy.key
+authConfig:
+  requestHeader:
+    clientCA: front-proxy-ca.crt
+    clientCommonNames:
+    - aggregator-front-proxy
+    usernameHeaders:
+    - X-Remote-User
+    groupHeaders:
+    - X-Remote-Group
+    extraHeaderPrefixes:
+    - X-Remote-Extra-
 apiLevels:
 - v1
 {% if not openshift_version_gte_3_9 %}
@@ -179,7 +197,7 @@ policyConfig:
   openshiftInfrastructureNamespace: openshift-infra
   openshiftSharedResourcesNamespace: openshift
 projectConfig:
-  defaultNodeSelector: "{{ osm_default_node_selector }}"
+  defaultNodeSelector: "{{ hostvars[groups.oo_first_master.0].l_osm_default_node_selector }}"
   projectRequestMessage: "{{ osm_project_request_message }}"
   projectRequestTemplate: "{{ osm_project_request_template }}"
   securityAllocator:

roles/openshift_master/vars/main.yml → roles/openshift_control_plane/vars/main.yml


+ 7 - 1
roles/openshift_facts/library/openshift_facts.py

@@ -539,7 +539,10 @@ def set_nodename(facts):
         # elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
         #     facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
         else:
-            facts['node']['nodename'] = facts['common']['hostname'].lower()
+            if 'bootstrapped' in facts['node'] and facts['node']['bootstrapped']:
+                facts['node']['nodename'] = facts['common']['raw_hostname'].lower()
+            else:
+                facts['node']['nodename'] = facts['common']['hostname'].lower()
     return facts
 
 
@@ -1186,9 +1189,12 @@ class OpenShiftFacts(object):
         hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                            self.system_facts['ansible_fqdn']]
         hostname = choose_hostname(hostname_values, ip_addr).lower()
+        exit_code, output, _ = module.run_command(['hostname'])  # noqa: F405
+        raw_hostname = output.strip() if exit_code == 0 else hostname
 
         defaults['common'] = dict(ip=ip_addr,
                                   public_ip=ip_addr,
+                                  raw_hostname=raw_hostname,
                                   hostname=hostname,
                                   public_hostname=hostname,
                                   portal_net='172.30.0.0/16',

+ 1 - 1
roles/openshift_gcp/tasks/configure_master_healthcheck.yml

@@ -8,7 +8,7 @@
 - name: install haproxy
   package: name=haproxy state=present
   register: result
-  until: '"failed" not in result'
+  until: result is succeeded
   retries: 10
   delay: 10
 

+ 1 - 1
roles/openshift_gcp/tasks/setup_scale_group_facts.yml

@@ -41,4 +41,4 @@
     groups: bootstrap_nodes
     openshift_node_bootstrap: True
   with_items: "{{ groups['tag_ocp-node'] | default([]) | intersect(groups['tag_ocp-bootstrap'] | default([])) }}"
-  when: not (openshift_node_bootstrap | default(False))
+  when: not (openshift_node_bootstrap | default(True))

+ 1 - 1
roles/openshift_gcp/templates/openshift-bootstrap-update.j2

@@ -2,6 +2,6 @@
 
 set -euo pipefail
 
-oc serviceaccounts create-kubeconfig -n openshift-infra node-bootstrapper > /root/bootstrap.kubeconfig
+"{{ openshift_client_binary }}" serviceaccounts create-kubeconfig -n openshift-infra node-bootstrapper > /root/bootstrap.kubeconfig
 gcloud compute project-info --project '{{ openshift_gcp_project }}' add-metadata --metadata-from-file '{{ openshift_gcp_prefix + openshift_gcp_clusterid | default("default") }}-bootstrap-config=/root/bootstrap.kubeconfig'
 rm -f /root/bootstrap.kubeconfig

+ 1 - 1
roles/openshift_grafana/defaults/main.yaml

@@ -12,7 +12,7 @@ l_openshift_grafana_serviceaccount_annotations:
 openshift_grafana_datasource_name: "prometheus"
 openshift_grafana_node_exporter: false
 openshift_grafana_graph_granularity: "2m"
-openshift_grafana_node_selector: {"region":"infra"}
+openshift_grafana_node_selector: {"node-role.kubernetes.io/infra":"true"}
 openshift_grafana_hostname: grafana-{{openshift_grafana_namespace}}.{{openshift_master_default_subdomain}}
 openshift_grafana_service_name: grafana
 openshift_grafana_service_port: 443

+ 1 - 1
roles/openshift_grafana/tasks/install_grafana.yaml

@@ -2,7 +2,7 @@
 
 - name: Ensure that Grafana has nodes to run on
   import_role:
-    name: openshift_master
+    name: openshift_control_plane
     tasks_from: ensure_nodes_matching_selector.yml
   vars:
     openshift_master_ensure_nodes_selector: "{{ grafana_node_selector | map_to_pairs }}"

+ 3 - 3
roles/openshift_hosted/defaults/main.yml

@@ -2,7 +2,7 @@
 ##########
 # Common #
 ##########
-openshift_hosted_infra_selector: "region=infra"
+openshift_hosted_infra_selector: "node-role.kubernetes.io/infra=true"
 r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}"
 r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}"
 
@@ -34,7 +34,7 @@ r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default
 openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}"
 openshift_hosted_router_namespace: 'default'
 
-openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
+openshift_hosted_router_wait: False
 
 openshift_hosted_router_edits:
 - key: spec.strategy.rollingParams.intervalSeconds
@@ -81,7 +81,7 @@ r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(
 r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
 
 openshift_hosted_registry_name: docker-registry
-openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
+openshift_hosted_registry_wait: False
 openshift_hosted_registry_cert_expire_days: 730
 r_openshift_hosted_registry_os_firewall_deny: []
 r_openshift_hosted_registry_os_firewall_allow:

+ 1 - 1
roles/openshift_logging_curator/tasks/main.yaml

@@ -18,7 +18,7 @@
 
 - name: Ensure that Logging Curator has nodes to run on
   import_role:
-    name: openshift_master
+    name: openshift_control_plane
     tasks_from: ensure_nodes_matching_selector.yml
   vars:
     openshift_master_ensure_nodes_selector: "{{ openshift_logging_curator_nodeselector | map_to_pairs }}"

+ 1 - 1
roles/openshift_logging_elasticsearch/tasks/main.yaml

@@ -1,7 +1,7 @@
 ---
 - name: Ensure that ElasticSearch has nodes to run on
   import_role:
-    name: openshift_master
+    name: openshift_control_plane
     tasks_from: ensure_nodes_matching_selector.yml
   vars:
     openshift_master_ensure_nodes_selector: "{{ openshift_logging_es_nodeselector | map_to_pairs }}"

+ 1 - 1
roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml

@@ -6,7 +6,7 @@
 
 - name: Ensure that Logging EventRouter has nodes to run on
   import_role:
-    name: openshift_master
+    name: openshift_control_plane
     tasks_from: ensure_nodes_matching_selector.yml
   vars:
     openshift_master_ensure_nodes_selector: "{{ openshift_logging_eventrouter_nodeselector | map_to_pairs }}"

+ 1 - 1
roles/openshift_logging_kibana/tasks/main.yaml

@@ -10,7 +10,7 @@
 
 - name: Ensure that Kibana has nodes to run on
   import_role:
-    name: openshift_master
+    name: openshift_control_plane
     tasks_from: ensure_nodes_matching_selector.yml
   vars:
     openshift_master_ensure_nodes_selector: "{{ openshift_logging_kibana_nodeselector | map_to_pairs }}"

+ 1 - 1
roles/openshift_logging_mux/tasks/main.yaml

@@ -9,7 +9,7 @@
 
 - name: Ensure that Logging Mux has nodes to run on
   import_role:
-    name: openshift_master
+    name: openshift_control_plane
     tasks_from: ensure_nodes_matching_selector.yml
   vars:
     openshift_master_ensure_nodes_selector: "{{ openshift_logging_mux_nodeselector | map_to_pairs }}"

+ 1 - 1
roles/openshift_manage_node/tasks/set_default_node_role.yml

@@ -7,7 +7,7 @@
       oc_obj:
         state: list
         kind: Node
-        selector: '{{ (openshift_hosted_infra_selector | default("region=infra")) | regex_replace("=", "!=") }},node-role.kubernetes.io/master!=true,node-role.kubernetes.io/compute!=true'
+        selector: '{{ (openshift_hosted_infra_selector | default("node-role.kubernetes.io/infra=true")) | regex_replace("=", "!=") }},node-role.kubernetes.io/master!=true,node-role.kubernetes.io/compute!=true'
       register: non_master_non_infra_nodes_result
 
     - name: label non-master non-infra nodes compute

+ 0 - 49
roles/openshift_master/README.md

@@ -1,49 +0,0 @@
-OpenShift Master
-==================================
-
-Master service installation
-
-Requirements
-------------
-
-* Ansible 2.2
-* A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
-rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos.
-
-Role Variables
---------------
-
-From this role:
-
-| Name                                             | Default value         |                                                                               |
-|---------------------------------------------------|-----------------------|-------------------------------------------------------------------------------|
-| openshift_node_ips                                | []                    | List of the openshift node ip addresses to pre-register when master starts up |
-| oreg_url                                          | UNDEF                 | Default docker registry to use                                                |
-| oreg_url_master                                   | UNDEF                 | Default docker registry to use, specifically on the master                    |
-| openshift_master_api_port                         | UNDEF                 |                                                                               |
-| openshift_master_console_port                     | UNDEF                 |                                                                               |
-| openshift_master_api_url                          | UNDEF                 |                                                                               |
-| openshift_master_console_url                      | UNDEF                 |                                                                               |
-| openshift_master_public_api_url                   | UNDEF                 |                                                                               |
-| openshift_master_public_console_url               | UNDEF                 |                                                                               |
-| openshift_master_saconfig_limit_secret_references | false                 |                                                                               |
-
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-TODO

+ 0 - 149
roles/openshift_master/defaults/main.yml

@@ -1,149 +0,0 @@
----
-# openshift_master_defaults_in_use is a workaround to detect if we are consuming
-# the plays from the role or outside of the role.
-openshift_master_defaults_in_use: True
-openshift_master_debug_level: "{{ debug_level | default(2) }}"
-
-r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
-r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-
-osm_image_default_dict:
-  origin: 'openshift/origin'
-  openshift-enterprise: 'openshift3/ose'
-osm_image_default: "{{ osm_image_default_dict[openshift_deployment_type] }}"
-osm_image: "{{ osm_image_default }}"
-
-system_images_registry_dict:
-  openshift-enterprise: "registry.access.redhat.com"
-  origin: "docker.io"
-
-l_openshift_master_images_dict:
-  origin: 'openshift/origin-${component}:${version}'
-  openshift-enterprise: 'openshift3/ose-${component}:${version}'
-l_osm_registry_url_default: "{{ l_openshift_master_images_dict[openshift_deployment_type] }}"
-l_osm_registry_url: "{{ oreg_url_master | default(oreg_url) | default(l_osm_registry_url_default) }}"
-
-system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
-
-l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
-
-l_osm_id_providers_dict:
-  openshift-enterprise:
-  - name: 'deny_all'
-    challenge: True
-    login: True
-    kind: 'DenyAllPasswordIdentityProvider'
-  origin:
-  - name: 'allow_all'
-    challenge: True
-    login: True
-    kind: 'AllowAllPasswordIdentityProvider'
-
-openshift_master_identity_providers: "{{ l_osm_id_providers_dict[openshift_deployment_type] }}"
-
-l_osm_disabled_features: "{{ openshift_deployment_subtype == 'registry' | bool }}"
-l_osm_disabled_features_list:
-- Builder
-- S2IBuilder
-- WebConsole
-
-openshift_master_dns_port: 8053
-osm_project_request_template: ''
-osm_mcs_allocator_range: 's0:/2'
-osm_mcs_labels_per_project: 5
-osm_uid_allocator_range: '1000000000-1999999999/10000'
-osm_project_request_message: ''
-
-openshift_node_ips: []
-r_openshift_master_clean_install: false
-r_openshift_master_etcd3_storage: false
-r_openshift_master_os_firewall_enable: true
-r_openshift_master_os_firewall_deny: []
-default_r_openshift_master_os_firewall_allow:
-- service: api server https
-  port: "{{ openshift.master.api_port }}/tcp"
-- service: api controllers https
-  port: "{{ openshift.master.controllers_port }}/tcp"
-- service: skydns tcp
-  port: "{{ openshift_master_dns_port }}/tcp"
-- service: skydns udp
-  port: "{{ openshift_master_dns_port }}/udp"
-r_openshift_master_os_firewall_allow: "{{ default_r_openshift_master_os_firewall_allow | union(openshift_master_open_ports | default([])) }}"
-
-# oreg_url is defined by user input
-oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
-oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
-oreg_auth_credentials_replace: False
-l_bind_docker_reg_auth: False
-openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False) | bool) or (openshift_use_crio_only | default(False)) }}"
-
-containerized_svc_dir: "/usr/lib/systemd/system"
-ha_svc_template_path: "native-cluster"
-
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
-
-openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
-loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
-openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
-openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
-
-scheduler_config:
-  kind: Policy
-  apiVersion: v1
-  predicates: "{{ openshift_master_scheduler_predicates
-                  | default(openshift_master_scheduler_current_predicates
-                            | default(openshift_master_scheduler_default_predicates)) }}"
-  priorities: "{{ openshift_master_scheduler_priorities
-                  | default(openshift_master_scheduler_current_priorities
-                            | default(openshift_master_scheduler_default_priorities)) }}"
-
-openshift_master_valid_grant_methods:
-- auto
-- prompt
-- deny
-
-openshift_master_is_scaleup_host: False
-
-# openshift_master_oauth_template is deprecated.  Should be added to deprecations
-# and removed.
-openshift_master_oauth_template: False
-openshift_master_oauth_templates_default:
-  login: "{{ openshift_master_oauth_template }}"
-openshift_master_oauth_templates: "{{ openshift_master_oauth_template | ternary(openshift_master_oauth_templates_default, False) }}"
-# Here we combine openshift_master_oath_template into 'login' key of openshift_master_oath_templates, if not present.
-l_openshift_master_oauth_templates: "{{ openshift_master_oauth_templates | default(openshift_master_oauth_templates_default) }}"
-
-# NOTE
-# r_openshift_master_*_default may be defined external to this role.
-# openshift_use_*, if defined, may affect other roles or play behavior.
-r_openshift_master_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}"
-r_openshift_master_use_openshift_sdn: "{{ r_openshift_master_use_openshift_sdn_default }}"
-
-r_openshift_master_use_nuage_default: "{{ openshift_use_nuage | default(False) }}"
-r_openshift_master_use_nuage: "{{ r_openshift_master_use_nuage_default }}"
-
-r_openshift_master_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
-r_openshift_master_use_contiv: "{{ r_openshift_master_use_contiv_default }}"
-
-r_openshift_master_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}"
-r_openshift_master_use_kuryr: "{{ r_openshift_master_use_kuryr_default }}"
-
-r_openshift_master_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
-r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}"
-
-r_openshift_master_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"
-r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_plugin_name_default }}"
-
-openshift_master_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
-openshift_master_image_config_latest: "{{ openshift_master_image_config_latest_default }}"
-
-openshift_master_config_dir_default: "{{ openshift.common.config_base ~ '/master' if openshift is defined and 'common' in openshift else '/etc/origin/master' }}"
-openshift_master_config_dir: "{{ openshift_master_config_dir_default }}"
-
-openshift_master_bootstrap_enabled: False
-
-openshift_master_csr_sa: node-bootstrapper
-openshift_master_csr_namespace: openshift-infra
-
-openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
-openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"

+ 0 - 36
roles/openshift_master/handlers/main.yml

@@ -1,36 +0,0 @@
----
-- name: restart master api
-  systemd:
-    name: "{{ openshift_service_type }}-master-api"
-    state: restarted
-  when:
-  - not (master_api_service_status_changed | default(false) | bool)
-  notify:
-  - Verify API Server
-
-# We retry the controllers because the API may not be 100% initialized yet.
-- name: restart master controllers
-  command: "systemctl restart {{ openshift_service_type }}-master-controllers"
-  retries: 3
-  delay: 5
-  register: result
-  until: result.rc == 0
-  when:
-  - not (master_controllers_service_status_changed | default(false) | bool)
-
-- name: Verify API Server
-  # Using curl here since the uri module requires python-httplib2 and
-  # wait_for port doesn't provide health information.
-  command: >
-    curl --silent --tlsv1.2
-    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
-    {{ openshift.master.api_url }}/healthz/ready
-  args:
-    # Disables the following warning:
-    # Consider using get_url or uri module rather than running curl
-    warn: no
-  register: l_api_available_output
-  until: l_api_available_output.stdout == 'ok'
-  retries: 120
-  delay: 1
-  changed_when: false

+ 0 - 17
roles/openshift_master/meta/main.yml

@@ -1,17 +0,0 @@
----
-galaxy_info:
-  author: Jhon Honce
-  description: Master
-  company: Red Hat, Inc.
-  license: Apache License, Version 2.0
-  min_ansible_version: 2.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-dependencies:
-- role: lib_openshift
-- role: lib_utils
-- role: openshift_facts

+ 0 - 15
roles/openshift_master/tasks/bootstrap.yml

@@ -1,15 +0,0 @@
----
-# TODO: create a module for this command.
-# oc_serviceaccounts_kubeconfig
-- name: create service account kubeconfig with csr rights
-  command: >
-    oc serviceaccounts create-kubeconfig {{ openshift_master_csr_sa }} -n {{ openshift_master_csr_namespace }}
-  register: kubeconfig_out
-  until: kubeconfig_out.rc == 0
-  retries: 24
-  delay: 5
-
-- name: put service account kubeconfig into a file on disk for bootstrap
-  copy:
-    content: "{{ kubeconfig_out.stdout }}"
-    dest: "{{ openshift_master_config_dir }}/bootstrap.kubeconfig"

+ 0 - 44
roles/openshift_master/tasks/firewall.yml

@@ -1,44 +0,0 @@
----
-- when: r_openshift_master_firewall_enabled | bool and not r_openshift_master_use_firewalld | bool
-  block:
-  - name: Add iptables allow rules
-    os_firewall_manage_iptables:
-      name: "{{ item.service }}"
-      action: add
-      protocol: "{{ item.port.split('/')[1] }}"
-      port: "{{ item.port.split('/')[0] }}"
-    when:
-    - item.cond | default(True)
-    with_items: "{{ r_openshift_master_os_firewall_allow }}"
-
-  - name: Remove iptables rules
-    os_firewall_manage_iptables:
-      name: "{{ item.service }}"
-      action: remove
-      protocol: "{{ item.port.split('/')[1] }}"
-      port: "{{ item.port.split('/')[0] }}"
-    when:
-    - item.cond | default(True)
-    with_items: "{{ r_openshift_master_os_firewall_deny }}"
-
-- when: r_openshift_master_firewall_enabled | bool and r_openshift_master_use_firewalld | bool
-  block:
-  - name: Add firewalld allow rules
-    firewalld:
-      port: "{{ item.port }}"
-      permanent: true
-      immediate: true
-      state: enabled
-    when:
-    - item.cond | default(True)
-    with_items: "{{ r_openshift_master_os_firewall_allow }}"
-
-  - name: Remove firewalld allow rules
-    firewalld:
-      port: "{{ item.port }}"
-      permanent: true
-      immediate: true
-      state: disabled
-    when:
-    - item.cond | default(True)
-    with_items: "{{ r_openshift_master_os_firewall_deny }}"

+ 0 - 253
roles/openshift_master/tasks/main.yml

@@ -1,253 +0,0 @@
----
-# TODO: add ability to configure certificates given either a local file to
-#       point to or certificate contents, set in default cert locations.
-
-# Authentication Variable Validation
-# TODO: validate the different identity provider kinds as well
-- fail:
-    msg: >
-      Invalid OAuth grant method: {{ openshift_master_oauth_grant_method }}
-  when:
-  - openshift_master_oauth_grant_method is defined
-  - openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
-
-- name: Open up firewall ports
-  import_tasks: firewall.yml
-
-- name: Install Master package
-  package:
-    name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
-    state: present
-  when:
-  - not openshift_is_containerized | bool
-  register: result
-  until: result is succeeded
-
-- name: Create r_openshift_master_data_dir
-  file:
-    path: "{{ r_openshift_master_data_dir }}"
-    state: directory
-    mode: 0755
-    owner: root
-    group: root
-  when:
-  - openshift_is_containerized | bool
-
-- name: Reload systemd units
-  command: systemctl daemon-reload
-  when:
-  - openshift_is_containerized | bool
-
-- name: Re-gather package dependent master facts
-  openshift_facts:
-
-- name: Create config parent directory if it does not exist
-  file:
-    path: "{{ openshift_master_config_dir }}"
-    state: directory
-
-- name: Create the policy file if it does not already exist
-  command: >
-    {{ openshift_client_binary }} adm create-bootstrap-policy-file
-      --filename={{ openshift_master_policy }}
-  args:
-    creates: "{{ openshift_master_policy }}"
-  notify:
-  - restart master api
-  - restart master controllers
-
-- name: Create the scheduler config
-  copy:
-    content: "{{ scheduler_config | to_nice_json }}"
-    dest: "{{ openshift_master_scheduler_conf }}"
-    backup: true
-  notify:
-  - restart master api
-  - restart master controllers
-
-- name: Install httpd-tools if needed
-  package: name=httpd-tools state=present
-  when:
-  - item.kind == 'HTPasswdPasswordIdentityProvider'
-  - not openshift_is_atomic | bool
-  with_items: "{{ openshift_master_identity_providers }}"
-  register: result
-  until: result is succeeded
-
-- name: Ensure htpasswd directory exists
-  file:
-    path: "{{ item.filename | dirname }}"
-    state: directory
-  when:
-  - item.kind == 'HTPasswdPasswordIdentityProvider'
-  with_items: "{{ openshift_master_identity_providers }}"
-
-- name: Create the htpasswd file if needed
-  template:
-    dest: "{{ item.filename }}"
-    src: htpasswd.j2
-    backup: yes
-  when:
-  - item.kind == 'HTPasswdPasswordIdentityProvider'
-  - openshift.master.manage_htpasswd | bool
-  with_items: "{{ openshift_master_identity_providers }}"
-
-- name: Ensure htpasswd file exists
-  copy:
-    dest: "{{ item.filename }}"
-    force: no
-    content: ""
-    mode: 0600
-  when:
-  - item.kind == 'HTPasswdPasswordIdentityProvider'
-  with_items: "{{ openshift_master_identity_providers }}"
-
-- name: Create the ldap ca file if needed
-  copy:
-    dest: "{{ item.ca if 'ca' in item and '/' in item.ca else openshift_master_config_dir ~ '/' ~ item.ca | default('ldap_ca.crt') }}"
-    content: "{{ openshift.master.ldap_ca }}"
-    mode: 0600
-    backup: yes
-  when:
-  - openshift.master.ldap_ca is defined
-  - item.kind == 'LDAPPasswordIdentityProvider'
-  with_items: "{{ openshift_master_identity_providers }}"
-
-- name: Create the openid ca file if needed
-  copy:
-    dest: "{{ item.ca if 'ca' in item and '/' in item.ca else openshift_master_config_dir ~ '/' ~ item.ca | default('openid_ca.crt') }}"
-    content: "{{ openshift.master.openid_ca }}"
-    mode: 0600
-    backup: yes
-  when:
-  - openshift.master.openid_ca is defined
-  - item.kind == 'OpenIDIdentityProvider'
-  - item.ca | default('') != ''
-  with_items: "{{ openshift_master_identity_providers }}"
-
-- name: Create the request header ca file if needed
-  copy:
-    dest: "{{ item.clientCA if 'clientCA' in item and '/' in item.clientCA else openshift_master_config_dir ~ '/' ~ item.clientCA | default('request_header_ca.crt') }}"
-    content: "{{ openshift.master.request_header_ca }}"
-    mode: 0600
-    backup: yes
-  when:
-  - openshift.master.request_header_ca is defined
-  - item.kind == 'RequestHeaderIdentityProvider'
-  - item.clientCA | default('') != ''
-  with_items: "{{ openshift_master_identity_providers }}"
-
-- name: Include push_via_dns.yml
-  include_tasks: push_via_dns.yml
-
-- name: Set fact of all etcd host IPs
-  openshift_facts:
-    role: common
-    local_facts:
-      no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
-
-- name: Install the systemd units
-  include_tasks: systemd_units.yml
-
-- name: Install Master system container
-  include_tasks: system_container.yml
-  when:
-  - openshift_is_containerized | bool
-  - l_is_master_system_container | bool
-
-- name: Create session secrets file
-  template:
-    dest: "{{ openshift.master.session_secrets_file }}"
-    src: sessionSecretsFile.yaml.v1.j2
-    owner: root
-    group: root
-    mode: 0600
-  when:
-  - openshift.master.session_auth_secrets is defined
-  - openshift.master.session_encryption_secrets is defined
-  notify:
-  - restart master api
-
-- set_fact:
-    # translate_idps is a custom filter in role lib_utils
-    translated_identity_providers: "{{ openshift_master_identity_providers | translate_idps('v1') }}"
-
-# TODO: add the validate parameter when there is a validation command to run
-- name: Create master config
-  template:
-    dest: "{{ openshift_master_config_file }}"
-    src: master.yaml.v1.j2
-    backup: true
-    owner: root
-    group: root
-    mode: 0600
-  notify:
-  - restart master api
-  - restart master controllers
-
-- include_tasks: bootstrap_settings.yml
-  when: openshift_master_bootstrap_enabled | default(False)
-
-- include_tasks: set_loopback_context.yml
-
-- name: Start and enable master api
-  systemd:
-    name: "{{ openshift_service_type }}-master-api"
-    enabled: yes
-    state: started
-  register: l_start_result
-  until: not (l_start_result is failed)
-  retries: 1
-  delay: 60
-
-- name: Dump logs from master-api if it failed
-  command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
-  when:
-  - l_start_result is failed
-
-- set_fact:
-    master_api_service_status_changed: "{{ l_start_result is changed }}"
-
-- include_tasks: check_master_api_is_ready.yml
-  when:
-  - master_api_service_status_changed | bool
-
-- name: Start and enable master controller service
-  systemd:
-    name: "{{ openshift_service_type }}-master-controllers"
-    enabled: yes
-    state: started
-  register: l_start_result
-  until: not (l_start_result is failed)
-  retries: 1
-  delay: 60
-
-- name: configure vsphere svc account
-  include_role:
-    name: openshift_cloud_provider
-    tasks_from: vsphere-svc
-  when:
-  - openshift_cloudprovider_kind is defined
-  - openshift_cloudprovider_kind == 'vsphere'
-  - openshift_version | version_compare('3.9', '>=')
-  - inventory_hostname == openshift_master_hosts[0]
-
-- name: update vsphere provider master config
-  include_tasks: update-vsphere.yml
-  when:
-  - openshift_cloudprovider_kind is defined
-  - openshift_cloudprovider_kind == 'vsphere'
-  - openshift_version | version_compare('3.9', '>=')
-
-- name: Dump logs from master-controllers if it failed
-  command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-controllers
-  when:
-  - l_start_result is failed
-
-- name: Set fact master_controllers_service_status_changed
-  set_fact:
-    master_controllers_service_status_changed: "{{ l_start_result is changed }}"
-
-- name: node bootstrap settings
-  include_tasks: bootstrap.yml
-  when: openshift_master_bootstrap_enabled | default(False)

+ 0 - 13
roles/openshift_master/tasks/push_via_dns.yml

@@ -1,13 +0,0 @@
----
-# This is an ugly hack to verify settings are in a file without modifying them with lineinfile.
-# The template file will stomp any other settings made.
-- when: openshift_push_via_dns is not defined
-  block:
-  - name: check whether our docker-registry setting exists in the env file
-    shell: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master*"
-    failed_when: false
-    changed_when: false
-    register: l_already_set
-
-  - set_fact:
-      openshift_push_via_dns: True

+ 0 - 52
roles/openshift_master/tasks/registry_auth.yml

@@ -1,52 +0,0 @@
----
-- name: Check for credentials file for registry auth
-  stat:
-    path: "{{ oreg_auth_credentials_path }}"
-  when: oreg_auth_user is defined
-  register: master_oreg_auth_credentials_stat
-
-- name: Create credentials for registry auth
-  command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
-  when:
-  - not (openshift_docker_alternative_creds | default(False))
-  - oreg_auth_user is defined
-  - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
-  register: master_oreg_auth_credentials_create
-  retries: 3
-  delay: 5
-  until: master_oreg_auth_credentials_create.rc == 0
-  notify:
-  - restart master api
-  - restart master controllers
-
-# docker_creds is a custom module from lib_utils
-# 'docker login' requires a docker.service running on the local host, this is an
-# alternative implementation for non-docker hosts.  This implementation does not
-# check the registry to determine whether or not the credentials will work.
-- name: Create credentials for registry auth (alternative)
-  docker_creds:
-    path: "{{ oreg_auth_credentials_path }}"
-    registry: "{{ oreg_host }}"
-    username: "{{ oreg_auth_user }}"
-    password: "{{ oreg_auth_password }}"
-  when:
-  - openshift_docker_alternative_creds | default(False) | bool
-  - oreg_auth_user is defined
-  - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
-  register: master_oreg_auth_credentials_create_alt
-  notify:
-  - restart master api
-  - restart master controllers
-
-# Container images may need the registry credentials
-- name: Setup ro mount of /root/.docker for containerized hosts
-  set_fact:
-    l_bind_docker_reg_auth: True
-  when:
-  - openshift_is_containerized | bool
-  - oreg_auth_user is defined
-  - >
-      (master_oreg_auth_credentials_stat.stat.exists
-      or oreg_auth_credentials_replace
-      or master_oreg_auth_credentials_create.changed
-      or master_oreg_auth_credentials_create_alt.changed) | bool

+ 0 - 19
roles/openshift_master/tasks/restart.yml

@@ -1,19 +0,0 @@
----
-- name: Restart master API
-  service:
-    name: "{{ openshift_service_type }}-master-api"
-    state: restarted
-- name: Wait for master API to come back online
-  wait_for:
-    host: "{{ openshift.common.hostname }}"
-    state: started
-    delay: 10
-    port: "{{ openshift.master.api_port }}"
-    timeout: 600
-# We retry the controllers because the API may not be 100% initialized yet.
-- name: restart master controllers
-  command: "systemctl restart {{ openshift_service_type }}-master-controllers"
-  retries: 3
-  delay: 5
-  register: result
-  until: result.rc == 0

+ 0 - 34
roles/openshift_master/tasks/set_loopback_context.yml

@@ -1,34 +0,0 @@
----
-- name: Test local loopback context
-  command: >
-    {{ openshift_client_binary }} config view
-    --config={{ openshift_master_loopback_config }}
-  changed_when: false
-  register: l_loopback_config
-
-- command: >
-    {{ openshift_client_binary }} config set-cluster
-    --certificate-authority={{ openshift_master_config_dir }}/ca.crt
-    --embed-certs=true --server={{ openshift.master.loopback_api_url }}
-    {{ openshift.master.loopback_cluster_name }}
-    --config={{ openshift_master_loopback_config }}
-  when:
-  - loopback_context_string not in l_loopback_config.stdout
-  register: set_loopback_cluster
-
-- command: >
-    {{ openshift_client_binary }} config set-context
-    --cluster={{ openshift.master.loopback_cluster_name }}
-    --namespace=default --user={{ openshift.master.loopback_user }}
-    {{ openshift.master.loopback_context_name }}
-    --config={{ openshift_master_loopback_config }}
-  when:
-  - set_loopback_cluster is changed
-  register: l_set_loopback_context
-
-- command: >
-    {{ openshift_client_binary }} config use-context {{ openshift.master.loopback_context_name }}
-    --config={{ openshift_master_loopback_config }}
-  when:
-  - l_set_loopback_context is changed
-  register: set_current_context

+ 0 - 32
roles/openshift_master/tasks/system_container.yml

@@ -1,32 +0,0 @@
----
-
-- name: Pre-pull master system container image
-  command: >
-    atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }}
-  register: l_pull_result
-  changed_when: "'Pulling layer' in l_pull_result.stdout"
-
-- name: Check Master system container package
-  command: >
-    atomic containers list --no-trunc -a -f container={{ openshift_service_type }}-master
-
-# HA
-- name: Install or Update HA api master system container
-  oc_atomic_container:
-    name: "{{ openshift_service_type }}-master-api"
-    image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }}"
-    state: latest
-    values:
-    - COMMAND=api
-    - "NODE_SERVICE={{ openshift_service_type }}-node.service"
-    - "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
-
-- name: Install or Update HA controller master system container
-  oc_atomic_container:
-    name: "{{ openshift_service_type }}-master-controllers"
-    image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }}"
-    state: latest
-    values:
-    - COMMAND=controllers
-    - "NODE_SERVICE={{ openshift_service_type }}-node.service"
-    - "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"

+ 0 - 0
roles/openshift_master/tasks/systemd_units.yml


Daži faili netika attēloti, jo izmaiņu fails ir pārāk liels