basic_facts.yml

---
- name: Ensure that all non-node hosts are accessible
  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config
  any_errors_fatal: true
  tasks:
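  # This play intentionally has no tasks: combined with the default
  # gather_facts and any_errors_fatal above, it aborts the entire run
  # early if any master, etcd, load balancer, or NFS host is unreachable,
  # instead of failing partway through the install.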

- name: Initialize basic host facts
  # l_init_fact_hosts is passed in via play during control-plane-only
  # upgrades and scale-up plays; otherwise oo_all_hosts is used.
  hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
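  # Illustrative example of how a calling playbook can narrow the scope
  # (the variable value shown is hypothetical):
  #
  #   - import_playbook: basic_facts.yml
  #     vars:
  #       l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config"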
  roles:
  - role: openshift_facts
  tasks:
  # TODO: Should this role be refactored into health_checks??
  - name: Run openshift_sanitize_inventory to set variables
    import_role:
      name: openshift_sanitize_inventory

  - name: Detecting Operating System from ostree_booted
    stat:
      path: /run/ostree-booted
    register: ostree_booted
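  # /run/ostree-booted exists only on OSTree-managed systems such as
  # Atomic Host; its presence drives openshift_is_atomic below.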

  # TODO(michaelgugino) remove this task once CI is updated.
  - name: set openshift_deployment_type if unset
    set_fact:
      openshift_deployment_type: "{{ deployment_type }}"
    when:
    - openshift_deployment_type is undefined
    - deployment_type is defined

  - name: check for node already bootstrapped
    stat:
      path: "/etc/origin/node/bootstrap-node-config.yaml"
    register: bootstrap_node_config_path_check

  - name: initialize_facts set fact openshift_is_bootstrapped
    set_fact:
      openshift_is_bootstrapped: "{{ openshift_is_bootstrapped | default(False) or bootstrap_node_config_path_check.stat.exists }}"

  - name: initialize_facts set fact openshift_is_atomic and openshift_is_containerized
    set_fact:
      openshift_is_atomic: "{{ ostree_booted.stat.exists }}"
      openshift_is_containerized: "{{ ostree_booted.stat.exists or (containerized | default(false) | bool) }}"
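  # Note the implication: an Atomic host is always treated as containerized,
  # while a non-Atomic host is containerized only when the inventory sets
  # containerized=true.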

  # TODO: Should this be moved into health checks??
  # Seems as though any check that happens with a corresponding fail should move into health_checks
  # Fail as early as possible if on Atomic and running an old version of Docker
  - when:
    - openshift_is_atomic | bool
    block:

    # See https://access.redhat.com/articles/2317361
    # and https://github.com/ansible/ansible/issues/15892
    # NOTE: the double quotes cannot be removed at this level or the docker command will fail
    # NOTE: When ansible >2.2.1.x is used this can be updated per
    # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
    - name: Determine Atomic Host Docker Version
      shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
      register: l_atomic_docker_version
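    # The CURLY indirection keeps Ansible's own Jinja2 templating from
    # consuming the literal "{{" that docker's Go template needs; the shell
    # ends up running:
    #   docker version --format "{{json .Server.Version}}"
    # which prints the server version as a JSON string (e.g. "1.13.1"),
    # hence the replace('"', '') in the assert below.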

    - name: assert atomic host docker version is 1.12 or later
      assert:
        that:
        - l_atomic_docker_version.stdout | replace('"', '') is version('1.12', '>=')
        msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.

- name: Retrieve existing master configs and validate
  hosts: oo_masters_to_config
  gather_facts: no
  any_errors_fatal: true
  roles:
  - openshift_facts
  tasks:
  - import_role:
      name: openshift_control_plane
      tasks_from: check_existing_config.yml
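  # check_existing_config.yml loads any existing master configuration
  # (on 3.x masters, /etc/origin/master/master-config.yaml) into
  # l_existing_config_master_config so that re-runs preserve the live
  # settings consumed below.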

  - when:
    - l_existing_config_master_config is defined
    - l_existing_config_master_config.networkConfig is defined
    block:
    - set_fact:
        openshift_portal_net: "{{ l_existing_config_master_config.networkConfig.serviceNetworkCIDR }}"

    - set_fact:
        osm_cluster_network_cidr: "{{ l_existing_config_master_config.networkConfig.clusterNetworks[0].cidr }}"
        osm_host_subnet_length: "{{ l_existing_config_master_config.networkConfig.clusterNetworks[0].hostSubnetLength }}"
      when:
      - l_existing_config_master_config.networkConfig.clusterNetworks is defined
  # End block
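  # For reference, the networkConfig stanza being read looks roughly like
  # this in master-config.yaml (values illustrative):
  #
  #   networkConfig:
  #     clusterNetworks:
  #     - cidr: 10.128.0.0/14
  #       hostSubnetLength: 9
  #     serviceNetworkCIDR: 172.30.0.0/16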

- name: Initialize special first-master variables
  hosts: oo_first_master
  roles:
  - role: openshift_facts
  tasks:
  - when: not (osm_default_node_selector is defined)
    block:
    - set_fact:
        # l_existing_config_master_config is set in openshift_control_plane/tasks/check_existing_config.yml
        openshift_master_config_node_selector: "{{ l_existing_config_master_config.projectConfig.defaultNodeSelector }}"
      when:
      - l_existing_config_master_config is defined
      - l_existing_config_master_config.projectConfig is defined
      - l_existing_config_master_config.projectConfig.defaultNodeSelector is defined
      - l_existing_config_master_config.projectConfig.defaultNodeSelector != ''

  - set_fact:
      # We need to set up openshift_client_binary here for special uses of
      # delegate_to in later roles and plays.
      first_master_client_binary: "{{ openshift_client_binary }}"
      # Some roles may require this to be set for the first master.
      openshift_client_binary: "{{ openshift_client_binary }}"
      # We need to know if a default node selector has been manually set outside the installer.
      l_osm_default_node_selector: '{{ osm_default_node_selector | default(openshift_master_config_node_selector) | default("node-role.kubernetes.io/compute=true") }}'
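  # Precedence for l_osm_default_node_selector: an explicit
  # osm_default_node_selector from the inventory wins, then the selector
  # recovered from an existing master config, then the shipped default
  # "node-role.kubernetes.io/compute=true".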

- name: Disable web console if required
  hosts: oo_masters_to_config
  gather_facts: no
  tasks:
  - set_fact:
      openshift_web_console_install: False
    when:
    - openshift_deployment_subtype is defined
    - openshift_deployment_subtype == 'registry' or ( osm_disabled_features is defined and 'WebConsole' in osm_disabled_features )
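  # Illustrative inventory settings that would trigger this play:
  #   openshift_deployment_subtype=registry
  # or
  #   osm_disabled_features="['WebConsole']"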