# basic_facts.yml — gather basic host facts and derive early install variables.
  1. ---
  2. - name: Ensure that all non-node hosts are accessible
  3. hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config
  4. any_errors_fatal: true
  5. tasks:
  6. - name: Initialize basic host facts
  7. # l_init_fact_hosts is passed in via play during control-plane-only
  8. # upgrades and scale-up plays; otherwise oo_all_hosts is used.
  9. hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
  10. roles:
  11. - role: openshift_facts
  12. tasks:
  13. # TODO: Should this role be refactored into health_checks??
  14. - name: Run openshift_sanitize_inventory to set variables
  15. import_role:
  16. name: openshift_sanitize_inventory
  17. - name: Detecting Operating System from ostree_booted
  18. stat:
  19. path: /run/ostree-booted
  20. get_checksum: false
  21. get_attributes: false
  22. get_mime: false
  23. register: ostree_booted
  24. # TODO(michaelgugino) remove this line once CI is updated.
  25. - name: set openshift_deployment_type if unset
  26. set_fact:
  27. openshift_deployment_type: "{{ deployment_type }}"
  28. when:
  29. - openshift_deployment_type is undefined
  30. - deployment_type is defined
  31. - name: check for node already bootstrapped
  32. stat:
  33. path: "/etc/origin/node/bootstrap-node-config.yaml"
  34. get_checksum: false
  35. get_attributes: false
  36. get_mime: false
  37. register: bootstrap_node_config_path_check
  38. - name: initialize_facts set fact openshift_is_bootstrapped
  39. set_fact:
  40. openshift_is_bootstrapped: "{{ openshift_is_bootstrapped|default(False) or bootstrap_node_config_path_check.stat.exists }}"
  41. - name: initialize_facts set fact openshift_is_atomic and openshift_is_containerized
  42. set_fact:
  43. openshift_is_atomic: "{{ ostree_booted.stat.exists }}"
  44. openshift_is_containerized: "{{ ostree_booted.stat.exists or (containerized | default(false) | bool) }}"
  45. # TODO: Should this be moved into health checks??
  46. # Seems as though any check that happens with a corresponding fail should move into health_checks
  47. # Fail as early as possible if Atomic and old version of Docker
  48. - when:
  49. - openshift_is_atomic | bool
  50. block:
  51. # See https://access.redhat.com/articles/2317361
  52. # and https://github.com/ansible/ansible/issues/15892
  53. # NOTE: the "'s can not be removed at this level else the docker command will fail
  54. # NOTE: When ansible >2.2.1.x is used this can be updated per
  55. # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
  56. - name: Determine Atomic Host Docker Version
  57. shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
  58. register: l_atomic_docker_version
  59. - name: assert atomic host docker version is 1.12 or later
  60. assert:
  61. that:
  62. - l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=')
  63. msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
  64. - name: Retrieve existing master configs and validate
  65. hosts: oo_masters_to_config
  66. gather_facts: no
  67. any_errors_fatal: true
  68. roles:
  69. - openshift_facts
  70. tasks:
  71. - import_role:
  72. name: openshift_control_plane
  73. tasks_from: check_existing_config.yml
  74. - when:
  75. - l_existing_config_master_config is defined
  76. - l_existing_config_master_config.networkConfig is defined
  77. block:
  78. - set_fact:
  79. openshift_portal_net: "{{ l_existing_config_master_config.networkConfig.serviceNetworkCIDR }}"
  80. - set_fact:
  81. osm_cluster_network_cidr: "{{ l_existing_config_master_config.networkConfig.clusterNetworks[0].cidr }}"
  82. osm_host_subnet_length: "{{ l_existing_config_master_config.networkConfig.clusterNetworks[0].hostSubnetLength }}"
  83. when:
  84. - l_existing_config_master_config.networkConfig.clusterNetworks is defined
  85. # End block
  86. - name: Initialize special first-master variables
  87. hosts: oo_first_master
  88. roles:
  89. - role: openshift_facts
  90. tasks:
  91. - when: not (osm_default_node_selector is defined)
  92. block:
  93. - set_fact:
  94. # l_existing_config_master_config is set in openshift_control_plane/tasks/check_existing_config.yml
  95. openshift_master_config_node_selector: "{{ l_existing_config_master_config.projectConfig.defaultNodeSelector }}"
  96. when:
  97. - l_existing_config_master_config is defined
  98. - l_existing_config_master_config.projectConfig is defined
  99. - l_existing_config_master_config.projectConfig.defaultNodeSelector is defined
  100. - l_existing_config_master_config.projectConfig.defaultNodeSelector != ''
  101. - set_fact:
  102. # We need to setup openshift_client_binary here for special uses of delegate_to in
  103. # later roles and plays.
  104. first_master_client_binary: "{{ openshift_client_binary }}"
  105. #Some roles may require this to be set for first master
  106. openshift_client_binary: "{{ openshift_client_binary }}"
  107. # we need to know if a default node selector has been manually set outside the installer
  108. l_osm_default_node_selector: '{{ osm_default_node_selector | default(openshift_master_config_node_selector) | default("node-role.kubernetes.io/compute=true") }}'
  109. - name: Disable web console if required
  110. hosts: oo_masters_to_config
  111. gather_facts: no
  112. tasks:
  113. - set_fact:
  114. openshift_web_console_install: False
  115. when:
  116. - openshift_deployment_subtype is defined
  117. - openshift_deployment_subtype == 'registry' or ( osm_disabled_features is defined and 'WebConsole' in osm_disabled_features )