# main.yml (GitHub page header and line-number gutter removed during cleanup)
---
# TODO: allow for overriding default ports where possible
# Abort early: the enterprise-family deployment types require SELinux to be
# enabled on the host before any node configuration is attempted.
- fail:
    msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
  when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
# Record the 'node' role facts consumed by later tasks and templates.
# Each value falls back to None/none when the corresponding inventory
# variable is unset, which lets openshift_facts keep any previously
# discovered value instead of overwriting it.
- name: Set node facts
  openshift_facts:
    role: "{{ item.role }}"
    local_facts: "{{ item.local_facts }}"
  with_items:
    - role: node
      local_facts:
        annotations: "{{ openshift_node_annotations | default(none) }}"
        debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
        iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
        kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
        # oo_option lookup wins over the plain inventory variable when both are set.
        labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
        registry_url: "{{ oreg_url | default(none) }}"
        # NOTE(review): 'openshift_scheduleable' looks like a misspelling kept as a
        # backward-compatible fallback for the correctly spelled variable — confirm
        # before removing it.
        schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
        sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
        storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
        set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
        node_image: "{{ osn_image | default(None) }}"
        ovs_image: "{{ osn_ovs_image | default(None) }}"
        proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
        local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
        dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
# RPM install path only — skipped for containerized deployments.
- name: Install Node package
  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
  when: not openshift.common.is_containerized | bool
  33. - name: Install sdn-ovs package
  34. action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
  35. when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool
# Pre-pull the node container image so the systemd unit can start without
# waiting on a first-run registry download (containerized deployments only).
- name: Pull node image
  command: >
    docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
  when: openshift.common.is_containerized | bool
# Pre-pull the Open vSwitch container image; only needed when the deployment
# is containerized AND uses the OpenShift SDN.
- name: Pull OpenVSwitch image
  command: >
    docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
  when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
# Lay down the node/ovs systemd unit files and sysconfig snippets.
- name: Install the systemd units
  include: systemd_units.yml
# Pick up any unit-file changes before (re)starting services.
# NOTE(review): install_node_result, install_ovs_sysconfig and
# install_node_dep_result are presumably registered inside systemd_units.yml —
# verify there, as they are not set in this file.
- name: Reload systemd units
  command: systemctl daemon-reload
  when: openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed)
  notify:
    - restart node
# Start the containerized Open vSwitch service and remember whether this run
# actually changed its state (consumed by later change-detection logic).
- name: Start and enable openvswitch docker service
  service: name=openvswitch.service enabled=yes state=started
  when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
  register: ovs_start_result

- set_fact:
    ovs_service_status_changed: "{{ ovs_start_result | changed }}"
  57. # TODO: add the validate parameter when there is a validation command to run
  58. - name: Create the Node config
  59. template:
  60. dest: "{{ openshift_node_config_file }}"
  61. src: node.yaml.v1.j2
  62. backup: true
  63. owner: root
  64. group: root
  65. mode: 0600
  66. notify:
  67. - restart node
# Export AWS credentials into the node service's sysconfig file so the AWS
# cloud provider can authenticate. Only runs when the cloudprovider facts are
# fully populated (kind == 'aws' plus both credential keys present).
- name: Configure AWS Cloud Provider Settings
  lineinfile:
    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
    regexp: "{{ item.regex }}"
    line: "{{ item.line }}"
    create: true
  with_items:
    - regex: '^AWS_ACCESS_KEY_ID='
      line: "AWS_ACCESS_KEY_ID={{ openshift.cloudprovider.aws.access_key }}"
    - regex: '^AWS_SECRET_ACCESS_KEY='
      line: "AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}"
  when: "'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws"
  notify:
    - restart node
# Delegate storage-plugin setup (deps listed in storage_plugin_deps above).
- name: Additional storage plugin configuration
  include: storage_plugins/main.yml
# Necessary because when you're on a node that's also a master the master will be
# restarted after the node restarts docker and it will take up to 60 seconds for
# systemd to start the master again
# Polls up to 120 times with a 1s delay (~2 minutes) until /healthz/ready
# returns 'ok'; never reports 'changed'.
- name: Wait for master API to become available before proceeding
  # Using curl here since the uri module requires python-httplib2 and
  # wait_for port doesn't provide health information.
  command: >
    curl --silent --cacert {{ openshift.common.config_base }}/node/ca.crt
    {{ openshift_node_master_api_url }}/healthz/ready
  register: api_available_output
  until: api_available_output.stdout == 'ok'
  retries: 120
  delay: 1
  changed_when: false
  when: openshift.common.is_containerized | bool
# Start the node service. Errors are not fatal here: on failure the journal is
# captured and dumped so the run surfaces diagnostics instead of dying silently.
- name: Start and enable node
  service: name={{ openshift.common.service_type }}-node enabled=yes state=started
  register: node_start_result
  ignore_errors: yes

- name: Check logs on failure
  command: journalctl -xe
  register: node_failure
  when: node_start_result | failed

- name: Dump failure information
  debug: var=node_failure
  when: node_start_result | failed

# Remember whether this run changed the node service state (consumed elsewhere).
- set_fact:
    node_service_status_changed: "{{ node_start_result | changed }}"