# scaleup.yml
  1. ---
  2. - name: create new nodes
  3. hosts: localhost
  4. connection: local
  5. tasks:
  6. - import_tasks: ssh_bastion.yml
  7. - import_tasks: get_machinesets.yml
  8. - include_tasks: create_machineset.yml
  9. loop: "{{ machineset.resources }}"
  10. when:
  11. - item.status.replicas is defined
  12. - item.status.replicas != 0
  13. - name: wait for nodes to become available
  14. hosts: new_workers
  15. gather_facts: false
  16. tasks:
  17. - wait_for_connection: {}
  18. - setup: {}
  19. - name: Copy ops-mirror.pem
  20. copy:
  21. src: ../../inventory/dynamic/injected/ops-mirror.pem
  22. dest: /var/lib/yum/ops-mirror.pem
  23. owner: root
  24. group: root
  25. mode: 0644
  26. - name: Initialize openshift repos
  27. import_tasks: additional_repos.yml
  28. - import_playbook: ../../playbooks/scaleup.yml
  29. vars:
  30. openshift_kubeconfig_path: "{{ kubeconfig_path }}"
  31. - name: wait for nodes to join
  32. hosts: new_workers
  33. tasks:
  34. - name: HACK disable selinux
  35. selinux:
  36. policy: targeted
  37. state: permissive
  38. - name: Create core user for storage tests to pass
  39. user:
  40. name: core
  41. group: wheel
  42. - name: Make sure core user has ssh config directory
  43. file:
  44. name: /home/core/.ssh
  45. state: directory
  46. owner: core
  47. group: wheel
  48. mode: 0700
  49. - name: Install nfs-utils for storage tests
  50. package:
  51. name: nfs-utils
  52. state: present
  53. - name: Wait for new nodes to be ready
  54. k8s_facts:
  55. kubeconfig: "{{ kubeconfig_path }}"
  56. kind: Node
  57. name: "{{ node_name }}"
  58. delegate_to: localhost
  59. register: new_machine
  60. until:
  61. - new_machine.resources is defined
  62. - new_machine.resources | length > 0
  63. - new_machine.resources[0].status is defined
  64. - new_machine.resources[0].status.conditions is defined
  65. - new_machine.resources[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
  66. # Give the node three minutes to come back online.
  67. retries: 48
  68. delay: 30
  69. ignore_errors: true
  70. - when: new_machine is failed
  71. block:
  72. - name: Collect a list of containers
  73. command: crictl ps -a -q
  74. ignore_errors: true
  75. register: crictl_ps_output
  76. - name: Collect container logs
  77. command: "crictl logs {{ item }}"
  78. register: crictl_logs_output
  79. with_items: "{{ crictl_ps_output.stdout_lines }}"
  80. ignore_errors: true
  81. - name: Get crio logs
  82. command: journalctl --no-pager -u crio
  83. register: crio_logs
  84. ignore_errors: true
  85. - name: Get kubelet logs
  86. command: journalctl --no-pager -u kubelet
  87. register: kubelet_logs
  88. ignore_errors: tru
  89. - debug:
  90. var: crictl_logs_output
  91. - debug:
  92. msg: "{{ kubelet_logs.stdout_lines }}"
  93. - debug:
  94. msg: "{{ crio_logs.stdout_lines }}"
  95. - fail:
  96. msg: Node failed to become Ready
  97. - name: Remove CoreOS nodes
  98. hosts: localhost
  99. connection: local
  100. tasks:
  101. - name: Mark CoreOS nodes as unschedulable
  102. command: >
  103. oc adm cordon {{ item | lower }}
  104. --config={{ kubeconfig_path }}
  105. with_items: "{{ pre_scaleup_workers_name }}"
  106. - name: Drain CoreOS nodes
  107. command: >
  108. oc adm drain {{ item | lower }}
  109. --config={{ kubeconfig_path }}
  110. --force --delete-local-data --ignore-daemonsets
  111. --timeout=0s
  112. with_items: "{{ pre_scaleup_workers_name }}"
  113. - name: remove existing machinesets
  114. k8s:
  115. api_version: machine.openshift.io/v1beta1
  116. kubeconfig: "{{ kubeconfig_path }}"
  117. namespace: openshift-machine-api
  118. kind: MachineSet
  119. name: "{{ item }}"
  120. state: absent
  121. with_items: "{{ pre_scaleup_machineset_names }}"
  122. - name: Delete CoreOS nodes
  123. k8s:
  124. kubeconfig: "{{ kubeconfig_path }}"
  125. kind: Node
  126. name: "{{ item }}"
  127. state: absent
  128. with_items: "{{ pre_scaleup_workers_name }}"
  129. - name: Wait for worker configs to roll out
  130. command: oc wait machineconfigpool/worker --for=condition=Updated --timeout=5m