Ver código fonte

GlusterFS: Upgrade playbook

Signed-off-by: Jose A. Rivera <jarrpa@redhat.com>
Jose A. Rivera 6 anos atrás
pai
commit
bb58888545
25 arquivos alterados com 375 adições e 187 exclusões
  1. 8 0
      playbooks/openshift-glusterfs/private/upgrade.yml
  2. 10 0
      playbooks/openshift-glusterfs/upgrade.yml
  3. 5 4
      roles/openshift_storage_glusterfs/defaults/main.yml
  4. 2 0
      roles/openshift_storage_glusterfs/files/glusterfs-template.yml
  5. 1 0
      roles/openshift_storage_glusterfs/files/heketi-template.yml
  6. 6 26
      roles/openshift_storage_glusterfs/tasks/check_cluster_health.yml
  7. 15 0
      roles/openshift_storage_glusterfs/tasks/cluster_health.yml
  8. 16 2
      roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
  9. 2 0
      roles/openshift_storage_glusterfs/tasks/glusterfs_config_facts.yml
  10. 13 0
      roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
  11. 2 0
      roles/openshift_storage_glusterfs/tasks/glusterfs_registry_facts.yml
  12. 32 0
      roles/openshift_storage_glusterfs/tasks/glusterfs_restart_pod.yml
  13. 71 0
      roles/openshift_storage_glusterfs/tasks/glusterfs_upgrade.yml
  14. 86 0
      roles/openshift_storage_glusterfs/tasks/heketi_deploy.yml
  15. 0 125
      roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
  16. 0 0
      roles/openshift_storage_glusterfs/tasks/heketi_get_key.yml
  17. 47 0
      roles/openshift_storage_glusterfs/tasks/heketi_init_db.yml
  18. 11 11
      roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
  19. 2 7
      roles/openshift_storage_glusterfs/tasks/heketi_load.yml
  20. 8 6
      roles/openshift_storage_glusterfs/tasks/heketi_pod_check.yml
  21. 1 3
      roles/openshift_storage_glusterfs/tasks/heketi_setup.yml
  22. 26 0
      roles/openshift_storage_glusterfs/tasks/upgrade.yml
  23. 5 3
      roles/openshift_storage_glusterfs/tasks/wait_for_pods.yml
  24. 3 0
      roles/openshift_storage_glusterfs/templates/heketi-endpoints.yml.j2
  25. 3 0
      roles/openshift_storage_glusterfs/templates/heketi-service.yml.j2

+ 8 - 0
playbooks/openshift-glusterfs/private/upgrade.yml

@@ -0,0 +1,8 @@
+---
+- name: Upgrade GlusterFS
+  hosts: oo_first_master
+  tasks:
+  - name: Run glusterfs upgrade role
+    import_role:
+      name: openshift_storage_glusterfs
+      tasks_from: upgrade.yml

+ 10 - 0
playbooks/openshift-glusterfs/upgrade.yml

@@ -0,0 +1,10 @@
+---
+- import_playbook: ../init/main.yml
+  vars:
+    l_init_fact_hosts: "oo_masters_to_config:oo_glusterfs_to_config"
+    l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+    l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] | union(groups['oo_glusterfs_to_config']) }}"
+    l_install_base_packages: False
+    l_base_packages_hosts: "all:!all"
+
+- import_playbook: private/upgrade.yml

+ 5 - 4
roles/openshift_storage_glusterfs/defaults/main.yml

@@ -24,6 +24,9 @@ l_gluster_heketi_image_dict:
 openshift_storage_glusterfs_heketi_image: "{{ l_gluster_heketi_image_dict[openshift_deployment_type] | lib_utils_oo_oreg_image((oreg_url | default('None'))) }}"
 
 openshift_storage_glusterfs_timeout: 300
+openshift_storage_glusterfs_health_timeout: 1200
+openshift_storage_glusterfs_restart: False
+openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native else 'default' | quote }}"
 openshift_storage_glusterfs_is_native: True
 openshift_storage_glusterfs_name: 'storage'
 openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
@@ -61,9 +64,10 @@ openshift_storage_glusterfs_heketi_ssh_user: 'root'
 openshift_storage_glusterfs_heketi_ssh_sudo: False
 openshift_storage_glusterfs_heketi_ssh_keyfile: "{{ omit }}"
 openshift_storage_glusterfs_heketi_fstab: "{{ '/var/lib/heketi/fstab' | quote if openshift_storage_glusterfs_heketi_executor == 'kubernetes' else '/etc/fstab' | quote }}"
-openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native else 'default' | quote }}"
 
 openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+openshift_storage_glusterfs_registry_health_timeout: "{{ openshift_storage_glusterfs_health_timeout }}"
+openshift_storage_glusterfs_registry_restart: "{{ openshift_storage_glusterfs_restart }}"
 openshift_storage_glusterfs_registry_namespace: "{{ openshift_hosted_registry_namespace | default(openshift_storage_glusterfs_namespace) }}"
 openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
 openshift_storage_glusterfs_registry_name: 'registry'
@@ -125,6 +129,3 @@ r_openshift_storage_glusterfs_os_firewall_allow:
   port: "3260/tcp"
 - service: rpcbind
   port: "111/tcp"
-
-# One retry every 10 seconds.
-openshift_glusterfs_cluster_health_check_retries: 120

+ 2 - 0
roles/openshift_storage_glusterfs/files/glusterfs-template.yml

@@ -22,6 +22,8 @@ objects:
     selector:
       matchLabels:
         glusterfs: ${CLUSTER_NAME}-pod
+    updateStrategy:
+      type: OnDelete
     template:
       metadata:
         name: glusterfs-${CLUSTER_NAME}

+ 1 - 0
roles/openshift_storage_glusterfs/files/heketi-template.yml

@@ -5,6 +5,7 @@ metadata:
   name: heketi
   labels:
     glusterfs: heketi-template
+    heketi: template
   annotations:
     description: Heketi service deployment template
     tags: glusterfs,heketi

+ 6 - 26
roles/openshift_storage_glusterfs/tasks/check_cluster_health.yml

@@ -1,35 +1,15 @@
 ---
-# glusterfs_check_containerized is a custom module defined at
-# lib_utils/library/glusterfs_check_containerized.py
-- name: Check for cluster health of glusterfs
-  glusterfs_check_containerized:
-    oc_bin: "{{ first_master_client_binary }}"
-    oc_conf: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
-    oc_namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    cluster_name: "{{ openshift_storage_glusterfs_name }}"
-    exclude_node: "{{ openshift.common.hostname }}"
-  delegate_to: "{{ groups.oo_first_master.0 }}"
-  retries: "{{ openshift_glusterfs_cluster_health_check_retries | int}}"
-  delay: 10
-  register: glusterfs_check_containerized_res
-  until: glusterfs_check_containerized_res is succeeded
+- block:
+  - import_tasks: glusterfs_config_facts.yml
+  - import_tasks: cluster_health.yml
   when:
   - openshift_storage_glusterfs_is_native | bool
   - "'glusterfs' in groups"
   - "groups['glusterfs'] | length > 0"
 
-- name: Check for cluster health of glusterfs (registry)
-  glusterfs_check_containerized:
-    oc_bin: "{{ first_master_client_binary }}"
-    oc_conf: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
-    oc_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
-    cluster_name: "{{ openshift_storage_glusterfs_registry_name }}"
-    exclude_node: "{{ openshift.common.hostname }}"
-  delegate_to: "{{ groups.oo_first_master.0 }}"
-  retries: "{{ openshift_glusterfs_cluster_health_check_retries | int}}"
-  delay: 10
-  register: glusterfs_check_containerized_reg_res
-  until: glusterfs_check_containerized_reg_res is succeeded
+- block:
+  - import_tasks: glusterfs_registry_facts.yml
+  - import_tasks: cluster_health.yml
   when:
   - openshift_storage_glusterfs_registry_is_native | bool
   - "'glusterfs_registry' in groups"

+ 15 - 0
roles/openshift_storage_glusterfs/tasks/cluster_health.yml

@@ -0,0 +1,15 @@
+---
+# glusterfs_check_containerized is a custom module defined at
+# lib_utils/library/glusterfs_check_containerized.py
+- name: Check for GlusterFS cluster health
+  glusterfs_check_containerized:
+    oc_bin: "{{ first_master_client_binary }}"
+    oc_conf: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
+    oc_namespace: "{{ glusterfs_namespace }}"
+    cluster_name: "{{ glusterfs_name }}"
+    exclude_node: "{{ openshift.common.hostname }}"
+  delegate_to: "{{ groups.oo_first_master.0 }}"
+  retries: "{{ (glusterfs_health_timeout | int / 10) | int }}"
+  delay: 10
+  register: glusterfs_check_containerized_res
+  until: glusterfs_check_containerized_res is succeeded

+ 16 - 2
roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml

@@ -37,15 +37,29 @@
 - import_tasks: heketi_setup.yml
   when: glusterfs_heketi_is_native
 
-- import_tasks: heketi_deploy_part1.yml
+# This must always be run. There are tasks further on in the play that
+# require these variables to be properly set, even if the tasks are skipped, so
+# that Ansible can properly parse the tasks.
+- import_tasks: heketi_pod_check.yml
+
+- import_tasks: heketi_init_deploy.yml
   when:
   - glusterfs_heketi_is_native
   - glusterfs_heketi_deploy_is_missing
   - glusterfs_heketi_is_missing
 
 - import_tasks: heketi_load.yml
+  when:
+  - glusterfs_heketi_topology_load
+
+- import_tasks: heketi_init_db.yml
+  when:
+  - glusterfs_heketi_is_native
+  - not glusterfs_heketi_deploy_is_missing
+  - glusterfs_heketi_is_missing
+  - glusterfs_heketi_topology_load
 
-- import_tasks: heketi_deploy_part2.yml
+- import_tasks: heketi_deploy.yml
   when:
   - glusterfs_heketi_is_native
   - glusterfs_heketi_is_missing

+ 2 - 0
roles/openshift_storage_glusterfs/tasks/glusterfs_config_facts.yml

@@ -1,6 +1,8 @@
 ---
 - set_fact:
     glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+    glusterfs_health_timeout: "{{ openshift_storage_glusterfs_health_timeout }}"
+    glusterfs_restart: "{{ openshift_storage_glusterfs_restart }}"
     glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
     glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}"
     glusterfs_name: "{{ openshift_storage_glusterfs_name }}"

+ 13 - 0
roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml

@@ -41,3 +41,16 @@
         (glusterfs_ds.results.results[0].status.numberReady | default(0) < glusterfs_ds.results.results[0].status.desiredNumberScheduled | default(glusterfs_nodes | count))
 
 - import_tasks: wait_for_pods.yml
+
+- name: Get GlusterFS pods
+  set_fact:
+    glusterfs_pods: "{{ glusterfs_pods_wait.results.results[0]['items'] }}"
+  when:
+  - glusterfs_restart
+
+- include_tasks: glusterfs_restart_pod.yml
+  vars:
+    restart_node: "{{ item }}"
+  with_items: "{{ glusterfs_pods | default([]) }}"
+  when:
+  - glusterfs_restart

+ 2 - 0
roles/openshift_storage_glusterfs/tasks/glusterfs_registry_facts.yml

@@ -1,6 +1,8 @@
 ---
 - set_fact:
     glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}"
+    glusterfs_health_timeout: "{{ openshift_storage_glusterfs_registry_health_timeout }}"
+    glusterfs_restart: "{{ openshift_storage_glusterfs_registry_restart }}"
     glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
     glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}"
     glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"

+ 32 - 0
roles/openshift_storage_glusterfs/tasks/glusterfs_restart_pod.yml

@@ -0,0 +1,32 @@
+---
+- name: Unlabel GlusterFS node
+  oc_label:
+    name: "{{ restart_node.spec.nodeName }}"
+    kind: node
+    state: absent
+    labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
+
+# There was an alternate installation method, outside of openshift-ansible,
+# that was written prior to this Ansible automation. It used a different style
+# label and we want to be able to upgrade those installations as well.
+- name: Unlabel GlusterFS node (old label)
+  oc_label:
+    name: "{{ restart_node.spec.nodeName }}"
+    kind: node
+    state: absent
+    labels: [ { 'key': 'storagenode', 'value': '' } ]
+
+- import_tasks: wait_for_pods.yml
+  vars:
+    glusterfs_count: "{{ glusterfs_nodes | count - 1 }}"
+
+- name: Label GlusterFS nodes
+  oc_label:
+    name: "{{ restart_node.spec.nodeName }}"
+    kind: node
+    state: add
+    labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
+
+- import_tasks: wait_for_pods.yml
+
+- import_tasks: cluster_health.yml

+ 71 - 0
roles/openshift_storage_glusterfs/tasks/glusterfs_upgrade.yml

@@ -0,0 +1,71 @@
+---
+- import_tasks: heketi_get_key.yml
+  when: glusterfs_heketi_admin_key is undefined
+
+- name: Delete heketi resources
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "template,svc,route,dc,secret,sa"
+    selector: "heketi"
+  - kind: "template,svc,route,dc,secret,sa"
+    selector: "deploy-heketi"
+  failed_when: False
+
+- name: Wait for heketi pod to delete
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "heketi"
+  register: heketi_pod_wait
+  until:
+  - "heketi_pod_wait.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+
+- name: Delete other glusterfs resources
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "template,svc,route,clusterrole,sa,dc"
+    selector: "glusterfs"
+  - kind: "clusterrolebinding"
+    name: "glusterblock-provisioner"
+  - kind: "clusterrolebinding"
+    name: "glusterblock-{{ glusterfs_name }}-provisioner"
+  failed_when: False
+
+# oc delete --cascade=false seems broken for DaemonSets.
+# Using curl to talk to the API directly.
+- name: Delete glusterfs daemonset w/o cascade
+  shell: "curl -k -X DELETE https://localhost:8443/apis/extensions/v1beta1/namespaces/glusterfs/daemonsets/glusterfs-storage -d '{\"kind\":\"DeleteOptions\",\"apiVersion\":\"v1\",\"propagationPolicy\":\"Orphan\"}' -H \"Accept: application/json\" -H \"Content-Type: application/json\"  --cert {{ openshift.common.config_base }}/master/admin.crt --key {{ openshift.common.config_base }}/master/admin.key"
+  #shell: "{{ first_master_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig delete ds --namespace={{ glusterfs_namespace }} --cascade=false --selector=glusterfs"
+  delegate_to: "{{ groups.oo_first_master.0 }}"
+  failed_when: False
+
+- name: Get old-style GlusterFS pods
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=pod"
+  register: glusterfs_pods
+
+- name: Relabel old-style GlusterFS pods
+  oc_label:
+    name: "{{ item['metadata']['name'] }}"
+    kind: node
+    state: add
+    labels: "[ { 'key': 'glusterfs', 'value': '{{ glusterfs_name }}-pod' } ]"
+  with_items: "{{ glusterfs_pods.results.results[0]['items'] | default([]) }}"
+
+- import_tasks: glusterfs_common.yml

+ 86 - 0
roles/openshift_storage_glusterfs/tasks/heketi_deploy.yml

@@ -0,0 +1,86 @@
+---
+- name: Generate heketi endpoints
+  template:
+    src: "heketi-endpoints.yml.j2"
+    dest: "{{ mktemp.stdout }}/heketi-endpoints.yml"
+
+- name: Generate heketi service
+  template:
+    src: "heketi-service.yml.j2"
+    dest: "{{ mktemp.stdout }}/heketi-service.yml"
+
+- name: Create heketi endpoints
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    state: present
+    kind: endpoints
+    name: "heketi-db-{{ glusterfs_name }}-endpoints"
+    files:
+    - "{{ mktemp.stdout }}/heketi-endpoints.yml"
+
+- name: Create heketi service
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    state: present
+    kind: service
+    name: "heketi-db-{{ glusterfs_name }}-endpoints"
+    files:
+    - "{{ mktemp.stdout }}/heketi-service.yml"
+
+- name: Copy heketi template
+  copy:
+    src: "heketi-template.yml"
+    dest: "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Create heketi template
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: template
+    name: heketi
+    state: present
+    files:
+    - "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Deploy heketi pod
+  oc_process:
+    namespace: "{{ glusterfs_namespace }}"
+    template_name: "heketi"
+    create: True
+    params:
+      IMAGE_NAME: "{{ glusterfs_heketi_image }}"
+      HEKETI_ROUTE: "{{ glusterfs_heketi_url | default(['heketi-',glusterfs_name]|join) }}"
+      HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
+      HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+      HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}"
+      HEKETI_FSTAB: "{{ glusterfs_heketi_fstab }}"
+      CLUSTER_NAME: "{{ glusterfs_name }}"
+
+- name: Wait for heketi pod
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
+  register: heketi_pod_wait
+  until:
+  - "heketi_pod_wait.results.results[0]['items'] | count > 0"
+  # Pod's 'Ready' status must be True
+  - "heketi_pod_wait.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+
+- name: Update heketi pod result
+  set_fact:
+    heketi_pod: "{{ heketi_pod_wait.results.results[0]['items'][0] }}"
+
+- name: Set heketi-cli command
+  set_fact:
+    glusterfs_heketi_client: "{{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.metadata.name }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+
+- name: Verify heketi service
+  command: "{{ glusterfs_heketi_client }} cluster list"
+  changed_when: False
+
+- name: Set heketi deployed fact
+  set_fact:
+    glusterfs_heketi_is_missing: False

+ 0 - 125
roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml

@@ -1,125 +0,0 @@
----
-- name: Create heketi DB volume
-  command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --image {{ glusterfs_heketi_image }} --listfile /tmp/heketi-storage.json"
-  register: setup_storage
-
-- name: Copy heketi-storage list
-  shell: "{{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
-
-# Need `command` here because heketi-storage.json contains multiple objects.
-- name: Copy heketi DB to GlusterFS volume
-  command: "{{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"
-  when: setup_storage.rc == 0
-
-- name: Wait for copy job to finish
-  oc_obj:
-    namespace: "{{ glusterfs_namespace }}"
-    kind: job
-    state: list
-    name: "heketi-storage-copy-job"
-  register: heketi_job
-  until:
-  - "'results' in heketi_job.results and heketi_job.results.results | count > 0"
-  # Pod's 'Complete' status must be True
-  - "heketi_job.results.results | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
-  delay: 10
-  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
-  failed_when:
-  - "'results' in heketi_job.results"
-  - "heketi_job.results.results | count > 0"
-  # Fail when pod's 'Failed' status is True
-  - "heketi_job.results.results | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
-  when: setup_storage.rc == 0
-
-- name: Delete deploy resources
-  oc_obj:
-    namespace: "{{ glusterfs_namespace }}"
-    kind: "{{ item.kind }}"
-    name: "{{ item.name | default(omit) }}"
-    selector: "{{ item.selector | default(omit) }}"
-    state: absent
-  with_items:
-  - kind: "template,route,service,jobs,dc,secret"
-    selector: "deploy-heketi"
-  - kind: "svc"
-    name: "heketi-storage-endpoints"
-  - kind: "secret"
-    name: "heketi-{{ glusterfs_name | default }}-topology-secret"
-
-- name: Generate heketi endpoints
-  template:
-    src: "heketi-endpoints.yml.j2"
-    dest: "{{ mktemp.stdout }}/heketi-endpoints.yml"
-
-- name: Generate heketi service
-  template:
-    src: "heketi-service.yml.j2"
-    dest: "{{ mktemp.stdout }}/heketi-service.yml"
-
-- name: Create heketi endpoints
-  oc_obj:
-    namespace: "{{ glusterfs_namespace }}"
-    state: present
-    kind: endpoints
-    name: "heketi-db-{{ glusterfs_name }}-endpoints"
-    files:
-    - "{{ mktemp.stdout }}/heketi-endpoints.yml"
-
-- name: Create heketi service
-  oc_obj:
-    namespace: "{{ glusterfs_namespace }}"
-    state: present
-    kind: service
-    name: "heketi-db-{{ glusterfs_name }}-endpoints"
-    files:
-    - "{{ mktemp.stdout }}/heketi-service.yml"
-
-- name: Copy heketi template
-  copy:
-    src: "heketi-template.yml"
-    dest: "{{ mktemp.stdout }}/heketi-template.yml"
-
-- name: Create heketi template
-  oc_obj:
-    namespace: "{{ glusterfs_namespace }}"
-    kind: template
-    name: heketi
-    state: present
-    files:
-    - "{{ mktemp.stdout }}/heketi-template.yml"
-
-- name: Deploy heketi pod
-  oc_process:
-    namespace: "{{ glusterfs_namespace }}"
-    template_name: "heketi"
-    create: True
-    params:
-      IMAGE_NAME: "{{ glusterfs_heketi_image }}"
-      HEKETI_ROUTE: "{{ glusterfs_heketi_url | default(['heketi-',glusterfs_name]|join) }}"
-      HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
-      HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
-      HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}"
-      HEKETI_FSTAB: "{{ glusterfs_heketi_fstab }}"
-      CLUSTER_NAME: "{{ glusterfs_name }}"
-
-- name: Wait for heketi pod
-  oc_obj:
-    namespace: "{{ glusterfs_namespace }}"
-    kind: pod
-    state: list
-    selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
-  register: heketi_pod
-  until:
-  - "heketi_pod.results.results[0]['items'] | count > 0"
-  # Pod's 'Ready' status must be True
-  - "heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
-  delay: 10
-  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
-
-- name: Set heketi-cli command
-  set_fact:
-    glusterfs_heketi_client: "{{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
-
-- name: Verify heketi service
-  command: "{{ glusterfs_heketi_client }} cluster list"
-  changed_when: False

roles/openshift_storage_glusterfs/tasks/get_heketi_key.yml → roles/openshift_storage_glusterfs/tasks/heketi_get_key.yml


+ 47 - 0
roles/openshift_storage_glusterfs/tasks/heketi_init_db.yml

@@ -0,0 +1,47 @@
+---
+- name: Create heketi DB volume
+  command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --image {{ glusterfs_heketi_image }} --listfile /tmp/heketi-storage.json"
+  register: setup_storage
+
+- name: Copy heketi-storage list
+  shell: "{{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.metadata.name }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
+
+# Need `command` here because heketi-storage.json contains multiple objects.
+- name: Copy heketi DB to GlusterFS volume
+  command: "{{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"
+  when: setup_storage.rc == 0
+
+- name: Wait for copy job to finish
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: job
+    state: list
+    name: "heketi-storage-copy-job"
+  register: heketi_job
+  until:
+  - "'results' in heketi_job.results and heketi_job.results.results | count > 0"
+  # Pod's 'Complete' status must be True
+  - "heketi_job.results.results | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+  failed_when:
+  - "'results' in heketi_job.results"
+  - "heketi_job.results.results | count > 0"
+  # Fail when pod's 'Failed' status is True
+  - "heketi_job.results.results | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
+  when: setup_storage.rc == 0
+
+- name: Delete deploy resources
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "template,route,service,jobs,dc,secret"
+    selector: "deploy-heketi"
+  - kind: "svc"
+    name: "heketi-storage-endpoints"
+  - kind: "secret"
+    name: "heketi-{{ glusterfs_name | default }}-topology-secret"

+ 11 - 11
roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml

@@ -29,24 +29,24 @@
       HEKETI_FSTAB: "{{ glusterfs_heketi_fstab }}"
       CLUSTER_NAME: "{{ glusterfs_name }}"
 
-- name: Set heketi Deployed fact
-  set_fact:
-    glusterfs_heketi_deploy_is_missing: False
-
 - name: Wait for deploy-heketi pod
   oc_obj:
     namespace: "{{ glusterfs_namespace }}"
     kind: pod
     state: list
     selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
-  register: deploy_heketi_pod
+  register: deploy_heketi_pod_wait
   until:
-  - "deploy_heketi_pod.results.results[0]['items'] | count > 0"
+  - "deploy_heketi_pod_wait.results.results[0]['items'] | count > 0"
   # Pod's 'Ready' status must be True
-  - "deploy_heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  - "deploy_heketi_pod_wait.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
   delay: 10
   retries: "{{ (glusterfs_timeout | int / 10) | int }}"
-  when:
-  - glusterfs_heketi_is_native
-  - not glusterfs_heketi_deploy_is_missing
-  - glusterfs_heketi_is_missing
+
+- name: Update deploy-heketi pod result
+  set_fact:
+    deploy_heketi_pod: "{{ deploy_heketi_pod_wait.results.results[0]['items'][0] }}"
+
+- name: Set deploy-heketi deployed fact
+  set_fact:
+    glusterfs_heketi_deploy_is_missing: False

Diferenças do arquivo suprimidas por serem muito extensas
+ 2 - 7
roles/openshift_storage_glusterfs/tasks/heketi_load.yml


+ 8 - 6
roles/openshift_storage_glusterfs/tasks/heketi_pod_check.yml

@@ -5,15 +5,16 @@
     state: list
     kind: pod
     selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
-  register: deploy_heketi_pod
+  register: deploy_heketi_pod_check
 
 - name: Check if need to deploy deploy-heketi
   set_fact:
     glusterfs_heketi_deploy_is_missing: False
+    deploy_heketi_pod: "{{ deploy_heketi_pod_check.results.results[0]['items'][0] }}"
   when:
-  - "deploy_heketi_pod.results.results[0]['items'] | count > 0"
+  - "deploy_heketi_pod_check.results.results[0]['items'] | count > 0"
   # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
-  - "deploy_heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+  - "deploy_heketi_pod_check.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
 
 - name: Check for existing heketi pod
   oc_obj:
@@ -21,12 +22,13 @@
     state: list
     kind: pod
     selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
-  register: heketi_pod
+  register: heketi_pod_check
 
 - name: Check if need to deploy heketi
   set_fact:
     glusterfs_heketi_is_missing: False
+    heketi_pod: "{{ heketi_pod_check.results.results[0]['items'][0] }}"
   when:
-  - "heketi_pod.results.results[0]['items'] | count > 0"
+  - "heketi_pod_check.results.results[0]['items'] | count > 0"
   # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
-  - "heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+  - "heketi_pod_check.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"

+ 1 - 3
roles/openshift_storage_glusterfs/tasks/heketi_setup.yml

@@ -21,14 +21,12 @@
     resource_name: edit
     state: present
 
-- import_tasks: heketi_pod_check.yml
-
 - name: Generate heketi config file
   template:
     src: "heketi.json.j2"
     dest: "{{ mktemp.stdout }}/heketi.json"
 
-- import_tasks: get_heketi_key.yml
+- import_tasks: heketi_get_key.yml
 
 - name: Generate heketi admin key
   set_fact:

+ 26 - 0
roles/openshift_storage_glusterfs/tasks/upgrade.yml

@@ -0,0 +1,26 @@
+---
+- import_tasks: mktemp.yml
+
+- name: Set state facts
+  set_fact:
+    openshift_storage_glusterfs_heketi_deploy_is_missing: False
+    openshift_storage_glusterfs_heketi_topology_load: False
+    openshift_storage_glusterfs_restart: True
+
+- name: upgrade glusterfs
+  block:
+    - import_tasks: glusterfs_config_facts.yml
+    - import_tasks: glusterfs_upgrade.yml
+  when:
+    - "'glusterfs' in groups"
+    - "groups['glusterfs'] | length > 0"
+
+- name: upgrade glusterfs_registry
+  block:
+    - import_tasks: glusterfs_registry_facts.yml
+    - import_tasks: glusterfs_upgrade.yml
+  when:
+    - "'glusterfs_registry' in groups"
+    - "groups['glusterfs_registry'] | length > 0"
+
+- import_tasks: rmtemp.yml

+ 5 - 3
roles/openshift_storage_glusterfs/tasks/wait_for_pods.yml

@@ -5,10 +5,12 @@
     kind: pod
     state: list
     selector: "glusterfs={{ glusterfs_name }}-pod"
-  register: glusterfs_pods
+  register: glusterfs_pods_wait
   until:
-  - "glusterfs_pods.results.results[0]['items'] | count > 0"
+  - "glusterfs_pods_wait.results.results[0]['items'] | count > 0"
  # There must be as many pods with 'Ready' status True as there are nodes expecting those pods
-  - "glusterfs_pods.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
+  - "glusterfs_pods_wait.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == l_glusterfs_count"
   delay: 10
   retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+  vars:
+    l_glusterfs_count: "{{ glusterfs_count | default(glusterfs_nodes | count) | int }}"

+ 3 - 0
roles/openshift_storage_glusterfs/templates/heketi-endpoints.yml.j2

@@ -3,6 +3,9 @@ apiVersion: v1
 kind: Endpoints
 metadata:
   name: heketi-db-{{ glusterfs_name }}-endpoints
+  labels:
+    glusterfs: heketi-{{ glusterfs_name }}-db-endpoints
+    heketi: {{ glusterfs_name }}-db-endpoints
 subsets:
 - addresses:
 {% for node in glusterfs_nodes %}

+ 3 - 0
roles/openshift_storage_glusterfs/templates/heketi-service.yml.j2

@@ -3,6 +3,9 @@ apiVersion: v1
 kind: Service
 metadata:
   name: heketi-db-{{ glusterfs_name }}-endpoints
+  labels:
+    glusterfs: heketi-{{ glusterfs_name }}-db-service
+    heketi: {{ glusterfs_name }}-db-service
 spec:
   ports:
   - port: 1