---
# TODO: Add support for choosing base image based on deployment_type and os
# wanted (os wanted needs support added in bin/cluster with sane defaults:
# fedora/centos for origin, rhel for online/enterprise)

# TODO: create a role to encapsulate some of this complexity, possibly also
# create a module to manage the storage tasks, network tasks, and possibly
# even handle the libvirt tasks to set metadata in the domain xml and be able
# to create/query data about VMs without having to use XML. The python libvirt
# bindings look like a good candidate for this.

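# Fetch the base cloud image into the libvirt storage pool and verify its
# sha256 checksum. The download can be skipped with the 'skip_image_download'
# cluster option.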
- name: Download Base Cloud image
  get_url:
    url: '{{ image_url }}'
    sha256sum: '{{ image_sha256 }}'
    dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
  when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}'

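# The next three tasks build a cloud-init config drive for each instance:
# a scratch directory, the rendered user-data/meta-data files, and the ISO
# image built from them.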
- name: Create the cloud-init config drive path
  file:
    dest: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
    state: directory
  with_items: instances

- name: Create the cloud-init config drive files
  template:
    src: '{{ item[1] }}'
    dest: '{{ os_libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
  with_nested:
    - instances
    - [ user-data, meta-data ]

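# cloud-init's NoCloud datasource expects a volume labelled 'cidata' that
# contains the user-data and meta-data files.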
- name: Create the cloud-init config drive
  command: 'genisoimage -output {{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
  args:
    chdir: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
    creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
  with_items: instances

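# Rescan the pool so libvirt notices the base image and the ISOs that were
# created above outside of its own API.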
- name: Refresh the libvirt storage pool for openshift
  command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'

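# Each VM gets a 10G qcow2 volume that is a copy-on-write overlay backed by
# the base cloud image.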
- name: Create VMs drives
  command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
  with_items: instances

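# Define each domain from the rendered domain.xml template; the VMs are
# started in the following task.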
- name: Create VMs
  virt:
    name: '{{ item }}'
    command: define
    xml: "{{ lookup('template', '../templates/domain.xml') }}"
    uri: '{{ libvirt_uri }}'
  with_items: instances

- name: Start VMs
  virt:
    name: '{{ item }}'
    state: running
    uri: '{{ libvirt_uri }}'
  with_items: instances

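# Count the DHCP leases on the 'openshift-ansible' libvirt network that match
# an instance name, and retry until there is one for every instance, i.e.
# each VM has been assigned an address.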
- name: Wait for the VMs to get an IP
  shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | egrep -c ''{{ instances | join("|") }}'''
  register: nb_allocated_ips
  until: nb_allocated_ips.stdout == '{{ instances | length }}'
  retries: 30
  delay: 1

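# In the lease list, awk matches the instance name against the hostname
# column (field 6) and strips the CIDR suffix from the address column
# (field 5) to print the bare IP.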
- name: Collect IP addresses of the VMs
  shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
  register: scratch_ip
  with_items: instances

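# Collect the stdout of each per-instance result into an 'ips' list that
# parallels the 'instances' list.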
- set_fact:
    ips: "{{ scratch_ip.results | oo_collect('stdout') }}"

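# Register each VM in the in-memory inventory with its SSH connection
# details and cluster/host-type groups.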
- name: Add new instances
  add_host:
    hostname: '{{ item.0 }}'
    ansible_ssh_host: '{{ item.1 }}'
    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
    ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
    groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
  with_together:
    - instances
    - ips

- name: Wait for ssh
  wait_for:
    host: '{{ item }}'
    port: 22
  with_items: ips

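# sshd can come up before the 'openshift' user exists; keep retrying a login
# as that user until it succeeds.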
- name: Wait for openshift user setup
  command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
  register: result
  until: result.rc == 0
  retries: 30
  delay: 1
  with_together:
    - instances
    - ips