Kenny Woodson 7 years ago
parent
commit
300917e342
27 changed files with 698 additions and 51 deletions
  1. images/installer/Dockerfile (+2 -2)
  2. images/installer/origin-extra-root/etc/yum.repos.d/azure-cli.repo (+6 -0)
  3. images/installer/root/usr/local/bin/entrypoint-gcp (+6 -46)
  4. images/installer/root/usr/local/bin/entrypoint-provider (+76 -0)
  5. inventory/dynamic/azure/ansible.cfg (+43 -0)
  6. inventory/dynamic/azure/group_vars/all/.gitkeep (+0 -0)
  7. inventory/dynamic/azure/none (+1 -0)
  8. playbooks/aws/openshift-cluster/build_ami.yml (+1 -0)
  9. playbooks/azure/OWNERS (+6 -0)
  10. playbooks/azure/README.md (+2 -0)
  11. playbooks/azure/openshift-cluster/build_base_image.yml (+54 -0)
  12. playbooks/azure/openshift-cluster/build_node_image.yml (+88 -0)
  13. playbooks/azure/openshift-cluster/deprovision.yml (+12 -0)
  14. playbooks/azure/openshift-cluster/group_vars/all/yum_repos.yml (+62 -0)
  15. playbooks/azure/openshift-cluster/launch.yml (+109 -0)
  16. playbooks/azure/openshift-cluster/provisioning_vars.yml.example (+50 -0)
  17. playbooks/azure/openshift-cluster/roles (+1 -0)
  18. playbooks/azure/openshift-cluster/tasks/create_blob_from_vm.yml (+43 -0)
  19. playbooks/azure/openshift-cluster/tasks/create_image_from_vm.yml (+38 -0)
  20. playbooks/azure/openshift-cluster/tasks/provision_instance.yml (+49 -0)
  21. playbooks/azure/openshift-cluster/tasks/remove_yum.yml (+15 -0)
  22. playbooks/azure/openshift-cluster/tasks/yum_certs.yml (+19 -0)
  23. roles/openshift_node/defaults/main.yml (+0 -1)
  24. roles/openshift_node/tasks/bootstrap.yml (+2 -0)
  25. roles/os_update_latest/defaults/main.yml (+2 -0)
  26. roles/os_update_latest/tasks/main.yml (+9 -0)
  27. setup.py (+2 -2)

+ 2 - 2
images/installer/Dockerfile

@@ -10,14 +10,14 @@ COPY images/installer/origin-extra-root /
 # install ansible and deps
 RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients iproute" \
  && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
- && EPEL_PKGS="python2-boto python2-boto3 python2-crypto which python2-pip.noarch" \
+ && EPEL_PKGS="python2-boto python2-boto3 python2-crypto which python2-pip.noarch python-scandir python2-packaging azure-cli" \
  && EPEL_TESTING_PKGS="ansible" \
  && yum install -y epel-release \
  && yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
  && yum install -y --setopt=tsflags=nodocs --enablerepo=epel-testing $EPEL_TESTING_PKGS \
  && yum install -y https://sdodson.fedorapeople.org/google-cloud-sdk-183.0.0-3.el7.x86_64.rpm \
  && rpm -V $INSTALL_PKGS $EPEL_PKGS $EPEL_TESTING_PKGS \
- && pip install apache-libcloud~=2.2.1 \
+ && pip install 'apache-libcloud~=2.2.1' 'SecretStorage<3' 'ansible[azure]' \
  && yum clean all
 
 LABEL name="openshift/origin-ansible" \
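
The new EPEL packages (python-scandir, python2-packaging) and the pip extras 'SecretStorage<3' and 'ansible[azure]' pull in the Python dependencies of Ansible's Azure modules; the azure-cli rpm comes from the repo file added below. Presumably SecretStorage is pinned below 3 because newer releases drop Python 2 support. A quick smoke test of the resulting image (hypothetical tag) might be:

    docker run --rm openshift/origin-ansible az --version
    docker run --rm openshift/origin-ansible python -c 'import azure.mgmt.compute'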

+ 6 - 0
images/installer/origin-extra-root/etc/yum.repos.d/azure-cli.repo

@@ -0,0 +1,6 @@
+[azure-cli]
+name=Azure CLI
+baseurl=https://packages.microsoft.com/yumrepos/azure-cli
+enabled=1
+gpgcheck=1
+gpgkey=https://packages.microsoft.com/keys/microsoft.asc

+ 6 - 46
images/installer/root/usr/local/bin/entrypoint-gcp

@@ -1,51 +1,11 @@
 #!/bin/bash
 #
-# This file sets up the user to run in the GCP environment.
-# It provides dynamic inventory that works well when run in
-# a container environment by setting up a default inventory.
-# It assumes the user has provided a GCP service account token
-# and ssh-privatekey file at "$(pwd)/inventory/dynamic/injected"
-# and automatically links any YAML files found into the group
-# vars directory, which allows the playbook to more easily be
-# run in containerized contexts.
-
-WORK=$(pwd)
-FILES="${WORK}/inventory/dynamic/injected"
-
-# Patch /etc/passwd file with the current user info.
-# The current user's entry must be correctly defined in this file in order for
-# the `ssh` command to work within the created container.
-
-if ! whoami &>/dev/null; then
-  echo "${USER:-default}:x:$(id -u):$(id -g):Default User:$HOME:/sbin/nologin" >> /etc/passwd
-fi
-
-# Provide a "files_dir" variable that points to inventory/dynamic/injected
-echo "files_dir: \"${FILES}\"" > "${WORK}/inventory/dynamic/gcp/group_vars/all/00_default_files_dir.yml"
-# Add any injected variable files into the group vars directory
-find "${FILES}" -name '*.yml' -or -name '*.yaml' -or -name vars | xargs -L1 -I {} ln -fs {} "${WORK}/inventory/dynamic/gcp/group_vars/all"
-# Avoid sudo when running locally - nothing in the image requires it.
-mkdir -p "${WORK}/inventory/dynamic/gcp/host_vars/localhost"
-echo "ansible_become: no" > "${WORK}/inventory/dynamic/gcp/host_vars/localhost/00_skip_root.yaml"
+# Temporary wrapper for entrypoint-gcp until the migration
+# to entrypoint-provider is complete.
+#
 
-if [[ -z "${ANSIBLE_CONFIG-}" ]]; then
-  export ANSIBLE_CONFIG="${WORK}/inventory/dynamic/gcp/ansible.cfg"
-fi
+set -euo pipefail
 
-# SSH requires the file to be owned by the current user, but Docker copies
-# files in as root. Put the file into the ssh dir with the right permissions
-if [[ -f "${FILES}/ssh-privatekey" ]]; then
-  keyfile="${HOME}/.ssh/google_compute_engine"
-  mkdir "${HOME}/.ssh"
-  rm -f "${keyfile}"
-  cat "${FILES}/ssh-privatekey" > "${keyfile}"
-  chmod 0600 "${keyfile}"
-  ssh-keygen -y -f "${keyfile}" >  "${keyfile}.pub"
-fi
-if [[ -f "${FILES}/gce.json" ]]; then
-  gcloud auth activate-service-account --key-file="${FILES}/gce.json"
-else
-  echo "No service account file found at ${FILES}/gce.json, bypassing login"
-fi
+export TYPE='gcp'
 
-exec "$@"
+exec /usr/local/bin/entrypoint-provider "$@"

+ 76 - 0
images/installer/root/usr/local/bin/entrypoint-provider

@@ -0,0 +1,76 @@
+#!/bin/bash
+#
+# This file sets up the user to run in a cloud environment.
+# It provides dynamic inventory that works well when run in
+# a container environment by setting up a default inventory.
+# It assumes the user has provided a service account token
+# and ssh-privatekey file at "$(pwd)/inventory/dynamic/injected"
+# and automatically links any YAML files found into the group
+# vars directory, which allows the playbook to more easily be
+# run in containerized contexts.
+#
+# Currently GCP and Azure are supported.
+
+set -euo pipefail
+
+WORK=$(pwd)
+FILES="${WORK}/inventory/dynamic/injected"
+
+# Patch /etc/passwd file with the current user info.
+# The current user's entry must be correctly defined in this file in order for
+# the `ssh` command to work within the created container.
+
+if ! whoami &>/dev/null; then
+  echo "${USER:-default}:x:$(id -u):$(id -g):Default User:$HOME:/sbin/nologin" >> /etc/passwd
+fi
+
+# Provide a "files_dir" variable that points to inventory/dynamic/injected
+echo "files_dir: \"${FILES}\"" > "${WORK}/inventory/dynamic/${TYPE}/group_vars/all/00_default_files_dir.yml"
+# Add any injected variable files into the group vars directory
+find "${FILES}" \( -name '*.yml' -or -name '*.yaml' -or -name vars \) -print0 | xargs -0 -L1 -I {} ln -fs {} "${WORK}/inventory/dynamic/${TYPE}/group_vars/all"
+# Avoid sudo when running locally - nothing in the image requires it.
+mkdir -p "${WORK}/inventory/dynamic/${TYPE}/host_vars/localhost"
+echo "ansible_become: no" > "${WORK}/inventory/dynamic/${TYPE}/host_vars/localhost/00_skip_root.yaml"
+
+if [[ -z "${ANSIBLE_CONFIG-}" ]]; then
+  export ANSIBLE_CONFIG="${WORK}/inventory/dynamic/${TYPE}/ansible.cfg"
+fi
+
+# SSH requires the file to be owned by the current user, but Docker copies
+# files in as root. Put the file into the ssh dir with the right permissions
+if [[ -f "${FILES}/ssh-privatekey" ]]; then
+  if [[ "$TYPE" == 'gcp' ]]; then
+    keyfile="${HOME}/.ssh/google_compute_engine"
+  else
+    keyfile="${HOME}/.ssh/id_rsa"
+  fi
+  mkdir "${HOME}/.ssh"
+  rm -f "${keyfile}"
+  cat "${FILES}/ssh-privatekey" > "${keyfile}"
+  chmod 0600 "${keyfile}"
+  ssh-keygen -y -f "${keyfile}" >  "${keyfile}.pub"
+fi
+
+if [[ "$TYPE" == 'gcp' ]]; then
+  if [[ -f "${FILES}/gce.json" ]]; then
+    gcloud auth activate-service-account --quiet --key-file="${FILES}/gce.json"
+  else
+    echo "No service account file found at ${FILES}/gce.json, bypassing login"
+  fi
+fi
+
+if [[ "$TYPE" == 'azure' ]]; then
+  if [[ -f "${FILES}/credentials" ]]; then
+    export AZURE_CLIENT_ID=$(sed -ne '/^client_id=/ { s/^client_id=//; p; }' "${FILES}/credentials")
+    export AZURE_SECRET=$(sed -ne '/^secret=/ { s/^secret=//; p; }' "${FILES}/credentials")
+    export AZURE_SUBSCRIPTION_ID=$(sed -ne '/^subscription_id=/ { s/^subscription_id=//; p; }' "${FILES}/credentials")
+    export AZURE_TENANT=$(sed -ne '/^tenant=/ { s/^tenant=//; p; }' "${FILES}/credentials")
+
+    az login --service-principal --username "$AZURE_CLIENT_ID" --password "$AZURE_SECRET" --tenant "$AZURE_TENANT" >/dev/null
+
+  else
+    echo "No service account file found at ${FILES}/credentials, bypassing login"
+  fi
+fi
+
+exec "$@"
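
For reference, a minimal sketch of the injected credentials file that the sed parsing above expects; each key must start at the beginning of its line, and the values here are placeholders:

    $ cat "$(pwd)/inventory/dynamic/injected/credentials"
    client_id=<service principal application id>
    secret=<service principal password>
    subscription_id=<subscription id>
    tenant=<tenant id>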

+ 43 - 0
inventory/dynamic/azure/ansible.cfg

@@ -0,0 +1,43 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts.
+
+[defaults]
+# Set the log_path
+#log_path = /tmp/ansible.log
+
+# Additional default options for OpenShift Ansible
+forks = 20
+host_key_checking = False
+retry_files_enabled = False
+retry_files_save_path = ~/ansible-installer-retries
+nocows = True
+remote_user = root
+roles_path = roles/
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = $HOME/ansible/facts
+fact_caching_timeout = 600
+callback_whitelist = profile_tasks
+inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt, .ini
+# work around privilege escalation timeouts in ansible:
+timeout = 30
+
+# Uncomment to use the provided example inventory
+inventory = none
+
+[inventory]
+# fail more helpfully when the inventory file does not parse (Ansible 2.4+)
+unparsed_is_failed=true
+
+# Additional ssh options for OpenShift Ansible
+[ssh_connection]
+pipelining = True
+ssh_args = -o ControlMaster=auto -o ControlPersist=600s
+timeout = 10
+# shorten the ControlPath which is often too long; when it is,
+# ssh connection reuse silently fails, making everything slower.
+control_path = %(directory)s/%%h-%%r

+ 0 - 0
inventory/dynamic/azure/group_vars/all/.gitkeep


+ 1 - 0
inventory/dynamic/azure/none

@@ -0,0 +1 @@
+{}
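
The `{}` is an empty JSON inventory: the azure ansible.cfg above sets `inventory = none`, so Ansible starts with no static hosts and the playbooks add VMs dynamically at runtime via add_host (see tasks/provision_instance.yml below).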

+ 1 - 0
playbooks/aws/openshift-cluster/build_ami.yml

@@ -28,6 +28,7 @@
     set_fact:
       ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default(ansible_ssh_user) }}"
       openshift_node_image_prep_packages:
+      - cloud-init
       - cloud-utils-growpart
 
 # This is the part that installs all of the software and configs for the instance

+ 6 - 0
playbooks/azure/OWNERS

@@ -0,0 +1,6 @@
+reviewers:
+- jim-minter
+- kwoodson
+approvers:
+- jim-minter
+- kwoodson

+ 2 - 0
playbooks/azure/README.md

@@ -0,0 +1,2 @@
+The playbooks and tasks under this directory are not supported for end-customer
+use.

+ 54 - 0
playbooks/azure/openshift-cluster/build_base_image.yml

@@ -0,0 +1,54 @@
+---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: provision resource group
+    import_tasks: tasks/provision_instance.yml
+    vars:
+      image_prefix: "{{ openshift_azure_input_image_prefix }}"
+      image_resource_group: "{{ openshift_azure_input_image_ns }}"
+
+- hosts: nodes
+  tasks:
+  - name: calculate yum repositories
+    set_fact:
+      openshift_additional_repos: "{{ azure_base_repos[ansible_distribution] }}"
+
+  - name: configure yum repositories
+    import_tasks: tasks/yum_certs.yml
+
+  - name: update rpms
+    import_role:
+      name: os_update_latest
+    vars:
+      os_update_latest_reboot: True
+
+  - name: deconfigure yum repositories
+    import_tasks: tasks/remove_yum.yml
+
+  - name: run waagent deprovision
+    command: waagent -deprovision+user -force
+    args:
+      chdir: /
+
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: get current date/time
+    shell: TZ=Etc/UTC date +%Y%m%d%H%M
+    register: now
+
+  - set_fact:
+      image_name: "{{ openshift_azure_output_image_prefix }}-{{ now.stdout }}"
+
+  - name: create image
+    import_tasks: tasks/create_image_from_vm.yml
+    vars:
+      image_resource_group: "{{ openshift_azure_output_image_ns }}"
+      image_tags:
+        root_image: "{{ (input_image.stdout | from_json).name }}"
+        kernel: "{{ hostvars[groups['nodes'][0]]['ansible_kernel'] }}"
+
+  - name: create blob
+    import_tasks: tasks/create_blob_from_vm.yml
+    when: openshift_azure_storage_account is defined and openshift_azure_storage_account
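
A hypothetical invocation, assuming Azure credentials are already exported in the environment (as entrypoint-provider does) and that provisioning_vars.yml.example (shown further below) has been copied to provisioning_vars.yml and filled in:

    ansible-playbook playbooks/azure/openshift-cluster/build_base_image.yml \
        -e @playbooks/azure/openshift-cluster/provisioning_vars.yml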

+ 88 - 0
playbooks/azure/openshift-cluster/build_node_image.yml

@@ -0,0 +1,88 @@
+---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: provision resource group
+    import_tasks: tasks/provision_instance.yml
+    vars:
+      image_prefix: "{{ openshift_azure_input_image_prefix }}"
+      image_resource_group: "{{ openshift_azure_input_image_ns }}"
+      data_disks:
+      - managed_disk_type: Standard_LRS
+        disk_size_gb: 100
+        lun: 0
+
+  - set_fact:
+      openshift_node_bootstrap: True
+
+- hosts: nodes
+  tasks:
+  - set_fact:
+      openshift_deployment_type: "{{ 'openshift-enterprise' if ansible_distribution == 'RedHat' else 'origin' }}"
+      openshift_enable_origin_repo: False
+      openshift_node_bootstrap: True
+      skip_node_svc_handlers: True
+      openshift_additional_repos: "{{ azure_node_repos[ansible_distribution] }}"
+
+  - set_fact:
+      openshift_additional_repos: "{{ openshift_additional_repos + [{'name': 'install_repo', 'baseurl': openshift_azure_install_repo, 'enabled': true, 'gpgcheck': false}] }}"
+    when: openshift_azure_install_repo is defined and openshift_azure_install_repo
+
+  - name: install centos-release-paas-common rpm
+    yum:
+      name: centos-release-paas-common
+      state: present
+    when: ansible_distribution == "CentOS"
+
+  - name: configure yum repositories
+    import_tasks: tasks/yum_certs.yml
+
+  - name: update rpms
+    import_role:
+      name: os_update_latest
+    vars:
+      os_update_latest_reboot: True
+
+- name: install openshift
+  import_playbook: ../../openshift-node/private/image_prep.yml
+
+- hosts: nodes
+  tasks:
+  - name: deconfigure yum repositories
+    import_tasks: tasks/remove_yum.yml
+
+  - name: record installed rpms
+    yum:
+      list: installed
+    register: yum
+
+  - name: run waagent deprovision
+    command: waagent -deprovision+user -force
+    args:
+      chdir: /
+
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: get current date/time
+    shell: TZ=Etc/UTC date +%Y%m%d%H%M
+    register: now
+
+  - set_fact:
+      openshift_rpm: "{{ hostvars[groups['nodes'][0]]['yum'].results | selectattr('name', 'match', '^(origin|atomic-openshift)$') | first }}"
+
+  - set_fact:
+      image_name: "{{ openshift_azure_output_image_prefix }}-{{ openshift_rpm.version | regex_replace('^(\\d+\\.\\d+).*', '\\1') }}-{{ now.stdout }}"
+
+  - name: create image
+    import_tasks: tasks/create_image_from_vm.yml
+    vars:
+      image_resource_group: "{{ openshift_azure_output_image_ns }}"
+      image_tags:
+        base_image: "{{ (input_image.stdout | from_json).name }}"
+        kernel: "{{ hostvars[groups['nodes'][0]]['ansible_kernel'] }}"
+        openshift: "{{ openshift_rpm.name }}-{{ openshift_rpm.version }}-{{ openshift_rpm.release }}.{{ openshift_rpm.arch }}"
+
+  - name: create blob
+    import_tasks: tasks/create_blob_from_vm.yml
+    when: openshift_azure_storage_account is defined and openshift_azure_storage_account
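
The regex_replace trims the installed origin/atomic-openshift rpm version down to major.minor for the image name. Illustrative values only: with openshift_azure_output_image_prefix=centos7, an origin-3.10.0 rpm and a build at 2018-06-05 12:00 UTC would produce:

    "3.10.0" | regex_replace('^(\d+\.\d+).*', '\1')  ->  "3.10"
    image_name: centos7-3.10-201806051200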

+ 12 - 0
playbooks/azure/openshift-cluster/deprovision.yml

@@ -0,0 +1,12 @@
+---
+# Warning, use with caution, this will remove all resources
+# from the group.
+
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: delete resource group
+    azure_rm_resourcegroup:
+      name: "{{ openshift_azure_resource_group_name }}"
+      state: absent
+      force: yes  # removes all resources within the group

+ 62 - 0
playbooks/azure/openshift-cluster/group_vars/all/yum_repos.yml

@@ -0,0 +1,62 @@
+---
+azure_base_repos:
+  RedHat:
+  - name: rhel-server-7-releases
+    baseurl: https://mirror.openshift.com/libra/rhui-rhel-server-7-releases/
+    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+    sslclientcert: /var/lib/yum/client-cert.pem
+    sslclientkey: /var/lib/yum/client-key.pem
+    enabled: yes
+
+  - name: rhel-server-7-extras
+    baseurl: https://mirror.openshift.com/libra/rhui-rhel-server-7-extras/
+    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+    sslclientcert: /var/lib/yum/client-cert.pem
+    sslclientkey: /var/lib/yum/client-key.pem
+    enabled: yes
+
+  CentOS: []
+
+azure_node_repos:
+  RedHat:
+  - name: rhel-server-7-releases
+    baseurl: https://mirror.openshift.com/libra/rhui-rhel-server-7-releases/
+    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+    sslclientcert: /var/lib/yum/client-cert.pem
+    sslclientkey: /var/lib/yum/client-key.pem
+    enabled: yes
+
+  - name: rhel-server-7-extras
+    baseurl: https://mirror.openshift.com/libra/rhui-rhel-server-7-extras/
+    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+    sslclientcert: /var/lib/yum/client-cert.pem
+    sslclientkey: /var/lib/yum/client-key.pem
+    enabled: yes
+
+  - name: rhel-7-fast-datapath-rpms
+    baseurl: https://mirror.openshift.com/enterprise/rhel/rhel-7-fast-datapath-rpms/
+    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+    sslclientcert: /var/lib/yum/client-cert.pem
+    sslclientkey: /var/lib/yum/client-key.pem
+    enabled: yes
+
+  - name: rhel-7-server-ansible-2.4-rpms
+    baseurl: https://mirror.openshift.com/enterprise/rhel/rhel-7-server-ansible-2.4-rpms/
+    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+    sslclientcert: /var/lib/yum/client-cert.pem
+    sslclientkey: /var/lib/yum/client-key.pem
+    enabled: yes
+
+  #- name: rhel-server-7-ose-3.10
+  #  baseurl: https://mirror.openshift.com/libra/rhui-rhel-server-7-ose-3.10/
+  #  gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+  #  sslclientcert: /var/lib/yum/client-cert.pem
+  #  sslclientkey: /var/lib/yum/client-key.pem
+  #  enabled: yes
+
+  CentOS:
+  # TODO: should be using a repo which only provides prerequisites
+  - name: openshift-origin
+    baseurl: http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin/
+    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+    enabled: yes

+ 109 - 0
playbooks/azure/openshift-cluster/launch.yml

@@ -0,0 +1,109 @@
+---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - import_role:
+      name: lib_utils
+
+  - name: calculate input image
+    command: az image list -g "{{ openshift_azure_input_image_ns }}" --query "[?starts_with(name, '{{ openshift_azure_input_image_prefix }}-') && tags.valid=='true'] | sort_by(@, &name) | [-1]"
+    register: input_image
+
+  - name: create temporary directory
+    tempfile:
+      state: directory
+    register: tmp
+
+  - name: download acs-engine
+    get_url:
+      url: "{{ item }}"
+      dest: "{{ tmp.path }}/"
+    with_list:
+    - "http://acs-engine-build-acs-engine-build.svc.ci.openshift.org/acs-engine"
+    - "http://acs-engine-build-acs-engine-build.svc.ci.openshift.org/openshift.json"
+
+  - name: make acs-engine executable
+    file:
+      path: "{{ tmp.path }}/acs-engine"
+      mode: 0755
+
+  - name: configure acs-engine
+    yedit:
+      content_type: json
+      src: "{{ tmp.path }}/openshift.json"
+      edits:
+      - key: properties.orchestratorProfile.openShiftConfig.clusterUsername
+        value: demo
+      - key: properties.orchestratorProfile.openShiftConfig.clusterPassword
+        value: "{{ 16 | lib_utils_oo_random_word }}"
+      # azProfile
+      - key: properties.azProfile.tenantId
+        value: "{{ lookup('env', 'AZURE_TENANT') }}"
+      - key: properties.azProfile.subscriptionId
+        value: "{{ lookup('env', 'AZURE_SUBSCRIPTION_ID') }}"
+      - key: properties.azProfile.resourceGroup
+        value: "{{ openshift_azure_resource_group_name }}"
+      - key: properties.azProfile.location
+        value: "{{ openshift_azure_resource_location }}"
+      # masterProfile
+      - key: properties.masterProfile.dnsPrefix
+        value: "a{{ 16 | lib_utils_oo_random_word }}a"
+      - key: properties.masterProfile.imageReference.name
+        value: "{{ (input_image.stdout | from_json).name }}"
+      - key: properties.masterProfile.imageReference.resourceGroup
+        value: "{{ openshift_azure_input_image_ns }}"
+      # agentpool compute
+      - key: properties.agentPoolProfiles[0].imageReference.name
+        value: "{{ (input_image.stdout | from_json).name }}"
+      - key: properties.agentPoolProfiles[0].imageReference.resourceGroup
+        value: "{{ openshift_azure_input_image_ns }}"
+      # agentpool infra
+      - key: properties.agentPoolProfiles[1].imageReference.name
+        value: "{{ (input_image.stdout | from_json).name }}"
+      - key: properties.agentPoolProfiles[1].imageReference.resourceGroup
+        value: "{{ openshift_azure_input_image_ns }}"
+      # linuxprofile
+      - key: properties.linuxProfile.adminUsername
+        value: "cloud-user"
+      - key: properties.linuxProfile.ssh.publicKeys[0].keyData
+        value: "{{ openshift_azure_vm_ssh_public_key }}"
+      # serviceprincipal
+      - key: properties.servicePrincipalProfile.clientId
+        value: "{{ lookup('env', 'AZURE_CLIENT_ID') }}"
+      - key: properties.servicePrincipalProfile.secret
+        value: "{{ lookup('env', 'AZURE_SECRET') }}"
+
+  - name: run acs-engine deploy
+    command: |
+      {{ tmp.path }}/acs-engine deploy \
+        --resource-group {{ openshift_azure_resource_group_name }} \
+        --location {{ openshift_azure_resource_location }} \
+        --subscription-id {{ lookup('env', 'AZURE_SUBSCRIPTION_ID') }} \
+        --auth-method client_secret \
+        --client-id {{ lookup('env', 'AZURE_CLIENT_ID') }} \
+        --client-secret {{ lookup('env', 'AZURE_SECRET') }} \
+        {{ tmp.path }}/openshift.json
+    ignore_errors: yes
+    register: deploy
+
+  - name: delete temporary directory
+    file:
+      path: "{{ tmp.path }}"
+      state: absent
+
+  - block:
+    - name: get azure deployment message
+      command: >
+        az group deployment list
+        -g "{{ openshift_azure_resource_group_name }}"
+        --query "[0].properties.additionalProperties.error.details[0].message"
+        -o tsv
+      register: message
+
+    - debug:
+        msg: "{{ (message.stdout | from_json).error.details[0].message }}"
+
+    - assert:
+        that: "{{ not deploy.failed }}"
+
+    when: deploy.failed
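
The JMESPath query in "calculate input image" keeps only images whose name starts with the given prefix and whose `valid` tag equals the string 'true', sorts them by name, and takes the last element; since the build playbooks suffix image names with a UTC timestamp, lexicographic order doubles as chronological order. An illustrative run, with hypothetical image names:

    # images present: centos7-base-201806010000, centos7-base-201806020000 (both tagged valid=true)
    az image list -g images --query \
      "[?starts_with(name, 'centos7-base-') && tags.valid=='true'] | sort_by(@, &name) | [-1]"
    # -> JSON for centos7-base-201806020000, the newest valid image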

+ 50 - 0
playbooks/azure/openshift-cluster/provisioning_vars.yml.example

@@ -0,0 +1,50 @@
+---
+# resource group where temporary resources associated with playbook will be
+# placed
+openshift_azure_resource_group_name:
+
+# azure region where resource group will be created
+openshift_azure_resource_location: eastus
+
+# input image resource group
+openshift_azure_input_image_ns: images
+
+# input image prefix, e.g. centos7-root or centos7-base
+openshift_azure_input_image_prefix:
+
+# output image resource group
+openshift_azure_output_image_ns: images
+
+# output image prefix, e.g. centos7-base or centos7
+openshift_azure_output_image_prefix:
+
+# ssh public key for VMs created by playbook; private key must be accessible to
+# ansible
+openshift_azure_vm_ssh_public_key: ssh-rsa ...
+
+# additional yum repo containing origin rpms, used for PR testing
+#openshift_azure_install_repo: http://...
+
+# yum client certificate and key, used if building RHEL images
+#yum_client_cert_contents: |
+#  -----BEGIN CERTIFICATE-----
+#  ...
+#  -----END CERTIFICATE-----
+#yum_client_key_contents: |
+#  -----BEGIN RSA PRIVATE KEY-----
+#  ...
+#  -----END RSA PRIVATE KEY-----
+
+# alternative image registry, used if building OCP pre-release images
+#oreg_url: "registry.reg-aws.openshift.com:443/openshift3/ose-${component}:${version}"
+#oreg_auth_user: <USERNAME>
+#oreg_auth_password: <TOKEN>
+
+# optional storage account in which to place image blob
+#openshift_azure_storage_account:
+
+# resource group of storage account
+#openshift_azure_storage_account_ns:
+
+# container within storage account to hold image blob
+#openshift_azure_container:

+ 1 - 0
playbooks/azure/openshift-cluster/roles

@@ -0,0 +1 @@
+../../../roles

+ 43 - 0
playbooks/azure/openshift-cluster/tasks/create_blob_from_vm.yml

@@ -0,0 +1,43 @@
+---
+- name: get vm details
+  command: >
+    az vm show
+    -g "{{ openshift_azure_resource_group_name }}"
+    -n vm
+  register: vm
+
+- name: get storage account key
+  command: >
+    az storage account keys list
+    -g "{{ openshift_azure_storage_account_ns }}"
+    -n "{{ openshift_azure_storage_account }}"
+  register: keys
+
+- name: get disk sas url
+  command: >
+    az disk grant-access
+    --ids "{{ (vm.stdout | from_json).storageProfile.osDisk.managedDisk.id }}"
+    --duration-in-seconds 3600
+  register: sas
+
+- name: start copy
+  command: >
+    az storage blob copy start
+    --source-uri "{{ (sas.stdout | from_json).accessSas }}"
+    --account-name "{{ openshift_azure_storage_account }}"
+    --account-key "{{ (keys.stdout | from_json)[0].value }}"
+    --destination-container "{{ openshift_azure_container }}"
+    --destination-blob "{{ image_name }}"
+
+- name: get copy status
+  command: >
+    az storage blob show
+    --account-name "{{ openshift_azure_storage_account }}"
+    --account-key "{{ (keys.stdout | from_json)[0].value }}"
+    --container-name "{{ openshift_azure_container }}"
+    --name "{{ image_name }}"
+    --query "properties.copy.status"
+  register: status
+  until: status.stdout | from_json == "success"
+  retries: 120
+  delay: 30

+ 38 - 0
playbooks/azure/openshift-cluster/tasks/create_image_from_vm.yml

@@ -0,0 +1,38 @@
+---
+- name: deallocate vm
+  azure_rm_virtualmachine:
+    name: vm
+    resource_group: "{{ openshift_azure_resource_group_name }}"
+    allocated: no
+  register: vm
+
+- name: generalize vm
+  command: >
+    az vm generalize
+    -g "{{ openshift_azure_resource_group_name }}"
+    -n vm
+
+- name: create image resource group
+  azure_rm_resourcegroup:
+    name: "{{ image_resource_group }}"
+    location: "{{ openshift_azure_resource_location }}"
+
+# Note: requires ansible 2.5
+- name: create image
+  azure_rm_image:
+    resource_group: "{{ image_resource_group }}"
+    name: "{{ image_name }}"
+    source: "{{ vm.ansible_facts.azure_vm.properties.storageProfile.osDisk.managedDisk.id }}"
+    os_type: Linux
+
+- name: calculate tags
+  set_fact:
+    final_tags: "{{ (input_image.stdout | from_json).tags | combine(image_tags) }}"
+
+- name: tag image
+  command: >
+    az resource tag
+    --resource-type Microsoft.Compute/images
+    -g "{{ image_resource_group }}"
+    -n "{{ image_name }}"
+    --tags {% for k in final_tags %}{{ k }}={{ final_tags[k] }} {% endfor %}
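
The inline Jinja loop flattens final_tags into space-separated key=value arguments, e.g. (hypothetical values) `--tags base_image=centos7-base-201806020000 kernel=3.10.0-862.el7.x86_64`; note that tag values containing spaces would need quoting that this loop does not provide.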

+ 49 - 0
playbooks/azure/openshift-cluster/tasks/provision_instance.yml

@@ -0,0 +1,49 @@
+---
+- name: create resource group
+  azure_rm_resourcegroup:
+    name: "{{ openshift_azure_resource_group_name }}"
+    location: "{{ openshift_azure_resource_location }}"
+
+- name: create vnet
+  azure_rm_virtualnetwork:
+    name: vnet
+    resource_group: "{{ openshift_azure_resource_group_name }}"
+    address_prefixes:
+    - 192.168.0.0/16
+
+- name: create subnet
+  azure_rm_subnet:
+    name: subnet
+    resource_group: "{{ openshift_azure_resource_group_name }}"
+    virtual_network: vnet
+    address_prefix: 192.168.0.0/24
+
+- name: calculate input image
+  command: az image list -g "{{ image_resource_group }}" --query "[?starts_with(name, '{{ image_prefix }}-') && tags.valid=='true'] | sort_by(@, &name) | [-1]"
+  register: input_image
+
+- name: create vm
+  azure_rm_virtualmachine:
+    name: vm
+    resource_group: "{{ openshift_azure_resource_group_name }}"
+    vm_size: Standard_D4s_v3
+    image:
+      name: "{{ (input_image.stdout | from_json).name }}"
+      resource_group: "{{ image_resource_group }}"
+    os_type: Linux
+    storage_blob_name: vm
+    managed_disk_type: Standard_LRS
+    data_disks: "{{ data_disks | default([]) }}"
+    admin_username: cloud-user
+    ssh_password_enabled: False
+    ssh_public_keys:
+    - path: "/home/cloud-user/.ssh/authorized_keys"
+      key_data: "{{ openshift_azure_vm_ssh_public_key }}"
+  register: vm
+
+- name: add vm to inventory
+  add_host:
+    groups: nodes
+    name: "{{ vm.ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].properties.ipConfigurations[0].properties.publicIPAddress.properties.ipAddress }}"
+    ansible_ssh_user: cloud-user
+    ansible_become: True

+ 15 - 0
playbooks/azure/openshift-cluster/tasks/remove_yum.yml

@@ -0,0 +1,15 @@
+---
+- name: remove yum client certificate
+  file:
+    state: absent
+    path: "/var/lib/yum/{{ item.name }}"
+  with_items:
+  - name: client-cert.pem
+  - name: client-key.pem
+  when: ansible_distribution == "RedHat"
+
+- name: remove yum repositories
+  yum_repository:
+    state: absent
+    name: "{{ item.name }}"
+  with_items: "{{ openshift_additional_repos }}"

+ 19 - 0
playbooks/azure/openshift-cluster/tasks/yum_certs.yml

@@ -0,0 +1,19 @@
+---
+- name: copy yum client certificate
+  copy:
+    content: "{{ item.content }}"
+    dest: "/var/lib/yum/{{ item.name }}"
+    mode: '0600'
+  with_items:
+  - name: client-cert.pem
+    content: "{{ yum_client_cert_contents }}"
+  - name: client-key.pem
+    content: "{{ yum_client_key_contents }}"
+  no_log: True
+  when: ansible_distribution == "RedHat"
+
+- name: add yum repositories
+  import_role:
+    name: openshift_repos
+  vars:
+    r_openshift_repos_has_run: True

+ 0 - 1
roles/openshift_node/defaults/main.yml

@@ -133,7 +133,6 @@ default_r_openshift_node_image_prep_packages:
 - libselinux-python
 - conntrack-tools
 - openssl
-- cloud-init
 - iproute
 - python-dbus
 - PyYAML

+ 2 - 0
roles/openshift_node/tasks/bootstrap.yml

@@ -7,6 +7,7 @@
   file:
     state: directory
     path: "/etc/systemd/system/{{ openshift_service_type }}-node.service.d"
+  when: '"cloud-init" in r_openshift_node_image_prep_packages'
 
 - name: laydown systemd override
   copy:
@@ -14,6 +15,7 @@
     content: |
       [Unit]
       After=cloud-init.service
+  when: '"cloud-init" in r_openshift_node_image_prep_packages'
 
 - name: update the sysconfig to have necessary variables
   lineinfile:

+ 2 - 0
roles/os_update_latest/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+os_update_latest_reboot: False

+ 9 - 0
roles/os_update_latest/tasks/main.yml

@@ -3,3 +3,12 @@
   package: name=* state=latest
   register: result
   until: result is succeeded
+
+- when:
+  - os_update_latest_reboot
+  - result.changed
+  block:
+  - shell: ( sleep 2 && reboot ) &
+
+  - wait_for_connection:
+      delay: 10
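
Backgrounding `( sleep 2 && reboot ) &` lets the shell task return success before the reboot tears down the SSH connection; wait_for_connection then pauses 10 seconds and blocks until the host is reachable again. The block only runs when os_update_latest_reboot is set and the package update actually changed something.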

+ 2 - 2
setup.py

@@ -350,13 +350,13 @@ class OpenShiftAnsibleSyntaxCheck(Command):
             # Ignore imported playbooks in 'common', 'private' and 'init'. It is
             # expected that these locations would be imported by entry point
             # playbooks.
-            # Ignore playbooks in 'aws', 'gcp' and 'openstack' because these
+            # Ignore playbooks in 'aws', 'azure', 'gcp' and 'openstack' because these
             # playbooks do not follow the same component entry point structure.
             # Ignore deploy_cluster.yml and prerequisites.yml because these are
             # entry point playbooks but are imported by playbooks in the cloud
             # provisioning playbooks.
             ignored = ('common', 'private', 'init',
-                       'aws', 'gcp', 'openstack',
+                       'aws', 'azure', 'gcp', 'openstack',
                        'deploy_cluster.yml', 'prerequisites.yml')
             if any(x in playbook for x in ignored):
                 continue