Browse Source

Merge branch 'master' into openstack-ssl-cert

David Critch 6 years ago
parent
commit
423bbf268a
100 changed files with 3774 additions and 1180 deletions
  1. 2 3
      .github/ISSUE_TEMPLATE.md
  2. 46 0
      .papr-master-ha.inventory
  3. 33 0
      .papr.all-in-one.inventory
  4. 7 10
      .papr.inventory
  5. 58 7
      .papr.sh
  6. 56 0
      .papr.yml
  7. 1 0
      .release
  8. 1 1
      .tito/packages/openshift-ansible
  9. 5 0
      .tito/releasers.conf
  10. 14 0
      .travis.yml
  11. 0 1
      DEPLOYMENT_TYPES.md
  12. 20 0
      HOOKS.md
  13. 12 0
      OWNERS
  14. 58 2
      README.md
  15. 9 9
      README_CONTAINERIZED_INSTALLATION.md
  16. 2 2
      README_CONTAINER_IMAGE.md
  17. 1 1
      ansible.cfg
  18. 1 1
      docs/best_practices_guide.adoc
  19. 152 0
      docs/openshift_components.md
  20. 1 1
      docs/proposals/role_decomposition.md
  21. 15 54
      docs/pull_requests.md
  22. 19 10
      docs/repo_structure.md
  23. 16 13
      examples/README.md
  24. 2 2
      examples/certificate-check-upload.yaml
  25. 2 2
      examples/certificate-check-volume.yaml
  26. 6 9
      examples/scheduled-certcheck-upload.yaml
  27. 6 9
      examples/scheduled-certcheck-volume.yaml
  28. 3 3
      hack/build-images.sh
  29. 1 1
      hack/push-release.sh
  30. 5 3
      images/installer/Dockerfile
  31. 6 4
      images/installer/Dockerfile.rhel7
  32. 2 2
      images/installer/README_INVENTORY_GENERATOR.md
  33. 6 0
      images/installer/origin-extra-root/etc/yum.repos.d/azure-cli.repo
  34. 1 1
      images/installer/root/exports/manifest.json
  35. 6 46
      images/installer/root/usr/local/bin/entrypoint-gcp
  36. 75 0
      images/installer/root/usr/local/bin/entrypoint-provider
  37. 1 1
      images/installer/root/usr/local/bin/run
  38. 1 1
      images/installer/root/usr/local/bin/usage
  39. 43 0
      inventory/dynamic/azure/ansible.cfg
  40. 0 0
      inventory/dynamic/azure/group_vars/all/.gitkeep
  41. 1 0
      inventory/dynamic/azure/none
  42. 6 6
      inventory/dynamic/gcp/group_vars/all/00_defaults.yml
  43. 113 184
      inventory/hosts.example
  44. 1 1
      inventory/hosts.glusterfs.native.example
  45. 5 5
      inventory/hosts.glusterfs.registry-only.example
  46. 8 8
      inventory/hosts.glusterfs.storage-and-registry.example
  47. 5 4
      inventory/hosts.localhost
  48. 3 3
      inventory/hosts.openstack
  49. 1307 44
      openshift-ansible.spec
  50. 1 1
      playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
  51. 3 575
      playbooks/adhoc/uninstall.yml
  52. 49 0
      playbooks/adhoc/uninstall_docker.yml
  53. 573 0
      playbooks/adhoc/uninstall_openshift.yml
  54. 16 0
      playbooks/aws/OWNERS
  55. 3 1
      playbooks/aws/openshift-cluster/build_ami.yml
  56. 2 3
      playbooks/aws/openshift-cluster/install.yml
  57. 0 1
      playbooks/aws/provisioning-inventory.example.ini
  58. 3 0
      playbooks/azure/BRANCH.md
  59. 8 0
      playbooks/azure/OWNERS
  60. 2 0
      playbooks/azure/README.md
  61. 52 0
      playbooks/azure/openshift-cluster/build_base_image.yml
  62. 116 0
      playbooks/azure/openshift-cluster/build_node_image.yml
  63. 53 0
      playbooks/azure/openshift-cluster/create_and_publish_offer.md
  64. 98 0
      playbooks/azure/openshift-cluster/create_and_publish_offer.yml
  65. 12 0
      playbooks/azure/openshift-cluster/deprovision.yml
  66. 46 0
      playbooks/azure/openshift-cluster/files/get-node-logs
  67. 13 0
      playbooks/azure/openshift-cluster/group_vars/all/image_publish.yml
  68. 62 0
      playbooks/azure/openshift-cluster/group_vars/all/yum_repos.yml
  69. 131 0
      playbooks/azure/openshift-cluster/launch.yml
  70. 59 0
      playbooks/azure/openshift-cluster/provisioning_vars.yml.example
  71. 1 0
      playbooks/azure/openshift-cluster/roles
  72. 9 0
      playbooks/azure/openshift-cluster/tag_image_as_valid.yml
  73. 48 0
      playbooks/azure/openshift-cluster/tasks/create_blob_from_vm.yml
  74. 66 0
      playbooks/azure/openshift-cluster/tasks/create_image_from_vm.yml
  75. 42 0
      playbooks/azure/openshift-cluster/tasks/provision_instance.yml
  76. 15 0
      playbooks/azure/openshift-cluster/tasks/remove_yum.yml
  77. 19 0
      playbooks/azure/openshift-cluster/tasks/yum_certs.yml
  78. 61 0
      playbooks/azure/openshift-cluster/templates/offer.yml.j2
  79. 1 3
      playbooks/byo/openshift-cluster/upgrades/README.md
  80. 0 0
      playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_scale_groups.yml
  81. 20 0
      playbooks/byo/openshift-cluster/upgrades/v3_11/README.md
  82. 5 0
      playbooks/byo/openshift-cluster/upgrades/v3_11/upgrade.yml
  83. 16 0
      playbooks/byo/openshift-cluster/upgrades/v3_11/upgrade_control_plane.yml
  84. 7 0
      playbooks/byo/openshift-cluster/upgrades/v3_11/upgrade_nodes.yml
  85. 7 0
      playbooks/byo/openshift-cluster/upgrades/v3_11/upgrade_scale_groups.yml
  86. 0 20
      playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
  87. 0 5
      playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
  88. 0 14
      playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
  89. 0 7
      playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
  90. 0 20
      playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
  91. 0 5
      playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
  92. 0 14
      playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
  93. 0 7
      playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
  94. 0 20
      playbooks/byo/openshift-cluster/upgrades/v3_9/README.md
  95. 0 5
      playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
  96. 0 16
      playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
  97. 0 7
      playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
  98. 10 0
      playbooks/cluster-operator/OWNERS
  99. 1 2
      playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
  100. 0 0
      playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml

+ 2 - 3
.github/ISSUE_TEMPLATE.md

@@ -17,10 +17,9 @@ If you're operating from a **git clone**:
 
 * The output of `git describe`
 
-If you're running from playbooks installed via RPM or
-`atomic-openshift-utils`
+If you're running from playbooks installed via RPM
 
-* The output of `rpm -q atomic-openshift-utils openshift-ansible`
+* The output of `rpm -q openshift-ansible`
 
 Place the output between the code block below:
 

+ 46 - 0
.papr-master-ha.inventory

@@ -0,0 +1,46 @@
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+ansible_ssh_user=root
+ansible_python_interpreter=/usr/bin/python3
+openshift_deployment_type=origin
+openshift_release="{{ lookup('env', 'target_branch') }}"
+openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_master1_IP') }}.xip.io"
+openshift_check_min_host_disk_gb=1.5
+openshift_check_min_host_memory_gb=1.9
+openshift_portal_net=172.30.0.0/16
+openshift_enable_service_catalog=false
+debug_level=4
+openshift_docker_options="--log-driver=journald"
+
+my_node_group1_labels=['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true', 'node-role.kubernetes.io/compute=true']
+my_node_group1={'name': 'node-config-all-in-one', 'labels': {{ my_node_group1_labels }} }
+
+openshift_node_groups=[{{ my_node_group1 }}]
+
+openshift_node_group_name="node-config-all-in-one"
+
+[all:vars]
+# bootstrap configs
+openshift_master_bootstrap_auto_approve=true
+openshift_master_bootstrap_auto_approver_node_selector={"node-role.kubernetes.io/master":"true"}
+osm_controller_args={"experimental-cluster-signing-duration": ["20m"]}
+osm_default_node_selector="node-role.kubernetes.io/compute=true"
+
+[masters]
+ocp-master1
+ocp-master2
+ocp-master3
+
+[etcd]
+ocp-master1
+ocp-master2
+ocp-master3
+
+[nodes]
+ocp-master1 openshift_schedulable=true
+ocp-master2
+ocp-master3

+ 33 - 0
.papr.all-in-one.inventory

@@ -0,0 +1,33 @@
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+ansible_ssh_user=root
+ansible_python_interpreter=/usr/bin/python3
+openshift_deployment_type=origin
+openshift_release="{{ lookup('env', 'target_branch') }}"
+openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_master_IP') }}.xip.io"
+openshift_check_min_host_disk_gb=1.5
+openshift_check_min_host_memory_gb=1.9
+openshift_portal_net=172.30.0.0/16
+openshift_enable_service_catalog=false
+debug_level=4
+openshift_docker_options="--log-driver=journald"
+
+[all:vars]
+# bootstrap configs
+openshift_master_bootstrap_auto_approve=true
+openshift_master_bootstrap_auto_approver_node_selector={"node-role.kubernetes.io/master":"true"}
+osm_controller_args={"experimental-cluster-signing-duration": ["20m"]}
+osm_default_node_selector="node-role.kubernetes.io/compute=true"
+
+[masters]
+ocp-master
+
+[etcd]
+ocp-master
+
+[nodes]
+ocp-master openshift_schedulable=true ansible_host="{{ lookup('env', 'RHCI_ocp_master_IP') }}" openshift_node_group_name="node-config-all-in-one"

+ 7 - 10
.papr.inventory

@@ -7,22 +7,19 @@ etcd
 ansible_ssh_user=root
 ansible_python_interpreter=/usr/bin/python3
 openshift_deployment_type=origin
+openshift_release="{{ lookup('env', 'target_branch') }}"
 openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_node1_IP') }}.xip.io"
 openshift_check_min_host_disk_gb=1.5
 openshift_check_min_host_memory_gb=1.9
-osm_cluster_network_cidr=10.128.0.0/14
 openshift_portal_net=172.30.0.0/16
-osm_host_subnet_length=9
+debug_level=4
+openshift_docker_options="--log-driver=journald"
 
 [all:vars]
 # bootstrap configs
-openshift_node_groups=[{"name":"node-config-master","labels":["node-role.kubernetes.io/master=true","node-role.kubernetes.io/infra=true"]},{"name":"node-config-node","labels":["node-role.kubernetes.io/compute=true"]}]
-openshift_master_bootstrap_enabled=true
 openshift_master_bootstrap_auto_approve=true
-openshift_master_bootstrap_auto_approver_node_selector={"region":"infra"}
+openshift_master_bootstrap_auto_approver_node_selector={"node-role.kubernetes.io/master":"true"}
 osm_controller_args={"experimental-cluster-signing-duration": ["20m"]}
-openshift_node_bootstrap=true
-openshift_hosted_infra_selector="node-role.kubernetes.io/infra=true"
 osm_default_node_selector="node-role.kubernetes.io/compute=true"
 
 [masters]
@@ -32,6 +29,6 @@ ocp-master
 ocp-master
 
 [nodes]
-ocp-master openshift_schedulable=true
-ocp-node1
-ocp-node2
+ocp-master openshift_schedulable=true openshift_node_group_name="node-config-master-infra"
+ocp-node1 openshift_node_group_name="node-config-compute"
+ocp-node2 openshift_node_group_name="node-config-compute"

+ 58 - 7
.papr.sh

@@ -10,32 +10,83 @@ if [ -n "${PAPR_BRANCH:-}" ]; then
 else
   target_branch=$PAPR_PULL_TARGET_BRANCH
 fi
+target_branch_in=${target_branch}
 if [[ "${target_branch}" =~ ^release- ]]; then
   target_branch="${target_branch/release-/}"
 else
   dnf install -y sed
   target_branch="$( git describe | sed 's/^openshift-ansible-\([0-9]*\.[0-9]*\)\.[0-9]*-.*/\1/' )"
 fi
+export target_branch
+
+# Need to define some git variables for rebase.
+git config --global user.email "ci@openshift.org"
+git config --global user.name "OpenShift Atomic CI"
+
+# Rebase existing branch on the latest code locally, as PAPR running doesn't do merges
+git fetch origin ${target_branch_in} && git rebase origin/${target_branch_in}
+
+PAPR_INVENTORY=${PAPR_INVENTORY:-.papr.inventory}
+PAPR_RUN_UPDATE=${PAPR_RUN_UPDATE:-0}
+PAPR_UPGRADE_FROM=${PAPR_UPGRADE_FROM:-0}
+PAPR_EXTRAVARS=""
+
+# Replace current branch with PAPR_UPGRADE_FROM
+if [[ "${PAPR_UPGRADE_FROM}" != "0" ]]; then
+  git branch new-code
+  git checkout release-${PAPR_UPGRADE_FROM}
+  git clean -fdx
+  PAPR_EXTRAVARS="-e openshift_release=${PAPR_UPGRADE_FROM}"
+fi
 
 pip install -r requirements.txt
 
+# Human-readable output
+export ANSIBLE_STDOUT_CALLBACK=debug
+
 # ping the nodes to check they're responding and register their ostree versions
-ansible -vvv -i .papr.inventory nodes -a 'rpm-ostree status'
+ansible -vv -i $PAPR_INVENTORY nodes -a 'rpm-ostree status'
+
+# Make sure hostname -f returns correct node name
+ansible -vv -i $PAPR_INVENTORY nodes -m setup
+ansible -vv -i $PAPR_INVENTORY nodes -a "hostnamectl set-hostname {{ ansible_default_ipv4.address }}"
+ansible -vv -i $PAPR_INVENTORY nodes -m setup -a "gather_subset=min"
 
 upload_journals() {
   mkdir journals
-  for node in master node1 node2; do
-    ssh ocp-$node 'journalctl --no-pager || true' > journals/ocp-$node.log
-  done
+  ansible -vvv -i $PAPR_INVENTORY all \
+    -m shell -a 'journalctl --no-pager > /tmp/journal'
+  ansible -vvv -i $PAPR_INVENTORY all \
+    -m fetch -a "src=/tmp/journal dest=journals/{{ inventory_hostname }}.log flat=yes"
+
+  # Split large files into parts, extracting a basename and preserving extension
+  find . -iname "*.log" -execdir sh -c 'split -b 4m --numeric-suffixes --additional-suffix=.log {} $(basename {} .log)_' \; -execdir rm -rf {} \;
 }
 
 trap upload_journals ERR
 
-# make all nodes ready for bootstrapping
-ansible-playbook -vvv -i .papr.inventory playbooks/openshift-node/private/image_prep.yml
+# run the prerequisites play
+ansible-playbook -vvv -i $PAPR_INVENTORY $PAPR_EXTRAVARS playbooks/prerequisites.yml
 
 # run the actual installer
-ansible-playbook -vvv -i .papr.inventory playbooks/deploy_cluster.yml -e "openshift_release=${target_branch}"
+ansible-playbook -vvv -i $PAPR_INVENTORY $PAPR_EXTRAVARS playbooks/deploy_cluster.yml
+
+# Restore the branch if needed
+if [[ "${PAPR_UPGRADE_FROM}" != "0" ]]; then
+  git checkout new-code
+  git clean -fdx
+  pip install -r requirements.txt
+fi
+
+# Run upgrade playbook
+if [[ "${PAPR_RUN_UPDATE}" != "0" ]]; then
+  update_version="$(echo $target_branch | sed 's/\./_/')"
+  # Create basic node-group configmaps for upgrade
+  ansible-playbook -vvv -i $PAPR_INVENTORY $PAPR_EXTRAVARS playbooks/openshift-master/openshift_node_group.yml
+  ansible-playbook -vvv -i $PAPR_INVENTORY playbooks/byo/openshift-cluster/upgrades/v${update_version}/upgrade.yml | tee update.log
+fi
+
+upload_journals
 
 ### DISABLING TESTS FOR NOW, SEE:
 ### https://github.com/openshift/openshift-ansible/pull/6132

+ 56 - 0
.papr.yml

@@ -32,6 +32,7 @@ packages:
   - libffi-devel
   - openssl-devel
   - redhat-rpm-config
+  - findutils
 
 context: 'fedora/27/atomic'
 
@@ -40,3 +41,58 @@ tests:
 
 artifacts:
   - journals/
+
+pulls: false
+required: false
+
+---
+inherit: true
+context: 'fedora/27/atomic/upgrade_minor'
+
+cluster:
+  hosts:
+    - name: ocp-master
+      distro: fedora/27/atomic
+      specs:
+        ram: 4096
+  container:
+    image: registry.fedoraproject.org/fedora:27
+env:
+  PAPR_INVENTORY: .papr.all-in-one.inventory
+  PAPR_RUN_UPDATE: "yes"
+
+---
+inherit: true
+context: 'fedora/27/atomic/upgrade_major'
+
+cluster:
+  hosts:
+    - name: ocp-master
+      distro: fedora/27/atomic
+      specs:
+        ram: 4096
+  container:
+    image: registry.fedoraproject.org/fedora:27
+env:
+  PAPR_INVENTORY: .papr.all-in-one.inventory
+  PAPR_UPGRADE_FROM: "3.10"
+  PAPR_RUN_UPDATE: "yes"
+---
+inherit: true
+context: 'fedora/27/atomic/master-ha'
+
+cluster:
+  hosts:
+    - name: ocp-master1
+      distro: fedora/27/atomic
+      specs:
+        ram: 4096
+    - name: ocp-master2
+      distro: fedora/27/atomic
+    - name: ocp-master3
+      distro: fedora/27/atomic
+  container:
+    image: registry.fedoraproject.org/fedora:27
+
+env:
+  PAPR_INVENTORY: .papr-master-ha.inventory

+ 1 - 0
.release

@@ -0,0 +1 @@
+3.11

+ 1 - 1
.tito/packages/openshift-ansible

@@ -1 +1 @@
-3.10.0-0.13.0 ./
+3.11.0-0.10.0 ./

+ 5 - 0
.tito/releasers.conf

@@ -57,6 +57,11 @@ releaser = tito.release.DistGitReleaser
 branches = rhaos-3.10-rhel-7
 srpm_disttag = .el7aos
 
+[aos-3.11]
+releaser = tito.release.DistGitReleaser
+branches = rhaos-3.11-rhel-7
+srpm_disttag = .el7aos
+
 [copr-openshift-ansible]
 releaser = tito.release.CoprReleaser
 project_name = @OpenShiftOnlineOps/openshift-ansible

+ 14 - 0
.travis.yml

@@ -19,5 +19,19 @@ install:
 script:
   - tox
 
+after_failure:
+  - echo "Here's a list of installed Python packages:"
+  - pip list --format=columns
+  - echo Dumping logs, because tests failed to succeed
+  - |
+      for log in `ls .tox/*/log/*.log`
+      do
+        echo Outputting $log
+        cat $log
+      done
+  - pip_debug_log=/home/travis/.cache/pip/log/debug.log
+  - echo Outputting pip debug log from $pip_debug_log
+  - cat $pip_debug_log
+
 after_success:
   - coveralls

+ 0 - 1
DEPLOYMENT_TYPES.md

@@ -13,5 +13,4 @@ The table below outlines the defaults per `openshift_deployment_type`:
 | **openshift_service_type** (also used for package names)        | origin                                   | atomic-openshift                       |
 | **openshift.common.config_base**                                | /etc/origin                              | /etc/origin                            |
 | **openshift_data_dir**                                          | /var/lib/origin                          | /var/lib/origin                        |
-| **openshift.master.registry_url oreg_url_node**                 | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} |
 | **Image Streams**                                               | centos                                   | rhel                                   |

+ 20 - 0
HOOKS.md

@@ -30,6 +30,10 @@ a set of tasks. Best practice suggests using absolute paths to the hook file to
 openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
 openshift_master_upgrade_hook=/usr/share/custom/master.yml
 openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
+
+openshift_node_upgrade_pre_hook=/usr/share/custom/pre_node.yml
+openshift_node_upgrade_hook=/usr/share/custom/node.yml
+openshift_node_upgrade_post_hook=/usr/share/custom/post_node.yml
 # <snip>
 ```
 
@@ -68,3 +72,19 @@ The file may **not** be a playbook.
- Runs **after** each master is upgraded and has had its service/system restarted.
 - This hook runs against **each master** in serial.
 - If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation).
+
+### openshift_node_upgrade_pre_hook
+- Runs **before** each node is upgraded.
+- This hook runs against **each node** in serial.
+- If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation).
+
+### openshift_node_upgrade_hook
+- Runs **after** each node is upgraded but **before** it's marked schedulable again.
+- This hook runs against **each node** in serial.
+- If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation).
+
+### openshift_node_upgrade_post_hook
+- Runs **after** each node is upgraded; it's the last node upgrade action.
+- This hook runs against **each node** in serial.
+- If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation).
+

+ 12 - 0
OWNERS

@@ -0,0 +1,12 @@
+# approval == this is a good idea /approve
+approvers:
+  - michaelgugino
+  - mtnbikenc
+  - sdodson
+  - vrutkovs
+# review == this code is good /lgtm
+reviewers:
+  - michaelgugino
+  - mtnbikenc
+  - sdodson
+  - vrutkovs

+ 58 - 2
README.md

@@ -61,7 +61,7 @@ Install base dependencies:
 
 Requirements:
 
-- Ansible >= 2.4.3.0
+- Ansible >= 2.6.0
 - Jinja >= 2.7
 - pyOpenSSL
 - python-lxml
@@ -94,11 +94,67 @@ cd openshift-ansible
 sudo ansible-playbook -i inventory/hosts.localhost playbooks/prerequisites.yml
 sudo ansible-playbook -i inventory/hosts.localhost playbooks/deploy_cluster.yml
 ```
+## Node Group Definition and Mapping
+In 3.10 and newer all members of the [nodes] inventory group must be assigned an
+`openshift_node_group_name`. This value is used to select the configmap that
+configures each node. By default there are three configmaps created; one for
+each node group defined in `openshift_node_groups` and they're named
+`node-config-master` `node-config-infra` `node-config-compute`. It's important
+to note that the configmap is also the authoritative definition of node labels,
+the old `openshift_node_labels` value is effectively ignored.
+
+There are also two configmaps that label nodes into multiple roles, these are
+not recommended for production clusters, however they're named
+`node-config-all-in-one` and `node-config-master-infra` if you'd like to use
+them to deploy non production clusters.
+
+The default set of node groups is defined in
+[roles/openshift_facts/defaults/main.yml] like so
+
+```
+openshift_node_groups:
+  - name: node-config-master
+    labels:
+      - 'node-role.kubernetes.io/master=true'
+    edits: []
+  - name: node-config-infra
+    labels:
+      - 'node-role.kubernetes.io/infra=true'
+    edits: []
+  - name: node-config-compute
+    labels:
+      - 'node-role.kubernetes.io/compute=true'
+    edits: []
+  - name: node-config-master-infra
+    labels:
+      - 'node-role.kubernetes.io/infra=true,node-role.kubernetes.io/master=true'
+    edits: []
+  - name: node-config-all-in-one
+    labels:
+      - 'node-role.kubernetes.io/infra=true,node-role.kubernetes.io/master=true,node-role.kubernetes.io/compute=true'
+    edits: []
+```
+
+When configuring this in the INI based inventory this must be translated into a
+Python dictionary. Here's an example of a group named `node-config-all-in-one`
+which is suitable for an All-In-One installation with
+kubeletArguments.pods-per-core set to 20
+
+```
+openshift_node_groups=[{'name': 'node-config-all-in-one', 'labels': ['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true', 'node-role.kubernetes.io/compute=true'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}]
+```
+
+For upgrades, the upgrade process will block until you have the required
+configmaps in the openshift-node namespace. Please define
+`openshift_node_groups` as explained above or accept the defaults and run the
+playbooks/openshift-master/openshift_node_group.yml playbook to have them
+created for you automatically.
+
 
 ## Complete Production Installation Documentation:
 
 - [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
-- [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
+- [OpenShift Origin](https://docs.openshift.org/latest/install/index.html)
 
 ## Containerized OpenShift Ansible
 

+ 9 - 9
README_CONTAINERIZED_INSTALLATION.md

@@ -44,17 +44,17 @@ beginning of the installation process ensuring that these settings are applied
 before attempting to pull any of the following images.
 
     Origin
-        openshift/origin
-        openshift/node (node + openshift-sdn + openvswitch rpm for client tools)
-        openshift/openvswitch (centos7 + openvswitch rpm, runs ovsdb ovsctl processes)
-        registry.access.redhat.com/rhel7/etcd
+        docker.io/openshift/origin
+        docker.io/openshift/node (node + openshift-sdn + openvswitch rpm for client tools)
+        docker.io/openshift/openvswitch (centos7 + openvswitch rpm, runs ovsdb ovsctl processes)
+        registry.redhat.io/rhel7/etcd
     OpenShift Enterprise
-        openshift3/ose
-        openshift3/node
-        openshift3/openvswitch
-        registry.access.redhat.com/rhel7/etcd
+        registry.access.redhat.com/openshift3/ose
+        registry.access.redhat.com/openshift3/node
+        registry.access.redhat.com/openshift3/openvswitch
+        registry.redhat.io/rhel7/etcd
 
-  * note openshift3/* images come from registry.access.redhat.com and
+  * note openshift3/* images come from registry.redhat.io and
 rely on the --additional-repository flag being set appropriately.
 
 ### Starting and Stopping Containers

+ 2 - 2
README_CONTAINER_IMAGE.md

@@ -30,7 +30,7 @@ Here is an example of how to run a containerized `openshift-ansible` playbook th
            -e INVENTORY_FILE=/tmp/inventory \
            -e PLAYBOOK_FILE=playbooks/openshift-checks/certificate_expiry/default.yaml \
            -e OPTS="-v" -t \
-           openshift/origin-ansible
+           docker.io/openshift/origin-ansible
 
 You might want to adjust some of the options in the example to match your environment and/or preferences. For example: you might want to create a separate directory on the host where you'll copy the ssh key and inventory files prior to invocation to avoid unwanted SELinux re-labeling of the original files or paths (see below).
 
@@ -61,7 +61,7 @@ If the inventory file needs additional files then it can use the path `/var/lib/
 Run the ansible system container:
 
 ```sh
-atomic install --system --set INVENTORY_FILE=$(pwd)/inventory.origin openshift/origin-ansible
+atomic install --system --set INVENTORY_FILE=$(pwd)/inventory.origin docker.io/openshift/origin-ansible
 systemctl start origin-ansible
 ```
 

+ 1 - 1
ansible.cfg

@@ -7,7 +7,7 @@
 
 [defaults]
 # Set the log_path
-#log_path = /tmp/ansible.log
+log_path = ~/openshift-ansible.log
 
 # Additional default options for OpenShift Ansible
 forks = 20

+ 1 - 1
docs/best_practices_guide.adoc

@@ -406,7 +406,7 @@ For consistency, role names SHOULD follow the above naming pattern. It is import
 Many times the `technology` portion of the pattern will line up with a package name. It is advised that whenever possible, the package name should be used.
 
 .Examples:
-* The role to configure a master is called `openshift_master`
+* The role to configure a master is called `openshift_control_plane`
 * The role to configure OpenShift specific yum repositories is called `openshift_repos`
 
 === Filters

+ 152 - 0
docs/openshift_components.md

@@ -0,0 +1,152 @@
+# OpenShift-Ansible Components
+
+>**TL;DR: Look at playbooks/openshift-web-console as an example**
+
+## General Guidelines
+
+Components in OpenShift-Ansible consist of two main parts:
+* Entry point playbook(s)
+* Ansible role
+* OWNERS files in both the playbooks and roles associated with the component
+
+When writing playbooks and roles, follow these basic guidelines to ensure
+success and maintainability. 
+
+### Idempotency
+
+Definition:
+
+>_an idempotent operation is one that has no additional effect if it is called
+more than once with the same input parameters_
+
+Ansible playbooks and roles should be written such that when the playbook is run
+again with the same configuration, no tasks should report `changed` as well as
+no material changes should be made to hosts in the inventory.  Playbooks should
+be re-runnable, but also be idempotent.
+
+### Other advice for success
+
+* Try not to leave artifacts like files or directories
+* Avoid using `failed_when:` where ever possible
+* Always `name:` your tasks
+* Document complex logic or code in tasks
+* Set role defaults in `defaults/main.yml`
+* Avoid the use of `set_fact:`
+
+## Building Component Playbooks
+
+Component playbooks are divided between the root of the component directory and
+the `private` directory.  This allows other parts of openshift-ansible to import
+component playbooks without also running the common initialization playbooks
+unnecessarily.
+
+Entry point playbooks are located in the `playbooks` directory and follow the
+following structure:
+
+```
+playbooks/openshift-component_name
+├── config.yml                          Entry point playbook
+├── private
+│   ├── config.yml                      Included by the Cluster Installer
+│   └── roles -> ../../roles            Don't forget to create this symlink
+├── OWNERS                              Assign 2-3 approvers and reviewers
+└── README.md                           Tell us what this component does
+```
+
+### Entry point config playbook
+
+The primary component entry point playbook will at a minimum run the common
+initialization playbooks and then import the private playbook.
+
+```yaml
+# playbooks/openshift-component_name/config.yml
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/config.yml
+
+```
+
+### Private config playbook
+
+The private component playbook will run the component role against the intended
+host groups and provide any required variables.  This playbook is also called
+during cluster installs and upgrades.  Think of this as the shareable portion of
+the component playbooks.
+
+```yaml
+# playbooks/openshift-component_name/private/config.yml
+---
+
+- name: OpenShift Component_Name Installation
+  hosts: oo_first_master
+  tasks:
+  - import_role:
+      name: openshift_component_name
+```
+
+NOTE: The private playbook may also include wrapper plays for the Installer
+Checkpoint plugin which will be discussed later.
+
+## Building Component Roles
+
+Component roles contain all of the necessary files and logic to install and
+configure the component.  The install portion of the role should also support
+performing upgrades on the component.
+
+Ansible roles are located in the `roles` directory and follow the following
+structure:
+
+```
+roles/openshift_component_name
+├── defaults
+│   └── main.yml                        Defaults for variables used in the role
+│                                           which can be overridden by the user
+├── files
+│   ├── component-config.yml
+│   ├── component-rbac-template.yml
+│   └── component-template.yml
+├── handlers
+│   └── main.yml
+├── meta
+│   └── main.yml
+├── OWNERS                              Assign 2-3 approvers and reviewers
+├── README.md
+├── tasks
+│   └── main.yml                        Default playbook used when calling the role
+├── templates
+└── vars
+    └── main.yml                        Internal roles variables
+```
+### Component Installation
+
+Where possible, Ansible modules should be used to perform idempotent operations
+with the OpenShift API.  Avoid using the `command` or `shell` modules with the
+`oc` cli unless the required operation is not available through either the
+`lib_openshift` modules or Ansible core modules.
+
+The following is a basic flow of Ansible tasks for installation. 
+
+- Create the project (oc_project)
+- Create a temp directory for processing files
+- Copy the client config to temp
+- Copy templates to temp
+- Read existing config map
+- Copy existing config map to temp
+- Generate/update config map
+- Reconcile component RBAC (oc_process)
+- Apply component template (oc_process)
+- Poll healthz and wait for it to come up
+- Log status of deployment
+- Clean up temp
+
+### Component Removal
+
+- Remove the project (oc_project)
+
+## Enabling the Installer Checkpoint callback
+
+- Add the wrapper plays to the entry point playbook
+- Update the installer_checkpoint callback plugin
+
+Details can be found in the installer_checkpoint role.

+ 1 - 1
docs/proposals/role_decomposition.md

@@ -330,7 +330,7 @@ in meta/main.yml without:
 ## Avoiding overly verbose roles
 When we are splitting our roles up into smaller components we want to ensure we
 avoid creating roles that are, for a lack of a better term, overly verbose. What
-do we mean by that? If we have `openshift_master` as an example, and we were to
+do we mean by that? If we have `openshift_control_plane` as an example, and we were to
 split it up, we would have a component for `etcd`, `docker`, and possibly for
 its rpms/configs. We would want to avoid creating a role that would just create
 certificates as those would make sense to be contained with the rpms and configs.

+ 15 - 54
docs/pull_requests.md

@@ -7,7 +7,7 @@ process that is similar to the process observed in other repositories such as
 [`origin`](https://github.com/openshift/origin).
 
 Whenever a
-[Pull Request is opened](../CONTRIBUTING.md#submitting-contributions), some
+[Pull Request is opened](../CONTRIBUTING.md#submitting-contributions), all
 automated test jobs must be successfully run before the PR can be merged.
 
 Some of these jobs are automatically triggered, e.g., Travis, PAPR, and
@@ -16,32 +16,11 @@ Coveralls. Other jobs need to be manually triggered by a member of the
 
 ## Triggering tests
 
-We have two different Jenkins infrastructures, and, while that holds true, there
-are two commands that trigger a different set of test jobs. We are working on
-simplifying the workflow towards a single infrastructure in the future.
-
-- **Test jobs on the older infrastructure**
-
-  Members of the [OpenShift organization](https://github.com/orgs/openshift/people)
-  can trigger the set of test jobs in the older infrastructure by writing a
-  comment with the exact text `aos-ci-test` and nothing else.
-
-  The Jenkins host is not publicly accessible. Test results are posted to S3
-  buckets when complete, and links are available both at the bottom of the Pull
-  Request page and as comments posted by
-  [@openshift-bot](https://github.com/openshift-bot).
-
-- **Test jobs on the newer infrastructure**
-
-  Members of the
-  [Team OpenShift Ansible Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors)
-  can trigger the set of test jobs in the newer infrastructure by writing a
-  comment containing `[test]` anywhere in the comment body.
-
-  The [Jenkins host](https://ci.openshift.redhat.com/jenkins/job/test_pull_request_openshift_ansible/)
-  is publicly accessible. Like for the older infrastructure, the result of each
-  job is also posted to the Pull Request as comments and summarized at the
-  bottom of the Pull Request page.
+Members of the [Team OpenShift Ansible
+Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors)
+can trigger test jobs by adding a comment containing
+`/ok-to-test`. For a full list of bot commands refer to the [Bot Command
+Help](https://deck-ci.svc.ci.openshift.org/command-help?repo=openshift%2Fopenshift-ansible).
 
 ### Fedora tests
 
@@ -54,34 +33,16 @@ To re-run tests, write a comment containing only `bot, retest this please`.
 
 ## Triggering merge
 
-After a PR is properly reviewed and a set of
-[required jobs](https://github.com/openshift/aos-cd-jobs/blob/master/sjb/test_status_config.yml)
-reported successfully, it can be tagged for merge by a member of the
-[Team OpenShift Ansible Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors)
-by writing a comment containing `[merge]` anywhere in the comment body.
-
-Tagging a Pull Request for merge puts it in an automated merge queue. The
-[@openshift-bot](https://github.com/openshift-bot) monitors the queue and merges
-PRs that pass all of the required tests.
-
-### Manual merges
-
-The normal process described above should be followed: `aos-ci-test` and
-`[test]` / `[merge]`.
-
-In exceptional cases, such as when known problems with the merge queue prevent
-PRs from being merged, a PR may be manually merged if _all_ of these conditions
-are true:
-
-- [ ] Travis job must have passed (as enforced by GitHub)
-- [ ] Must have passed `aos-ci-test` (as enforced by GitHub)
-- [ ] Must have a positive review (as enforced by GitHub)
-- [ ] Must have failed the `[merge]` queue with a reported flake at least twice
-- [ ] Must have [issues labeled kind/test-flake](https://github.com/openshift/origin/issues?q=is%3Aopen+is%3Aissue+label%3Akind%2Ftest-flake) in [Origin](https://github.com/openshift/origin) linked in comments for the failures
-- [ ] Content must not have changed since all of the above conditions have been met (no rebases, no new commits)
+After a PR is properly reviewed and all tests are passing, it can be
+tagged for merge by a member of the [Team OpenShift Ansible
+Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors)
+by writing a comment containing `/lgtm` (looks good to me) anywhere in
+the comment body.
 
-This exception is temporary and should be completely removed in the future once
-the merge queue has become more stable.
+Tagging a Pull Request with `/lgtm` puts it in an automated merge
+queue. The
+[@openshift-ci-robot](https://github.com/openshift-ci-robot) monitors
+the queue and merges PRs that pass all of the required tests.
 
 Only members of the
 [Team OpenShift Ansible Committers](https://github.com/orgs/openshift/teams/team-openshift-ansible-committers)

+ 19 - 10
docs/repo_structure.md

@@ -6,23 +6,30 @@
 .
 ├── inventory           Contains dynamic inventory scripts, and examples of
 │                       Ansible inventories.
-├── library             Contains Python modules used by the playbooks.
 ├── playbooks           Contains Ansible playbooks targeting multiple use cases.
 └── roles               Contains Ansible roles, units of shared behavior among
                         playbooks.
 ```
 
-#### Ansible plugins
+#### Ansible shared libraries and plugins
 
-These are plugins used in playbooks and roles:
+Shared libraries and plugins are located in the `lib_utils` role.
 
-```
-.
-├── ansible-profile
-├── callback_plugins
-├── filter_plugins
-└── lookup_plugins
-```
+#### Ansible playbooks
+
+The `playbooks` directory is organized such that entry point playbooks are
+located in either component subdirectories or cloud provisioning subdirectories.
+
+_Cloud Provisioning_
+- aws
+- gcp
+- openstack
+
+_OpenShift Components_
+- openshift-etcd
+- openshift-master
+- openshift-node
+- openshift-<component_name>
 
 ### Scripts
 
@@ -58,4 +65,6 @@ environment and test scripts defined in a YAML file.
 ├── .papr.yml
 ├── .papr.sh
 └── .papr.inventory
+├── .papr.all-in-one.inventory
+└── .papr-master-ha.inventory
 ```

+ 16 - 13
examples/README.md

@@ -18,13 +18,15 @@ You can find more details about the certificate expiration check roles and examp
 
 The example `Job` in [certificate-check-upload.yaml](certificate-check-upload.yaml) executes a [Job](https://docs.openshift.org/latest/dev_guide/jobs.html) that checks the expiration dates of the internal certificates of the cluster and uploads HTML and JSON reports to `/etc/origin/certificate_expiration_report` in the masters.
 
-This example uses the [`easy-mode-upload.yaml`](../playbooks/certificate_expiry/easy-mode-upload.yaml) example playbook, which generates reports and uploads them to the masters. The playbook can be customized via environment variables to control the length of the warning period (`CERT_EXPIRY_WARN_DAYS`) and the location in the masters where the reports are uploaded (`COPY_TO_PATH`).
+This example uses the [`easy-mode-upload.yaml`](../playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml) example playbook, which generates reports and uploads them to the masters. The playbook can be customized via environment variables to control the length of the warning period (`CERT_EXPIRY_WARN_DAYS`) and the location in the masters where the reports are uploaded (`COPY_TO_PATH`).
 
 The job expects the inventory to be provided via the *hosts* key of a [ConfigMap](https://docs.openshift.org/latest/dev_guide/configmaps.html) named *inventory*, and the passwordless ssh key that allows connecting to the hosts to be available as *ssh-privatekey* from a [Secret](https://docs.openshift.org/latest/dev_guide/secrets.html) named *sshkey*, so these are created first:
 
     oc new-project certcheck
     oc create configmap inventory --from-file=hosts=/etc/ansible/hosts
-    oc secrets new-sshauth sshkey --ssh-privatekey=$HOME/.ssh/id_rsa
+    oc create secret generic sshkey \
+      --from-file=ssh-privatekey=$HOME/.ssh/id_rsa \
+      --type=kubernetes.io/ssh-auth
 
 Note that `inventory`, `hosts`, `sshkey` and `ssh-privatekey` are referenced by name from the provided example Job definition. If you use different names for the objects/attributes you will have to adjust the Job accordingly.
 
@@ -34,30 +36,30 @@ To create the Job:
 
 ### Scheduled job for certificate expiration report upload
 
-**Note**: This example uses the [ScheduledJob](https://docs.openshift.com/container-platform/3.4/dev_guide/scheduled_jobs.html) object, which has been renamed to [CronJob](https://docs.openshift.org/latest/dev_guide/cron_jobs.html) upstream and is still a Technology Preview subject to further change.
-
-The example `ScheduledJob` in [scheduled-certcheck-upload.yaml](scheduled-certcheck-upload.yaml) does the same as the `Job` example above, but it is scheduled to automatically run every first day of the month (see the `spec.schedule` value in the example).
+The example `CronJob` in [scheduled-certcheck-upload.yaml](scheduled-certcheck-upload.yaml) does the same as the `Job` example above, but it is scheduled to automatically run every first day of the month (see the `spec.schedule` value in the example).
 
 The job definition is the same and it expects the same configuration: we provide the inventory and ssh key via a ConfigMap and a Secret respectively:
 
     oc new-project certcheck
     oc create configmap inventory --from-file=hosts=/etc/ansible/hosts
-    oc secrets new-sshauth sshkey --ssh-privatekey=$HOME/.ssh/id_rsa
+    oc create secret generic sshkey \
+      --from-file=ssh-privatekey=$HOME/.ssh/id_rsa \
+      --type=kubernetes.io/ssh-auth
 
-And then we create the ScheduledJob:
+And then we create the CronJob:
 
     oc create -f examples/scheduled-certcheck-upload.yaml
 
-### Job and ScheduledJob to check certificates using volumes
+### Job and CronJob to check certificates using volumes
 
 There are two additional examples:
 
  - A `Job` [certificate-check-volume.yaml](certificate-check-volume.yaml)
- - A `ScheduledJob` [scheduled-certcheck-upload.yaml](scheduled-certcheck-upload.yaml)
+ - A `CronJob` [scheduled-certcheck-upload.yaml](scheduled-certcheck-upload.yaml)
 
 These perform the same work as the two examples above, but instead of uploading the generated reports to the masters they store them in a custom path within the container that is expected to be backed by a [PersistentVolumeClaim](https://docs.openshift.org/latest/dev_guide/persistent_volumes.html), so that the reports are actually written to storage external to the container.
 
-These examples assume that there is an existing `PersistentVolumeClaim` called `certcheck-reports` and they use the  [`html_and_json_timestamp.yaml`](../playbooks/certificate_expiry/html_and_json_timestamp.yaml) example playbook to write timestamped reports into it.
+These examples assume that there is an existing `PersistentVolumeClaim` called `certcheck-reports` and they use the  [`html_and_json_timestamp.yaml`](../playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml) example playbook to write timestamped reports into it.
 
 You can later access the reports from another pod that mounts the same volume, or externally via direct access to the backend storage behind the matching `PersistentVolume`.
 
@@ -65,7 +67,9 @@ To run these examples we prepare the inventory and ssh keys as in the other exam
 
     oc new-project certcheck
     oc create configmap inventory --from-file=hosts=/etc/ansible/hosts
-    oc secrets new-sshauth sshkey --ssh-privatekey=$HOME/.ssh/id_rsa
+    oc create secret generic sshkey \
+      --from-file=ssh-privatekey=$HOME/.ssh/id_rsa \
+      --type=kubernetes.io/ssh-auth
 
 Additionally we allocate a `PersistentVolumeClaim` to store the reports:
 
@@ -87,7 +91,6 @@ With that we can run the `Job` once:
 
     oc create -f examples/certificate-check-volume.yaml
 
-or schedule it to run periodically as a `ScheduledJob`:
+or schedule it to run periodically as a `CronJob`:
 
     oc create -f examples/scheduled-certcheck-volume.yaml
-

+ 2 - 2
examples/certificate-check-upload.yaml

@@ -28,10 +28,10 @@ spec:
     spec:
       containers:
       - name: openshift-ansible
-        image: openshift/origin-ansible
+        image: docker.io/openshift/origin-ansible
         env:
         - name: PLAYBOOK_FILE
-          value: playbooks/certificate_expiry/easy-mode-upload.yaml
+          value: playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml
         - name: INVENTORY_FILE
           value: /tmp/inventory/hosts       # from configmap vol below
         - name: ANSIBLE_PRIVATE_KEY_FILE    # from secret vol below

+ 2 - 2
examples/certificate-check-volume.yaml

@@ -30,10 +30,10 @@ spec:
     spec:
       containers:
       - name: openshift-ansible
-        image: openshift/origin-ansible
+        image: docker.io/openshift/origin-ansible
         env:
         - name: PLAYBOOK_FILE
-          value: playbooks/certificate_expiry/html_and_json_timestamp.yaml
+          value: playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
         - name: INVENTORY_FILE
           value: /tmp/inventory/hosts       # from configmap vol below
         - name: ANSIBLE_PRIVATE_KEY_FILE    # from secret vol below

+ 6 - 9
examples/scheduled-certcheck-upload.yaml

@@ -1,4 +1,4 @@
-# An example ScheduledJob to run a regular check of OpenShift's internal
+# An example CronJob to run a regular check of OpenShift's internal
 # certificate status.
 #
 # Each job will upload new reports to a directory in the master hosts
@@ -6,13 +6,10 @@
 # The Job specification is the same as 'certificate-check-upload.yaml'
 # and the expected pre-configuration is equivalent.
 # See that Job example and examples/README.md for more details.
-#
-# NOTE: ScheduledJob has been renamed to CronJob in upstream k8s recently. At
-# some point (OpenShift 3.6+) this will have to be renamed to "kind: CronJob"
-# and once the API stabilizes the apiVersion will have to be updated too.
+
 ---
-apiVersion: batch/v2alpha1
-kind: ScheduledJob
+apiVersion: batch/v1beta1
+kind: CronJob
 metadata:
   name: certificate-check
   labels:
@@ -28,10 +25,10 @@ spec:
         spec:
           containers:
           - name: openshift-ansible
-            image: openshift/origin-ansible
+            image: docker.io/openshift/origin-ansible
             env:
             - name: PLAYBOOK_FILE
-              value: playbooks/certificate_expiry/easy-mode-upload.yaml
+              value: playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml
             - name: INVENTORY_FILE
               value: /tmp/inventory/hosts       # from configmap vol below
             - name: ANSIBLE_PRIVATE_KEY_FILE    # from secret vol below

+ 6 - 9
examples/scheduled-certcheck-volume.yaml

@@ -1,4 +1,4 @@
-# An example ScheduledJob to run a regular check of OpenShift's internal
+# An example CronJob to run a regular check of OpenShift's internal
 # certificate status.
 #
 # Each job will add a new pair of reports to the configured Persistent Volume
@@ -6,13 +6,10 @@
 # The Job specification is the same as 'certificate-check-volume.yaml'
 # and the expected pre-configuration is equivalent.
 # See that Job example and examples/README.md for more details.
-#
-# NOTE: ScheduledJob has been renamed to CronJob in upstream k8s recently. At
-# some point (OpenShift 3.6+) this will have to be renamed to "kind: CronJob"
-# and once the API stabilizes the apiVersion will have to be updated too.
+
 ---
-apiVersion: batch/v2alpha1
-kind: ScheduledJob
+apiVersion: batch/v1beta1
+kind: CronJob
 metadata:
   name: certificate-check
   labels:
@@ -28,10 +25,10 @@ spec:
         spec:
           containers:
           - name: openshift-ansible
-            image: openshift/origin-ansible
+            image: docker.io/openshift/origin-ansible
             env:
             - name: PLAYBOOK_FILE
-              value: playbooks/certificate_expiry/html_and_json_timestamp.yaml
+              value: playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
             - name: INVENTORY_FILE
               value: /tmp/inventory/hosts       # from configmap vol below
             - name: ANSIBLE_PRIVATE_KEY_FILE    # from secret vol below

+ 3 - 3
hack/build-images.sh

@@ -36,7 +36,7 @@ done
 
 # allow ENV to take precedent over switches
 prefix="${PREFIX:-$prefix}"
-version="${OS_TAG:-$version}" 
+version="${OS_TAG:-$version}"
 
 if [ "$help" = true ]; then
   echo "Builds the docker images for openshift-ansible"
@@ -44,12 +44,12 @@ if [ "$help" = true ]; then
   echo "Options: "
   echo "  --prefix=PREFIX"
   echo "  The prefix to use for the image names."
-  echo "  default: openshift/origin-ansible"
+  echo "  default: docker.io/openshift/origin-ansible"
   echo
   echo "  --version=VERSION"
   echo "  The version used to tag the image (can be a comma-separated list)"
   echo "  default: latest"
-  echo 
+  echo
   echo "  --no-cache"
   echo "  If set will perform the build without a cache."
   echo

+ 1 - 1
hack/push-release.sh

@@ -16,7 +16,7 @@ set -o pipefail
 starttime=$(date +%s)
 
 # image name without repo or tag.
-image="${PREFIX:-openshift/origin-ansible}"
+image="${PREFIX:-docker.io/openshift/origin-ansible}"
 
 # existing local tag on the image we want to push
 source_tag="${OS_TAG:-latest}"

+ 5 - 3
images/installer/Dockerfile

@@ -8,15 +8,17 @@ USER root
 COPY images/installer/origin-extra-root /
 
 # install ansible and deps
-RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
+RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl python2-passlib httpd-tools openssh-clients origin-clients iproute patch" \
  && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
- && EPEL_PKGS="python2-boto python2-boto3 python2-crypto google-cloud-sdk-183.0.0 which python2-pip.noarch" \
+ && EPEL_PKGS="python2-boto python2-boto3 python2-crypto which python2-pip.noarch python-scandir python2-packaging azure-cli" \
  && EPEL_TESTING_PKGS="ansible" \
  && yum install -y epel-release \
  && yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
  && yum install -y --setopt=tsflags=nodocs --enablerepo=epel-testing $EPEL_TESTING_PKGS \
+ && if [ "$(uname -m)" == "x86_64" ]; then yum install -y https://sdodson.fedorapeople.org/google-cloud-sdk-183.0.0-3.el7.x86_64.rpm ; fi \
+ && yum install -y java-1.8.0-openjdk-headless \
  && rpm -V $INSTALL_PKGS $EPEL_PKGS $EPEL_TESTING_PKGS \
- && pip install apache-libcloud~=2.2.1 \
+ && pip install 'apache-libcloud~=2.2.1' 'SecretStorage<3' 'ansible[azure]' \
  && yum clean all
 
 LABEL name="openshift/origin-ansible" \

+ 6 - 4
images/installer/Dockerfile.rhel7

@@ -5,12 +5,14 @@ MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
 USER root
 
 # Playbooks, roles, and their dependencies are installed from packages.
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto python2-boto3 python2-crypto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
+RUN INSTALL_PKGS="openshift-ansible atomic-openshift-clients python-boto python2-boto3 python2-crypto openssl iproute httpd-tools" \
+ && x86_EXTRA_RPMS=$(if [ "$(uname -m)" == "x86_64" ]; then echo -n google-cloud-sdk ; fi) \
  && yum repolist > /dev/null \
  && yum-config-manager --enable rhel-7-server-ose-3.7-rpms \
  && yum-config-manager --enable rhel-7-server-rh-common-rpms \
- && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
- && rpm -q $INSTALL_PKGS \
+ && yum install -y java-1.8.0-openjdk-headless \
+ && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS $x86_EXTRA_RPMS \
+ && rpm -q $INSTALL_PKGS $x86_EXTRA_RPMS \
  && yum clean all
 
 LABEL name="openshift3/ose-ansible" \
@@ -30,7 +32,7 @@ LABEL name="openshift3/ose-ansible" \
 ENV USER_UID=1001 \
     HOME=/opt/app-root/src \
     WORK_DIR=/usr/share/ansible/openshift-ansible \
-    ANSIBLE_CONFIG=/usr/share/atomic-openshift-utils/ansible.cfg \
+    ANSIBLE_CONFIG=/usr/share/ansible/openshift-ansible/ansible.cfg \
     OPTS="-v"
 
 # Add image scripts and files for running as a system container

+ 2 - 2
images/installer/README_INVENTORY_GENERATOR.md

@@ -49,7 +49,7 @@ docker run -u `id -u` \
        -e PLAYBOOK_FILE=playbooks/openshift-checks/health.yml \
        -e GENERATE_INVENTORY=true \
        -e USER=`whoami` \
-       openshift/origin-ansible
+       docker.io/openshift/origin-ansible
 
 ```
 
@@ -66,7 +66,7 @@ and manually execute `/usr/local/bin/generate`:
 docker run -u `id -u` \
        -v ...
        ...
-       -it openshift/origin-ansible /bin/bash
+       -it docker.io/openshift/origin-ansible /bin/bash
 
 ---
 

+ 6 - 0
images/installer/origin-extra-root/etc/yum.repos.d/azure-cli.repo

@@ -0,0 +1,6 @@
+[azure-cli]
+name=Azure CLI
+baseurl=https://packages.microsoft.com/yumrepos/azure-cli
+enabled=1
+gpgcheck=1
+gpgkey=https://packages.microsoft.com/keys/microsoft.asc

+ 1 - 1
images/installer/root/exports/manifest.json

@@ -6,7 +6,7 @@
         "VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log",
         "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml",
 	"HOME_ROOT": "/root",
-	"ANSIBLE_CONFIG": "/usr/share/atomic-openshift-utils/ansible.cfg",
+	"ANSIBLE_CONFIG": "/usr/share/ansible/openshift-ansible/ansible.cfg",
         "INVENTORY_FILE": "/dev/null"
     }
 }

+ 6 - 46
images/installer/root/usr/local/bin/entrypoint-gcp

@@ -1,51 +1,11 @@
 #!/bin/bash
 #
-# This file sets up the user to run in the GCP environment.
-# It provides dynamic inventory that works well when run in
-# a container environment by setting up a default inventory.
-# It assumes the user has provided a GCP service account token
-# and ssh-privatekey file at "$(pwd)/inventory/dynamic/injected"
-# and automatically links any YAML files found into the group
-# vars directory, which allows the playbook to more easily be
-# run in containerized contexts.
-
-WORK=$(pwd)
-FILES="${WORK}/inventory/dynamic/injected"
-
-# Patch /etc/passwd file with the current user info.
-# The current user's entry must be correctly defined in this file in order for
-# the `ssh` command to work within the created container.
-
-if ! whoami &>/dev/null; then
-  echo "${USER:-default}:x:$(id -u):$(id -g):Default User:$HOME:/sbin/nologin" >> /etc/passwd
-fi
-
-# Provide a "files_dir" variable that points to inventory/dynamic/injected
-echo "files_dir: \"${FILES}\"" > "${WORK}/inventory/dynamic/gcp/group_vars/all/00_default_files_dir.yml"
-# Add any injected variable files into the group vars directory
-find "${FILES}" -name '*.yml' -or -name '*.yaml' -or -name vars | xargs -L1 -I {} ln -fs {} "${WORK}/inventory/dynamic/gcp/group_vars/all"
-# Avoid sudo when running locally - nothing in the image requires it.
-mkdir -p "${WORK}/inventory/dynamic/gcp/host_vars/localhost"
-echo "ansible_become: no" > "${WORK}/inventory/dynamic/gcp/host_vars/localhost/00_skip_root.yaml"
+# Temporary wrapper for entrypoint-gcp until the migration
+# to entrypoint-provider is complete.
+#
 
-if [[ -z "${ANSIBLE_CONFIG-}" ]]; then
-  export ANSIBLE_CONFIG="${WORK}/inventory/dynamic/gcp/ansible.cfg"
-fi
+set -euo pipefail
 
-# SSH requires the file to be owned by the current user, but Docker copies
-# files in as root. Put the file into the ssh dir with the right permissions
-if [[ -f "${FILES}/ssh-privatekey" ]]; then
-  keyfile="${HOME}/.ssh/google_compute_engine"
-  mkdir "${HOME}/.ssh"
-  rm -f "${keyfile}"
-  cat "${FILES}/ssh-privatekey" > "${keyfile}"
-  chmod 0600 "${keyfile}"
-  ssh-keygen -y -f "${keyfile}" >  "${keyfile}.pub"
-fi
-if [[ -f "${FILES}/gce.json" ]]; then
-  gcloud auth activate-service-account --key-file="${FILES}/gce.json"
-else
-  echo "No service account file found at ${FILES}/gce.json, bypassing login"
-fi
+export TYPE='gcp'
 
-exec "$@"
+exec /usr/local/bin/entrypoint-provider "$@"

+ 75 - 0
images/installer/root/usr/local/bin/entrypoint-provider

@@ -0,0 +1,75 @@
+#!/bin/bash
+#
+# This file sets up the user to run in a cloud environment.
+# It provides dynamic inventory that works well when run in
+# a container environment by setting up a default inventory.
+# It assumes the user has provided a service account token
+# and ssh-privatekey file at "$(pwd)/inventory/dynamic/injected"
+# and automatically links any YAML files found into the group
+# vars directory, which allows the playbook to more easily be
+# run in containerized contexts.
+#
+# Currently GCP and Azure are supported.
+
+set -euo pipefail
+
+WORK=$(pwd)
+FILES="${WORK}/inventory/dynamic/injected"
+
+# Patch /etc/passwd file with the current user info.
+# The current user's entry must be correctly defined in this file in order for
+# the `ssh` command to work within the created container.
+
+if ! whoami &>/dev/null; then
+  echo "${USER:-default}:x:$(id -u):$(id -g):Default User:$HOME:/sbin/nologin" >> /etc/passwd
+fi
+
+# Provide a "files_dir" variable that points to inventory/dynamic/injected
+echo "files_dir: \"${FILES}\"" > "${WORK}/inventory/dynamic/${TYPE}/group_vars/all/00_default_files_dir.yml"
+# Add any injected variable files into the group vars directory
+find "${FILES}" \( -name '*.yml' -or -name '*.yaml' -or -name vars \) -print0 | xargs -0 -L1 -I {} ln -fs {} "${WORK}/inventory/dynamic/${TYPE}/group_vars/all"
+# Avoid sudo when running locally - nothing in the image requires it.
+mkdir -p "${WORK}/inventory/dynamic/${TYPE}/host_vars/localhost"
+echo "ansible_become: no" > "${WORK}/inventory/dynamic/${TYPE}/host_vars/localhost/00_skip_root.yaml"
+
+if [[ -z "${ANSIBLE_CONFIG-}" ]]; then
+  export ANSIBLE_CONFIG="${WORK}/inventory/dynamic/${TYPE}/ansible.cfg"
+fi
+
+# SSH requires the file to be owned by the current user, but Docker copies
+# files in as root. Put the file into the ssh dir with the right permissions
+if [[ -f "${FILES}/ssh-privatekey" ]]; then
+  if [[ "$TYPE" == 'gcp' ]]; then
+    keyfile="${HOME}/.ssh/google_compute_engine"
+  else
+    keyfile="${HOME}/.ssh/id_rsa"
+  fi
+  mkdir "${HOME}/.ssh"
+  rm -f "${keyfile}"
+  cat "${FILES}/ssh-privatekey" > "${keyfile}"
+  chmod 0600 "${keyfile}"
+  ssh-keygen -y -f "${keyfile}" >  "${keyfile}.pub"
+fi
+
+if [[ "$TYPE" == 'gcp' ]]; then
+  if [[ -f "${FILES}/gce.json" ]]; then
+    gcloud auth activate-service-account --quiet --key-file="${FILES}/gce.json"
+  else
+    echo "No service account file found at ${FILES}/gce.json, bypassing login"
+  fi
+fi
+
+if [[ "$TYPE" == 'azure' ]]; then
+  if [[ -f "${FILES}/credentials" ]]; then
+    set -a
+    . "${FILES}/credentials"
+    set +a
+
+    az login --service-principal --username "$AZURE_CLIENT_ID" --password "$AZURE_SECRET" --tenant "$AZURE_TENANT" >/dev/null
+
+  else
+    echo "No service account file found at ${FILES}/credentials, bypassing login"
+  fi
+fi
+
+exec "$@"

+ 1 - 1
images/installer/root/usr/local/bin/run

@@ -21,7 +21,7 @@ if [[ -v INVENTORY_FILE ]]; then
   cp ${INVENTORY_FILE} ${INVENTORY}
 elif [[ -v INVENTORY_DIR ]]; then
   INVENTORY="$(mktemp -d)"
-  cp -R ${INVENTORY_DIR}/* ${INVENTORY}
+  cp -RL ${INVENTORY_DIR}/* ${INVENTORY}
 elif [[ -v INVENTORY_URL ]]; then
   curl -o ${INVENTORY} ${INVENTORY_URL}
 elif [[ -v DYNAMIC_SCRIPT_URL ]]; then

+ 1 - 1
images/installer/root/usr/local/bin/usage

@@ -28,6 +28,6 @@ docker run -tu `id -u` \
        -e INVENTORY_FILE=/tmp/inventory \
        -e OPTS="-v" \
        -e PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
-       openshift/origin-ansible
+       docker.io/openshift/origin-ansible
 
 EOF

+ 43 - 0
inventory/dynamic/azure/ansible.cfg

@@ -0,0 +1,43 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts.
+
+[defaults]
+# Set the log_path
+#log_path = /tmp/ansible.log
+
+# Additional default options for OpenShift Ansible
+forks = 20
+host_key_checking = False
+retry_files_enabled = False
+retry_files_save_path = ~/ansible-installer-retries
+nocows = True
+remote_user = root
+roles_path = roles/
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = $HOME/ansible/facts
+fact_caching_timeout = 600
+callback_whitelist = profile_tasks
+inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt, .ini
+# work around privilege escalation timeouts in ansible:
+timeout = 30
+
+# Uncomment to use the provided example inventory
+inventory = none
+
+[inventory]
+# fail more helpfully when the inventory file does not parse (Ansible 2.4+)
+unparsed_is_failed=true
+
+# Additional ssh options for OpenShift Ansible
+[ssh_connection]
+pipelining = True
+ssh_args = -o ControlMaster=auto -o ControlPersist=600s
+timeout = 10
+# shorten the ControlPath which is often too long; when it is,
+# ssh connection reuse silently fails, making everything slower.
+control_path = %(directory)s/%%h-%%r

utils/test/__init__.py → inventory/dynamic/azure/group_vars/all/.gitkeep


+ 1 - 0
inventory/dynamic/azure/none

@@ -0,0 +1 @@
+{}

+ 6 - 6
inventory/dynamic/gcp/group_vars/all/00_defaults.yml

@@ -3,6 +3,7 @@
 ansible_become: yes
 
 openshift_deployment_type: origin
+osm_etcd_image: "{{ etcd_image_dict[openshift_deployment_type] }}"
 
 # Debugging settings
 debug_level: 2
@@ -35,9 +36,8 @@ osm_cluster_network_cidr: 172.16.0.0/16
 osm_host_subnet_length: 9
 openshift_portal_net: 172.30.0.0/16
 
-# Default cluster configuration
-openshift_master_cluster_method: native
-openshift_schedulable: true
-# TODO: change to upstream conventions
-openshift_hosted_infra_selector: "role=infra"
-osm_default_node_selector: "role=app"
+# masters and infra are the same in CI
+openshift_gcp_node_group_mapping:
+  masters: 'node-config-master'
+  infra: 'node-config-master'
+  compute: 'node-config-compute'

+ 113 - 184
inventory/hosts.example

@@ -12,8 +12,8 @@ ose3-master[1:3].test.example.com
 
 [nodes]
 ose3-master[1:3].test.example.com
-ose3-infra[1:2].test.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
-ose3-node[1:2].test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
+ose3-infra[1:2].test.example.com
+ose3-node[1:2].test.example.com
 
 [nfs]
 ose3-master1.test.example.com
@@ -51,7 +51,7 @@ openshift_deployment_type=origin
 # use this to lookup the latest exact version of the container images, which is the tag actually used to configure
 # the cluster. For RPM installations we just verify the version detected in your configured repos matches this
 # release.
-openshift_release=v3.9
+openshift_release="3.9"
 
 # default subdomain to use for exposed routes, you should have wildcard dns
 # for *.apps.test.example.com that points at your infra nodes which will run
@@ -80,21 +80,10 @@ debug_level=2
 # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
 #openshift_pkg_version=-3.9.0
 
-# This enables all the system containers except for docker:
-#openshift_use_system_containers=False
-#
-# But you can choose separately each component that must be a
-# system container:
-#
-#openshift_use_openvswitch_system_container=False
-#openshift_use_node_system_container=False
-#openshift_use_master_system_container=False
-#openshift_use_etcd_system_container=False
-#
-# In either case, system_images_registry must be specified to be able to find the system images
+# If using Atomic Host, you may specify system container image registry for the nodes:
 #system_images_registry="docker.io"
 # when openshift_deployment_type=='openshift-enterprise'
-#system_images_registry="registry.access.redhat.com"
+#system_images_registry="registry.redhat.io"
 
 # Manage openshift example imagestreams and templates during install and upgrade
 #openshift_install_examples=true
@@ -103,28 +92,17 @@ debug_level=2
 # See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
 #openshift_master_logout_url=http://example.com
 
-# Configure extensionScripts in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
-#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
-
-# Configure extensionStylesheets in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
-#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
-
 # Configure extensions in the master config for console customization
 # See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
-#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
-
-# Configure extensions in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
-#openshift_master_oauth_templates:
-#  login: /path/to/login-template.html
+#openshift_master_oauth_templates={'login': '/path/to/login-template.html'}
 # openshift_master_oauth_template is deprecated.  Use openshift_master_oauth_templates instead.
 #openshift_master_oauth_template=/path/to/login-template.html
 
 # Configure imagePolicyConfig in the master config
-# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
+# See: https://docs.openshift.org/latest/admin_guide/image_policy.html
 #openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
+# This setting overrides allowedRegistriesForImport in openshift_master_image_policy_config. By default, all registries are allowed.
+#openshift_master_image_policy_allowed_registries_for_import=["docker.io", "*.docker.io", "*.redhat.com", "gcr.io", "quay.io", "registry.centos.org", "registry.redhat.io", "*.amazonaws.com"]
 
 # Configure master API rate limits for external clients
 #openshift_master_external_ratelimit_qps=200
@@ -133,34 +111,9 @@ debug_level=2
 #openshift_master_loopback_ratelimit_qps=300
 #openshift_master_loopback_ratelimit_burst=600
 
-# Docker Configuration
-# Add additional, insecure, and blocked registries to global docker configuration
-# For enterprise deployment types we ensure that registry.access.redhat.com is
-# included if you do not include it
-#openshift_docker_additional_registries=registry.example.com
-#openshift_docker_insecure_registries=registry.example.com
-#openshift_docker_blocked_registries=registry.hacker.com
-# Disable pushing to dockerhub
-#openshift_docker_disable_push_dockerhub=True
-# Use Docker inside a System Container. Note that this is a tech preview and should
-# not be used to upgrade!
-# The following options for docker are ignored:
-# - docker_version
-# - docker_upgrade
-# The following options must not be used
-# - openshift_docker_options
-#openshift_docker_use_system_container=False
-# Install and run cri-o. By default this will install cri-o as a system container.
+# Install and run cri-o.
 #openshift_use_crio=False
-# You can install cri-o as an rpm by setting the following variable:
-#openshift_crio_use_rpm=False
-# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as it's override
-# just as container-engine does.
-# Force the registry to use for the container-engine/crio system container. By default the registry
-# will be built off of the deployment type and ansible_distribution. Only
-# use this option if you are sure you know what you are doing!
-#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
-#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
+#openshift_use_crio_only=False
 # The following two variables are used when openshift_use_crio is True
 # and cleans up after builds that pass through docker. When openshift_use_crio is True
 # these variables are set to the defaults shown. You may override them here.
@@ -186,8 +139,12 @@ debug_level=2
 # Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
 # docker_upgrade=False
 
-# Specify exact version of etcd to configure or upgrade to.
-# etcd_version="3.1.0"
+# Specify a list of block devices to be formatted and mounted on the nodes
+# during prerequisites.yml. For each hash, "device", "path", "filesystem" are
+# required. To add devices only on certain classes of node, redefine
+# container_runtime_extra_storage as a group var.
+#container_runtime_extra_storage='[{"device":"/dev/vdc","path":"/var/lib/origin/openshift.local.volumes","filesystem":"xfs","options":"gquota"}]'
+
 # Enable etcd debug logging, defaults to false
 # etcd_debug=true
 # Set etcd log levels by package
@@ -210,15 +167,19 @@ debug_level=2
 # Tasks to run after each master is upgraded and system/services have been restarted.
 # openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
 
-# Alternate image format string, useful if you've got your own registry mirror
-# Configure this setting just on node or master
-#oreg_url_master=example.com/openshift3/ose-${component}:${version}
-#oreg_url_node=example.com/openshift3/ose-${component}:${version}
-# For setting the configuration globally
+# Cluster Image Source (registry) configuration
+# openshift-enterprise default is 'registry.access.redhat.com/openshift3/ose-${component}:${version}'
+# origin default is 'docker.io/openshift/origin-${component}:${version}'
 #oreg_url=example.com/openshift3/ose-${component}:${version}
-# If oreg_url points to a registry other than registry.access.redhat.com we can
+# If oreg_url points to a registry other than registry.redhat.io we can
 # modify image streams to point at that registry by setting the following to true
 #openshift_examples_modify_imagestreams=true
+# Add insecure and blocked registries to global docker configuration
+#openshift_docker_insecure_registries=registry.example.com
+#openshift_docker_blocked_registries=registry.hacker.com
+# You may also configure additional default registries for docker, however this
+# is discouraged. Instead you should make use of fully qualified image names.
+#openshift_docker_additional_registries=registry.example.com
 
 # If oreg_url points to a registry requiring authentication, provide the following:
 #oreg_auth_user=some_user
@@ -232,12 +193,12 @@ debug_level=2
 #openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
 #openshift_repos_enable_testing=false
 
-# If the image for etcd needs to be pulled from anywhere else than registry.access.redhat.com, e.g. in
+# If the image for etcd needs to be pulled from anywhere else than registry.redhat.io, e.g. in
 # a disconnected and containerized installation, use osm_etcd_image to specify the image to use:
-#osm_etcd_image=rhel7/etcd
+#osm_etcd_image=registry.example.com/rhel7/etcd
 
 # htpasswd auth
-#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
+#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
 # Defining htpasswd users
 #openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
 # or
@@ -333,6 +294,10 @@ debug_level=2
 # openshift_gcp_prefix is a unique string to identify each openshift cluster.
 #openshift_gcp_prefix=
 #openshift_gcp_multizone=False
+# Note: To enable nested virtualization in gcp use the following variable and url
+#openshift_gcp_licenses="https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx"
+# Additional details regarding nested virtualization are available:
+# https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances
 #
 # vSphere
 #openshift_cloudprovider_kind=vsphere
@@ -411,23 +376,27 @@ debug_level=2
 #
 # An OpenShift router will be created during install if there are
 # nodes present with labels matching the default router selector,
-# "region=infra". Set openshift_node_labels per node as needed in
-# order to label nodes.
+# "node-role.kubernetes.io/infra=true".
 #
 # Example:
 # [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
+# node.example.com openshift_node_group_name="node-config-infra"
 #
 # Router selector (optional)
 # Router will only be created if nodes matching this label are present.
-# Default value: 'region=infra'
-#openshift_hosted_router_selector='region=infra'
+# Default value: 'node-role.kubernetes.io/infra=true'
+#openshift_hosted_router_selector='node-role.kubernetes.io/infra=true'
 #
 # Router replicas (optional)
 # Unless specified, openshift-ansible will calculate the replica count
 # based on the number of nodes matching the openshift router selector.
 #openshift_hosted_router_replicas=2
 #
+# Router extended route validation (optional)
+# If enabled, openshift-ansible will configure the router to perform extended
+# validation on routes before admitting them.
+#openshift_hosted_router_extended_validation=true
+#
 # Router force subdomain (optional)
 # A router path format to force on all routes used by this router
 # (will ignore the route host value)
@@ -462,17 +431,16 @@ debug_level=2
 #
 # An OpenShift registry will be created during install if there are
 # nodes present with labels matching the default registry selector,
-# "region=infra". Set openshift_node_labels per node as needed in
-# order to label nodes.
+# "node-role.kubernetes.io/infra=true".
 #
 # Example:
 # [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
+# node.example.com openshift_node_group_name="node-config-infra"
 #
 # Registry selector (optional)
 # Registry will only be created if nodes matching this label are present.
-# Default value: 'region=infra'
-#openshift_hosted_registry_selector='region=infra'
+# Default value: 'node-role.kubernetes.io/infra=true'
+#openshift_hosted_registry_selector='node-role.kubernetes.io/infra=true'
 #
 # Registry replicas (optional)
 # Unless specified, openshift-ansible will calculate the replica count
@@ -527,6 +495,14 @@ debug_level=2
 #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
 #openshift_hosted_registry_storage_volume_size=10Gi
 #
+# hostPath (local filesystem storage)
+# Suitable for "all-in-one" or proof of concept deployments
+# Must not be used for high-availability and production deployments
+#openshift_hosted_registry_storage_kind=hostpath
+#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_registry_storage_hostpath_path=/var/lib/openshift_volumes
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
 # AWS S3
 # S3 bucket must already exist.
 #openshift_hosted_registry_storage_kind=object
@@ -564,6 +540,10 @@ debug_level=2
 #openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
 #openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
 #openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
+# vSphere Volume with vSphere Cloud Provider
+# openshift_hosted_registry_storage_kind=vsphere
+# openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
+# openshift_hosted_registry_storage_annotations=['volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/vsphere-volume']
 #
 # GCS Storage Bucket
 #openshift_hosted_registry_storage_provider=gcs
@@ -577,6 +557,12 @@ debug_level=2
 # By default metrics are not automatically deployed, set this to enable them
 #openshift_metrics_install_metrics=true
 #
+# metrics-server deployment
+# By default, metrics-server is not automatically deployed, unless metrics is also
+# deployed.  Deploying metrics-server is necessary to use the HorizontalPodAutoscaler.
+# Set this to enable it.
+#openshift_metrics_server_install=true
+#
 # Storage Options
 # If openshift_metrics_storage_kind is unset then metrics will be stored
 # in an EmptyDir volume and will be deleted when the cassandra pod terminates.
@@ -624,17 +610,32 @@ debug_level=2
 # Currently, you may only alter the hostname portion of the url, altering the
 # `/hawkular/metrics` path will break installation of metrics.
 #openshift_metrics_hawkular_hostname=hawkular-metrics.example.com
-# Configure the prefix and version for the component images
-#openshift_metrics_image_prefix=docker.io/openshift/origin-
-#openshift_metrics_image_version=v3.9
+# Configure the metrics component images # Note, these will be modified by oreg_url by default
+#openshift_metrics_cassandra_image="docker.io/openshift/origin-metrics-cassandra:{{ openshift_image_tag }}"
+#openshift_metrics_hawkular_agent_image="docker.io/openshift/origin-metrics-hawkular-openshift-agent:{{ openshift_image_tag }}"
+#openshift_metrics_hawkular_metrics_image="docker.io/openshift/origin-metrics-hawkular-metrics:{{ openshift_image_tag }}"
+#openshift_metrics_schema_installer_image="docker.io/openshift/origin-metrics-schema-installer:{{ openshift_image_tag }}"
+#openshift_metrics_heapster_image="docker.io/openshift/origin-metrics-heapster:{{ openshift_image_tag }}"
 # when openshift_deployment_type=='openshift-enterprise'
-#openshift_metrics_image_prefix=registry.access.redhat.com/openshift3/
-#openshift_metrics_image_version=v3.9
+#openshift_metrics_cassandra_image="registry.redhat.io/openshift3/metrics-cassandra:{{ openshift_image_tag }}"
+#openshift_metrics_hawkular_agent_image="registry.redhat.io/openshift3/metrics-hawkular-openshift-agent:{{ openshift_image_tag }}"
+#openshift_metrics_hawkular_metrics_image="registry.redhat.io/openshift3/metrics-hawkular-metrics:{{ openshift_image_tag }}"
+#openshift_metrics_schema_installer_image="registry.redhat.io/openshift3/metrics-schema-installer:{{ openshift_image_tag }}"
+#openshift_metrics_heapster_image="registry.redhat.io/openshift3/metrics-heapster:{{ openshift_image_tag }}"
 #
 # StorageClass
 # openshift_storageclass_name=gp2
 # openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
+# openshift_storageclass_mount_options=['dir_mode=0777', 'file_mode=0777']
+# openshift_storageclass_reclaim_policy="Delete"
 #
+# PersistentLocalStorage
+# If Persistent Local Storage is wanted, this boolean can be defined to True.
+# This will create all necessary configuration to use persistent storage on nodes.
+#openshift_persistentlocalstorage_enabled=False
+#openshift_persistentlocalstorage_classes=[]
+#openshift_persistentlocalstorage_path=/mnt/local-storage
+#openshift_persistentlocalstorage_provisionner_image=quay.io/external_storage/local-volume-provisioner:v1.0.1
 
 # Logging deployment
 #
@@ -686,12 +687,6 @@ debug_level=2
 # Configure the number of elastic search nodes, unless you're using dynamic provisioning
 # this value must be 1
 #openshift_logging_es_cluster_size=1
-# Configure the prefix and version for the component images
-#openshift_logging_image_prefix=docker.io/openshift/origin-
-#openshift_logging_image_version=v3.9.0
-# when openshift_deployment_type=='openshift-enterprise'
-#openshift_logging_image_prefix=registry.access.redhat.com/openshift3/
-#openshift_logging_image_version=3.9.0
 
 # Prometheus deployment
 #
@@ -699,76 +694,18 @@ debug_level=2
 #openshift_hosted_prometheus_deploy=true
 #
 # Prometheus storage config
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group.  For example, the volume
-# path using these options would be "/exports/prometheus"
-#openshift_prometheus_storage_kind=nfs
-#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
-#openshift_prometheus_storage_nfs_directory=/exports
-#openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
-#openshift_prometheus_storage_volume_name=prometheus
-#openshift_prometheus_storage_volume_size=10Gi
-#openshift_prometheus_storage_labels={'storage': 'prometheus'}
-#openshift_prometheus_storage_type='pvc'
-#openshift_prometheus_storage_class=glusterfs-storage
-# For prometheus-alertmanager
-#openshift_prometheus_alertmanager_storage_kind=nfs
-#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
-#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
-#openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
-#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
-#openshift_prometheus_alertmanager_storage_volume_size=10Gi
-#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
-#openshift_prometheus_alertmanager_storage_type='pvc'
-#openshift_prometheus_alertmanager_storage_class=glusterfs-storage
-# For prometheus-alertbuffer
-#openshift_prometheus_alertbuffer_storage_kind=nfs
-#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
-#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
-#openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
-#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
-#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
-#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
-#openshift_prometheus_alertbuffer_storage_type='pvc'
-#openshift_prometheus_alertbuffer_storage_class=glusterfs-storage
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/_volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/prometheus"
-#openshift_prometheus_storage_kind=nfs
-#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
-#openshift_prometheus_storage_host=nfs.example.com
-#openshift_prometheus_storage_nfs_directory=/exports
-#openshift_prometheus_storage_volume_name=prometheus
-#openshift_prometheus_storage_volume_size=10Gi
-#openshift_prometheus_storage_labels={'storage': 'prometheus'}
-#openshift_prometheus_storage_type='pvc'
-#openshift_prometheus_storage_class=glusterfs-storage
-# For prometheus-alertmanager
-#openshift_prometheus_alertmanager_storage_kind=nfs
-#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
-#openshift_prometheus_alertmanager_storage_host=nfs.example.com
-#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
-#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
-#openshift_prometheus_alertmanager_storage_volume_size=10Gi
-#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
-#openshift_prometheus_alertmanager_storage_type='pvc'
-#openshift_prometheus_alertmanager_storage_class=glusterfs-storage
-# For prometheus-alertbuffer
-#openshift_prometheus_alertbuffer_storage_kind=nfs
-#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
-#openshift_prometheus_alertbuffer_storage_host=nfs.example.com
-#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
-#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
-#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
-#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
-#openshift_prometheus_alertbuffer_storage_type='pvc'
-#openshift_prometheus_alertbuffer_storage_class=glusterfs-storage
-#
-# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes
-# which are destroyed when pods are deleted
+# By default prometheus uses emptydir storage, if you want to persist you should
+# configure it to use pvc storage type. Each volume must be ReadWriteOnce.
+#openshift_prometheus_storage_type=emptydir
+#openshift_prometheus_alertmanager_storage_type=emptydir
+#openshift_prometheus_alertbuffer_storage_type=emptydir
+# Use PVCs for persistence
+#openshift_prometheus_storage_type=pvc
+#openshift_prometheus_alertmanager_storage_type=pvc
+#openshift_prometheus_alertbuffer_storage_type=pvc
+
+# Grafana deployment, requires Prometheus
+#openshift_hosted_grafana_deploy=true
 
 # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
 # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -860,6 +797,10 @@ debug_level=2
 #
 # Detected names may be overridden by specifying the "names" key
 #openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
+#
+# Add a trusted CA to all pods, copies from the control host, may be multiple
+# certs in one file
+#openshift_additional_ca=/path/to/additional-ca.crt
 
 # Session options
 #openshift_master_session_name=ssn
@@ -885,17 +826,7 @@ debug_level=2
 # interface other than the default network interface.
 #openshift_set_node_ip=True
 
-# Configure dnsIP in the node config.
-# This setting overrides the bind IP address used by each node's dnsmasq.
-# By default, this value is set to the IP which ansible uses to connect to the node.
-# Only update this variable if you need to bind dnsmasq on a different interface
-#
-# Example:
-# [nodes]
-# node.example.com openshift_dns_ip=172.30.0.1
-
-# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
-#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
+#openshift_node_kubelet_args is deprecated, use node config edits instead
 
 # Configure logrotate scripts
 # See: https://github.com/nickhammond/ansible-logrotate
@@ -979,13 +910,10 @@ debug_level=2
 # Enable template service broker (requires service catalog to be enabled, above)
 #template_service_broker_install=true
 
-# Force a specific prefix (IE: registry) to use when pulling the service catalog image
-# NOTE: The registry all the way up to the start of the image name must be provided. Two examples
-# below are provided.
-#openshift_service_catalog_image_prefix=docker.io/openshift/origin-
-#openshift_service_catalog_image_prefix=registry.access.redhat.com/openshift3/ose-
-# Force a specific image version to use when pulling the service catalog image
-#openshift_service_catalog_image_version=v3.9
+# Specify an openshift_service_catalog image
+# (defaults for origin and openshift-enterprise, respectively)
+#openshift_service_catalog_image="docker.io/openshift/origin-service-catalog:{{ openshift_image_tag }}"
+#openshift_service_catalog_image="registry.redhat.io/openshift3/ose-service-catalog:{{ openshift_image_tag }}"
 
 # TSB image tag
 #template_service_broker_version='v3.9'
@@ -994,7 +922,7 @@ debug_level=2
 #openshift_template_service_broker_namespaces=['openshift']
 
 # masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
-#openshift_master_dynamic_provisioning_enabled=False
+#openshift_master_dynamic_provisioning_enabled=True
 
 # Admission plugin config
 #openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
@@ -1004,21 +932,19 @@ debug_level=2
 
 # OpenShift Per-Service Environment Variables
 # Environment variables are added to /etc/sysconfig files for
-# each OpenShift service: node, master (api and controllers).
+# each OpenShift node.
 # API and controllers environment variables are merged in single
 # master environments.
-#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
-#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
 #openshift_node_env_vars={"ENABLE_HTTP2": "true"}
 
 # Enable API service auditing
-#openshift_master_audit_config={"enabled": true}
+#openshift_master_audit_config={"enabled": "true"}
 #
 # In case you want more advanced setup for the auditlog you can
 # use this line.
 # The directory in "auditFilePath" will be created if it does not
 # exist.
-#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
+#openshift_master_audit_config={"enabled": "true", "auditFilePath": "/var/lib/origin/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": "14", "maximumFileSizeMegabytes": "500", "maximumRetainedFiles": "5"}
 
 # Enable origin repos that point at Centos PAAS SIG, defaults to true, only used
 # by openshift_deployment_type=origin
@@ -1160,6 +1086,9 @@ debug_level=2
 #openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}]
 #openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}]
 
+# Service port node range
+#openshift_node_port_range=30000-32767
+
 # Enable unsupported configurations, things that will yield a partially
 # functioning cluster but would not be supported for production use
 #openshift_enable_unsupported_configurations=false

+ 1 - 1
inventory/hosts.glusterfs.native.example

@@ -31,7 +31,7 @@ master
 # masters should be schedulable to run web console pods
 master  openshift_schedulable=True
 # A hosted registry, by default, will only be deployed on nodes labeled
-# "region=infra".
+# "node-role.kubernetes.io/infra=true".
 node0   openshift_schedulable=True
 node1   openshift_schedulable=True
 node2   openshift_schedulable=True

+ 5 - 5
inventory/hosts.glusterfs.registry-only.example

@@ -31,16 +31,16 @@ openshift_deployment_type=origin
 openshift_hosted_registry_storage_kind=glusterfs
 
 [masters]
-master
+master openshift_node_group_name="node-config-master"
 
 [nodes]
 # masters should be schedulable to run web console pods
 master  openshift_schedulable=True
 # A hosted registry, by default, will only be deployed on nodes labeled
-# "region=infra".
-node0   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node1   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node2   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+# "node-role.kubernetes.io/infra=true".
+node0   openshift_node_group_name="node-config-infra"
+node1   openshift_node_group_name="node-config-infra"
+node2   openshift_node_group_name="node-config-infra"
 
 [etcd]
 master

+ 8 - 8
inventory/hosts.glusterfs.storage-and-registry.example

@@ -36,17 +36,17 @@ master
 
 [nodes]
 # masters should be schedulable to run web console pods
-master  openshift_schedulable=True
+master  openshift_node_group_name="node-config-master" openshift_schedulable=True
 # It is recommended to not use a single cluster for both general and registry
 # storage, so two three-node clusters will be required.
-node0   openshift_schedulable=True
-node1   openshift_schedulable=True
-node2   openshift_schedulable=True
+node0   openshift_node_group_name="node-config-compute"
+node1   openshift_node_group_name="node-config-compute"
+node2   openshift_node_group_name="node-config-compute"
 # A hosted registry, by default, will only be deployed on nodes labeled
-# "region=infra".
-node3   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node4   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node5   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+# "node-role.kubernetes.io/infra=true".
+node3   openshift_node_group_name="node-config-infra"
+node4   openshift_node_group_name="node-config-infra"
+node5   openshift_node_group_name="node-config-infra"
 
 [etcd]
 master

+ 5 - 4
inventory/hosts.localhost

@@ -9,13 +9,13 @@ etcd
 # if your target hosts are Fedora uncomment this
 #ansible_python_interpreter=/usr/bin/python3
 openshift_deployment_type=origin
-openshift_release=3.7
-osm_cluster_network_cidr=10.128.0.0/14
 openshift_portal_net=172.30.0.0/16
-osm_host_subnet_length=9
 # localhost likely doesn't meet the minimum requirements
 openshift_disable_check=disk_availability,memory_availability
 
+openshift_node_groups=[{'name': 'node-config-all-in-one', 'labels': ['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true', 'node-role.kubernetes.io/compute=true']}]
+
+
 [masters]
 localhost ansible_connection=local
 
@@ -23,4 +23,5 @@ localhost ansible_connection=local
 localhost ansible_connection=local
 
 [nodes]
-localhost  ansible_connection=local openshift_schedulable=true openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
+# openshift_node_group_name should match the 'name' key of an entry in the openshift_node_groups list.
+localhost ansible_connection=local openshift_node_group_name="node-config-all-in-one"

+ 3 - 3
inventory/hosts.openstack

@@ -19,7 +19,7 @@ openshift_deployment_type=openshift-enterprise
 
 openshift_additional_repos=[{'id': 'ose-3.1', 'name': 'ose-3.1', 'baseurl': 'http://pulp.dist.prod.ext.phx2.redhat.com/content/dist/rhel/server/7/7Server/x86_64/ose/3.1/os', 'enabled': 1, 'gpgcheck': 0}]
 
-openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '{{ openshift.common.config_base }}/htpasswd'}]
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
 
 #openshift_pkg_version=-3.0.0.0
 
@@ -33,5 +33,5 @@ jdetiber-etcd.usersys.redhat.com
 #ose3-lb-ansible.test.example.com
 
 [nodes]
-jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}"
-jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
+jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_group_name="node-config-master"
+jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_group_name="node-config-compute"

File diff suppressed because it is too large
+ 1307 - 44
openshift-ansible.spec


+ 1 - 1
playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml

@@ -52,7 +52,7 @@
     ignore_errors: yes
 
   - name: Remove non-running docker images
-    shell: "docker images | grep -v -e registry.access.redhat.com -e docker-registry.usersys.redhat.com -e docker-registry.ops.rhcloud.com | awk '{print $3}' | xargs --no-run-if-empty docker rmi 2>/dev/null"
+    shell: "docker images | grep -v -e registry.redhat.io -e registry.access.redhat.com -e docker-registry.usersys.redhat.com -e docker-registry.ops.rhcloud.com | awk '{print $3}' | xargs --no-run-if-empty docker rmi 2>/dev/null"
     ignore_errors: yes
 
   # leaving off the '-t' for docker exec.  With it, it doesn't work with ansible and tty support

+ 3 - 575
playbooks/adhoc/uninstall.yml

@@ -1,576 +1,4 @@
-# This deletes *ALL* Origin and OpenShift Enterprise content installed by
-# ansible.  This includes:
-#
-#    configuration
-#    containers
-#    example templates and imagestreams
-#    images
-#    RPMs
 ---
-- hosts: OSEv3:children
-  become: yes
-  tasks:
-  - name: Detecting Operating System
-    shell: ls /run/ostree-booted
-    ignore_errors: yes
-    failed_when: false
-    register: ostree_output
-
-  # Since we're not calling openshift_facts we'll do this for now
-  - set_fact:
-      openshift_is_atomic: "{{ ostree_output.rc == 0 }}"
-      openshift_is_containerized: "{{ ostree_output.rc == 0 or containerized | default(false) | bool }}"
-
-# Stop services on all hosts prior to removing files.
-- hosts: nodes
-  become: yes
-  tasks:
-  - name: Remove dnsmasq dispatcher
-    file:
-      path: "{{ item }}"
-      state: absent
-    with_items:
-    - /etc/dnsmasq.d/origin-dns.conf
-    - /etc/dnsmasq.d/origin-upstream-dns.conf
-    - /etc/dnsmasq.d/openshift-ansible.conf
-    - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
-  - service:
-      name: NetworkManager
-      state: restarted
-  - name: Stop services
-    service: name={{ item }} state=stopped
-    with_items:
-    - atomic-openshift-node
-    - openshift-node
-    - openvswitch
-    - origin-node
-    failed_when: false
-
-- hosts: masters
-  become: yes
-  tasks:
-  - name: Stop services
-    service: name={{ item }} state=stopped
-    with_items:
-    - atomic-openshift-master
-    - atomic-openshift-master-api
-    - atomic-openshift-master-controllers
-    - openshift-master
-    - openshift-master-api
-    - openshift-master-controllers
-    - origin-master
-    - origin-master-api
-    - origin-master-controllers
-    failed_when: false
-
-- hosts: etcd
-  become: yes
-  tasks:
-  - name: Stop services
-    service: name={{ item }} state=stopped
-    with_items:
-    - etcd
-    failed_when: false
-
-- hosts: lb
-  become: yes
-  tasks:
-  - name: Stop services
-    service: name={{ item }} state=stopped
-    with_items:
-    - haproxy
-    failed_when: false
-
-- hosts: nodes
-  become: yes
-  vars:
-    node_dirs:
-    - "/etc/origin"
-    - "/var/lib/origin"
-  tasks:
-  - name: unmask services
-    command: systemctl unmask "{{ item }}"
-    changed_when: False
-    failed_when: False
-    with_items:
-    - firewalld
-
-  - block:
-    - block:
-      - name: Remove packages
-        package: name={{ item }} state=absent
-        with_items:
-        - atomic-openshift
-        - atomic-openshift-clients
-        - atomic-openshift-excluder
-        - atomic-openshift-docker-excluder
-        - atomic-openshift-node
-        - atomic-openshift-sdn-ovs
-        - cockpit-bridge
-        - cockpit-docker
-        - cockpit-system
-        - cockpit-ws
-        - kubernetes-client
-        - openshift
-        - openshift-node
-        - openshift-sdn
-        - openshift-sdn-ovs
-        - openvswitch
-        - origin
-        - origin-excluder
-        - origin-docker-excluder
-        - origin-clients
-        - origin-node
-        - origin-sdn-ovs
-        - tuned-profiles-atomic-openshift-node
-        - tuned-profiles-origin-node
-        register: result
-        until: result is succeeded
-
-      - name: Remove flannel package
-        package: name=flannel state=absent
-        when: openshift_use_flannel | default(false) | bool
-        register: result
-        until: result is succeeded
-      when: not openshift_is_atomic | bool
-
-    - shell: systemctl reset-failed
-      changed_when: False
-
-    - shell: systemctl daemon-reload
-      changed_when: False
-
-    - name: Remove br0 interface
-      shell: ovs-vsctl del-br br0
-      changed_when: False
-      failed_when: False
-
-    - name: Remove linux interfaces
-      shell: ip link del "{{ item }}"
-      changed_when: False
-      failed_when: False
-      with_items:
-      - lbr0
-      - vlinuxbr
-      - vovsbr
-
-    - name: Remove virtual devices
-      command: nmcli delete device "{{ item }}"
-      failed_when: False
-      with_items:
-      - tun0
-      - docker0
-
-    when: openshift_remove_all | default(true) | bool
-
-  - shell: atomic uninstall "{{ item }}"-master-api
-    changed_when: False
-    failed_when: False
-    with_items:
-    - openshift-enterprise
-    - origin
-
-  - shell: atomic uninstall "{{ item }}"-master-controllers
-    changed_when: False
-    failed_when: False
-    with_items:
-    - openshift-enterprise
-    - origin
-
-  - shell: atomic uninstall "{{ item }}"-master
-    changed_when: False
-    failed_when: False
-    with_items:
-    - openshift-enterprise
-    - origin
-
-  - shell: atomic uninstall "{{ item }}"-node
-    changed_when: False
-    failed_when: False
-    with_items:
-    - openshift-enterprise
-    - origin
-
-  - shell: atomic uninstall "{{ item }}"
-    changed_when: False
-    failed_when: False
-    with_items:
-    - etcd
-    - openvswitch
-
-  - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-    changed_when: False
-
-  - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
-    changed_when: False
-    failed_when: False
-    with_items:
-    - openshift-enterprise
-    - origin
-
-  - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}'
-    changed_when: False
-    failed_when: False
-    register: exited_containers_to_delete
-    with_items:
-    - openshift3/ose
-    - openshift3/node
-    - openshift3/openvswitch
-    - openshift/origin
-
-  - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
-    changed_when: False
-    failed_when: False
-    with_items: "{{ exited_containers_to_delete.results }}"
-
-  - block:
-    - block:
-      - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
-        changed_when: False
-        failed_when: False
-        register: images_to_delete
-        with_items:
-        - registry\.access\..*redhat\.com/openshift3
-        - registry\.qe\.openshift\.com/.*
-        - registry\.access\..*redhat\.com/rhel7/etcd
-        - docker.io/openshift
-
-      - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
-        changed_when: False
-        failed_when: False
-        with_items: "{{ images_to_delete.results }}"
-      when: openshift_uninstall_images | default(True) | bool
-
-    - name: remove sdn drop files
-      file:
-        path: /run/openshift-sdn
-        state: absent
-
-    - name: Remove files owned by RPMs
-      file: path={{ item }} state=absent
-      with_items:
-      - /etc/sysconfig/openshift-node
-      - /etc/sysconfig/openvswitch
-      - /run/openshift-sdn
-    when: openshift_remove_all | default(True) | bool
-
-  - find: path={{ item }} file_type=file
-    register: files
-    with_items:
-    - "{{ node_dirs }}"
-
-  - find: path={{ item }} file_type=directory
-    register: directories
-    with_items:
-    - "{{ node_dirs }}"
-
-  - file: path={{ item.1.path }} state=absent
-    with_subelements:
-    - "{{ files.results | default([]) }}"
-    - files
-
-  - file: path={{ item.1.path }} state=absent
-    with_subelements:
-    - "{{ directories.results | default([]) }}"
-    - files
-
-  - shell: systemctl daemon-reload
-    changed_when: False
-
-  - name: restart container-engine
-    service: name=container-engine state=stopped enabled=no
-    failed_when: false
-    register: container_engine
-
-  - name: restart docker
-    service: name=docker state=stopped enabled=no
-    failed_when: false
-    when: not (container_engine is changed)
-    register: l_docker_restart_docker_in_pb_result
-    until: not (l_docker_restart_docker_in_pb_result is failed)
-    retries: 3
-    delay: 30
-
-  - name: Remove remaining files
-    file: path={{ item }} state=absent
-    with_items:
-    - /etc/ansible/facts.d/openshift.fact
-    - /etc/openshift
-    - /etc/openshift-sdn
-    - /etc/pki/ca-trust/source/anchors/openshift-ca.crt
-    - /etc/sysconfig/atomic-openshift-node
-    - /etc/sysconfig/atomic-openshift-node-dep
-    - /etc/sysconfig/openshift-node-dep
-    - /etc/sysconfig/origin-node
-    - /etc/sysconfig/origin-node
-    - /etc/sysconfig/origin-node-dep
-    - /etc/systemd/system/atomic-openshift-node-dep.service
-    - /etc/systemd/system/atomic-openshift-node.service
-    - /etc/systemd/system/atomic-openshift-node.service.wants
-    - /etc/systemd/system/docker.service.d/docker-sdn-ovs.conf
-    - /etc/systemd/system/openvswitch.service
-    - /etc/systemd/system/origin-node-dep.service
-    - /etc/systemd/system/origin-node.service
-    - /etc/systemd/system/origin-node.service.wants
-    - /var/lib/docker
-
-  - name: Rebuild ca-trust
-    command: update-ca-trust
-
-  - name: Reset Docker proxy configuration
-    lineinfile:
-      state=absent
-      dest=/etc/sysconfig/docker
-      regexp='(NO_PROXY|HTTP_PROXY|HTTPS_PROXY)=.*'
-
-  - name: Reset Docker registry configuration
-    lineinfile:
-      state=absent
-      dest=/etc/sysconfig/docker
-      regexp='(ADD_REGISTRY|BLOCK_REGISTRY|INSECURE_REGISTRY)=.*'
-
-  - name: Detect Docker storage configuration
-    shell: vgs -o name | grep docker
-    register: docker_vg_name
-    failed_when: false
-    changed_when: false
-
-  - name: Wipe out Docker storage contents
-    command: vgremove -f {{ item }}
-    with_items: "{{ docker_vg_name.stdout_lines }}"
-    when: docker_vg_name.rc == 0
-
-  - name: Wipe out Docker storage configuration
-    file: path=/etc/sysconfig/docker-storage state=absent
-    when: docker_vg_name.rc == 0
-
-
-- hosts: masters
-  become: yes
-  vars:
-    master_dirs:
-    - "/etc/origin"
-    - "/var/lib/origin"
-  tasks:
-  - name: unmask services
-    command: systemctl unmask "{{ item }}"
-    changed_when: False
-    failed_when: False
-    with_items:
-    - firewalld
-    - atomic-openshift-master
-
-  - name: Remove packages
-    package: name={{ item }} state=absent
-    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
-    with_items:
-    - atomic-openshift
-    - atomic-openshift-clients
-    - atomic-openshift-excluder
-    - atomic-openshift-docker-excluder
-    - atomic-openshift-master
-    - cockpit-bridge
-    - cockpit-docker
-    - cockpit-system
-    - cockpit-ws
-    - corosync
-    - kubernetes-client
-    - openshift
-    - openshift-master
-    - origin
-    - origin-clients
-    - origin-excluder
-    - origin-docker-excluder
-    - origin-master
-    register: result
-    until: result is succeeded
-
-  - shell: systemctl reset-failed
-    changed_when: False
-
-  - shell: systemctl daemon-reload
-    changed_when: False
-
-  - name: Remove files owned by RPMs
-    file: path={{ item }} state=absent
-    when: openshift_remove_all | default(True) | bool
-    with_items:
-    - /etc/sysconfig/atomic-openshift-master
-    - /etc/sysconfig/openvswitch
-
-  - find: path={{ item }} file_type=file
-    register: files
-    with_items:
-    - "{{ master_dirs }}"
-
-  - find: path={{ item }} file_type=directory
-    register: directories
-    with_items:
-    - "{{ master_dirs }}"
-
-  - file: path={{ item.1.path }} state=absent
-    with_subelements:
-    - "{{ files.results | default([]) }}"
-    - files
-
-  - file: path={{ item.1.path }} state=absent
-    with_subelements:
-    - "{{ directories.results | default([]) }}"
-    - files
-
-  - set_fact:
-      client_users: "{{ [ansible_ssh_user, 'root'] | unique }}"
-
-  - name: Remove client kubeconfigs
-    file:
-      path: "~{{ item }}/.kube"
-      state: absent
-    with_items:
-    - "{{ client_users }}"
-
-  - name: Remove remaining files
-    file: path={{ item }} state=absent
-    with_items:
-    - /etc/ansible/facts.d/openshift.fact
-    - /etc/corosync
-    - /etc/openshift
-    - /etc/openshift-sdn
-    - /etc/systemd/system/atomic-openshift-master.service
-    - /etc/systemd/system/atomic-openshift-master-api.service
-    - /etc/systemd/system/atomic-openshift-master-controllers.service
-    - /etc/systemd/system/origin-master.service
-    - /etc/systemd/system/origin-master-api.service
-    - /etc/systemd/system/origin-master-controllers.service
-    - /etc/systemd/system/openvswitch.service
-    - /etc/sysconfig/atomic-openshift-master-api
-    - /etc/sysconfig/atomic-openshift-master-controllers
-    - /etc/sysconfig/origin-master
-    - /etc/sysconfig/origin-master-api
-    - /etc/sysconfig/origin-master-controllers
-    - /etc/sysconfig/openshift-master
-    - /etc/sysconfig/origin-master
-    - /etc/sysconfig/origin-master-api
-    - /etc/sysconfig/origin-master-controllers
-    - /usr/share/openshift/examples
-    - /usr/lib/systemd/system/atomic-openshift-master-api.service
-    - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
-    - /usr/lib/systemd/system/origin-master-api.service
-    - /usr/lib/systemd/system/origin-master-controllers.service
-    - /usr/local/bin/openshift
-    - /usr/local/bin/oadm
-    - /usr/local/bin/oc
-    - /usr/local/bin/kubectl
-    - /etc/flannel
-
-  # Since we are potentially removing the systemd unit files for separated
-  # master-api and master-controllers services, so we need to reload the
-  # systemd configuration manager
-  - name: Reload systemd manager configuration
-    command: systemctl daemon-reload
-
-- hosts: etcd
-  become: yes
-  vars:
-    etcd_dirs:
-    - "/etc/etcd"
-    - "/var/lib/etcd"
-  tasks:
-  - name: unmask services
-    command: systemctl unmask "{{ item }}"
-    changed_when: False
-    failed_when: False
-    with_items:
-    - etcd
-    - etcd3
-    - firewalld
-
-  - name: Stop additional atomic services
-    service: name={{ item }} state=stopped
-    when: openshift_is_containerized | bool
-    with_items:
-    - etcd_container
-    failed_when: false
-
-  - name: Remove packages
-    package: name={{ item }} state=absent
-    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
-    with_items:
-    - etcd
-    - etcd3
-    register: result
-    until: result is succeeded
-
-  - shell: systemctl reset-failed
-    changed_when: False
-
-  - shell: systemctl daemon-reload
-    changed_when: False
-
-  - find: path={{ item }} file_type=file
-    register: files
-    with_items:
-    - "{{ etcd_dirs }}"
-
-  - find: path={{ item }} file_type=directory
-    register: directories
-    with_items:
-    - "{{ etcd_dirs }}"
-
-  - file: path={{ item.1.path }} state=absent
-    with_subelements:
-    - "{{ files.results | default([]) }}"
-    - files
-
-  - file: path={{ item.1.path }} state=absent
-    with_subelements:
-    - "{{ directories.results | default([]) }}"
-    - files
-
-  # Intenationally using rm command over file module because if someone had mounted a filesystem
-  # at /var/lib/etcd then the contents was not removed correctly
-  - name: Remove etcd data
-    shell: rm -rf /var/lib/etcd/*
-    args:
-      warn: no
-    failed_when: false
-
-  - name: Remove remaining files
-    file: path={{ item }} state=absent
-    with_items:
-    - /etc/ansible/facts.d/openshift.fact
-    - /etc/systemd/system/etcd_container.service
-    - /etc/profile.d/etcdctl.sh
-
-- hosts: lb
-  become: yes
-  tasks:
-  - name: unmask services
-    command: systemctl unmask "{{ item }}"
-    changed_when: False
-    failed_when: False
-    with_items:
-    - firewalld
-
-  - name: Remove packages
-    package: name={{ item }} state=absent
-    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
-    with_items:
-    - haproxy
-    register: result
-    until: result is succeeded
-
-  - shell: systemctl reset-failed
-    changed_when: False
-
-  - shell: systemctl daemon-reload
-    changed_when: False
-
-  - name: Remove remaining files
-    file: path={{ item }} state=absent
-    with_items:
-    - /etc/ansible/facts.d/openshift.fact
-    - /var/lib/haproxy/stats
-    # Here we remove only limits.conf rather than directory, as users may put their files.
-    # - /etc/systemd/system/haproxy.service.d
-    - /etc/systemd/system/haproxy.service.d/limits.conf
-    - /etc/systemd/system/haproxy.service
+- import_playbook: uninstall_openshift.yml
+- import_playbook: uninstall_docker.yml
+  when: openshift_uninstall_docker | default(False) | bool

+ 49 - 0
playbooks/adhoc/uninstall_docker.yml

@@ -0,0 +1,49 @@
+# This stops and removes docker and its configuration as installed by
+# ansible.  This includes:
+#
+#    the docker service (stopped and disabled)
+#    docker storage configuration (docker-storage-setup --reset)
+#    /etc/docker*
+#    /etc/sysconfig/docker*
+#    /etc/systemd/system/docker*
+---
+- hosts: OSEv3:children
+  become: yes
+  tasks:
+  - name: Detecting Operating System
+    shell: ls /run/ostree-booted
+    ignore_errors: yes
+    failed_when: false
+    register: ostree_output
+
+  # Since we're not calling openshift_facts we'll do this for now
+  - set_fact:
+      openshift_is_atomic: "{{ ostree_output.rc == 0 }}"
+      openshift_is_containerized: "{{ ostree_output.rc == 0 or containerized | default(false) | bool }}"
+
+- hosts: nodes:masters
+  become: yes
+  tasks:
+
+  - name: Stop docker service
+    service:
+      name: docker
+      state: stopped
+      enabled: no
+    failed_when: false
+    register: l_docker_restart_docker_in_pb_result
+    until: not (l_docker_restart_docker_in_pb_result is failed)
+    retries: 3
+    delay: 30
+
+  - name: Reset docker-storage-setup
+    shell: docker-storage-setup --reset
+    failed_when: False
+
+  - name: rm -rf docker config files
+    shell: "rm {{ item }} -rf"
+    failed_when: False
+    with_items:
+    - /etc/docker*
+    - /etc/sysconfig/docker*
+    - /etc/systemd/system/docker*

+ 573 - 0
playbooks/adhoc/uninstall_openshift.yml

@@ -0,0 +1,573 @@
+# This deletes *ALL* Origin and OpenShift Enterprise content installed by
+# ansible.  This includes:
+#
+#    configuration
+#    containers
+#    example templates and imagestreams
+#    images
+#    RPMs
+---
+- hosts: OSEv3:children
+  become: yes
+  tasks:
+  - name: Detecting Operating System
+    shell: ls /run/ostree-booted
+    ignore_errors: yes
+    failed_when: false
+    register: ostree_output
+
+  # Since we're not calling openshift_facts we'll do this for now
+  - set_fact:
+      openshift_is_atomic: "{{ ostree_output.rc == 0 }}"
+      openshift_is_containerized: "{{ ostree_output.rc == 0 or containerized | default(false) | bool }}"
+
+# Stop services on all hosts prior to removing files.
+- hosts: nodes
+  become: yes
+  tasks:
+  - name: Remove dnsmasq dispatcher
+    file:
+      path: "{{ item }}"
+      state: absent
+    with_items:
+    - /etc/dnsmasq.d/origin-dns.conf
+    - /etc/dnsmasq.d/origin-upstream-dns.conf
+    - /etc/dnsmasq.d/openshift-ansible.conf
+    - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
+  - service:
+      name: NetworkManager
+      state: restarted
+  - name: Stop services
+    service: name={{ item }} state=stopped
+    with_items:
+    - atomic-openshift-node
+    - openshift-node
+    - origin-node
+    failed_when: false
+  - name: Stop OVS service
+    service: name=openvswitch state=stopped
+    failed_when: false
+    when: openshift_use_openshift_sdn | default(True) | bool
+
+- hosts: masters
+  become: yes
+  tasks:
+  - name: Stop services
+    service: name={{ item }} state=stopped
+    with_items:
+    - atomic-openshift-master
+    - atomic-openshift-master-api
+    - atomic-openshift-master-controllers
+    - openshift-master
+    - openshift-master-api
+    - openshift-master-controllers
+    - origin-master
+    - origin-master-api
+    - origin-master-controllers
+    failed_when: false
+
+- hosts: etcd
+  become: yes
+  tasks:
+  - name: Stop services
+    service: name={{ item }} state=stopped
+    with_items:
+    - etcd
+    failed_when: false
+
+- hosts: lb
+  become: yes
+  tasks:
+  - name: Stop services
+    service: name={{ item }} state=stopped
+    with_items:
+    - haproxy
+    failed_when: false
+
+- hosts: nodes
+  become: yes
+  vars:
+    node_dirs:
+    - "/etc/origin"
+    - "/var/lib/origin"
+  tasks:
+  - name: unmask services
+    command: systemctl unmask "{{ item }}"
+    changed_when: False
+    failed_when: False
+    with_items:
+    - firewalld
+
+  - block:
+    - block:
+      - name: Remove packages
+        package: name={{ item }} state=absent
+        with_items:
+        - atomic-openshift
+        - atomic-openshift-clients
+        - atomic-openshift-excluder
+        - atomic-openshift-docker-excluder
+        - atomic-openshift-node
+        - atomic-openshift-sdn-ovs
+        - cockpit-bridge
+        - cockpit-docker
+        - cockpit-system
+        - cockpit-ws
+        - kubernetes-client
+        - openshift
+        - openshift-node
+        - openshift-sdn
+        - openshift-sdn-ovs
+        - origin
+        - origin-excluder
+        - origin-docker-excluder
+        - origin-clients
+        - origin-node
+        - origin-sdn-ovs
+        - tuned-profiles-atomic-openshift-node
+        - tuned-profiles-origin-node
+        register: result
+        until: result is succeeded
+
+      - name: Remove OVS package
+        package: name=openvswitch state=absent
+        register: result
+        until: result is succeeded
+        when: openshift_use_openshift_sdn | default(True) | bool
+
+      - name: Remove flannel package
+        package: name=flannel state=absent
+        when: openshift_use_flannel | default(false) | bool
+        register: result
+        until: result is succeeded
+      when: not openshift_is_atomic | bool
+
+    - shell: systemctl reset-failed
+      changed_when: False
+
+    - shell: systemctl daemon-reload
+      changed_when: False
+
+    - name: Remove br0 interface
+      shell: ovs-vsctl del-br br0
+      changed_when: False
+      failed_when: False
+      when: openshift_use_openshift_sdn | default(True) | bool
+
+    - name: Remove linux interfaces
+      shell: ip link del "{{ item }}"
+      changed_when: False
+      failed_when: False
+      with_items:
+      - lbr0
+      - vlinuxbr
+      - vovsbr
+      when: openshift_use_openshift_sdn | default(True) | bool
+
+    - name: Remove virtual devices
+      command: nmcli delete device "{{ item }}"
+      failed_when: False
+      with_items:
+      - tun0
+      when: openshift_use_openshift_sdn | default(True) | bool
+
+    when: openshift_remove_all | default(true) | bool
+
+  - shell: atomic uninstall "{{ item }}"-master-api
+    changed_when: False
+    failed_when: False
+    with_items:
+    - openshift-enterprise
+    - origin
+
+  - shell: atomic uninstall "{{ item }}"-master-controllers
+    changed_when: False
+    failed_when: False
+    with_items:
+    - openshift-enterprise
+    - origin
+
+  - shell: atomic uninstall "{{ item }}"-master
+    changed_when: False
+    failed_when: False
+    with_items:
+    - openshift-enterprise
+    - origin
+
+  - shell: atomic uninstall "{{ item }}"-node
+    changed_when: False
+    failed_when: False
+    with_items:
+    - openshift-enterprise
+    - origin
+
+  - shell: atomic uninstall "{{ item }}"
+    changed_when: False
+    failed_when: False
+    with_items:
+    - etcd
+
+  - shell: atomic uninstall "{{ item }}"
+    changed_when: False
+    failed_when: False
+    with_items:
+    - openvswitch
+    when: openshift_use_openshift_sdn | default(True) | bool
+
+  - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+    changed_when: False
+
+  - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+    changed_when: False
+    failed_when: False
+    with_items:
+    - openshift-enterprise
+    - origin
+
+  - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}'
+    changed_when: False
+    failed_when: False
+    register: exited_containers_to_delete
+    with_items:
+    - openshift3/ose
+    - openshift3/node
+    - openshift3/openvswitch
+    - openshift/origin
+
+  - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+    changed_when: False
+    failed_when: False
+    with_items: "{{ exited_containers_to_delete.results }}"
+
+  - name: Remove k8s_ containers
+    shell: docker ps -a -q -f name=k8s_ | xargs docker rm -f
+    failed_when: False
+
+  - block:
+    - block:
+      - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
+        changed_when: False
+        failed_when: False
+        register: images_to_delete
+        with_items:
+        - registry\.access\..*redhat\.com/openshift3
+        - registry\.qe\.openshift\.com/.*
+        - registry\.access\..*redhat\.com/rhel7/etcd
+        - docker.io/openshift
+
+      - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+        changed_when: False
+        failed_when: False
+        with_items: "{{ images_to_delete.results }}"
+      when: openshift_uninstall_images | default(True) | bool
+
+    - name: remove sdn drop files
+      file:
+        path: /run/openshift-sdn
+        state: absent
+
+    - name: Remove files owned by RPMs
+      file: path={{ item }} state=absent
+      with_items:
+      - /etc/sysconfig/openshift-node
+      - /run/openshift-sdn
+
+    - name: Remove files owned by OVS RPM
+      file: path=/etc/sysconfig/openvswitch state=absent
+      when: openshift_use_openshift_sdn | default(True) | bool
+
+    when: openshift_remove_all | default(True) | bool
+
+  - find: path={{ item }} file_type=file
+    register: files
+    with_items:
+    - "{{ node_dirs }}"
+
+  - find: path={{ item }} file_type=directory
+    register: directories
+    with_items:
+    - "{{ node_dirs }}"
+
+  - file: path={{ item.1.path }} state=absent
+    with_subelements:
+    - "{{ files.results | default([]) }}"
+    - files
+
+  - file: path={{ item.1.path }} state=absent
+    with_subelements:
+    - "{{ directories.results | default([]) }}"
+    - files
+
+  - shell: systemctl daemon-reload
+    changed_when: False
+
+  - name: Remove remaining files
+    file: path={{ item }} state=absent
+    with_items:
+    - /etc/ansible/facts.d/openshift.fact
+    - /etc/openshift
+    - /etc/openshift-sdn
+    - /etc/pki/ca-trust/source/anchors/openshift-ca.crt
+    - /etc/sysconfig/atomic-openshift-node
+    - /etc/sysconfig/atomic-openshift-node-dep
+    - /etc/sysconfig/openshift-node-dep
+    - /etc/sysconfig/origin-node
+    - /etc/sysconfig/origin-node
+    - /etc/sysconfig/origin-node-dep
+    - /etc/systemd/system/atomic-openshift-node-dep.service
+    - /etc/systemd/system/atomic-openshift-node.service
+    - /etc/systemd/system/atomic-openshift-node.service.wants
+    - /etc/systemd/system/origin-node-dep.service
+    - /etc/systemd/system/origin-node.service
+    - /etc/systemd/system/origin-node.service.wants
+
+  - name: Remove remaining OVS files
+    file: path={{ item }} state=absent
+    with_items:
+    - /etc/systemd/system/openvswitch.service
+    when: openshift_use_openshift_sdn | default(True) | bool
+
+  - name: Rebuild ca-trust
+    command: update-ca-trust
+
+- hosts: masters
+  become: yes
+  vars:
+    master_dirs:
+    - "/etc/origin"
+    - "/var/lib/origin"
+  tasks:
+  - name: unmask services
+    command: systemctl unmask "{{ item }}"
+    changed_when: False
+    failed_when: False
+    with_items:
+    - firewalld
+    - atomic-openshift-master
+
+  - name: Remove packages
+    package: name={{ item }} state=absent
+    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
+    with_items:
+    - atomic-openshift
+    - atomic-openshift-clients
+    - atomic-openshift-excluder
+    - atomic-openshift-docker-excluder
+    - atomic-openshift-master
+    - cockpit-bridge
+    - cockpit-docker
+    - cockpit-system
+    - cockpit-ws
+    - corosync
+    - kubernetes-client
+    - openshift
+    - openshift-master
+    - origin
+    - origin-clients
+    - origin-excluder
+    - origin-docker-excluder
+    - origin-master
+    register: result
+    until: result is succeeded
+
+  - shell: systemctl reset-failed
+    changed_when: False
+
+  - shell: systemctl daemon-reload
+    changed_when: False
+
+  - name: Remove files owned by RPMs
+    file: path={{ item }} state=absent
+    with_items:
+    - /etc/sysconfig/atomic-openshift-master
+    when: openshift_remove_all | default(True) | bool
+
+  - name: Remove files owned by OVS RPM
+    file: path={{ item }} state=absent
+    with_items:
+    - /etc/sysconfig/openvswitch
+    when:
+    - openshift_remove_all | default(True) | bool
+    - openshift_use_openshift_sdn | default(True) | bool
+
+  - find: path={{ item }} file_type=file
+    register: files
+    with_items:
+    - "{{ master_dirs }}"
+
+  - find: path={{ item }} file_type=directory
+    register: directories
+    with_items:
+    - "{{ master_dirs }}"
+
+  - file: path={{ item.1.path }} state=absent
+    with_subelements:
+    - "{{ files.results | default([]) }}"
+    - files
+
+  - file: path={{ item.1.path }} state=absent
+    with_subelements:
+    - "{{ directories.results | default([]) }}"
+    - files
+
+  - set_fact:
+      client_users: "{{ [ansible_ssh_user, 'root'] | unique }}"
+
+  - name: Remove client kubeconfigs
+    file:
+      path: "~{{ item }}/.kube"
+      state: absent
+    with_items:
+    - "{{ client_users }}"
+
+  - name: Remove remaining files
+    file: path={{ item }} state=absent
+    with_items:
+    - /etc/ansible/facts.d/openshift.fact
+    - /etc/corosync
+    - /etc/openshift
+    - /etc/openshift-sdn
+    - /etc/systemd/system/atomic-openshift-master.service
+    - /etc/systemd/system/atomic-openshift-master-api.service
+    - /etc/systemd/system/atomic-openshift-master-controllers.service
+    - /etc/systemd/system/origin-master.service
+    - /etc/systemd/system/origin-master-api.service
+    - /etc/systemd/system/origin-master-controllers.service
+    - /etc/sysconfig/atomic-openshift-master-api
+    - /etc/sysconfig/atomic-openshift-master-controllers
+    - /etc/sysconfig/origin-master
+    - /etc/sysconfig/origin-master-api
+    - /etc/sysconfig/origin-master-controllers
+    - /etc/sysconfig/openshift-master
+    - /etc/sysconfig/origin-master
+    - /etc/sysconfig/origin-master-api
+    - /etc/sysconfig/origin-master-controllers
+    - /usr/share/openshift/examples
+    - /usr/lib/systemd/system/atomic-openshift-master-api.service
+    - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
+    - /usr/lib/systemd/system/origin-master-api.service
+    - /usr/lib/systemd/system/origin-master-controllers.service
+    - /usr/local/bin/openshift
+    - /usr/local/bin/oadm
+    - /usr/local/bin/oc
+    - /usr/local/bin/kubectl
+    - /etc/flannel
+
+  - name: Remove remaining OVS files
+    file: path={{ item }} state=absent
+    with_items:
+    - /etc/systemd/system/openvswitch.service
+    when: openshift_use_openshift_sdn | default(True) | bool
+
+  # Since we are potentially removing the systemd unit files for separated
+  # master-api and master-controllers services, we need to reload the
+  # systemd configuration manager
+  - name: Reload systemd manager configuration
+    command: systemctl daemon-reload
+
+- hosts: etcd
+  become: yes
+  vars:
+    etcd_dirs:
+    - "/etc/etcd"
+    - "/var/lib/etcd"
+  tasks:
+  - name: unmask services
+    command: systemctl unmask "{{ item }}"
+    changed_when: False
+    failed_when: False
+    with_items:
+    - etcd
+    - etcd3
+    - firewalld
+
+  - name: Stop additional atomic services
+    service: name={{ item }} state=stopped
+    when: openshift_is_containerized | bool
+    with_items:
+    - etcd_container
+    failed_when: false
+
+  - name: Remove packages
+    package: name={{ item }} state=absent
+    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
+    with_items:
+    - etcd
+    - etcd3
+    register: result
+    until: result is succeeded
+
+  - shell: systemctl reset-failed
+    changed_when: False
+
+  - shell: systemctl daemon-reload
+    changed_when: False
+
+  - find: path={{ item }} file_type=file
+    register: files
+    with_items:
+    - "{{ etcd_dirs }}"
+
+  - find: path={{ item }} file_type=directory
+    register: directories
+    with_items:
+    - "{{ etcd_dirs }}"
+
+  - file: path={{ item.1.path }} state=absent
+    with_subelements:
+    - "{{ files.results | default([]) }}"
+    - files
+
+  - file: path={{ item.1.path }} state=absent
+    with_subelements:
+    - "{{ directories.results | default([]) }}"
+    - files
+
+  # Intentionally using the rm command over the file module because if someone had
+  # mounted a filesystem at /var/lib/etcd then the contents were not removed correctly
+  - name: Remove etcd data
+    shell: rm -rf /var/lib/etcd/*
+    args:
+      warn: no
+    failed_when: false
+
+  - name: Remove remaining files
+    file: path={{ item }} state=absent
+    with_items:
+    - /etc/ansible/facts.d/openshift.fact
+    - /etc/systemd/system/etcd_container.service
+    - /etc/profile.d/etcdctl.sh
+
+- hosts: lb
+  become: yes
+  tasks:
+  - name: unmask services
+    command: systemctl unmask "{{ item }}"
+    changed_when: False
+    failed_when: False
+    with_items:
+    - firewalld
+
+  - name: Remove packages
+    package: name={{ item }} state=absent
+    when: not openshift_is_atomic | bool and openshift_remove_all | default(True) | bool
+    with_items:
+    - haproxy
+    register: result
+    until: result is succeeded
+
+  - shell: systemctl reset-failed
+    changed_when: False
+
+  - shell: systemctl daemon-reload
+    changed_when: False
+
+  - name: Remove remaining files
+    file: path={{ item }} state=absent
+    with_items:
+    - /etc/ansible/facts.d/openshift.fact
+    - /var/lib/haproxy/stats
+    # Here we remove only limits.conf rather than directory, as users may put their files.
+    # - /etc/systemd/system/haproxy.service.d
+    - /etc/systemd/system/haproxy.service.d/limits.conf
+    - /etc/systemd/system/haproxy.service

+ 16 - 0
playbooks/aws/OWNERS

@@ -0,0 +1,16 @@
+# approval == this is a good idea /approve
+approvers:
+  - kwoodson
+  - abutcher
+  - michaelgugino
+  - mtnbikenc
+  - sdodson
+  - vrutkovs
+# review == this code is good /lgtm
+reviewers:
+  - kwoodson
+  - abutcher
+  - michaelgugino
+  - mtnbikenc
+  - sdodson
+  - vrutkovs

+ 3 - 1
playbooks/aws/openshift-cluster/build_ami.yml

@@ -27,13 +27,15 @@
   - name: set the user to perform installation
     set_fact:
       ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default(ansible_ssh_user) }}"
-      openshift_node_bootstrap: True
       openshift_node_image_prep_packages:
+      - cloud-init
       - cloud-utils-growpart
 
 # This is the part that installs all of the software and configs for the instance
 # to become a node.
 - import_playbook: ../../openshift-node/private/image_prep.yml
+  vars:
+    openshift_node_reboot_instance_before_cleanup: true
 
 - import_playbook: seal_ami.yml
   vars:

+ 2 - 3
playbooks/aws/openshift-cluster/install.yml

@@ -18,8 +18,7 @@
 - name: run the init
   import_playbook: ../../init/main.yml
 
+- import_playbook: ../../openshift-checks/private/install.yml
+
 - name: configure the control plane
   import_playbook: ../../common/private/control_plane.yml
-
-- name: ensure the masters are configured as nodes
-  import_playbook: ../../openshift-node/private/config.yml

+ 0 - 1
playbooks/aws/provisioning-inventory.example.ini

@@ -11,7 +11,6 @@ etcd
 openshift_deployment_type=origin
 openshift_cloudprovider_kind=aws
 
-openshift_master_bootstrap_enabled=True
 openshift_master_api_port=443
 
 openshift_hosted_router_wait=False

+ 3 - 0
playbooks/azure/BRANCH.md

@@ -0,0 +1,3 @@
+When a release branch is cut we need to perform the following actions:
+- update the playbooks/azure/openshift-cluster/group_vars/all/yum_repos.yml to reflect the new package location.
+- update the playbooks/azure/openshift-cluster/launch.yml to update the acs-engine's openshift.json parameters to match $release.

+ 8 - 0
playbooks/azure/OWNERS

@@ -0,0 +1,8 @@
+reviewers:
+- jim-minter
+- kwoodson
+- pweil-
+approvers:
+- jim-minter
+- kwoodson
+- pweil-

+ 2 - 0
playbooks/azure/README.md

@@ -0,0 +1,2 @@
+The playbooks and tasks under this directory are not supported for end-customer
+use.

+ 52 - 0
playbooks/azure/openshift-cluster/build_base_image.yml

@@ -0,0 +1,52 @@
+---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: calculate input image
+    command: az image list -g "{{ openshift_azure_input_image_ns }}" --query "[?starts_with(name, '{{ openshift_azure_input_image_prefix }}-') && tags.valid=='true'] | sort_by(@, &name) | [-1]"
+    register: input_image
+
+  - name: provision resource group
+    import_tasks: tasks/provision_instance.yml
+
+- hosts: nodes
+  tasks:
+  - name: calculate yum repositories
+    set_fact:
+      openshift_additional_repos: "{{ azure_base_repos[ansible_distribution] }}"
+
+  - name: configure yum repositories
+    import_tasks: tasks/yum_certs.yml
+
+  - name: update rpms
+    import_role:
+      name: os_update_latest
+    vars:
+      os_update_latest_reboot: True
+
+  - name: deconfigure yum repositories
+    import_tasks: tasks/remove_yum.yml
+
+  - name: run waagent deprovision
+    command: waagent -deprovision+user -force
+    args:
+      chdir: /
+
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: create image
+    import_tasks: tasks/create_image_from_vm.yml
+    vars:
+      image_resource_group: "{{ openshift_azure_output_image_ns }}"
+      image_name: "{{ openshift_azure_output_image_name }}"
+      image_tags:
+        root_image: "{{ (input_image.stdout | from_json).name }}"
+        kernel: "{{ hostvars[groups['nodes'][0]]['ansible_kernel'] }}"
+        valid: "true"
+
+  - name: create blob
+    import_tasks: tasks/create_blob_from_vm.yml
+    vars:
+      image_name: "{{ openshift_azure_output_image_name }}"
+    when: openshift_azure_storage_account is defined and openshift_azure_storage_account

+ 116 - 0
playbooks/azure/openshift-cluster/build_node_image.yml

@@ -0,0 +1,116 @@
+---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: calculate input image
+    command: az image list -g "{{ openshift_azure_input_image_ns }}" --query "[?starts_with(name, '{{ openshift_azure_input_image_prefix }}-') && tags.valid=='true'] | sort_by(@, &name) | [-1]"
+    register: input_image
+
+  - name: provision resource group
+    import_tasks: tasks/provision_instance.yml
+    vars:
+      create_args: --data-disk-sizes-gb 128
+
+- hosts: nodes
+  tasks:
+  - set_fact:
+      openshift_deployment_type: "{{ 'openshift-enterprise' if ansible_distribution == 'RedHat' else 'origin' }}"
+      openshift_enable_origin_repo: False
+      skip_node_svc_handlers: True
+      openshift_additional_repos: "{{ azure_node_repos[ansible_distribution] }}"
+      openshift_node_open_ports: [{"service":"Node exporter metrics", "port":"9100/tcp"}]
+      openshift_node_include_control_plane: True
+      openshift_control_plane_static_pod_location: /etc/origin/node/disabled/
+      openshift_node_group_cloud_provider: azure
+      openshift_node_image_prep_packages:
+      - strace
+      - tcpdump
+      etcd_ip: ETCD_IP_REPLACE
+
+  - name: add insights-client to package installs when on rhel
+    set_fact:
+      openshift_node_image_prep_packages: "{{ openshift_node_image_prep_packages | union(['insights-client']) }}"
+    when: openshift_deployment_type == 'openshift-enterprise'
+
+  - set_fact:
+      openshift_additional_repos: "{{ openshift_additional_repos + [{'name': 'install_repo', 'baseurl': openshift_azure_install_repo, 'enabled': true, 'gpgcheck': false}] }}"
+    when: openshift_azure_install_repo is defined and openshift_azure_install_repo
+
+  - name: install centos-release-paas-common rpm
+    yum:
+      name: centos-release-paas-common
+      state: present
+    when: ansible_distribution == "CentOS"
+
+  - name: configure yum repositories
+    import_tasks: tasks/yum_certs.yml
+
+  - name: update rpms
+    import_role:
+      name: os_update_latest
+    vars:
+      os_update_latest_reboot: True
+
+- name: install openshift
+  import_playbook: ../../openshift-node/private/image_prep.yml
+  vars:
+    etcd_image: "{{ etcd_image_dict[openshift_deployment_type] }}"
+
+- hosts: nodes
+  tasks:
+  - name: deconfigure yum repositories
+    import_tasks: tasks/remove_yum.yml
+
+  - name: add get-node-logs script
+    copy:
+      src: files/get-node-logs
+      dest: /usr/local/bin/get-node-logs
+      owner: root
+      group: root
+      mode: 0755
+
+  - name: record installed rpms
+    yum:
+      list: installed
+    register: yum
+
+  - name: disable waagent data disk management
+    lineinfile:
+      path: /etc/waagent.conf
+      regexp: "{{ item.regexp }}"
+      line: "{{ item.line }}"
+    with_items:
+    - { regexp: '^ResourceDisk\.Format=', line: 'ResourceDisk.Format=n' }
+
+  - name: persist oreg_url
+    copy:
+      dest: "/etc/origin/oreg_url"
+      content: "{{ oreg_url }}"
+    when: oreg_url is defined
+
+  - name: run waagent deprovision
+    shell: sleep 2 && waagent -deprovision+user -force
+    async: 1
+    poll: 0
+
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - set_fact:
+      openshift_rpm: "{{ hostvars[groups['nodes'][0]]['yum'].results | selectattr('name', 'match', '^(origin|atomic-openshift)$') | first }}"
+
+  - name: create image
+    import_tasks: tasks/create_image_from_vm.yml
+    vars:
+      image_resource_group: "{{ openshift_azure_output_image_ns }}"
+      image_name: "{{ openshift_azure_output_image_name }}"
+      image_tags:
+        base_image: "{{ (input_image.stdout | from_json).name }}"
+        kernel: "{{ hostvars[groups['nodes'][0]]['ansible_kernel'] }}"
+        openshift: "{{ openshift_rpm.name }}-{{ openshift_rpm.version }}-{{ openshift_rpm.release }}.{{ openshift_rpm.arch }}"
+
+  - name: create blob
+    import_tasks: tasks/create_blob_from_vm.yml
+    vars:
+      image_name: "{{ openshift_azure_output_image_name }}"
+    when: openshift_azure_storage_account is defined and openshift_azure_storage_account

+ 53 - 0
playbooks/azure/openshift-cluster/create_and_publish_offer.md

@@ -0,0 +1,53 @@
+# Create and publish offer
+
+Note:  This document is not intended for general consumption.
+
+
+This document outlines the process in which to publish an image to the cloudpartner.azure.com portal.
+
+# Publish image
+
+The steps to build the image are as follows:
+
+## Step 1:
+
+Build the OpenShift image using the build_node_image.yml playbook.  Once this playbook completes it should
+produce a storage blob that points to the image.  This blob exists inside the resource group named images,
+the storage account named openshiftimages, and the container named images.
+
+```
+$ ansible-playbook build_node_image.yml
+```
+
+## Step 2:
+
+This step performs the following work:
+- generates a storage blob url
+- generates a sas url for the storage container
+- any current operations on the offer are cancelled (in case of any updates)
+- if an offer exists, the current offer will be fetched and updated
+- if an offer does not exist, the offer will be created
+- a publish is called on the offer
+
+```
+$ ansible-playbook  create_and_publish_offer.yml -e @publishingvars.yml
+```
+
+Example publishingvars.yml
+```
+openshift_azure_container: images
+openshift_azure_storage_account: openshiftimages
+image_name: rhel7-3.9-201805211419
+openshift_azure_image_publish_emails:
+- your.name@email.com
+openshift_azure_templ_allowed_subscriptions:
+- <subscription id1>
+- <subscription id2>
+openshift_azure_templ_restricted_audience_manual_entries:
+- type: subscriptionId
+  id: <subscription id1>
+  description: <description1>
+- type: subscriptionId
+  id: <subscription id2>
+  description: <description2>

+ 98 - 0
playbooks/azure/openshift-cluster/create_and_publish_offer.yml

@@ -0,0 +1,98 @@
+---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: ensure libraries are available
+    import_role:
+      name: lib_utils
+
+  - name: set expire date
+    set_fact:
+      sas_expire: "{{ lookup('pipe', 'date -d \"14 days\" +\"%Y-%m-%dT%H:%M:00Z\"') }}"
+      disk: "{{ image_name }}.vhd"
+
+  - name: fetch the image for the tags
+    command: |
+      az image list --resource-group images  -o json --query "[?name=='{{ image_name }}'].tags"
+    register: imagetags
+
+  - name: generate storage blob url
+    command: |
+      az storage blob url -c {{ openshift_azure_container }} --account-name {{ openshift_azure_storage_account }} -n {{ disk }} --output=json
+    register: bloburl
+
+  - name: generate sas url for the container
+    command: |
+      az storage container generate-sas --name {{ openshift_azure_container }} --account-name {{ openshift_azure_storage_account }} --permissions rl --expiry {{ sas_expire }} --output=json
+    register: sasurl
+
+  - name: set the sas URLS
+    set_fact:
+      openshift_azure_sas_url: "{{ bloburl.stdout|from_json + '?' + sasurl.stdout|from_json }}"
+
+  - name: set the image tags
+    set_fact:
+      image_tags: "{{ (imagetags.stdout | from_json)[0] }}"
+
+  - name: set the image_type
+    set_fact:
+      image_type: "{{ 'rhel' if 'rhel' in image_tags.base_image else 'centos' }}"
+      image_x: "{{ image_tags.openshift.split('.')[0] }}"
+      image_y: "{{ image_tags.openshift.split('.')[1] }}"
+      image_z: "{{ image_tags.openshift.split('.')[2] }}"
+      image_vm_images: |-
+        { "{{ image_x }}{{ image_y }}.{{ image_z }}.{{ (image_name | regex_search('([0-9]{12})'))[:8] }}": {"osVhdUrl": "{{ openshift_azure_sas_url }}" } }
+
+  - name: fetch the current offer and update the versions
+    oo_azure_rm_publish_image_facts:
+      offer: osa
+    register: offerout
+
+  - when:
+    - offerout['status_code'] != 404
+    block:
+    - debug:
+        msg: "{{ offerout }}"
+        verbosity: 1
+
+    - debug:
+        msg: "{{ offerout['data']['definition']['plans'][0]['microsoft-azure-virtualmachines.vmImages'] }}"
+        verbosity: 1
+
+    - debug:
+        msg: "{{ lookup('template', 'offer.yml.j2') }}"
+        verbosity: 1
+
+    - name: bring along the previous offer versions and combine with incoming
+      yedit:
+        content: "{{ lookup('template', 'offer.yml.j2') }}"
+        key: "definition#plans[0]#microsoft-azure-virtualmachines.vmImages#{{ item.key }}"
+        value: "{{ item.value }}"
+        separator: '#'
+      with_dict: "{{ offerout['data']['definition']['plans'][0]['microsoft-azure-virtualmachines.vmImages'] }}"
+      when:
+      - "'data' in offerout"
+      - "'definition' in offerout['data']"
+      register: yeditout
+
+    - debug:
+        msg: "{{ yeditout }}"
+        verbosity: 1
+
+    # this cancel operation returns a 202 whether it cancelled or not.
+    - name: cancel publish operation
+      oo_azure_rm_publish_image:
+        offer: osa
+        state: cancel_op
+
+  - name: create|update an offer in cloudpartner portal
+    oo_azure_rm_publish_image:
+      offer: osa
+      offer_data: "{{ (lookup('template', 'offer.yml.j2') | from_yaml) if 'skipped' in yeditout and yeditout.skipped or not yeditout.changed else yeditout.results[0].result[0].edit }}"
+      force: True
+
+  - name: publish this offer
+    oo_azure_rm_publish_image:
+      state: publish
+      offer: osa
+      emails: "{{ openshift_azure_image_publish_emails }}"

+ 12 - 0
playbooks/azure/openshift-cluster/deprovision.yml

@@ -0,0 +1,12 @@
+---
+# Warning, use with caution, this will remove all resources
+# from the group.
+
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: delete resource group
+    azure_rm_resourcegroup:
+      name: "{{ openshift_azure_resource_group_name }}"
+      state: absent
+      force: yes  # removes all resources within the group

+ 46 - 0
playbooks/azure/openshift-cluster/files/get-node-logs

@@ -0,0 +1,46 @@
+#!/bin/bash
+set -x
+
+LOG_DIR=$(mktemp -d)
+
+trap "{ rm -rf $LOG_DIR }" EXIT
+
+SYSTEMD_SERVICES=("dnsmasq NetworkManager atomic-openshift-node origin-node")
+
+since_docker="24h"
+since_systemd="24 hour ago"
+
+# Dump systemd services logs
+for name in ${SYSTEMD_SERVICES} ; do
+  dump_file_path=${LOG_DIR}/${name}.log
+  journalctl -u ${name}.service --since "${since_systemd}" > $dump_file_path
+done
+
+# Dump command output
+ip a > ${LOG_DIR}/ip_a.log
+netstat -antu > ${LOG_DIR}/netstat_antu.log
+dmesg > ${LOG_DIR}/dmesg.log
+route -n > ${LOG_DIR}/route_n.log
+ss -ntpl > ${LOG_DIR}/ss_ntpl.log
+cat /etc/resolv.conf > ${LOG_DIR}/resolve_conf.log
+df -h > ${LOG_DIR}/df_h.log
+vmstat 2 20 > ${LOG_DIR}/vmstat_2_20.log
+mount > ${LOG_DIR}/mount.log
+
+for table in filter nat; do
+    iptables -t $table -nvL > ${LOG_DIR}/iptables_$table.log
+done
+
+# Dump system journal
+journalctl --since "${since_systemd}" > ${LOG_DIR}/journalctl.log
+
+# Dump sdn container logs
+uid=$(docker ps -l -a --filter "label=io.kubernetes.container.name=sdn" --format '{{ .Label "io.kubernetes.pod.uid" }}')
+if [[ ! -z "${uid}" ]]; then
+  container=$(docker ps -l -a -q --filter "label=io.kubernetes.pod.uid=${uid}" --filter "label=io.kubernetes.container.name=sdn")
+  docker logs --since ${since_docker} "${container}" >& ${LOG_DIR}/sdn.log
+fi
+
+prefix=osa_node_$(hostname)_$(date +%Y%m%d%H%M%S)
+tar -czPf $prefix.tar.gz  --xform="s|^$LOG_DIR|$prefix|" $LOG_DIR
+echo $prefix.tar.gz

+ 13 - 0
playbooks/azure/openshift-cluster/group_vars/all/image_publish.yml

@@ -0,0 +1,13 @@
+---
+azure_image_publish:
+  rhel:
+    templ_plan_id: osa_{{ image_x }}{{ image_y }}
+    templ_sku_title: Openshift {{ image_x }}.{{ image_y }} on Azure
+    templ_os_type: Red Hat Enterprise Linux
+    templ_os: Red Hat Enterprise Linux 7
+
+  centos:
+    templ_plan_id: origin_{{ image_x }}{{ image_y }}
+    templ_sku_title: Openshift Origin {{ image_x }}.{{ image_y }} on Azure
+    templ_os_type: CentOS
+    templ_os: CentOS 7

+ 62 - 0
playbooks/azure/openshift-cluster/group_vars/all/yum_repos.yml

@@ -0,0 +1,62 @@
+---
+azure_base_repos:
+  RedHat:
+  - name: rhel-server-7-releases
+    baseurl: https://mirror.openshift.com/libra/rhui-rhel-server-7-releases/
+    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+    sslclientcert: /var/lib/yum/client-cert.pem
+    sslclientkey: /var/lib/yum/client-key.pem
+    enabled: yes
+
+  - name: rhel-server-7-extras
+    baseurl: https://mirror.openshift.com/libra/rhui-rhel-server-7-extras/
+    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+    sslclientcert: /var/lib/yum/client-cert.pem
+    sslclientkey: /var/lib/yum/client-key.pem
+    enabled: yes
+
+  CentOS: []
+
+azure_node_repos:
+  RedHat:
+  - name: rhel-server-7-releases
+    baseurl: https://mirror.openshift.com/libra/rhui-rhel-server-7-releases/
+    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+    sslclientcert: /var/lib/yum/client-cert.pem
+    sslclientkey: /var/lib/yum/client-key.pem
+    enabled: yes
+
+  - name: rhel-server-7-extras
+    baseurl: https://mirror.openshift.com/libra/rhui-rhel-server-7-extras/
+    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+    sslclientcert: /var/lib/yum/client-cert.pem
+    sslclientkey: /var/lib/yum/client-key.pem
+    enabled: yes
+
+  - name: rhel-7-fast-datapath-rpms
+    baseurl: https://mirror.openshift.com/enterprise/rhel/rhel-7-fast-datapath-rpms/
+    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+    sslclientcert: /var/lib/yum/client-cert.pem
+    sslclientkey: /var/lib/yum/client-key.pem
+    enabled: yes
+
+  - name: rhel-7-server-ansible-2.4-rpms
+    baseurl: https://mirror.openshift.com/enterprise/rhel/rhel-7-server-ansible-2.4-rpms/
+    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+    sslclientcert: /var/lib/yum/client-cert.pem
+    sslclientkey: /var/lib/yum/client-key.pem
+    enabled: yes
+
+  #- name: rhel-server-7-ose-3.10
+  #  baseurl: https://mirror.openshift.com/libra/rhui-rhel-server-7-ose-3.10/
+  #  gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+  #  sslclientcert: /var/lib/yum/client-cert.pem
+  #  sslclientkey: /var/lib/yum/client-key.pem
+  #  enabled: yes
+
+  CentOS:
+  # TODO: should be using a repo which only provides prerequisites
+  - name: openshift-origin
+    baseurl: http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin/
+    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+    enabled: yes

+ 131 - 0
playbooks/azure/openshift-cluster/launch.yml

@@ -0,0 +1,131 @@
+---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - import_role:
+      name: lib_utils
+
+  - name: create temporary directory
+    tempfile:
+      state: directory
+    register: tmp
+
+  - name: download acs-engine
+    get_url:
+      url: "{{ item }}"
+      dest: "{{ tmp.path }}/"
+    with_list:
+    - "http://acs-engine-build-azure.svc.ci.openshift.org/acs-engine"
+    - "http://acs-engine-build-azure.svc.ci.openshift.org/openshift.json"
+
+  - name: make acs-engine executable
+    file:
+      path: "{{ tmp.path }}/acs-engine"
+      mode: 0755
+
+  - name: configure acs-engine
+    yedit:
+      content_type: json
+      src: "{{ tmp.path }}/openshift.json"
+      edits:
+      - key: properties.orchestratorProfile.openShiftConfig.clusterUsername
+        value: demo
+      - key: properties.orchestratorProfile.openShiftConfig.clusterPassword
+        value: "{{ 16 | lib_utils_oo_random_word }}"
+      - key: properties.orchestratorProfile.orchestratorVersion
+        value: unstable
+      # azProfile
+      - key: properties.azProfile.tenantId
+        value: "{{ lookup('env', 'AZURE_TENANT') }}"
+      - key: properties.azProfile.subscriptionId
+        value: "{{ lookup('env', 'AZURE_SUBSCRIPTION_ID') }}"
+      - key: properties.azProfile.resourceGroup
+        value: "{{ openshift_azure_resource_group_name }}"
+      - key: properties.azProfile.location
+        value: "{{ openshift_azure_resource_location }}"
+      # masterProfile
+      - key: properties.masterProfile.dnsPrefix
+        value: "a{{ 16 | lib_utils_oo_random_word }}a"
+      - key: properties.masterProfile.imageReference.name
+        value: "{{ openshift_azure_input_image_name }}"
+      - key: properties.masterProfile.imageReference.resourceGroup
+        value: "{{ openshift_azure_input_image_ns }}"
+      - key: properties.masterProfile.vmSize
+        value: "{{ openshift_azure_vm_size | default('Standard_D4s_v3') }}"
+      # agentpool compute
+      - key: properties.agentPoolProfiles[0].imageReference.name
+        value: "{{ openshift_azure_input_image_name }}"
+      - key: properties.agentPoolProfiles[0].imageReference.resourceGroup
+        value: "{{ openshift_azure_input_image_ns }}"
+      - key: properties.agentPoolProfiles[0].vmSize
+        value: "{{ openshift_azure_vm_size | default('Standard_D4s_v3') }}"
+      # agentpool infra
+      - key: properties.agentPoolProfiles[1].imageReference.name
+        value: "{{ openshift_azure_input_image_name }}"
+      - key: properties.agentPoolProfiles[1].imageReference.resourceGroup
+        value: "{{ openshift_azure_input_image_ns }}"
+      - key: properties.agentPoolProfiles[1].vmSize
+        value: "{{ openshift_azure_vm_size | default('Standard_D4s_v3') }}"
+      # linuxprofile
+      - key: properties.linuxProfile.adminUsername
+        value: "cloud-user"
+      - key: properties.linuxProfile.ssh.publicKeys[0].keyData
+        value: "{{ openshift_azure_vm_ssh_public_key }}"
+      # serviceprincipal
+      - key: properties.servicePrincipalProfile.clientId
+        value: "{{ lookup('env', 'AZURE_CLIENT_ID') }}"
+      - key: properties.servicePrincipalProfile.secret
+        value: "{{ lookup('env', 'AZURE_SECRET') }}"
+
+  - name: run acs-engine deploy
+    command: |
+      {{ tmp.path }}/acs-engine deploy \
+        --resource-group {{ openshift_azure_resource_group_name }} \
+        --location {{ openshift_azure_resource_location }} \
+        --subscription-id {{ lookup('env', 'AZURE_SUBSCRIPTION_ID') }} \
+        --auth-method client_secret \
+        --client-id {{ lookup('env', 'AZURE_CLIENT_ID') }} \
+        --client-secret {{ lookup('env', 'AZURE_SECRET') }} \
+        --output-directory {{ tmp.path }}/deploy \
+        {{ tmp.path }}/openshift.json
+    no_log: true
+    ignore_errors: yes
+    register: deploy
+
+  - debug:
+      msg: "{{ deploy.stdout }}"
+
+  - debug:
+      msg: "{{ deploy.stderr }}"
+
+  # This code attempts to persist the data to /var/tmp which is bind
+  # mounted into the calling container.  This enables the CI to reuse
+  # the cluster created in the previous steps to perform the e2e tests
+  - name: persist the acs-engine generated artifacts
+    copy:
+      src: "{{ tmp.path }}/deploy"
+      dest: /var/tmp/
+    when: openshift_ci_persist_artifacts | default(False)
+
+  - name: delete temporary directory
+    file:
+      path: "{{ tmp.path }}"
+      state: absent
+
+  - block:
+    - name: get azure deployment message
+      command: >
+        az group deployment list
+        -g "{{ openshift_azure_resource_group_name }}"
+        --query "[0].properties.additionalProperties.error.details[0].message"
+        -o tsv
+      register: message
+
+    - debug:
+        msg: "{{ (message.stdout | from_json).error.details[0].message }}"
+      when: message.stdout != ""
+
+    - assert:
+        that: "{{ not deploy.failed }}"
+
+    when: deploy.failed

+ 59 - 0
playbooks/azure/openshift-cluster/provisioning_vars.yml.example

@@ -0,0 +1,59 @@
+---
+# resource group where temporary resources associated with playbook will be
+# placed
+openshift_azure_resource_group_name:
+
+# azure region where resource group will be created
+openshift_azure_resource_location: eastus
+
+# input image resource group
+openshift_azure_input_image_ns: images
+
+# input image prefix, needed by base and node image building playbooks,
+# e.g. centos7-root or centos7-base
+openshift_azure_input_image_prefix:
+
+# complete name of input image, needed by launch.yml playbook,
+# e.g. centos7-3.10-201806071434
+openshift_azure_input_image_name:
+
+# output image resource group
+openshift_azure_output_image_ns: images
+
+# complete name of output image, e.g. centos7-base-201806071412 or centos7-3.10-201806071434
+openshift_azure_output_image_name:
+
+# azure vm image size, defaults to Standard_D4s_v3
+openshift_azure_vm_size:
+
+# ssh public key for VMs created by playbook; private key must be accessible to
+# ansible
+openshift_azure_vm_ssh_public_key: ssh-rsa ...
+
+# additional yum repo containing origin rpms, used for PR testing
+#openshift_azure_install_repo: http://...
+
+# yum client certificate and key, used if building RHEL images
+#yum_client_cert_contents: |
+#  -----BEGIN CERTIFICATE-----
+#  ...
+#  -----END CERTIFICATE-----
+#yum_client_key_contents: |
+#  -----BEGIN RSA PRIVATE KEY-----
+#  ...
+#  -----END RSA PRIVATE KEY-----
+
+# alternative image registry, used if building OCP pre-release images
+#oreg_url: "registry.reg-aws.openshift.com:443/openshift3/ose-${component}:${version}"
+#oreg_auth_user: <USERNAME>
+#oreg_auth_password: <TOKEN>
+
+# optional storage account in which to place image blob
+#openshift_azure_storage_account:
+
+# resource group of storage account
+#openshift_azure_storage_account_ns:
+
+# container within storage account to hold image blob
+#openshift_azure_container:
+

+ 1 - 0
playbooks/azure/openshift-cluster/roles

@@ -0,0 +1 @@
+../../../roles

+ 9 - 0
playbooks/azure/openshift-cluster/tag_image_as_valid.yml

@@ -0,0 +1,9 @@
+---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: add valid tag to the image
+    shell: >
+      jsonrtag=$(az resource show -g '{{ openshift_azure_input_image_ns }}' -n '{{ openshift_azure_input_image_name }}' --resource-type 'Microsoft.Compute/images' --query tags);
+      rt=$(echo $jsonrtag | tr -d '"{},' | sed 's/: /=/g');
+      az resource tag --tags $rt 'valid=true' -g '{{ openshift_azure_input_image_ns }}' -n '{{ openshift_azure_input_image_name }}' --resource-type 'Microsoft.Compute/images'

+ 48 - 0
playbooks/azure/openshift-cluster/tasks/create_blob_from_vm.yml

@@ -0,0 +1,48 @@
+---
+- name: get vm details
+  command: >
+    az vm show
+    -g "{{ openshift_azure_resource_group_name }}"
+    -n vm
+  register: vm
+
+- name: get storage account key
+  command: >
+    az storage account keys list
+    -g "{{ openshift_azure_storage_account_ns }}"
+    -n "{{ openshift_azure_storage_account }}"
+  register: keys
+
+- name: get disk sas url
+  command: >
+    az disk grant-access
+    --ids "{{ (vm.stdout | from_json).storageProfile.osDisk.managedDisk.id }}"
+    --duration-in-seconds 3600
+  register: sas
+
+- name: start copy
+  command: >
+    az storage blob copy start
+    --source-uri "{{ (sas.stdout | from_json).accessSas }}"
+    --account-name "{{ openshift_azure_storage_account }}"
+    --account-key "{{ (keys.stdout | from_json)[0].value }}"
+    --destination-container "{{ openshift_azure_container }}"
+    --destination-blob "{{ image_name }}.vhd"
+
+- name: get copy status
+  command: >
+    az storage blob show
+    --account-name "{{ openshift_azure_storage_account }}"
+    --account-key "{{ (keys.stdout | from_json)[0].value }}"
+    --container-name "{{ openshift_azure_container }}"
+    --name "{{ image_name }}.vhd"
+    --query "properties.copy.status"
+  register: status
+  until: status.stdout | from_json == "success"
+  retries: 120
+  delay: 30
+
+- name: revoke disk sas url
+  command: >
+    az disk revoke-access
+    --ids "{{ (vm.stdout | from_json).storageProfile.osDisk.managedDisk.id }}"

+ 66 - 0
playbooks/azure/openshift-cluster/tasks/create_image_from_vm.yml

@@ -0,0 +1,66 @@
+---
+- name: deallocate vm
+  command: >
+    az vm deallocate
+    -g "{{ openshift_azure_resource_group_name }}"
+    -n vm
+
+- name: generalize vm
+  command: >
+    az vm generalize
+    -g "{{ openshift_azure_resource_group_name }}"
+    -n vm
+
+- name: get vm details
+  command: >
+    az vm show
+    -g "{{ openshift_azure_resource_group_name }}"
+    -n vm
+  register: vm
+
+- name: create image resource group
+  azure_rm_resourcegroup:
+    name: "{{ image_resource_group }}"
+    location: "{{ openshift_azure_resource_location }}"
+
+- name: create image
+  command: >
+    az image create
+    -g "{{ image_resource_group }}"
+    -n "{{ image_name }}"
+    --source "{{ (vm.stdout | from_json).storageProfile.osDisk.managedDisk.id }}"
+    --os-type Linux
+
+- name: get input image tags
+  command: az image show -g "{{ openshift_azure_input_image_ns }}" -n "{{ (input_image.stdout | from_json).name }}"
+  register: input_image_tags
+
+- name: remove valid tag from input image tags
+  set_fact:
+    input_image_tags_no_valid: "{{ {} | combine({item.key: item.value}) }}"
+  when: item.key not in ['valid']
+  with_dict: "{{ (input_image_tags.stdout | from_json).tags }}"
+
+- name: calculate final tags
+  set_fact:
+    final_tags: "{{ input_image_tags_no_valid | default({}) | combine(image_tags) }}"
+
+- name: tag image
+  command: >
+    az resource tag
+    --resource-type Microsoft.Compute/images
+    -g "{{ image_resource_group }}"
+    -n "{{ image_name }}"
+    --tags {% for k in final_tags %}{{ k }}={{ final_tags[k] }} {% endfor %}
+
+- name: log image resource group and name
+  debug:
+    msg: "{{ item }}"
+  with_items:
+  - "Resource group: {{ image_resource_group }}"
+  - "Image name: {{ image_name }}"
+
+- name: log image tag information
+  debug:
+    msg: "{{ item.key }}: {{ item.value }}"
+  with_dict: "{{ final_tags }}"

+ 42 - 0
playbooks/azure/openshift-cluster/tasks/provision_instance.yml

@@ -0,0 +1,42 @@
+---
+- name: create resource group
+  azure_rm_resourcegroup:
+    name: "{{ openshift_azure_resource_group_name }}"
+    location: "{{ openshift_azure_resource_location }}"
+    tags:
+      now: "{{ lookup('pipe', 'date +%s') }}"
+
+- name: create vnet
+  azure_rm_virtualnetwork:
+    name: vnet
+    resource_group: "{{ openshift_azure_resource_group_name }}"
+    address_prefixes:
+    - 192.168.0.0/16
+
+- name: create subnet
+  azure_rm_subnet:
+    name: subnet
+    resource_group: "{{ openshift_azure_resource_group_name }}"
+    virtual_network: vnet
+    address_prefix: 192.168.0.0/24
+
+- name: create vm
+  command: >
+    az vm create
+    -n vm
+    -g "{{ openshift_azure_resource_group_name }}"
+    --size "{{ openshift_azure_vm_size | default('Standard_D4s_v3') }}"
+    --image "{{ (input_image.stdout | from_json).id }}"
+    --storage-sku Premium_LRS
+    --admin-username cloud-user
+    --ssh-dest-key-path /home/cloud-user/.ssh/authorized_keys
+    --ssh-key-value "{{ openshift_azure_vm_ssh_public_key }}"
+    {% if create_args is defined %}{{ create_args }}{% endif %}
+  register: vm
+
+- name: add vm to inventory
+  add_host:
+    groups: nodes
+    name: "{{ (vm.stdout | from_json).publicIpAddress }}"
+    ansible_ssh_user: cloud-user
+    ansible_become: True

+ 15 - 0
playbooks/azure/openshift-cluster/tasks/remove_yum.yml

@@ -0,0 +1,15 @@
+---
+- name: remove yum client certificate
+  file:
+    state: absent
+    path: "/var/lib/yum/{{ item.name }}"
+  with_items:
+  - name: client-cert.pem
+  - name: client-key.pem
+  when: ansible_distribution == "RedHat"
+
+- name: remove yum repositories
+  yum_repository:
+    state: absent
+    name: "{{ item.name }}"
+  with_items: "{{ openshift_additional_repos }}"

+ 19 - 0
playbooks/azure/openshift-cluster/tasks/yum_certs.yml

@@ -0,0 +1,19 @@
+---
+- name: copy yum client certificate
+  copy:
+    content: "{{ item.content }}"
+    dest: "/var/lib/yum/{{ item.name }}"
+    mode: '0600'
+  with_items:
+  - name: client-cert.pem
+    content: "{{ yum_client_cert_contents }}"
+  - name: client-key.pem
+    content: "{{ yum_client_key_contents }}"
+  no_log: True
+  when: ansible_distribution == "RedHat"
+
+- name: add yum repositories
+  import_role:
+    name: openshift_repos
+  vars:
+    r_openshift_repos_has_run: True

+ 61 - 0
playbooks/azure/openshift-cluster/templates/offer.yml.j2

@@ -0,0 +1,61 @@
+offerTypeId: microsoft-azure-virtualmachines
+publisherId: redhat
+id: osa
+definition:
+  displayText: OpenShift on Azure
+  offer:
+    microsoft-azure-marketplace-testdrive.enabled: false
+    microsoft-azure-marketplace-testdrive.videos: []
+    microsoft-azure-marketplace.title: OpenShift on Azure
+    microsoft-azure-marketplace.summary: OpenShift on Azure
+    microsoft-azure-marketplace.longSummary: OpenShift on Azure
+    microsoft-azure-marketplace.description: OpenShift on Azure
+    microsoft-azure-marketplace.offerMarketingUrlIdentifier: osa
+    microsoft-azure-marketplace.allowedSubscriptions: {{ openshift_azure_templ_allowed_subscriptions | to_json }}
+    microsoft-azure-marketplace.usefulLinks: []
+    microsoft-azure-marketplace.categories: [appInfrastructure, businessApplication, devService, web]
+    microsoft-azure-marketplace.smallLogo: "https://publishingstoredm.blob.core.windows.net/prodcontent/D6191_publishers_redhat/origin:2Dacsengine/62aad711-f499-461f-b74c-f4f31586b0f5.png?sv=2014-02-14&sr=b&sig=ewX%2F9aAkgG3EMLzBMPjyHuEzRDhvs8TmOsNWYPRXwg8%3D&se=2020-04-13T21%3A02%3A53Z&sp=r"
+    microsoft-azure-marketplace.mediumLogo: "https://publishingstoredm.blob.core.windows.net/prodcontent/D6191_publishers_redhat/origin:2Dacsengine/df6bed86-6891-4558-848f-be236ec981d5.png?sv=2014-02-14&sr=b&sig=QzYeJ6qdoMoUTCeaFycpqrBpO0Lnr7upy%2FQCArYvhno%3D&se=2020-04-13T21%3A02%3A53Z&sp=r"
+    microsoft-azure-marketplace.largeLogo: "https://publishingstoredm.blob.core.windows.net/prodcontent/D6191_publishers_redhat/origin:2Dacsengine/b486c038-bf6a-4881-a5c6-d3619bb27884.png?sv=2014-02-14&sr=b&sig=1BetRsM%2BJch9zhYaagMcYwkD7Lantdm%2FInHUQ7LfJZ0%3D&se=2020-04-13T21%3A02%3A53Z&sp=r"
+    microsoft-azure-marketplace.wideLogo: "https://publishingstoredm.blob.core.windows.net/prodcontent/D6191_publishers_redhat/origin:2Dacsengine/141969bb-300b-47a3-ae9f-2573af3e8720.png?sv=2014-02-14&sr=b&sig=DnhC36RA3rGNOqByHrP8dvIBmgCHQr95%2B5PqJe9D1qk%3D&se=2020-04-13T21%3A02%3A53Z&sp=r"
+    microsoft-azure-marketplace.screenshots: []
+    microsoft-azure-marketplace.videos: []
+    microsoft-azure-marketplace.leadDestination: None
+    microsoft-azure-marketplace.tableLeadConfiguration: {}
+    microsoft-azure-marketplace.blobLeadConfiguration: {}
+    microsoft-azure-marketplace.salesForceLeadConfiguration: {}
+    microsoft-azure-marketplace.crmLeadConfiguration: {}
+    microsoft-azure-marketplace.httpsEndpointLeadConfiguration: {}
+    microsoft-azure-marketplace.marketoLeadConfiguration: {}
+    microsoft-azure-marketplace.privacyURL: "https://www.redhat.com/en/about/privacy-policy"
+    microsoft-azure-marketplace.termsOfUse: TODO
+    microsoft-azure-marketplace.engineeringContactEmail: support@redhat.com
+    microsoft-azure-marketplace.engineeringContactName: Red Hat Support
+    microsoft-azure-marketplace.engineeringContactPhone: 888-467-3342
+    microsoft-azure-marketplace.supportContactEmail: support@redhat.com
+    microsoft-azure-marketplace.supportContactName: Red Hat Support
+    microsoft-azure-marketplace.supportContactPhone: 888-467-3342
+    microsoft-azure-marketplace.publicAzureSupportUrl: ''
+    microsoft-azure-marketplace.fairfaxSupportUrl: ''
+  plans:
+  - planId: {{ azure_image_publish[image_type].templ_plan_id }}
+    microsoft-azure-virtualmachines.skuTitle: {{ azure_image_publish[image_type].templ_sku_title }}
+    microsoft-azure-virtualmachines.skuSummary: {{ azure_image_publish[image_type].templ_sku_title }}
+    microsoft-azure-virtualmachines.skuDescription: {{ azure_image_publish[image_type].templ_sku_title }}
+    microsoft-azure-virtualmachines.hideSKUForSolutionTemplate: true
+    microsoft-azure-virtualmachines.cloudAvailability: [PublicAzure]
+    microsoft-azure-virtualmachines.certificationsFairfax: []
+    restrictedAudience:
+      manualEntries: {{ openshift_azure_templ_restricted_audience_manual_entries | to_json }}
+    virtualMachinePricing: {isByol: true, freeTrialDurationInMonths: 0}
+    microsoft-azure-virtualmachines.operatingSystemFamily: Linux
+    microsoft-azure-virtualmachines.osType: {{ azure_image_publish[image_type].templ_os_type }}
+    microsoft-azure-virtualmachines.operationSystem: {{ azure_image_publish[image_type].templ_os }}
+    microsoft-azure-virtualmachines.recommendedVMSizes: [d2s-standard-v3, d4s-standard-v3]
+    microsoft-azure-virtualmachines.openPorts: []
+    microsoft-azure-virtualmachines.vmImages: {{ image_vm_images | to_json }}
+    regions: [DZ, AR, AU, AT, BH, BY, BE, BR, BG, CA, CL, CO, CR, HR, CY, CZ, DK,
+      DO, EC, EG, SV, EE, FI, FR, DE, GR, GT, HK, HU, IS, IN, ID, IE, IL, IT, JP,
+      JO, KZ, KE, KR, KW, LV, LI, LT, LU, MK, MY, MT, MX, ME, MA, NL, NZ, NG, 'NO',
+      OM, PK, PA, PY, PE, PH, PL, PT, PR, QA, RO, RU, SA, RS, SG, SK, SI, ZA, ES,
+      LK, SE, CH, TW, TH, TT, TN, TR, UA, AE, GB, US, UY, VE]

+ 1 - 3
playbooks/byo/openshift-cluster/upgrades/README.md

@@ -4,6 +4,4 @@ cluster. Additional notes for the associated upgrade playbooks are
 provided in their respective directories.
 
 # Upgrades available
-- [OpenShift Container Platform 3.7 to 3.9](v3_6/README.md) (works also to upgrade OpenShift Origin from 3.7.x to 3.9.x)
-- [OpenShift Container Platform 3.6 to 3.7](v3_7/README.md) (works also to upgrade OpenShift Origin from 3.6.x to 3.7.x)
-- [OpenShift Container Platform 3.5 to 3.6](v3_6/README.md) (works also to upgrade OpenShift Origin from 1.5.x to 3.6.x)
+- [OpenShift Container Platform 3.9 to 3.10](v3_10/README.md) (upgrade OpenShift Origin from 3.9.x to 3.10.x)

playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml → playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_scale_groups.yml


+ 20 - 0
playbooks/byo/openshift-cluster/upgrades/v3_11/README.md

@@ -0,0 +1,20 @@
+# v3.11 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the following steps.
+
+ * Upgrade and restart master services
+ * Unschedule node
+ * Upgrade and restart docker
+ * Upgrade and restart node services
+ * Modifies the necessary subset of the configuration
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_11/upgrade.yml
+```

+ 5 - 0
playbooks/byo/openshift-cluster/upgrades/v3_11/upgrade.yml

@@ -0,0 +1,5 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_11/upgrade.yml

+ 16 - 0
playbooks/byo/openshift-cluster/upgrades/v3_11/upgrade_control_plane.yml

@@ -0,0 +1,16 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_11/upgrade_control_plane.yml
+
+- import_playbook: ../../../../openshift-master/private/restart.yml

+ 7 - 0
playbooks/byo/openshift-cluster/upgrades/v3_11/upgrade_nodes.yml

@@ -0,0 +1,7 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_11/upgrade_nodes.yml

+ 7 - 0
playbooks/byo/openshift-cluster/upgrades/v3_11/upgrade_scale_groups.yml

@@ -0,0 +1,7 @@
+---
+#
+# Node Scale Group Upgrade Playbook
+#
+# Upgrades scale group nodes only.
+#
+- import_playbook: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml

+ 0 - 20
playbooks/byo/openshift-cluster/upgrades/v3_6/README.md

@@ -1,20 +0,0 @@
-# v3.6 Major and Minor Upgrade Playbook
-
-## Overview
-This playbook currently performs the following steps.
-
- * Upgrade and restart master services
- * Unschedule node
- * Upgrade and restart docker
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-
-```
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
-```

+ 0 - 5
playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml

@@ -1,5 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml

+ 0 - 14
playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml

@@ -1,14 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml

+ 0 - 7
playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml

@@ -1,7 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml

+ 0 - 20
playbooks/byo/openshift-cluster/upgrades/v3_7/README.md

@@ -1,20 +0,0 @@
-# v3.7 Major and Minor Upgrade Playbook
-
-## Overview
-This playbook currently performs the following steps.
-
- * Upgrade and restart master services
- * Unschedule node
- * Upgrade and restart docker
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-
-```
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
-```

+ 0 - 5
playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml

@@ -1,5 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml

+ 0 - 14
playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml

@@ -1,14 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml

+ 0 - 7
playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml

@@ -1,7 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml

+ 0 - 20
playbooks/byo/openshift-cluster/upgrades/v3_9/README.md

@@ -1,20 +0,0 @@
-# v3.9 Major and Minor Upgrade Playbook
-
-## Overview
-This playbook currently performs the following steps.
-
- * Upgrade and restart master services
- * Unschedule node
- * Upgrade and restart docker
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-
-```
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
-```

+ 0 - 5
playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml

@@ -1,5 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml

+ 0 - 16
playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml

@@ -1,16 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
-
-- import_playbook: ../../../../openshift-master/private/restart.yml

+ 0 - 7
playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml

@@ -1,7 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml

+ 10 - 0
playbooks/cluster-operator/OWNERS

@@ -0,0 +1,10 @@
+# approval == this is a good idea /approve
+approvers:
+  - abutcher
+  - dgoodwin
+  - csrwng
+# review == this code is good /lgtm
+reviewers:
+  - abutcher
+  - dgoodwin
+  - csrwng

+ 1 - 2
playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml

@@ -20,7 +20,6 @@
 - import_playbook: ../../../../init/version.yml
   vars:
     l_openshift_version_set_hosts: "oo_etcd_to_config:oo_nodes_to_upgrade:oo_masters_to_config:!oo_first_master"
-    l_openshift_version_check_hosts: "all:!all"
 
 # Ensure inventory sanity_checks are run.
 - import_playbook: ../../../../init/sanity_checks.yml
@@ -88,7 +87,7 @@
     - l_docker_upgrade_drain_result is failed
     - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
 
-  - include_tasks: tasks/upgrade.yml
+  - import_tasks: tasks/upgrade.yml
     when:
     - l_docker_upgrade is defined
     - l_docker_upgrade | bool

+ 0 - 0
playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml


Some files were not shown because too many files changed in this diff