
Merge remote-tracking branch 'upstream/master' into ports

Emilio Garcia 6 years ago
parent
commit
5426fe97cf
95 changed files with 6546 additions and 422 deletions
  1. 1 1
      .tito/packages/openshift-ansible
  2. 41 0
      images/installer/Dockerfile.ci
  3. 13 0
      images/installer/container.yaml
  4. 0 2
      inventory/hosts.example
  5. 170 1
      openshift-ansible.spec
  6. 2 0
      playbooks/aws/openshift-cluster/prerequisites.yml
  7. 10 0
      playbooks/aws/openshift-cluster/provision_iam_role.yml
  8. 1 1
      playbooks/azure/openshift-cluster/tasks/create_blob_from_vm.yml
  9. 0 2
      playbooks/openshift-etcd/private/remove-etcdv2-data.yml
  10. 27 18
      playbooks/openshift-etcd/private/scaleup.yml
  11. 1 1
      playbooks/openshift-etcd/private/server_certificates.yml
  12. 0 1
      playbooks/openshift-etcd/private/upgrade_rpm_members.yml
  13. 0 2
      playbooks/openshift-etcd/private/upgrade_static.yml
  14. 3 2
      playbooks/openshift-glusterfs/private/registry.yml
  15. 1 5
      playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml
  16. 1 1
      playbooks/openshift-master/private/config.yml
  17. 13 0
      playbooks/openshift-node/private/join.yml
  18. 0 7
      playbooks/openshift-node/private/registry_auth.yml
  19. 32 6
      playbooks/openstack/configuration.md
  20. 7 6
      playbooks/openstack/resources.py
  21. 24 1
      playbooks/openstack/sample-inventory/group_vars/all.yml
  22. 37 1
      playbooks/ovirt/provisioning-vars.yaml.example
  23. 46 1
      roles/calico/README.md
  24. 0 0
      roles/calico/defaults/main.yaml
  25. 1 1
      roles/calico/meta/main.yml
  26. 0 0
      roles/calico/tasks/certs.yml
  27. 124 42
      roles/calico/tasks/main.yml
  28. 0 0
      roles/calico/templates/calico-etcd.yml.j2
  29. 0 0
      roles/calico/templates/calico-pull-secret.yml.j2
  30. 0 2
      roles/calico_master/templates/calico.yml.j2
  31. 1 3
      roles/calico_master/templates/calicov3.yml.j2
  32. 0 48
      roles/calico_master/README.md
  33. 0 129
      roles/calico_master/tasks/main.yml
  34. 3 0
      roles/calico_node/README.md
  35. 0 0
      roles/calico_node/files/calico.conf
  36. 0 1
      roles/calico_master/meta/main.yml
  37. 13 0
      roles/calico_node/tasks/main.yml
  38. 2 3
      roles/etcd/defaults/main.yaml
  39. 0 11
      roles/etcd/etcdctl.sh
  40. 1 7
      roles/etcd/tasks/add_new_member.yml
  41. 8 8
      roles/etcd/tasks/remove-etcd-v2-data.yml
  42. 1 1
      roles/etcd/tasks/static.yml
  43. 2 12
      roles/etcd/tasks/upgrade_rpm.yml
  44. 2 12
      roles/etcd/tasks/upgrade_static.yml
  45. 9 0
      roles/etcd/tasks/verify_cluster_health.yml
  46. 1 1
      roles/etcd/templates/etcdctl.sh.j2
  47. 4 0
      roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
  48. 1 0
      roles/lib_utils/action_plugins/generate_pv_pvcs_list.py
  49. 17 10
      roles/openshift_aws/defaults/main.yml
  50. 8 5
      roles/openshift_aws/tasks/build_node_group.yml
  51. 15 6
      roles/openshift_aws/tasks/iam_role.yml
  52. 4 4
      roles/openshift_aws/tasks/launch_config.yml
  53. 4 0
      roles/openshift_aws/tasks/provision_ec2.yml
  54. 8 0
      roles/openshift_aws/tasks/provision_ec2_facts.yml
  55. 1 1
      roles/openshift_cluster_monitoring_operator/defaults/main.yml
  56. 1 1
      roles/openshift_examples/examples-sync.sh
  57. 65 0
      roles/openshift_examples/files/examples/latest/xpaas-streams/datagrid72-image-stream.json
  58. 446 0
      roles/openshift_examples/files/examples/latest/xpaas-templates/datagrid72-basic.json
  59. 638 0
      roles/openshift_examples/files/examples/latest/xpaas-templates/datagrid72-https.json
  60. 955 0
      roles/openshift_examples/files/examples/latest/xpaas-templates/datagrid72-mysql-persistent.json
  61. 928 0
      roles/openshift_examples/files/examples/latest/xpaas-templates/datagrid72-mysql.json
  62. 527 0
      roles/openshift_examples/files/examples/latest/xpaas-templates/datagrid72-partition.json
  63. 926 0
      roles/openshift_examples/files/examples/latest/xpaas-templates/datagrid72-postgresql-persistent.json
  64. 899 0
      roles/openshift_examples/files/examples/latest/xpaas-templates/datagrid72-postgresql.json
  65. 15 2
      roles/openshift_facts/defaults/main.yml
  66. 1 1
      roles/openshift_logging_fluentd/templates/fluentd.j2
  67. 4 0
      roles/openshift_logging_mux/templates/mux.j2
  68. 1 1
      roles/openshift_manage_node/tasks/config.yml
  69. 14 0
      roles/openshift_node_group/files/sync.yaml
  70. 1 1
      roles/openshift_node_group/tasks/sync.yml
  71. 3 0
      roles/openshift_node_group/templates/node-config.yaml.j2
  72. 1 10
      roles/openshift_node_problem_detector/defaults/main.yaml
  73. 1 1
      roles/openshift_node_problem_detector/templates/node-problem-detector-daemonset.yaml.j2
  74. 12 1
      roles/openshift_openstack/defaults/main.yml
  75. 34 1
      roles/openshift_openstack/tasks/check-prerequisites.yml
  76. 3 3
      roles/openshift_openstack/tasks/populate-dns.yml
  77. 34 0
      roles/openshift_openstack/tasks/prerequisites/all-in-one-cluster-deployment-check.yml
  78. 14 0
      roles/openshift_openstack/tasks/prerequisites/cinder-persistent-volume-check.yml
  79. 34 0
      roles/openshift_openstack/tasks/prerequisites/cinder-registry-check.yml
  80. 39 0
      roles/openshift_openstack/tasks/prerequisites/cloud-provider-check.yml
  81. 0 0
      roles/openshift_openstack/tasks/prerequisites/image-and-flavor-check.yml
  82. 35 0
      roles/openshift_openstack/tasks/prerequisites/kuryr-check.yml
  83. 15 0
      roles/openshift_openstack/tasks/prerequisites/neutron-internal-dns-check.yml
  84. 10 0
      roles/openshift_openstack/tasks/prerequisites/no-floating-ip-check.yml
  85. 16 0
      roles/openshift_openstack/tasks/prerequisites/nsupdate-check.yml
  86. 30 0
      roles/openshift_openstack/tasks/prerequisites/openstack-ssl-check.yml
  87. 15 0
      roles/openshift_openstack/tasks/prerequisites/provider-network-check.yml
  88. 24 0
      roles/openshift_openstack/tasks/prerequisites/swift-registry-check.yml
  89. 5 2
      roles/openshift_openstack/templates/heat_stack.yaml.j2
  90. 117 9
      roles/openshift_ovirt/README.md
  91. 9 0
      roles/openshift_ovirt/tasks/build_vm_list.yml
  92. 2 19
      roles/openshift_persistent_volumes/tasks/main.yml
  93. 4 3
      roles/openshift_sdn/files/sdn-ovs.yaml
  94. 4 1
      test/ci/launch.yml
  95. 4 0
      test/ci/vars.yml.sample

+ 1 - 1
.tito/packages/openshift-ansible

@@ -1 +1 @@
-3.11.0-0.35.0 ./
+4.0.0-0.7.0 ./

+ 41 - 0
images/installer/Dockerfile.ci

@@ -0,0 +1,41 @@
+FROM centos:7
+
+MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
+
+USER root
+
+LABEL name="openshift/origin-ansible" \
+      summary="OpenShift's installation and configuration tool" \
+      description="A containerized openshift-ansible image to use in CI - includes necessary packages to test clusters on AWS/GCP/Azure" \
+      url="https://github.com/openshift/openshift-ansible" \
+      io.k8s.display-name="openshift-ansible" \
+      io.k8s.description="A containerized openshift-ansible image to use in CI - includes necessary packages to test clusters on AWS/GCP/Azure" \
+      io.openshift.expose-services="" \
+      io.openshift.tags="openshift,install,upgrade,ansible" \
+      atomic.run="once"
+
+ENV USER_UID=1001 \
+    HOME=/opt/app-root/src \
+    WORK_DIR=/usr/share/ansible/openshift-ansible \
+    OPTS="-v"
+
+# Add image scripts and files for running as a system container
+COPY images/installer/root /
+# Add origin repo for including the oc client
+COPY images/installer/origin-extra-root /
+# Install openshift-ansible RPMs
+RUN yum install -y epel-release && \
+    rm -rf /etc/yum.repos.d/centos-openshift-origin.repo && \
+    yum-config-manager --enable built > /dev/null && \
+    INSTALL_PKGS="openssh google-cloud-sdk azure-cli" \
+    yum install --setopt=tsflags=nodocs -y $INSTALL_PKGS openshift-ansible-test && \
+    yum clean all
+
+RUN /usr/local/bin/user_setup \
+ && rm /usr/local/bin/usage.ocp
+
+USER ${USER_UID}
+
+WORKDIR ${WORK_DIR}
+ENTRYPOINT [ "/usr/local/bin/entrypoint" ]
+CMD [ "/usr/local/bin/run" ]

+ 13 - 0
images/installer/container.yaml

@@ -0,0 +1,13 @@
+---
+compose:
+  packages:
+  - openshift-ansible
+  - python2-crypto
+  - httpd-tools
+  - google-cloud-sdk
+  - java-1.8.0-openjdk-headless
+  - atomic-openshift-clients
+  - openssl
+  - iproute
+  - python-boto
+  - python2-boto3

+ 0 - 2
inventory/hosts.example

@@ -188,8 +188,6 @@ debug_level=2
 #oreg_auth_password='my-pass'
 # NOTE: oreg_url must be defined by the user for oreg_auth_* to have any affect.
 # oreg_auth_pass should be generated from running docker login.
-# To update registry auth credentials, uncomment the following:
-#oreg_auth_credentials_replace: True
 
 # OpenShift repository configuration
 #openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]

+ 170 - 1
openshift-ansible.spec

@@ -10,7 +10,7 @@
 
 Name:           openshift-ansible
 Version:        4.0.0
-Release:        0.0.0%{?dist}
+Release:        0.7.0%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
 URL:            https://github.com/openshift/openshift-ansible
@@ -31,6 +31,8 @@ Requires:      libselinux-python
 Requires:      python-passlib
 Requires:      python2-crypto
 Requires:      patch
+Requires:      pyOpenSSL
+Requires:      iproute
 
 %description
 Openshift and Atomic Enterprise Ansible
@@ -66,6 +68,7 @@ cp inventory/hosts.* inventory/README.md docs/example-inventories/
 
 # openshift-ansible-playbooks install
 cp -rp playbooks %{buildroot}%{_datadir}/ansible/%{name}/
+cp -rp test %{buildroot}%{_datadir}/ansible/%{name}/
 # remove contiv plabooks
 rm -rf %{buildroot}%{_datadir}/ansible/%{name}/playbooks/adhoc/contiv
 
@@ -168,8 +171,174 @@ if [ -d %{_datadir}/ansible/%{name}/roles/openshift_examples/files/examples ]; t
   find %{_datadir}/ansible/%{name}/roles/openshift_examples/files/examples -name latest -type l -delete
 fi
 
+# ----------------------------------------------------------------------------------
+# openshift-ansible-tests subpackage
+# ----------------------------------------------------------------------------------
+%package test
+Summary:       Openshift and Atomic Enterprise Ansible Test Playbooks
+Requires:      %{name} = %{version}-%{release}
+Requires:      %{name}-roles = %{version}-%{release}
+Requires:      %{name}-playbooks = %{version}-%{release}
+Requires:      python-boto3
+BuildArch:     noarch
+
+%description test
+%{summary}.
+
+%files test
+%{_datadir}/ansible/%{name}/test
 
 %changelog
+* Fri Sep 28 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.7.0
+- Add OpenStack pre-requisites check for various features (tzumainn@redhat.com)
+- [openstack] Add configuration note for all-in-one and DNS (pep@redhat.com)
+- Remove oreg_auth_credentials_replace from inventory (sdodson@redhat.com)
+- test/ci: ensure AWS instances have public hostname (vrutkovs@redhat.com)
+
+* Thu Sep 27 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.6.0
+- Bug 1554293 - logging-eventrouter event not formatted correctly in
+  Elasticsearch when using MUX (nhosoi@redhat.com)
+- Add a new dockerfile to use in CI (vrutkovs@redhat.com)
+- Add new package which contains test playbooks (vrutkovs@redhat.com)
+
+* Wed Sep 26 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.5.0
+- test/ci: set expirationDate flag for CI namespace garbage collector
+  (vrutkovs@redhat.com)
+- Refactored Calico and updated playbooks to reflect self-hosted Calico
+  installs only (mleung975@gmail.com)
+- Enable IAM roles for EC2s in AWS (mazzystr@gmail.com)
+- Fix for recent az changes. (kwoodson@redhat.com)
+- cluster-monitoring: Bump cluster monitoring operator in origin
+  (fbranczyk@gmail.com)
+- Added capability to fix static addresses to openshift_ovirt provider vms
+  (jparrill@redhat.com)
+
+* Mon Sep 24 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.4.0
+- Reload tuned service when node-config.yaml has changed.
+  (jmencak@users.noreply.github.com)
+
+* Fri Sep 21 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.3.0
+- GlusterFS: Fix registry playbook PV creation (jarrpa@redhat.com)
+- Only create OpenStack router if both router and subnet are undefined
+  (tzumainn@redhat.com)
+
+* Fri Sep 21 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.2.0
+- 
+
+* Fri Sep 21 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.1.0
+- Don't re-deploy node system containers when deploying auth credentials
+  (sdodson@redhat.com)
+- etcdv2 remove: avoid using failed_when (vrutkovs@redhat.com)
+- Bump Data Grid to version 1.1.1 (osni.oliveira@redhat.com)
+- remove unix prefix from crio path (sjenning@redhat.com)
+- adding container.yaml (adammhaile@gmail.com)
+- Fix openstack nsupdate record (tzumainn@redhat.com)
+- Always set openstack node private ip (tzumainn@redhat.com)
+- lib_utils_oo_oreg_image preserve path component (jkupfere@redhat.com)
+- Add unit test for oo_oreg_image filter (mgugino@redhat.com)
+- Update installer_checkpoint plugin to handle empty stats (rteague@redhat.com)
+- Fix etcd scaleup playbook (rteague@redhat.com)
+- registry auth: fix check that node_oreg_auth_credentials_stat exists
+  (vrutkovs@redhat.com)
+- Fix openshift_additional_registry_credentials comparison
+  (vrutkovs@redhat.com)
+- Ensure glusterfs host groups are correct for registry play
+  (mgugino@redhat.com)
+- move OpenStack network fact gathering from prereqs to provision tasks
+  (tzumainn@redhat.com)
+- Ensure atomic hosts prepull node image during pre-upgrade
+  (mgugino@redhat.com)
+- Make cloud-user SSH key maintenance more reliable (ironcladlou@gmail.com)
+- Simplify match filter when looking for sync annotations (vrutkovs@redhat.com)
+- Merge upgrade_control_plane playbooks back into one (vrutkovs@redhat.com)
+- test ci: add an option to terminate VMs instead of stopping
+  (vrutkovs@redhat.com)
+- Update main.yml (sheldyakov@tutu.ru)
+- Remove duplicate words (lxia@redhat.com)
+- Remove traces of containerized install (vrutkovs@redhat.com)
+- Move the cluster-cidr assignment to the correct configs (mleung975@gmail.com)
+- Ensure dnsmasq is restarted during upgrades (mgugino@redhat.com)
+- Don't install NM on atomic systems (vrutkovs@redhat.com)
+- openshift-prometheus: remove deprecated prometheus stack install
+  (pgier@redhat.com)
+- GCP upgrade: don't exclude nodes with tag_ocp-bootstrap (vrutkovs@redhat.com)
+- GCP upgrade: don't exclude nodes with tag_ocp-bootstrap (vrutkovs@redhat.com)
+- Hash the registry hostname to generate unique secret names
+  (sdodson@redhat.com)
+- Add retries around api service discovery (sdodson@redhat.com)
+- Ensure that recycler pod definition is deployed during upgrade
+  (sdodson@redhat.com)
+- Change upgrade playbooks to use 4.0 (vrutkovs@redhat.com)
+- Add 3 retries around all image stream create/replace (sdodson@redhat.com)
+- Fix wrong doc default value of logging (teleyic@gmail.com)
+- test/ci: setup network manager (vrutkovs@redhat.com)
+- Update uninstall_masters play to deal with standalone instances
+  (mazzystr@gmail.com)
+- Fix broken package list on fedora (mgugino@redhat.com)
+- certificate_expiry: gather facts so ansible_date_time is defined
+  (sdodson@redhat.com)
+- Fix volume recycler configuration on upgrade (sdodson@redhat.com)
+- openshift_storage_nfs_lvm: fix with_sequence (jfchevrette@gmail.com)
+- Removing launch.yml. (kwoodson@redhat.com)
+- Wait for sync DS to set annotations on all available nodes
+  (vrutkovs@redhat.com)
+- sync annotations: expected number of annotations should be a number of items
+  (vrutkovs@redhat.com)
+- reduce number of openstack heat retries (tzumainn@redhat.com)
+- Fix openstack parameter checks (tzumainn@redhat.com)
+- Add a wait for aggregated APIs when restarting control plane
+  (sdodson@redhat.com)
+- Update openshift ca redeploy to use correct node client-ca
+  (rteague@redhat.com)
+- Enable monitoring of openshift-metering via cluster monitoring
+  (chance.zibolski@coreos.com)
+- Refactor csr approval for client certs ignore ready (mgugino@redhat.com)
+- reducing /sys/fs/selinux/avc/cache_threshold to 8192 instead of 65535
+  (elvirkuric@gmail.com)
+- Add preview operators to OLM Catalog (cordell.evan@gmail.com)
+- Collect provider facts only if cloudprovider is set (vrutkovs@redhat.com)
+- - s3 variables check as part of importing the s3 tasks itself.
+  (sarumuga@redhat.com)
+- Add proper liveness and readiness checks for Calico 3.2 (mleung975@gmail.com)
+- Move controller args back to template (hekumar@redhat.com)
+- Retry our etcd health check (sdodson@redhat.com)
+- Set gquota on slash filesystem (mazzystr@gmail.com)
+- docker_creds: rename image_name to test_image (sdodson@redhat.com)
+- cluster-monitoring: Fix regex_replace to remove image tag
+  (fbranczyk@gmail.com)
+- fix arguments to controller (hekumar@redhat.com)
+- Update recyler to lsm_registry_url (hekumar@redhat.com)
+- cutting 4.0 (aos-team-art@redhat.com)
+- Use oreg_url rather than hardcoding path (hekumar@redhat.com)
+- Formatting fixes on olm and catalog operators (cordell.evan@gmail.com)
+- Update rh-operators catalog to latest (cordell.evan@gmail.com)
+- Update OLM CRDs to latest (cordell.evan@gmail.com)
+- Proper DNS for the subnet created (e.minguez@gmail.com)
+- Set etcd facts necessary for etcd scaleup (rteague@redhat.com)
+- Revert "Don't fetch provider openshift_facts if openshift_cloud_provider_kind
+  is not set" (roignac@gmail.com)
+- cluster-monitoring: Remove version tag for passing image repos
+  (fbranczyk@gmail.com)
+- Fixes: BZ1618547 disable keep ns on error in ASB to prevent resource
+  starvation (jmontleo@redhat.com)
+- Add openshift_additional_registry_credentials (sdodson@redhat.com)
+- docker_creds: Add tls_verify parameter (sdodson@redhat.com)
+- Avoid S3 deployment check (sarumuga@redhat.com)
+- Filter openshift_cloudprovider_openstack_blockstorage_ignore_volume_az to
+  bool (alberto.rodriguez.peon@cern.ch)
+- Add playbook to migrate node imageConfig.format (mgugino@redhat.com)
+- docker_creds: Use bool for test_login param (sdodson@redhat.com)
+- Run the kube-proxy once per cluster for Calico (mleung975@gmail.com)
+- Provide version information (hekumar@redhat.com)
+- Annotate nodes with md5sum of the applied config (vrutkovs@redhat.com)
+- Add a pod template for recycler pod (hekumar@redhat.com)
+- Bump repo constants to support 4.0 RPMs (ccoleman@redhat.com)
+- Add calico-pull-secret (mleung975@gmail.com)
+- Add separate Calico etcd (mleung975@gmail.com)
+- Use true/false instead of yes/no (alberto.rodriguez.peon@cern.ch)
+- Allow to configure BlockStorage.ignore-volume-az for Openstack Cloud Provider
+  (alberto.rodriguez.peon@cern.ch)
+
 * Tue Sep 11 2018 AOS Automation Release Team <aos-team-art@redhat.com> 3.11.0-0.35.0
 - cluster-monitoring: Fix incorrect handling of conditional PVCs
   (fbranczyk@gmail.com)

+ 2 - 0
playbooks/aws/openshift-cluster/prerequisites.yml

@@ -4,3 +4,5 @@
 - import_playbook: provision_ssh_keypair.yml
 
 - import_playbook: provision_sec_group.yml
+
+- import_playbook: provision_iam_role.yml

+ 10 - 0
playbooks/aws/openshift-cluster/provision_iam_role.yml

@@ -0,0 +1,10 @@
+---
+- name: Create iam role
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: create iam role
+    include_role:
+      name: openshift_aws
+      tasks_from: iam_role.yml
+    when: openshift_aws_create_iam_role | default(true) | bool
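
The play above is gated on `openshift_aws_create_iam_role`, which defaults to true, and it is now imported by `prerequisites.yml`. As a sketch only, a deployment that manages IAM roles outside openshift-ansible could opt out with a single inventory variable (the group_vars location below is an assumption, not something the playbook mandates):

```yaml
# inventory group_vars (location assumed) — skips the IAM role play imported by prerequisites.yml
openshift_aws_create_iam_role: false
```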

+ 1 - 1
playbooks/azure/openshift-cluster/tasks/create_blob_from_vm.yml

@@ -23,7 +23,7 @@
 - name: start copy
   command: >
     az storage blob copy start
-    --source-uri "{{ (sas.stdout | from_json).accessSas }}"
+    --source-uri "{{ (sas.stdout | from_json).properties.output.accessSAS }}"
     --account-name "{{ openshift_azure_storage_account }}"
     --account-key "{{ (keys.stdout | from_json)[0].value }}"
     --destination-container "{{ openshift_azure_container }}"

+ 0 - 2
playbooks/openshift-etcd/private/remove-etcdv2-data.yml

@@ -6,5 +6,3 @@
   - import_role:
       name: etcd
       tasks_from: remove-etcd-v2-data.yml
-    vars:
-      etcd_peer: "{{ openshift.common.hostname }}"

+ 27 - 18
playbooks/openshift-etcd/private/scaleup.yml

@@ -19,24 +19,31 @@
   hosts: oo_new_etcd_to_config
   serial: 1
   any_errors_fatal: true
-  pre_tasks:
+  tasks:
   - import_role:
       name: etcd
       tasks_from: add_new_member.yml
+    vars:
+      etcd_peer: "{{ hostvars[etcd_ca_host].etcd_ip }}"
+
   - import_role:
       name: etcd
       tasks_from: server_certificates.yml
     vars:
       etcd_peers: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
       etcd_certificates_etcd_hosts: "{{ groups.oo_new_etcd_to_config | default([], true) }}"
-  tasks:
+
   - import_role:
       name: os_firewall
     when: etcd_add_check.rc == 0
 
+  # Setup etcd as a static pod if collocated with a master
   - import_role:
       name: etcd
-    when: etcd_add_check.rc == 0
+      tasks_from: static.yml
+    when:
+    - etcd_add_check.rc == 0
+    - inventory_hostname in groups['oo_masters']
     vars:
       etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}"
       etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
@@ -44,24 +51,27 @@
       etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
       etcd_ca_setup: False
 
-  # etcd_hostname fact is set in add_new_member.yml called above.
-  - name: Verify cluster is stable
-    command: >
-      {{ r_etcd_common_etcdctl_command }}
-                       --cert-file {{ etcd_peer_cert_file }}
-                       --key-file {{ etcd_peer_key_file }}
-                       --ca-file {{ etcd_peer_ca_file }}
-                       -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
-                       cluster-health
-    register: scaleup_health
-    retries: 3
-    delay: 30
-    until: scaleup_health.rc == 0
+  - import_role:
+      name: etcd
+      tasks_from: rpm.yml
+    when:
+    - etcd_add_check.rc == 0
+    - not inventory_hostname in groups['oo_masters']
+    vars:
+      etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}"
+      etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+      etcd_initial_cluster_state: "existing"
+      etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
+      etcd_ca_setup: False
+
+  - import_role:
+      name: etcd
+      tasks_from: verify_cluster_health.yml
 
 - name: Update master etcd client urls
   hosts: oo_masters_to_config
   serial: 1
-  pre_tasks:
+  tasks:
   - set_fact:
       openshift_master_etcd_hosts: "{{ hostvars
                                        | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
@@ -78,7 +88,6 @@
     vars:
       l_use_ssl: "{{ openshift_master_etcd_use_ssl | default(True) | bool}}"
 
-  post_tasks:
   - import_role:
       name: openshift_control_plane
       tasks_from: update_etcd_client_urls.yml

+ 1 - 1
playbooks/openshift-etcd/private/server_certificates.yml

@@ -2,7 +2,7 @@
 - name: Create etcd server certificates for etcd hosts
   hosts: oo_etcd_to_config
   any_errors_fatal: true
-  post_tasks:
+  tasks:
     - import_role:
         name: etcd
         tasks_from: server_certificates.yml

+ 0 - 1
playbooks/openshift-etcd/private/upgrade_rpm_members.yml

@@ -10,7 +10,6 @@
       tasks_from: upgrade_rpm.yml
     vars:
       r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
-      etcd_peer: "{{ openshift.common.hostname }}"
     when:
     - etcd_rpm_version.stdout | default('99') is version(etcd_upgrade_version, '<')
     - ansible_distribution == 'RedHat'

+ 0 - 2
playbooks/openshift-etcd/private/upgrade_static.yml

@@ -6,6 +6,4 @@
   - import_role:
       name: etcd
       tasks_from: upgrade_static.yml
-    vars:
-      etcd_peer: "{{ openshift.common.hostname }}"
     when: inventory_hostname in groups['oo_masters']

+ 3 - 2
playbooks/openshift-glusterfs/private/registry.yml

@@ -7,10 +7,11 @@
 
 - name: Create persistent volumes
   hosts: oo_first_master
+  vars:
+    openshift_hosted_registry_glusterfs_storage_create_pv: True
+    openshift_hosted_registry_glusterfs_storage_create_pvc: True
   roles:
   - role: openshift_persistent_volumes
-    when:
-    - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap
 
 - import_playbook: ../../openshift-hosted/private/openshift_hosted_registry.yml
 

+ 1 - 5
playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml

@@ -1,8 +1,5 @@
 ---
-# This playbook waits for registry and router pods after both have been
-# created.  It is intended to allow the tasks of deploying both to complete
-# before polling to save time.
-- name: Poll for hosted pod deployments
+- name: Create Hosted Resources - registry storage
   hosts: oo_first_master
   tasks:
   - import_role:
@@ -10,4 +7,3 @@
       tasks_from: registry_storage.yml
     when:
     - openshift_hosted_manage_registry | default(True) | bool
-    - openshift_hosted_registry_registryurl is defined

+ 1 - 1
playbooks/openshift-master/private/config.yml

@@ -79,7 +79,7 @@
     when: openshift_use_nuage | default(false) | bool
   - role: nuage_master
     when: openshift_use_nuage | default(false) | bool
-  - role: calico_master
+  - role: calico
     when: openshift_use_calico | default(false) | bool
   tasks:
   - import_role:

+ 13 - 0
playbooks/openshift-node/private/join.yml

@@ -57,6 +57,19 @@
     openshift_master_host: "{{ groups.oo_first_master.0 }}"
     openshift_manage_node_is_master: "{{ ('oo_masters_to_config' in group_names) | bool }}"
 
+- name: Create additional node network plugin groups
+  hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}"
+  tasks:
+  - group_by:
+      key: oo_nodes_use_{{ (openshift_use_calico | default(False)) | ternary('calico','nothing') }}
+    changed_when: False
+
+- name: Additional calico node config
+  hosts: oo_nodes_use_calico
+  roles:
+  - role: calico_node
+    when: openshift_use_calico | default(false) | bool
+
 - name: Node Join Checkpoint End
   hosts: all
   gather_facts: false

+ 0 - 7
playbooks/openshift-node/private/registry_auth.yml

@@ -7,13 +7,6 @@
   - import_role:
       name: openshift_node
       tasks_from: registry_auth.yml
-  # If there were previously no authenticated registries, the credential file
-  # won't be mounted in the system container;  Need to rerun this step to ensure
-  # additional mounts are provided.
-  - import_role:
-      name: openshift_node
-      tasks_from: node_system_container_install.yml
-    when: openshift_is_atomic
 
 # l_reg_auth_restart_hosts is passed in via imageconfig.yml to prevent
 # the nodes from restarting because the sync pod will be restarting them

+ 32 - 6
playbooks/openstack/configuration.md

@@ -136,7 +136,11 @@ configuration file locally and specify it in `inventory/group_vars/OSEv3.yml`:
 ## OpenStack With SSL Configuration
 
 In order to configure your OpenShift cluster to work properly with OpenStack with
-SSL-endpoints, add the following to `inventory/group_vars/OSEv3.yml`:
+SSL-endpoints, set the following in `inventory/group_vars/all.yml`:
+
+* `openshift_use_openstack_ssl`: True
+
+Then add the following to `inventory/group_vars/OSEv3.yml`:
 
 ```
 openshift_certificates_redeploy: true
@@ -269,6 +273,7 @@ do not have it either. Nor should they use any other internal DNS server.
 Put this in your `inventory/group_vars/all.yml`:
 
 ```yaml
+openshift_openstack_use_neutron_internal_dns: True
 openshift_openstack_fqdn_nodes: false
 openshift_openstack_dns_nameservers: []
 ```
@@ -304,6 +309,8 @@ are created, but before we install anything on them).
 Add this to your `inventory/group_vars/all.yml`:
 
 ```
+    openshift_openstack_use_nsupdate: True
+
     openshift_openstack_external_nsupdate_keys:
       private:
         key_secret: <some nsupdate key>
@@ -484,6 +491,7 @@ You must do this from inside the "bastion" host created in the previous step.
 Put the following to `inventory/group_vars/all.yml`:
 
 ```yaml
+openshift_openstack_use_no_floating_ip: True
 openshift_openstack_router_name: openshift-router
 openshift_openstack_node_subnet_name: openshift
 openshift_openstack_master_floating_ip: false
@@ -500,10 +508,10 @@ And then run the `playbooks/openstack/openshift-cluster/*.yml` as usual.
 If you want to deploy OpenShift on a single node (e.g. for quick evaluation),
 you can do so with a few configuration changes.
 
-First, set the node counts and labels like so in
-`inventory/group_vars/all.yml`:
+First, set the following in `inventory/group_vars/all.yml`:
 
 ```
+openshift_use_all_in_one_cluster_deployment: True
 openshift_openstack_num_masters: 1
 openshift_openstack_num_infra: 0
 openshift_openstack_num_nodes: 0
@@ -532,6 +540,11 @@ this new group to it.
 Note that the "all in one" node must be the "master". openshift-ansible
 expects at least one node in the `masters` Ansible group.
 
+Also keep in mind that if you don't use [LBaaS](#load-balancer-as-a-service)
+with an all-in-one setup the DNS wildcard record for the apps domain will not be
+added, because there are no dedicated infra nodes, so you will have to add it
+manually. See
+[Custom DNS Records Configuration](#custom-dns-records-configuration).
 
 ## Building Node Images
 
@@ -830,12 +843,17 @@ resolve each other by name.
 
 In `inventory/group_vars/all.yml`:
 
+* `openshift_openstack_use_provider_network` True
 * `openshift_openstack_provider_network_name` Provider network name. Setting this will cause the `openshift_openstack_external_network_name` and `openshift_openstack_private_network_name` parameters to be ignored.
 
 
 ## Cinder-Backed Persistent Volumes Configuration
 
-In addition to [setting up an OpenStack cloud provider](#openstack-cloud-provider-configuration),
+Set the following in `inventory/group_vars/all.yml`:
+
+* `openshift_use_cinder_persistent_volume`: True
+
+Then, in addition to [setting up an OpenStack cloud provider](#openstack-cloud-provider-configuration),
 you must set the following in `inventory/group_vars/OSEv3.yml`:
 
 * `openshift_cloudprovider_openstack_blockstorage_version`: v2
@@ -877,7 +895,11 @@ openstack volume create --size <volume size in gb> <volume name>
 Alternatively, the playbooks can create the volume created automatically if you
 specify its name and size.
 
-In either case, you have to [set up an OpenStack cloud provider](#openstack-cloud-provider-configuration),
+Then, set the following in `inventory/group_vars/all.yml`:
+
+* `openshift_use_cinder_registry`: True
+
+And [set up an OpenStack cloud provider](#openstack-cloud-provider-configuration),
 and then set the following in `inventory/group_vars/OSEv3.yml`:
 
 * `openshift_hosted_registry_storage_kind`: openstack
@@ -904,7 +926,11 @@ infra nodes when the registry pod gets started.
 ## Swift or Ceph Rados GW Backed Registry Configuration
 
 You can use OpenStack Swift or Ceph Rados GW to store your OpenShift registry.
-In order to do so, set the following in `inventory/group_vars/OSEv3.yml`:
+In order to do so, set the following in `inventory/group_vars/all.yml`:
+
+* `openshift_use_swift_registry`: true
+
+And the following in `inventory/group_vars/OSEv3.yml`:
 
 * `openshift_hosted_registry_storage_kind`: object
 * `openshift_hosted_registry_storage_provider`: swift
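
Taken together, the documentation changes in this file introduce per-feature opt-in flags that trigger the new prerequisite checks. A minimal, illustrative sketch of an `inventory/group_vars/all.yml` enabling a few of them follows; the flag names come from the hunks above, while the particular feature combination and the key value are placeholders:

```yaml
---
# Opt into the prerequisite checks for the features this deployment actually uses.
openshift_use_openstack_ssl: true             # SSL-terminated OpenStack endpoints
openshift_use_cinder_persistent_volume: true  # Cinder-backed persistent volumes
openshift_openstack_use_nsupdate: true        # DNS records pushed via nsupdate

# Supporting settings referenced in the nsupdate section above (value is the document's placeholder):
openshift_openstack_external_nsupdate_keys:
  private:
    key_secret: <some nsupdate key>
```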

+ 7 - 6
playbooks/openstack/resources.py

@@ -85,13 +85,14 @@ def _get_hostvars(server, docker_storage_mountpoints):
     }
 
     public_v4 = server.public_v4 or server.private_v4
+    private_v4 = server.private_v4 or server.public_v4
     if public_v4:
-        hostvars['public_v4'] = server.public_v4
-        hostvars['openshift_public_ip'] = server.public_v4
+        hostvars['public_v4'] = public_v4
+        hostvars['openshift_public_ip'] = public_v4
     # TODO(shadower): what about multiple networks?
-    if server.private_v4:
-        hostvars['private_v4'] = server.private_v4
-        hostvars['openshift_ip'] = server.private_v4
+    if private_v4:
+        hostvars['private_v4'] = private_v4
+        hostvars['openshift_ip'] = private_v4
 
         # NOTE(shadower): Yes, we set both hostname and IP to the private
         # IP address for each node. OpenStack doesn't resolve nodes by
@@ -99,7 +100,7 @@ def _get_hostvars(server, docker_storage_mountpoints):
         # DNS which would complicate the setup and potentially introduce
         # performance issues.
         hostvars['openshift_hostname'] = server.metadata.get(
-            'openshift_hostname', server.private_v4)
+            'openshift_hostname', private_v4)
     hostvars['openshift_public_hostname'] = server.name
 
     if server.metadata['host-type'] == 'cns':

+ 24 - 1
playbooks/openstack/sample-inventory/group_vars/all.yml

@@ -1,4 +1,25 @@
 ---
+# Uncomment if you plan on using these features. Doing so is not required,
+# but enables pre-requisites checks that will confirm that the right parameters
+# are set.
+
+# Note that these features require other parameters to be set in the inventory.
+# It is highly recommended that you read the configuration documentation.
+
+#openshift_use_all_in_one_cluster_deployment: False
+#openshift_use_cinder_persistent_volume: False
+#openshift_use_cinder_registry: False
+#openshift_use_kuryr: False
+#openshift_use_openstack_ssl: False
+#openshift_use_swift_registry: False
+
+#openshift_openstack_use_neutron_internal_dns: False
+#openshift_openstack_use_no_floating_ip: False
+#openshift_openstack_use_nsupdate: True
+#openshift_openstack_use_provider_network: False
+
+############
+
 openshift_openstack_clusterid: "openshift"
 openshift_openstack_public_dns_domain: "example.com"
 openshift_openstack_dns_nameservers: []
@@ -25,7 +46,6 @@ openshift_openstack_external_network_name: "public"
 #openshift_openstack_kuryr_service_subnet_cidr: "172.30.0.0/16"
 
 ## You should set the following if you want to use Kuryr/Neutron as your SDN
-#openshift_use_kuryr: True
 #openshift_use_openshift_sdn: False
 
 # NOTE: you must uncomment these for Kuryr to work properly as well:
@@ -170,6 +190,9 @@ openshift_openstack_pool_end: "192.168.99.254"
 #rhsub_pool: '<pool name>'
 
 
+# This parameter may need to be set if your nsupdate zone differs from the full OpenShift DNS name
+#openshift_openstack_nsupdate_zone: example.com
+
 # # Roll-your-own DNS
 #openshift_openstack_external_nsupdate_keys:
 #  public:

+ 37 - 1
playbooks/ovirt/provisioning-vars.yaml.example

@@ -79,4 +79,40 @@ node_vm:
       name: localvol_disk
       interface: virtio
   state: running
-...
+
+openshift_ovirt_vm_manifest:
+######################################
+# Single Node Static Ip addresses
+######################################
+- name: 'master'
+  count: 1
+  profile: 'master_vm'
+  nic_mode:
+      master0:
+        nic_ip_address: '192.168.123.165'
+        nic_netmask: '255.255.255.0'
+        nic_gateway: '192.168.123.1'
+        nic_on_boot: True
+#######################################
+# Multiple Node Static Ip addresses
+#######################################
+- name: 'node'
+  count: 2
+  profile: 'node_vm'
+  nic_mode:
+      node0:     # This must fit the same name as this kind of vms. (e.g) if the name is test, this must be test0
+          nic_ip_address: '192.168.123.166'
+          nic_netmask: '255.255.255.0'
+          nic_gateway: '192.168.123.1'
+          nic_on_boot: True
+      node1:
+        nic_ip_address: '192.168.123.168'
+        nic_netmask: '255.255.255.0'
+        nic_gateway: '192.168.123.1'
+        nic_on_boot: True
+################################################
+# Multiple/Single Node Dynamic Ip addresses
+################################################
+- name: 'lb'
+  count: 1
+  profile: 'node_vm'

+ 46 - 1
roles/calico/README.md

@@ -1,3 +1,48 @@
 # Calico
 
-Please see [calico_master](../calico_master/README.md)
+Configure Calico components for the Master host.
+
+## Requirements
+
+* Ansible 2.2
+
+## Installation
+
+To install, set the following inventory configuration parameters:
+
+* `openshift_use_calico=True`
+* `openshift_use_openshift_sdn=False`
+* `os_sdn_network_plugin_name='cni'`
+
+By default, Calico will share the etcd used by OpenShift.
+To configure Calico to use a separate instance of etcd, place etcd SSL client certs on your master,
+then set the following variables in your inventory.ini:
+
+* `calico_etcd_ca_cert_file=/path/to/etcd-ca.crt`
+* `calico_etcd_cert_file=/path/to/etcd-client.crt`
+* `calico_etcd_key_file=/path/to/etcd-client.key`
+* `calico_etcd_endpoints=https://etcd:2379`
+
+## Upgrading
+
+OpenShift-Ansible installs Calico as a self-hosted install. Previously, Calico ran as a systemd service. Running Calico
+in this manner is now deprecated, and must be upgraded to a hosted cluster. Please run the Legacy Upgrade playbook to
+upgrade your existing Calico deployment to a hosted deployment:
+
+        ansible-playbook -i inventory.ini playbooks/byo/calico/legacy_upgrade.yml
+
+## Additional Calico/Node and Felix Configuration Options
+
+Additional parameters that can be defined in the inventory are:
+
+
+| Environment | Description | Schema | Default |   
+|---------|----------------------|---------|---------|
+| CALICO_IPV4POOL_IPIP | IPIP Mode to use for the IPv4 POOL created at start up.	| off, always, cross-subnet	| always |
+| CALICO_LOG_DIR | Directory on the host machine where Calico Logs are written.| String	| /var/log/calico |
+
+### Contact Information
+
+Author: Dan Osborne <dan@projectcalico.org>
+
+For support, join the `#openshift` channel on the [calico users slack](calicousers.slack.com).
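
The installation parameters listed in this README can also be expressed as group_vars YAML rather than `inventory.ini` lines; the sketch below simply restates the README's settings in that form (the separate-etcd block is only needed when not sharing OpenShift's etcd, and the paths are the README's placeholders):

```yaml
openshift_use_calico: true
openshift_use_openshift_sdn: false
os_sdn_network_plugin_name: cni

# Optional: point Calico at its own etcd instead of the one shared with OpenShift.
calico_etcd_ca_cert_file: /path/to/etcd-ca.crt
calico_etcd_cert_file: /path/to/etcd-client.crt
calico_etcd_key_file: /path/to/etcd-client.key
calico_etcd_endpoints: https://etcd:2379
```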

roles/calico_master/defaults/main.yaml → roles/calico/defaults/main.yaml


+ 1 - 1
roles/calico/meta/main.yml

@@ -13,5 +13,5 @@ galaxy_info:
   - cloud
   - system
 dependencies:
+- role: lib_utils
 - role: openshift_facts
-- role: container_runtime

roles/calico_master/tasks/certs.yml → roles/calico/tasks/certs.yml


+ 124 - 42
roles/calico/tasks/main.yml

@@ -1,47 +1,129 @@
 ---
-- name: Check for legacy service
-  stat:
-    path: /lib/systemd/system/calico.service
-    get_checksum: false
-    get_attributes: false
-    get_mime: false
-  register: sym
-- fail:
-    msg: You are running a systemd based installation of Calico. Please run the calico upgrade playbook to upgrade to a self-hosted installation.
-  when: sym.stat.exists
-
-- name: Configure NetworkManager to ignore Calico interfaces
-  copy:
-    src: files/calico.conf
-    dest: /etc/NetworkManager/conf.d/
-  when: using_network_manager | default(true) | bool
-  register: nm
-
-- name: restart NetworkManager
-  systemd:
-    name: NetworkManager
-    state: restarted
-  when: nm.changed
-
-# TODO: Move into shared vars file
-- name: Load default node image
+- name: Calico | Run kube proxy
+  run_once: true
+  import_role:
+    name: kube_proxy_and_dns
+
+- include_tasks: certs.yml
+
+- name: Calico | Clean Calico etcd data
+  when: calico_cleanup_path is defined and calico_cleanup_path != ""
+  file:
+    state: absent
+    path: "{{ calico_cleanup_path }}"
+
+- name: Calico | oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:calico-node
+  oc_adm_policy_user:
+    user: system:serviceaccount:kube-system:calico-node
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+
+- name: Calico | oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:calico-kube-controllers
+  oc_adm_policy_user:
+    user: system:serviceaccount:kube-system:calico-kube-controllers
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+
+- name: Calico | oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:calico-upgrade-job
+  oc_adm_policy_user:
+    user: system:serviceaccount:kube-system:calico-upgrade-job
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+
+- name: Calico | Set default selector for kube-system
+  command: >
+    {{ openshift_client_binary }}
+    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+    annotate  ns kube-system openshift.io/node-selector="" --overwrite
+
+- name: Calico | Create temp directory
+  command: mktemp -d /tmp/openshift-ansible-XXXXXXX
+  register: mktemp
+  changed_when: False
+
+- name: Calico | Write separate Calico etcd manifest
+  when: use_calico_etcd
+  template:
+    dest: "{{ mktemp.stdout }}/calico-etcd.yml"
+    src: calico-etcd.yml.j2
+
+- name: Calico | Launch separate Calico etcd
+  when: use_calico_etcd
+  command: >
+    {{ openshift_client_binary }} apply
+    -f {{ mktemp.stdout }}/calico-etcd.yml
+    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+  register: calico_etcd_create_output
+  failed_when: "calico_etcd_create_output.rc != 0"
+  changed_when: "('created' in calico_etcd_create_output.stdout) or ('configured' in calico_etcd_create_output.stdout)"
+
+- name: Calico | Parse node version
+  set_fact:
+    node_version: "{{ calico_node_image | regex_replace('^.*node:v?(.*)$', '\\1') }}"
+    cnx: "{{ calico_node_image | regex_replace('^.*/(.*)-node:.*$', '\\1') }}"
+    use_calico_credentials: "{{ calico_image_credentials is defined | bool }}"
+
+- name: Calico | Encode Docker Credentials
+  shell: >
+    cat {{ calico_image_credentials }} | openssl base64 -A
+  register: calico_encoded_credentials_output
+  failed_when: "calico_encoded_credentials_output.rc != 0 or calico_encoded_credentials_output.stdout == ''"
+  when: use_calico_credentials
+
+- name: Calico | Set Encoded Docker Credentials Fact
   set_fact:
-    calico_node_image: "quay.io/calico/node:v2.6.7"
-  when: calico_node_image is not defined
+    calico_encoded_credentials: "{{ calico_encoded_credentials_output.stdout }}"
+  when: use_calico_credentials
 
-- name: Prepull Images
-  command: "{{ openshift_container_cli }} pull {{ calico_node_image }}"
+- name: Calico | Write Calico Pull Secret
+  template:
+    dest: "{{ mktemp.stdout }}/calico-pull-secret.yml"
+    src: calico-pull-secret.yml.j2
+  when: use_calico_credentials
 
-- name: Apply node label
-  delegate_to: "{{ groups.oo_first_master.0 }}"
+- name: Calico | Create Calico Pull Secret
+  when: use_calico_credentials
   command: >
-    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig label node {{ openshift.node.nodename | lower }} --overwrite projectcalico.org/ds-ready=true
-
-- name: Wait for node running
-  uri:
-    url: http://localhost:9099/readiness
-    status_code: 204
-  delay: 3
-  retries: 10
-  register: result
-  until: result.status == 204
+    {{ openshift_client_binary }} apply
+    -f {{ mktemp.stdout }}/calico-pull-secret.yml
+    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+  register: calico_pull_secret_create_output
+  failed_when: "calico_pull_secret_create_output.rc != 0"
+  changed_when: "('created' in calico_pull_secret_create_output.stdout) or ('configured' in calico_pull_secret_create_output.stdout)"
+
+- name: Calico | Set the correct liveness and readiness checks
+  set_fact:
+    calico_binary_checks: "{{ (node_version > '3.2.0' and cnx != 'cnx') or (node_version > '2.2.0' and cnx == 'cnx') | bool }}"
+
+- name: Calico | Write Calico v2
+  template:
+    dest: "{{ mktemp.stdout }}/calico.yml"
+    src: calico.yml.j2
+  when:
+    - node_version | regex_search('^[0-9]\.[0-9]\.[0-9]') and node_version < '3.0.0'
+    - cnx != "cnx"
+
+- name: Calico | Write Calico v3
+  template:
+    dest: "{{ mktemp.stdout }}/calico.yml"
+    src: calicov3.yml.j2
+  when: (node_version | regex_search('^[0-9]\.[0-9]\.[0-9]') and node_version >= '3.0.0') or (node_version == 'master') or (cnx == "cnx" and node_version >= '2.0.0')
+
+- name: Calico | Launch Calico
+  run_once: true
+  command: >
+    {{ openshift_client_binary }} apply
+    -f {{ mktemp.stdout }}/calico.yml
+    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+  register: calico_create_output
+  failed_when: "calico_create_output.rc != 0"
+  changed_when: "('created' in calico_create_output.stdout) or ('configured' in calico_create_output.stdout)"
+
+- name: Calico | Delete temp directory
+  file:
+    name: "{{ mktemp.stdout }}"
+    state: absent
+  changed_when: False

roles/calico_master/templates/calico-etcd.yml.j2 → roles/calico/templates/calico-etcd.yml.j2


roles/calico_master/templates/calico-pull-secret.yml.j2 → roles/calico/templates/calico-pull-secret.yml.j2


+ 0 - 2
roles/calico_master/templates/calico.yml.j2

@@ -126,8 +126,6 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
-      nodeSelector:
-        projectcalico.org/ds-ready: "true"
       hostNetwork: true
       tolerations:
         # Make sure calico/node gets scheduled on all nodes.

+ 1 - 3
roles/calico_master/templates/calicov3.yml.j2

@@ -185,8 +185,6 @@ spec:
       imagePullSecrets:
         - name: calico-pull-secret
 {% endif %}
-      nodeSelector:
-        projectcalico.org/ds-ready: "true"
       hostNetwork: true
       tolerations:
         # Make sure calico/node gets scheduled on all nodes.
@@ -294,7 +292,7 @@ spec:
             # chosen from this range. Changing this value after installation will have
             # no effect. This should fall within '--cluster-cidr'.
             - name: CALICO_IPV4POOL_CIDR
-              value: "{{ openshift.master.sdn_cluster_network_cidr }}"
+              value: "{{ openshift_cluster_network_cidr }}"
             - name: CALICO_IPV4POOL_IPIP
               value: "{{ calico_ipv4pool_ipip }}"
             # Disable IPv6 on Kubernetes.

+ 0 - 48
roles/calico_master/README.md

@@ -1,48 +0,0 @@
-# Calico (Master)
-
-Configure Calico components for the Master host.
-
-## Requirements
-
-* Ansible 2.2
-
-## Installation
-
-To install, set the following inventory configuration parameters:
-
-* `openshift_use_calico=True`
-* `openshift_use_openshift_sdn=False`
-* `os_sdn_network_plugin_name='cni'`
-
-By default, Calico will share the etcd used by OpenShift.
-To configure Calico to use a separate instance of etcd, place etcd SSL client certs on your master,
-then set the following variables in your inventory.ini:
-
-* `calico_etcd_ca_cert_file=/path/to/etcd-ca.crt`
-* `calico_etcd_cert_file=/path/to/etcd-client.crt`
-* `calico_etcd_key_file=/path/to/etcd-client.key`
-* `calico_etcd_endpoints=https://etcd:2379`
-
-## Upgrading
-
-OpenShift-Ansible installs Calico as a self-hosted install. Previously, Calico ran as a systemd service. Running Calico
-in this manner is now deprecated, and must be upgraded to a hosted cluster. Please run the Legacy Upgrade playbook to
-upgrade your existing Calico deployment to a hosted deployment:
-
-        ansible-playbook -i inventory.ini playbooks/byo/calico/legacy_upgrade.yml
-
-## Additional Calico/Node and Felix Configuration Options
-
-Additional parameters that can be defined in the inventory are:
-
-
-| Environment | Description | Schema | Default |   
-|---------|----------------------|---------|---------|
-| CALICO_IPV4POOL_IPIP | IPIP Mode to use for the IPv4 POOL created at start up.	| off, always, cross-subnet	| always |
-| CALICO_LOG_DIR | Directory on the host machine where Calico Logs are written.| String	| /var/log/calico |
-
-### Contact Information
-
-Author: Dan Osborne <dan@projectcalico.org>
-
-For support, join the `#openshift` channel on the [calico users slack](calicousers.slack.com).

+ 0 - 129
roles/calico_master/tasks/main.yml

@@ -1,129 +0,0 @@
----
-- name: Calico | Run kube proxy
-  run_once: true
-  import_role:
-    name: kube_proxy_and_dns
-
-- include_tasks: certs.yml
-
-- name: Calico Master | Clean Calico etcd data
-  when: calico_cleanup_path is defined and calico_cleanup_path != ""
-  file:
-    state: absent
-    path: "{{ calico_cleanup_path }}"
-
-- name: Calico Master | oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:calico-node
-  oc_adm_policy_user:
-    user: system:serviceaccount:kube-system:calico-node
-    resource_kind: scc
-    resource_name: privileged
-    state: present
-
-- name: Calico Master | oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:calico-kube-controllers
-  oc_adm_policy_user:
-    user: system:serviceaccount:kube-system:calico-kube-controllers
-    resource_kind: scc
-    resource_name: privileged
-    state: present
-
-- name: Calico Master | oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:calico-upgrade-job
-  oc_adm_policy_user:
-    user: system:serviceaccount:kube-system:calico-upgrade-job
-    resource_kind: scc
-    resource_name: privileged
-    state: present
-
-- name: Set default selector for kube-system
-  command: >
-    {{ openshift_client_binary }}
-    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-    annotate  ns kube-system openshift.io/node-selector="" --overwrite
-
-- name: Calico Master | Create temp directory
-  command: mktemp -d /tmp/openshift-ansible-XXXXXXX
-  register: mktemp
-  changed_when: False
-
-- name: Calico Master | Write separate Calico etcd manifest
-  when: use_calico_etcd
-  template:
-    dest: "{{ mktemp.stdout }}/calico-etcd.yml"
-    src: calico-etcd.yml.j2
-
-- name: Calico Master | Launch separate Calico etcd
-  when: use_calico_etcd
-  command: >
-    {{ openshift_client_binary }} apply
-    -f {{ mktemp.stdout }}/calico-etcd.yml
-    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-  register: calico_etcd_create_output
-  failed_when: "calico_etcd_create_output.rc != 0"
-  changed_when: "('created' in calico_etcd_create_output.stdout) or ('configured' in calico_etcd_create_output.stdout)"
-
-- name: Calico Master | Parse node version
-  set_fact:
-    node_version: "{{ calico_node_image | regex_replace('^.*node:v?(.*)$', '\\1') }}"
-    cnx: "{{ calico_node_image | regex_replace('^.*/(.*)-node:.*$', '\\1') }}"
-    use_calico_credentials: "{{ calico_image_credentials is defined | bool }}"
-
-- name: Calico | Encode Docker Credentials
-  shell: >
-    cat {{ calico_image_credentials }} | openssl base64 -A
-  register: calico_encoded_credentials_output
-  failed_when: "calico_encoded_credentials_output.rc != 0 or calico_encoded_credentials_output.stdout == ''"
-  when: use_calico_credentials
-
-- name: Calico | Set Encoded Docker Credentials Fact
-  set_fact:
-    calico_encoded_credentials: "{{ calico_encoded_credentials_output.stdout }}"
-  when: use_calico_credentials
-
-- name: Calico | Write Calico Pull Secret
-  template:
-    dest: "{{ mktemp.stdout }}/calico-pull-secret.yml"
-    src: calico-pull-secret.yml.j2
-  when: use_calico_credentials
-
-- name: Calico | Create Calico Pull Secret
-  when: use_calico_credentials
-  command: >
-    {{ openshift_client_binary }} apply
-    -f {{ mktemp.stdout }}/calico-pull-secret.yml
-    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-  register: calico_pull_secret_create_output
-  failed_when: "calico_pull_secret_create_output.rc != 0"
-  changed_when: "('created' in calico_pull_secret_create_output.stdout) or ('configured' in calico_pull_secret_create_output.stdout)"
-
-- name: Calico Master | Set the correct liveness and readiness checks
-  set_fact:
-    calico_binary_checks: "{{ (node_version > '3.2.0' and cnx != 'cnx') or (node_version > '2.2.0' and cnx == 'cnx') | bool }}"
-
-- name: Calico Master | Write Calico v2
-  template:
-    dest: "{{ mktemp.stdout }}/calico.yml"
-    src: calico.yml.j2
-  when:
-    - node_version | regex_search('^[0-9]\.[0-9]\.[0-9]') and node_version < '3.0.0'
-    - cnx != "cnx"
-
-- name: Calico Master | Write Calico v3
-  template:
-    dest: "{{ mktemp.stdout }}/calico.yml"
-    src: calicov3.yml.j2
-  when: (node_version | regex_search('^[0-9]\.[0-9]\.[0-9]') and node_version >= '3.0.0') or (node_version == 'master') or (cnx == "cnx" and node_version >= '2.0.0')
-
-- name: Calico Master | Launch Calico
-  run_once: true
-  command: >
-    {{ openshift_client_binary }} apply
-    -f {{ mktemp.stdout }}/calico.yml
-    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-  register: calico_create_output
-  failed_when: "calico_create_output.rc != 0"
-  changed_when: "('created' in calico_create_output.stdout) or ('configured' in calico_create_output.stdout)"
-
-- name: Calico Master | Delete temp directory
-  file:
-    name: "{{ mktemp.stdout }}"
-    state: absent
-  changed_when: False

+ 3 - 0
roles/calico_node/README.md

@@ -0,0 +1,3 @@
+# Calico Node
+
+Please see [calico](../calico/README.md)

roles/calico/files/calico.conf → roles/calico_node/files/calico.conf


+ 0 - 1
roles/calico_master/meta/main.yml

@@ -13,5 +13,4 @@ galaxy_info:
   - cloud
   - system
 dependencies:
-- role: lib_utils
 - role: openshift_facts

+ 13 - 0
roles/calico_node/tasks/main.yml

@@ -0,0 +1,13 @@
+---
+- name: Calico Node | Configure NetworkManager to ignore Calico interfaces
+  copy:
+    src: files/calico.conf
+    dest: /etc/NetworkManager/conf.d/
+  when: using_network_manager | default(true) | bool
+  register: nm
+
+- name: Calico Node | Restart NetworkManager
+  systemd:
+    name: NetworkManager
+    state: restarted
+  when: nm.changed

+ 2 - 3
roles/etcd/defaults/main.yaml

@@ -83,9 +83,8 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_
 etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
 etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
 
-# required role variable
-#etcd_peer: 127.0.0.1
-etcdctlv2: "{{ r_etcd_common_etcdctl_command }} --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
+etcd_peer: "{{ openshift.common.hostname }}"
+etcdctlv2: "{{ r_etcd_common_etcdctl_command }} --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoints {{ etcd_peer_url_scheme }}://{{ etcd_peer }}:{{ etcd_client_port }}"
 
 etcd_service: etcd
 # Location of the service file is fixed and not meant to be changed

+ 0 - 11
roles/etcd/etcdctl.sh

@@ -1,11 +0,0 @@
-#!/bin/bash
-# Sets up handy aliases for etcd, need etcdctl2 and etcdctl3 because
-# command flags are different between the two. Should work on stand
-# alone etcd hosts and master + etcd hosts too because we use the peer keys.
-etcdctl2() {
- /usr/bin/etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://`hostname`:2379 ${@}
-}
-
-etcdctl3() {
- ETCDCTL_API=3 /usr/bin/etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints https://`hostname`:2379 ${@}
-}

+ 1 - 7
roles/etcd/tasks/add_new_member.yml

@@ -3,13 +3,7 @@
 - import_tasks: set_facts.yml
 
 - name: Add new etcd members to cluster
-  command: >
-    {{ r_etcd_common_etcdctl_command }}
-                     --cert-file {{ etcd_peer_cert_file }}
-                     --key-file {{ etcd_peer_key_file }}
-                     --ca-file {{ etcd_peer_ca_file }}
-                     -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_ip }}:{{ etcd_client_port }}
-                     member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}
+  command: "{{ etcdctlv2 }} member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
   delegate_to: "{{ etcd_ca_host }}"
   failed_when:
   - etcd_add_check.rc == 1

+ 8 - 8
roles/etcd/tasks/remove-etcd-v2-data.yml

@@ -1,10 +1,5 @@
 ---
-- name: Verify cluster is healthy pre-upgrade
-  command: "{{ etcdctlv2 }} cluster-health"
-  register: cluster_health
-  retries: 30
-  delay: 6
-  until: cluster_health.rc == 0
+- import_tasks: verify_cluster_health.yml
 
 - name: Check migrated status
   command: "{{ etcdctlv2 }} get /kubernetes.io"
@@ -15,12 +10,17 @@
   - name: Remove etcdv2 kubernetes data
     command: "{{ etcdctlv2 }} rm -r /kubernetes.io"
     register: etcdv2_remove_k8s
-    failed_when: ('Key not found' not in etcdv2_remove_k8s.stderr)
+    when: ('Key not found' not in etcdv2_migrated_status.stderr)
+
+  - name: Get openshift data
+    command: "{{ etcdctlv2 }} get /openshift.io"
+    register: etcdv2_openshift_data
+    failed_when: ('stdout' not in etcdv2_openshift_data)
 
   - name: Remove etcdv2 openshift data
     command: "{{ etcdctlv2 }} rm -r /openshift.io"
     register: etcdv2_remove_openshift
-    failed_when: ('Key not found' not in etcdv2_remove_openshift.stderr)
+    when: ('Key not found' not in etcdv2_openshift_data.stderr)
 
   - name: Set migrated mark
     command: "{{ etcdctlv2 }} set /kubernetes.io migrated"

+ 1 - 1
roles/etcd/tasks/static.yml

@@ -84,7 +84,7 @@
       - "{{ etcd_peer_key_file }}"
       - "--ca-file"
       - "{{ etcd_peer_ca_file }}"
-      - "-C"
+      - "--endpoints"
       - "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
       - "cluster-health"
   with_items:

+ 2 - 12
roles/etcd/tasks/upgrade_rpm.yml

@@ -8,12 +8,7 @@
 # RHEL 7.3.3 with etcd-3.1.0-2.el7
 # RHEL 7.3.2 with etcd-3.0.15-1.el7
 
-- name: Verify cluster is healthy pre-upgrade
-  command: "{{ etcdctlv2 }} cluster-health"
-  register: cluster_health
-  retries: 30
-  delay: 6
-  until: cluster_health.rc == 0
+- import_tasks: verify_cluster_health.yml
 
 - set_fact:
     l_etcd_target_package: "{{ 'etcd' if r_etcd_upgrade_version is not defined else 'etcd-'+r_etcd_upgrade_version+'*' }}"
@@ -30,9 +25,4 @@
 - name: restart etcd
   command: "{{ l_etcd_restart_command }}"
 
-- name: Verify cluster is healthy
-  command: "{{ etcdctlv2 }} cluster-health"
-  register: etcdctl
-  until: etcdctl.rc == 0
-  retries: 3
-  delay: 10
+- import_tasks: verify_cluster_health.yml

+ 2 - 12
roles/etcd/tasks/upgrade_static.yml

@@ -3,12 +3,7 @@
 
 # INPUT r_etcd_upgrade_version
 
-- name: Verify cluster is healthy pre-upgrade
-  command: "{{ etcdctlv2 }} cluster-health"
-  register: cluster_health
-  retries: 30
-  delay: 6
-  until: cluster_health.rc == 0
+- import_tasks: verify_cluster_health.yml
 
 - name: Check for old etcd service files
   stat:
@@ -54,9 +49,4 @@
 - set_fact:
     r_etcd_common_etcd_runtime: static_pod
 
-- name: Verify cluster is healthy
-  command: "{{ etcdctlv2 }} cluster-health"
-  register: etcdctl
-  until: etcdctl.rc == 0 and 'stopped' not in etcdctl.stderr
-  retries: 30
-  delay: 10
+- import_tasks: verify_cluster_health.yml

+ 9 - 0
roles/etcd/tasks/verify_cluster_health.yml

@@ -0,0 +1,9 @@
+---
+- name: Verify cluster is healthy
+  command: "{{ etcdctlv2 }} cluster-health"
+  register: cluster_health
+  retries: 30
+  delay: 6
+  until:
+  - cluster_health.rc == 0
+  - ('stopped' not in cluster_health.stderr)
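
The upgrade and v2-data-removal task files above consume this via `import_tasks`; the same check could also be reused ad hoc with `include_role`, a sketch under the assumption that the usual etcd facts are available (the play name and host group are illustrative):

- name: Spot-check etcd cluster health   # hypothetical ad-hoc play
  hosts: oo_etcd_to_config
  tasks:
  - include_role:
      name: etcd
      tasks_from: verify_cluster_health.yml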

+ 1 - 1
roles/etcd/templates/etcdctl.sh.j2

@@ -4,7 +4,7 @@
 # alone etcd hosts and master + etcd hosts too because we use the peer keys.
 etcdctl2() {
 
- cmd="ETCDCTL_API=2 etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://`hostname`:2379 ${@}"
+ cmd="ETCDCTL_API=2 etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoints https://`hostname`:2379 ${@}"
  if [[ -f /usr/local/bin/master-exec ]]; then
    /usr/local/bin/master-exec etcd etcd /bin/sh -c "$cmd"
  else

+ 4 - 0
roles/installer_checkpoint/callback_plugins/installer_checkpoint.py

@@ -19,6 +19,10 @@ class CallbackModule(CallbackBase):
 
     def v2_playbook_on_stats(self, stats):
 
+        # Return if there are no custom stats to process
+        if stats.custom == {}:
+            return
+
         phases = stats.custom['_run']
 
         # Find the longest phase title

+ 1 - 0
roles/lib_utils/action_plugins/generate_pv_pvcs_list.py

@@ -188,6 +188,7 @@ class ActionModule(ActionBase):
         result["failed"] = False
         result["msg"] = "persistent_volumes list and persistent_volume_claims list created"
         vars_to_check = ['openshift_hosted_registry_storage',
+                         'openshift_hosted_registry_glusterfs_storage',
                          'openshift_hosted_router_storage',
                          'openshift_hosted_etcd_storage',
                          'openshift_logging_storage',

+ 17 - 10
roles/openshift_aws/defaults/main.yml

@@ -19,7 +19,6 @@ openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
 openshift_aws_iam_cert_path: ''
 openshift_aws_iam_cert_key_path: ''
 
-openshift_aws_iam_role_name: "openshift_node_describe_instances_{{ openshift_aws_clusterid }}"
 openshift_aws_iam_role_policy_json: "{{ lookup('file', 'describeinstances.json') }}"
 openshift_aws_iam_role_policy_name: "describe_instances_{{ openshift_aws_clusterid }}"
 
@@ -276,9 +275,7 @@ openshift_aws_master_instance_config:
   health_check: "{{ openshift_aws_scale_group_health_check }}"
   exact_count: "{{ openshift_aws_master_group_desired_size | default(3) }}"
   termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
-  iam_role: "{{ openshift_aws_iam_master_role_name | default(openshift_aws_iam_role_name) }}"
-  policy_name: "{{ openshift_aws_iam_master_role_policy_name | default(openshift_aws_iam_role_policy_name) }}"
-  policy_json: "{{ openshift_aws_iam_master_role_policy_json | default(openshift_aws_iam_role_policy_json) }}"
+  iam_role: "{{ openshift_aws_launch_config_iam_roles['master'].name }}"
   elbs: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}"
   groups:
   - "{{ openshift_aws_clusterid }}"  # default sg
@@ -296,9 +293,7 @@ openshift_aws_node_group_config:
     desired_size: "{{ openshift_aws_compute_group_desired_size | default(3) }}"
     termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
     replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
-    iam_role: "{{ openshift_aws_iam_node_role_name | default(openshift_aws_iam_role_name) }}"
-    policy_name: "{{ openshift_aws_iam_node_role_policy_name | default(openshift_aws_iam_role_policy_name) }}"
-    policy_json: "{{ openshift_aws_iam_node_role_policy_json | default(openshift_aws_iam_role_policy_json) }}"
+    iam_role: "{{ openshift_aws_launch_config_iam_roles['compute'].name }}"
   # The 'infra' key is always required here.
   infra:
     instance_type: "{{ openshift_aws_infra_group_instance_type | default(openshift_aws_instance_type) }}"
@@ -309,9 +304,7 @@ openshift_aws_node_group_config:
     desired_size: "{{ openshift_aws_infra_group_desired_size | default(2) }}"
     termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
     replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
-    iam_role: "{{ openshift_aws_iam_node_role_name | default(openshift_aws_iam_role_name) }}"
-    policy_name: "{{ openshift_aws_iam_node_role_policy_name | default(openshift_aws_iam_role_policy_name) }}"
-    policy_json: "{{ openshift_aws_iam_node_role_policy_json | default(openshift_aws_iam_role_policy_json) }}"
+    iam_role: "{{ openshift_aws_launch_config_iam_roles['infra'].name }}"
     elbs: "{{ openshift_aws_elb_dict | json_query('infra.[*][0][*].name') }}"
 
 # build_instance_tags is a custom filter in role lib_utils
@@ -333,6 +326,20 @@ openshift_aws_launch_config_security_groups:
   - "{{ openshift_aws_clusterid }}_infra"  # node type sg
   - "{{ openshift_aws_clusterid }}_infra_k8s"  # node type sg k8s
 
+openshift_aws_launch_config_iam_roles:
+  master:
+    name: "{{ openshift_aws_iam_master_role_name | default(openshift_aws_clusterid ~ '-iam_master') }}"
+    policy_name: "{{ openshift_aws_iam_master_role_policy_name | default(openshift_aws_iam_role_policy_name) }}"
+    policy_json: "{{ openshift_aws_iam_master_role_policy_json | default(openshift_aws_iam_role_policy_json) }}"
+  compute:
+    name: "{{ openshift_aws_iam_compute_role_name | default(openshift_aws_clusterid ~ '-iam_compute') }}"
+    policy_name: "{{ openshift_aws_iam_node_role_policy_name | default(openshift_aws_iam_role_policy_name) }}"
+    policy_json: "{{ openshift_aws_iam_node_role_policy_json | default(openshift_aws_iam_role_policy_json) }}"
+  infra:
+    name: "{{ openshift_aws_iam_infra_role_name | default(openshift_aws_clusterid ~ '-iam_infra') }}"
+    policy_name: "{{ openshift_aws_iam_node_role_policy_name | default(openshift_aws_iam_role_policy_name) }}"
+    policy_json: "{{ openshift_aws_iam_node_role_policy_json | default(openshift_aws_iam_role_policy_json) }}"
+
 openshift_aws_security_groups_tags: "{{ openshift_aws_kube_tags }}"
 
 openshift_aws_node_security_groups:
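
The per-group names above can still be overridden from the inventory through the `openshift_aws_iam_*_role_name` variables that the dictionary consults; a sketch with illustrative values:

# hypothetical inventory fragment
openshift_aws_clusterid: mycluster
openshift_aws_iam_master_role_name: mycluster-masters
openshift_aws_iam_compute_role_name: mycluster-workers
openshift_aws_iam_infra_role_name: mycluster-infra
# left unset, the defaults resolve to mycluster-iam_master, mycluster-iam_compute and mycluster-iam_infra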

+ 8 - 5
roles/openshift_aws/tasks/build_node_group.yml

@@ -74,17 +74,20 @@
     | combine({'openshift-node-group-config': openshift_aws_node_group.node_group_config | default('unset') }) }}"
     l_node_group_name: "{{ openshift_aws_node_group.name }} {{ l_deployment_serial }}"
 
+- name: fetch the iam role
+  iam_role_facts:
+    name: "{{ openshift_aws_launch_config_iam_roles[openshift_aws_node_group.group].name }}"
+  register: l_profilename
+  retries: 3
+  delay: 3
+  when: openshift_aws_create_iam_role
+
 - name: Set scale group instances autonaming
   set_fact:
     l_instance_tags: "{{ l_instance_tags | combine({'Name': l_node_group_name }) }}"
   when: openshift_aws_autoname_scale_group_instances | default(false)
 
 - when:
-  - openshift_aws_create_iam_role
-  - asgs.results|length != 2
-  include_tasks: iam_role.yml
-
-- when:
   - openshift_aws_create_launch_config
   - asgs.results|length != 2
   include_tasks: launch_config.yml

+ 15 - 6
roles/openshift_aws/tasks/iam_role.yml

@@ -13,10 +13,14 @@
 #####
 - name: Create an iam role
   iam_role:
-    name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}"
+    name: "{{ openshift_aws_launch_config_iam_roles[l_item].name }}"
     assume_role_policy_document: "{{ lookup('file','trustpolicy.json') }}"
     state: "{{ openshift_aws_iam_role_state | default('present') }}"
-  when: l_node_group_config[openshift_aws_node_group.group].iam_role is defined
+  loop: "{{ openshift_aws_launch_config_iam_roles | list }}"
+  loop_control:
+    loop_var: l_item
+  retries: 3
+  delay: 3
 
 #####
 # The second part of this task file is linking the role to a policy
@@ -27,8 +31,13 @@
 - name: create an iam policy
   iam_policy:
     iam_type: role
-    iam_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}"
-    policy_json: "{{ l_node_group_config[openshift_aws_node_group.group].policy_json }}"
-    policy_name: "{{ l_node_group_config[openshift_aws_node_group.group].policy_name }}"
+    iam_name: "{{ openshift_aws_launch_config_iam_roles[l_item].name }}"
+    policy_json: "{{ openshift_aws_launch_config_iam_roles[l_item].policy_json }}"
+    policy_name: "{{ openshift_aws_launch_config_iam_roles[l_item].policy_name }}"
     state: "{{ openshift_aws_iam_role_state | default('present') }}"
-  when: "'iam_role' in l_node_group_config[openshift_aws_node_group.group]"
+  register: l_iam_create_policy_out
+  loop: "{{ openshift_aws_launch_config_iam_roles | list }}"
+  loop_control:
+    loop_var: l_item
+  retries: 3
+  delay: 3
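
Because `openshift_aws_launch_config_iam_roles | list` yields the dictionary keys, both loops iterate over the group names and look the role definitions back up by key; a quick sketch of what `l_item` takes on:

- debug:
    msg: "{{ openshift_aws_launch_config_iam_roles | list }}"
  # with the defaults above this prints ['master', 'compute', 'infra']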

+ 4 - 4
roles/openshift_aws/tasks/launch_config.yml

@@ -21,10 +21,10 @@
     image_id: "{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}"
     instance_type: "{{ l_node_group_config[openshift_aws_node_group.group].instance_type }}"
     security_groups: "{{ openshift_aws_launch_config_security_group_id  | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
-    instance_profile_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role if l_node_group_config[openshift_aws_node_group.group].iam_role is defined and
-                                                                           l_node_group_config[openshift_aws_node_group.group].iam_role != '' and
-                                                                           openshift_aws_create_iam_role
-                                                                        else omit }}"
+    instance_profile_name: "{{ l_profilename.iam_roles[0].role_name if openshift_aws_create_iam_role and
+                                                                      l_node_group_config[openshift_aws_node_group.group].iam_role is defined and
+                                                                      l_node_group_config[openshift_aws_node_group.group].iam_role != ''
+                                                                    else omit }}"
     user_data: "{{ lookup('template', 'user_data.j2') }}"
     key_name: "{{ openshift_aws_ssh_key_name }}"
     ebs_optimized: False

+ 4 - 0
roles/openshift_aws/tasks/provision_ec2.yml

@@ -18,6 +18,10 @@
     volumes: "{{ openshift_aws_master_instance_config.volumes }}"
     vpc_subnet_id: "{{ l_loop }}"
     wait: yes
+    instance_profile_name: "{{ l_profilename.iam_roles[0].role_name if openshift_aws_create_iam_role and
+                                                                      openshift_aws_master_instance_config.iam_role is defined and
+                                                                      openshift_aws_master_instance_config.iam_role != ''
+                                                                    else omit }}"
   loop: "{{ l_subnetout_results | list }}"
   loop_control:
     loop_var: l_loop

+ 8 - 0
roles/openshift_aws/tasks/provision_ec2_facts.yml

@@ -85,3 +85,11 @@
       vpc-id: "{{ vpcout.vpcs[0].id }}"
     region: "{{ openshift_aws_region }}"
   register: ec2sgs
+
+- name: fetch the iam role
+  iam_role_facts:
+    name: "{{ openshift_aws_launch_config_iam_roles['master'].name }}"
+  register: l_profilename
+  retries: 3
+  delay: 3
+  when: openshift_aws_create_iam_role

+ 1 - 1
roles/openshift_cluster_monitoring_operator/defaults/main.yml

@@ -2,7 +2,7 @@
 openshift_cluster_monitoring_operator_namespace: openshift-monitoring
 
 l_openshift_cluster_monitoring_operator_image_dicts:
-  origin: 'quay.io/coreos/cluster-monitoring-operator:v0.1.0'
+  origin: 'quay.io/coreos/cluster-monitoring-operator:v0.1.1'
   openshift-enterprise: "{{ l_osm_registry_url | regex_replace('${component}' | regex_escape, 'cluster-monitoring-operator') }}"
 
 openshift_cluster_monitoring_operator_image: "{{ l_openshift_cluster_monitoring_operator_image_dicts[openshift_deployment_type] }}"

+ 1 - 1
roles/openshift_examples/examples-sync.sh

@@ -8,7 +8,7 @@
 XPAAS_VERSION=ose-v1.4.14
 RHDM70_VERSION=7.0.1.GA
 RHPAM70_VERSION=7.0.2.GA
-DG_72_VERSION=datagrid72-dev
+DG_72_VERSION=1.1.1
 ORIGIN_VERSION=${1:-v3.11}
 ORIGIN_BRANCH=${2:-master}
 RHAMP_TAG=2.0.0.GA

+ 65 - 0
roles/openshift_examples/files/examples/latest/xpaas-streams/datagrid72-image-stream.json

@@ -0,0 +1,65 @@
+{
+    "kind": "List",
+    "apiVersion": "v1",
+    "metadata": {
+        "name": "datagrid72-image-streams",
+        "annotations": {
+            "description": "ImageStream definitions for Red Hat JBoss Data Grid 7.2.",
+            "openshift.io/provider-display-name": "Red Hat, Inc."
+        }
+    },
+    "items": [
+        {
+            "kind": "ImageStream",
+            "apiVersion": "v1",
+            "metadata": {
+                "name": "jboss-datagrid72-openshift",
+                "annotations": {
+                    "openshift.io/display-name": "Red Hat JBoss Data Grid 7.2",
+                    "openshift.io/provider-display-name": "Red Hat, Inc.",
+                    "version": "1.1"
+                }
+            },
+            "spec": {
+                "tags": [
+                    {
+                        "name": "1.0",
+                        "annotations": {
+                            "description": "Red Hat JBoss Data Grid 7.2 S2I images.",
+                            "iconClass": "icon-datagrid",
+                            "tags": "datagrid,jboss,hidden",
+                            "supports": "datagrid:7.2",
+                            "version": "1.0",
+                            "openshift.io/display-name": "Red Hat JBoss Data Grid 7.2"
+                        },
+                        "referencePolicy": {
+                            "type": "Local"
+                        },
+                        "from": {
+                            "kind": "DockerImage",
+                            "name": "registry.redhat.io/jboss-datagrid-7/datagrid72-openshift:1.0"
+                        }
+                    },
+                    {
+                        "name": "1.1",
+                        "annotations": {
+                            "description": "Red Hat JBoss Data Grid 7.2 S2I images.",
+                            "iconClass": "icon-datagrid",
+                            "tags": "datagrid,jboss,hidden",
+                            "supports": "datagrid:7.2",
+                            "version": "1.1",
+                            "openshift.io/display-name": "Red Hat JBoss Data Grid 7.2"
+                        },
+                        "referencePolicy": {
+                            "type": "Local"
+                        },
+                        "from": {
+                            "kind": "DockerImage",
+                            "name": "registry.redhat.io/jboss-datagrid-7/datagrid72-openshift:1.1"
+                        }
+                    }
+                ]
+            }
+        }
+    ]
+}

+ 446 - 0
roles/openshift_examples/files/examples/latest/xpaas-templates/datagrid72-basic.json

@@ -0,0 +1,446 @@
+{
+    "kind": "Template",
+    "apiVersion": "v1",
+    "metadata": {
+        "annotations": {
+            "iconClass": "icon-datagrid",
+            "tags": "datagrid,jboss",
+            "version": "1.1",
+            "openshift.io/display-name": "Red Hat JBoss Data Grid 7.2 (Ephemeral, no https)",
+            "openshift.io/provider-display-name": "Red Hat, Inc.",
+            "description": "An example Red Hat JBoss Data Grid application. For more information about using this template, see https://github.com/jboss-openshift/application-templates.",
+            "template.openshift.io/long-description": "This template defines resources needed to develop Red Hat JBoss Data Grid 7.2 based applications, including a deployment configuration, using ephemeral (temporary) storage and communication using http.",
+            "template.openshift.io/documentation-url": "https://access.redhat.com/documentation/en/red-hat-jboss-data-grid/",
+            "template.openshift.io/support-url": "https://access.redhat.com"
+        },
+        "name": "datagrid72-basic"
+    },
+    "labels": {
+        "template": "datagrid72-basic"
+    },
+    "message": "A new data grid service has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\".",
+    "parameters": [
+        {
+            "displayName": "Application Name",
+            "description": "The name for the application.",
+            "name": "APPLICATION_NAME",
+            "value": "datagrid-app",
+            "required": true
+        },
+        {
+            "displayName": "Custom http Route Hostname",
+            "description": "Custom hostname for http service route.  Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+            "name": "HOSTNAME_HTTP",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Username",
+            "description": "User name for JDG user.",
+            "name": "USERNAME",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Password",
+            "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
+            "name": "PASSWORD",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "JDG User Roles/Groups",
+            "description": "Comma delimited list of roles/groups associated with the JDG user",
+            "name": "ADMIN_GROUP",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Hotrod Authentication",
+            "description": "Enable Hotrod Authentication",
+            "name": "HOTROD_AUTHENTICATION",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Container Security Role Mapper",
+            "description": "Defines which role mapper to use for cache authentication",
+            "name": "CONTAINER_SECURITY_ROLE_MAPPER",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Container Security Roles",
+            "description": "Comma delimited list of role names and assigned permissions",
+            "name": "CONTAINER_SECURITY_ROLES",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "ImageStream Namespace",
+            "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+            "name": "IMAGE_STREAM_NAMESPACE",
+            "value": "openshift",
+            "required": true
+        },
+        {
+            "displayName": "Infinispan Connectors",
+            "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+            "name": "INFINISPAN_CONNECTORS",
+            "value": "hotrod,memcached,rest",
+            "required": false
+        },
+        {
+            "displayName": "Cache Names",
+            "description": "Comma-separated list of caches to configure.  By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+            "name": "CACHE_NAMES",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Datavirt Cache Names",
+            "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views.  Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+            "name": "DATAVIRT_CACHE_NAMES",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Default Cache Type",
+            "description": "Default cache type for all caches. If empty then distributed will be the default",
+            "name": "CACHE_TYPE_DEFAULT",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Memcached Cache Name",
+            "description": "The name of the cache to expose through this memcached connector",
+            "name": "MEMCACHED_CACHE",
+            "value": "default_memcached",
+            "required": false
+        },
+        {
+            "displayName": "REST Security Domain",
+            "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+            "name": "REST_SECURITY_DOMAIN",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "JGroups Cluster Password",
+            "description": "JGroups cluster password",
+            "name": "JGROUPS_CLUSTER_PASSWORD",
+            "from": "[a-zA-Z0-9]{8}",
+            "generate": "expression",
+            "required": true
+        },
+        {
+            "description": "Container memory limit",
+            "name": "MEMORY_LIMIT",
+            "value": "1Gi",
+            "required": false
+        }
+    ],
+    "objects": [
+        {
+            "kind": "Service",
+            "apiVersion": "v1",
+            "spec": {
+                "ports": [
+                    {
+                        "port": 8080,
+                        "targetPort": 8080
+                    }
+                ],
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                }
+            },
+            "metadata": {
+                "name": "${APPLICATION_NAME}",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "The web server's HTTP port."
+                }
+            }
+        },
+        {
+            "kind": "Service",
+            "apiVersion": "v1",
+            "spec": {
+                "ports": [
+                    {
+                        "port": 11211,
+                        "targetPort": 11211
+                    }
+                ],
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                }
+            },
+            "metadata": {
+                "name": "${APPLICATION_NAME}-memcached",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "Memcached service for clustered applications."
+                }
+            }
+        },
+        {
+            "kind": "Service",
+            "apiVersion": "v1",
+            "spec": {
+                "ports": [
+                    {
+                        "port": 11333,
+                        "targetPort": 11333
+                    }
+                ],
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                }
+            },
+            "metadata": {
+                "name": "${APPLICATION_NAME}-hotrod",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "Hot Rod service for clustered applications."
+                }
+            }
+        },
+        {
+            "kind": "Service",
+            "apiVersion": "v1",
+            "spec": {
+                "clusterIP": "None",
+                "ports": [
+                    {
+                        "name": "ping",
+                        "port": 8888
+                    }
+                ],
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                }
+            },
+            "metadata": {
+                "name": "${APPLICATION_NAME}-ping",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
+                    "description": "The JGroups ping port for clustering."
+                }
+            }
+        },
+        {
+            "kind": "Route",
+            "apiVersion": "v1",
+            "id": "${APPLICATION_NAME}-http",
+            "metadata": {
+                "name": "${APPLICATION_NAME}",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "Route for application's HTTP service."
+                }
+            },
+            "spec": {
+                "host": "${HOSTNAME_HTTP}",
+                "to": {
+                    "name": "${APPLICATION_NAME}"
+                }
+            }
+        },
+        {
+            "kind": "DeploymentConfig",
+            "apiVersion": "v1",
+            "metadata": {
+                "name": "${APPLICATION_NAME}",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                }
+            },
+            "spec": {
+                "strategy": {
+                    "type": "Recreate"
+                },
+                "triggers": [
+                    {
+                        "type": "ImageChange",
+                        "imageChangeParams": {
+                            "automatic": true,
+                            "containerNames": [
+                                "${APPLICATION_NAME}"
+                            ],
+                            "from": {
+                                "kind": "ImageStreamTag",
+                                "namespace": "${IMAGE_STREAM_NAMESPACE}",
+                                "name": "jboss-datagrid72-openshift:1.1"
+                            }
+                        }
+                    },
+                    {
+                        "type": "ConfigChange"
+                    }
+                ],
+                "replicas": 1,
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                },
+                "template": {
+                    "metadata": {
+                        "name": "${APPLICATION_NAME}",
+                        "labels": {
+                            "deploymentConfig": "${APPLICATION_NAME}",
+                            "application": "${APPLICATION_NAME}"
+                        }
+                    },
+                    "spec": {
+                        "terminationGracePeriodSeconds": 60,
+                        "containers": [
+                            {
+                                "name": "${APPLICATION_NAME}",
+                                "image": "jboss-datagrid72-openshift",
+                                "imagePullPolicy": "Always",
+                                "resources": {
+                                    "limits": {
+                                        "memory": "${MEMORY_LIMIT}"
+                                    }
+                                },
+                                "livenessProbe": {
+                                    "exec": {
+                                        "command": [
+                                            "/bin/bash",
+                                            "-c",
+                                            "/opt/datagrid/bin/livenessProbe.sh"
+                                        ]
+                                    },
+                                    "initialDelaySeconds": 60
+                                },
+                                "readinessProbe": {
+                                    "exec": {
+                                        "command": [
+                                            "/bin/bash",
+                                            "-c",
+                                            "/opt/datagrid/bin/readinessProbe.sh"
+                                        ]
+                                    }
+                                },
+                                "ports": [
+                                    {
+                                        "name": "jolokia",
+                                        "containerPort": 8778,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "http",
+                                        "containerPort": 8080,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "ping",
+                                        "containerPort": 8888,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "memcached",
+                                        "containerPort": 11211,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "hotrod-internal",
+                                        "containerPort": 11222,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "hotrod",
+                                        "containerPort": 11333,
+                                        "protocol": "TCP"
+                                    }
+                                ],
+                                "env": [
+                                    {
+                                        "name": "USERNAME",
+                                        "value": "${USERNAME}"
+                                    },
+                                    {
+                                        "name": "PASSWORD",
+                                        "value": "${PASSWORD}"
+                                    },
+                                    {
+                                        "name": "ADMIN_GROUP",
+                                        "value": "${ADMIN_GROUP}"
+                                    },
+                                    {
+                                        "name": "HOTROD_AUTHENTICATION",
+                                        "value": "${HOTROD_AUTHENTICATION}"
+                                    },
+                                    {
+                                        "name": "CONTAINER_SECURITY_ROLE_MAPPER",
+                                        "value": "${CONTAINER_SECURITY_ROLE_MAPPER}"
+                                    },
+                                    {
+                                        "name": "CONTAINER_SECURITY_ROLES",
+                                        "value": "${CONTAINER_SECURITY_ROLES}"
+                                    },
+                                    {
+                                        "name": "JGROUPS_PING_PROTOCOL",
+                                        "value": "openshift.DNS_PING"
+                                    },
+                                    {
+                                        "name": "OPENSHIFT_DNS_PING_SERVICE_NAME",
+                                        "value": "${APPLICATION_NAME}-ping"
+                                    },
+                                    {
+                                        "name": "OPENSHIFT_DNS_PING_SERVICE_PORT",
+                                        "value": "8888"
+                                    },
+                                    {
+                                        "name": "INFINISPAN_CONNECTORS",
+                                        "value": "${INFINISPAN_CONNECTORS}"
+                                    },
+                                    {
+                                        "name": "CACHE_NAMES",
+                                        "value": "${CACHE_NAMES}"
+                                    },
+                                    {
+                                        "name": "DATAVIRT_CACHE_NAMES",
+                                        "value": "${DATAVIRT_CACHE_NAMES}"
+                                    },
+                                    {
+                                        "name": "CACHE_TYPE_DEFAULT",
+                                        "value": "${CACHE_TYPE_DEFAULT}"
+                                    },
+                                    {
+                                        "name": "HOTROD_SERVICE_NAME",
+                                        "value": "${APPLICATION_NAME}-hotrod"
+                                    },
+                                    {
+                                        "name": "MEMCACHED_CACHE",
+                                        "value": "${MEMCACHED_CACHE}"
+                                    },
+                                    {
+                                        "name": "REST_SECURITY_DOMAIN",
+                                        "value": "${REST_SECURITY_DOMAIN}"
+                                    },
+                                    {
+                                        "name": "JGROUPS_CLUSTER_PASSWORD",
+                                        "value": "${JGROUPS_CLUSTER_PASSWORD}"
+                                    }
+                                ]
+                            }
+                        ]
+                    }
+                }
+            }
+        }
+    ]
+}
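
Once the synced examples are loaded, this template can be instantiated like any other; a hedged sketch assuming the image stream and template have been imported into the `openshift` namespace and that `oc` is available (the project and application name are illustrative):

- name: Instantiate the Data Grid 7.2 basic example   # illustrative task, not part of this change
  command: oc new-app --template=datagrid72-basic -p APPLICATION_NAME=datagrid-app -n myproject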

+ 638 - 0
roles/openshift_examples/files/examples/latest/xpaas-templates/datagrid72-https.json

@@ -0,0 +1,638 @@
+{
+    "kind": "Template",
+    "apiVersion": "v1",
+    "metadata": {
+        "annotations": {
+            "iconClass": "icon-datagrid",
+            "tags": "datagrid,jboss,hidden",
+            "version": "1.1",
+            "openshift.io/display-name": "Red Hat JBoss Data Grid 7.2 (Ephemeral with https)",
+            "openshift.io/provider-display-name": "Red Hat, Inc.",
+            "description": "An example Red Hat JBoss Data Grid application. For more information about using this template, see https://github.com/jboss-openshift/application-templates.",
+            "template.openshift.io/long-description": "This template defines resources needed to develop Red Hat JBoss Data Grid 7.2 based applications, including a deployment configuration, using ephemeral (temporary) storage and secure communication using https.",
+            "template.openshift.io/documentation-url": "https://access.redhat.com/documentation/en/red-hat-jboss-data-grid/",
+            "template.openshift.io/support-url": "https://access.redhat.com"
+        },
+        "name": "datagrid72-https"
+    },
+    "labels": {
+        "template": "datagrid72-https"
+    },
+    "message": "A new data grid service has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". Please be sure to create the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+    "parameters": [
+        {
+            "displayName": "Application Name",
+            "description": "The name for the application.",
+            "name": "APPLICATION_NAME",
+            "value": "datagrid-app",
+            "required": true
+        },
+        {
+            "displayName": "Custom http Route Hostname",
+            "description": "Custom hostname for http service route.  Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+            "name": "HOSTNAME_HTTP",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Custom https Route Hostname",
+            "description": "Custom hostname for https service route.  Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+            "name": "HOSTNAME_HTTPS",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Username",
+            "description": "User name for JDG user.",
+            "name": "USERNAME",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Password",
+            "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
+            "name": "PASSWORD",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "JDG User Roles/Groups",
+            "description": "Comma delimited list of roles/groups associated with the JDG user",
+            "name": "ADMIN_GROUP",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Hotrod Authentication",
+            "description": "Enable Hotrod Authentication",
+            "name": "HOTROD_AUTHENTICATION",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Container Security Role Mapper",
+            "description": "Defines which role mapper to use for cache authentication",
+            "name": "CONTAINER_SECURITY_ROLE_MAPPER",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Container Security Roles",
+            "description": "Comma delimited list of role names and assigned permissions",
+            "name": "CONTAINER_SECURITY_ROLES",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Server Keystore Secret Name",
+            "description": "The name of the secret containing the keystore file",
+            "name": "HTTPS_SECRET",
+            "value": "datagrid-app-secret",
+            "required": true
+        },
+        {
+            "displayName": "Server Keystore Filename",
+            "description": "The name of the keystore file within the secret",
+            "name": "HTTPS_KEYSTORE",
+            "value": "keystore.jks",
+            "required": false
+        },
+        {
+            "displayName": "Server Certificate Name",
+            "description": "The name associated with the server certificate",
+            "name": "HTTPS_NAME",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Server Keystore Password",
+            "description": "The password for the keystore and certificate",
+            "name": "HTTPS_PASSWORD",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "ImageStream Namespace",
+            "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+            "name": "IMAGE_STREAM_NAMESPACE",
+            "value": "openshift",
+            "required": true
+        },
+        {
+            "displayName": "Infinispan Connectors",
+            "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+            "name": "INFINISPAN_CONNECTORS",
+            "value": "hotrod,memcached,rest",
+            "required": false
+        },
+        {
+            "displayName": "Cache Names",
+            "description": "Comma-separated list of caches to configure.  By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+            "name": "CACHE_NAMES",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Datavirt Cache Names",
+            "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views.  Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+            "name": "DATAVIRT_CACHE_NAMES",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Default Cache Type",
+            "description": "Default cache type for all caches. If empty then distributed will be the default",
+            "name": "CACHE_TYPE_DEFAULT",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Encryption Requires SSL Client Authentication?",
+            "description": "Whether to require client certificate authentication. Defaults to false",
+            "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Memcached Cache Name",
+            "description": "The name of the cache to expose through this memcached connector",
+            "name": "MEMCACHED_CACHE",
+            "value": "default_memcached",
+            "required": false
+        },
+        {
+            "displayName": "REST Security Domain",
+            "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+            "name": "REST_SECURITY_DOMAIN",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "JGroups Secret Name",
+            "description": "The name of the secret containing the keystore file",
+            "name": "JGROUPS_ENCRYPT_SECRET",
+            "value": "datagrid-app-secret",
+            "required": false
+        },
+        {
+            "displayName": "JGroups Keystore Filename",
+            "description": "The name of the keystore file within the secret",
+            "name": "JGROUPS_ENCRYPT_KEYSTORE",
+            "value": "jgroups.jceks",
+            "required": false
+        },
+        {
+            "displayName": "JGroups Certificate Name",
+            "description": "The name associated with the server certificate",
+            "name": "JGROUPS_ENCRYPT_NAME",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "JGroups Keystore Password",
+            "description": "The password for the keystore and certificate",
+            "name": "JGROUPS_ENCRYPT_PASSWORD",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "JGroups Cluster Password",
+            "description": "JGroups cluster password",
+            "name": "JGROUPS_CLUSTER_PASSWORD",
+            "from": "[a-zA-Z0-9]{8}",
+            "generate": "expression",
+            "required": true
+        },
+        {
+            "description": "Container memory limit",
+            "name": "MEMORY_LIMIT",
+            "value": "1Gi",
+            "required": false
+        }
+    ],
+    "objects": [
+        {
+            "kind": "Service",
+            "apiVersion": "v1",
+            "spec": {
+                "ports": [
+                    {
+                        "port": 8080,
+                        "targetPort": 8080
+                    }
+                ],
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                }
+            },
+            "metadata": {
+                "name": "${APPLICATION_NAME}",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "The web server's HTTP port."
+                }
+            }
+        },
+        {
+            "kind": "Service",
+            "apiVersion": "v1",
+            "spec": {
+                "ports": [
+                    {
+                        "port": 8443,
+                        "targetPort": 8443
+                    }
+                ],
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                }
+            },
+            "metadata": {
+                "name": "secure-${APPLICATION_NAME}",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "The web server's HTTPS port."
+                }
+            }
+        },
+        {
+            "kind": "Service",
+            "apiVersion": "v1",
+            "spec": {
+                "ports": [
+                    {
+                        "port": 11211,
+                        "targetPort": 11211
+                    }
+                ],
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                }
+            },
+            "metadata": {
+                "name": "${APPLICATION_NAME}-memcached",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "Memcached service for clustered applications."
+                }
+            }
+        },
+        {
+            "kind": "Service",
+            "apiVersion": "v1",
+            "spec": {
+                "ports": [
+                    {
+                        "port": 11333,
+                        "targetPort": 11333
+                    }
+                ],
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                }
+            },
+            "metadata": {
+                "name": "${APPLICATION_NAME}-hotrod",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "Hot Rod service for clustered applications."
+                }
+            }
+        },
+        {
+            "kind": "Service",
+            "apiVersion": "v1",
+            "spec": {
+                "clusterIP": "None",
+                "ports": [
+                    {
+                        "name": "ping",
+                        "port": 8888
+                    }
+                ],
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                }
+            },
+            "metadata": {
+                "name": "${APPLICATION_NAME}-ping",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
+                    "description": "The JGroups ping port for clustering."
+                }
+            }
+        },
+        {
+            "kind": "Route",
+            "apiVersion": "v1",
+            "id": "${APPLICATION_NAME}-http",
+            "metadata": {
+                "name": "${APPLICATION_NAME}",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "Route for application's HTTP service."
+                }
+            },
+            "spec": {
+                "host": "${HOSTNAME_HTTP}",
+                "to": {
+                    "name": "${APPLICATION_NAME}"
+                }
+            }
+        },
+        {
+            "kind": "Route",
+            "apiVersion": "v1",
+            "id": "${APPLICATION_NAME}-https",
+            "metadata": {
+                "name": "secure-${APPLICATION_NAME}",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "Route for application's HTTPS service."
+                }
+            },
+            "spec": {
+                "host": "${HOSTNAME_HTTPS}",
+                "to": {
+                    "name": "secure-${APPLICATION_NAME}"
+                },
+                "tls": {
+                    "termination": "passthrough"
+                }
+            }
+        },
+        {
+            "kind": "DeploymentConfig",
+            "apiVersion": "v1",
+            "metadata": {
+                "name": "${APPLICATION_NAME}",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                }
+            },
+            "spec": {
+                "strategy": {
+                    "type": "Recreate"
+                },
+                "triggers": [
+                    {
+                        "type": "ImageChange",
+                        "imageChangeParams": {
+                            "automatic": true,
+                            "containerNames": [
+                                "${APPLICATION_NAME}"
+                            ],
+                            "from": {
+                                "kind": "ImageStreamTag",
+                                "namespace": "${IMAGE_STREAM_NAMESPACE}",
+                                "name": "jboss-datagrid72-openshift:1.1"
+                            }
+                        }
+                    },
+                    {
+                        "type": "ConfigChange"
+                    }
+                ],
+                "replicas": 1,
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                },
+                "template": {
+                    "metadata": {
+                        "name": "${APPLICATION_NAME}",
+                        "labels": {
+                            "deploymentConfig": "${APPLICATION_NAME}",
+                            "application": "${APPLICATION_NAME}"
+                        }
+                    },
+                    "spec": {
+                        "terminationGracePeriodSeconds": 60,
+                        "containers": [
+                            {
+                                "name": "${APPLICATION_NAME}",
+                                "image": "jboss-datagrid72-openshift",
+                                "imagePullPolicy": "Always",
+                                "resources": {
+                                    "limits": {
+                                        "memory": "${MEMORY_LIMIT}"
+                                    }
+                                },
+                                "volumeMounts": [
+                                    {
+                                        "name": "datagrid-keystore-volume",
+                                        "mountPath": "/etc/datagrid-secret-volume",
+                                        "readOnly": true
+                                    },
+                                    {
+                                        "name": "datagrid-jgroups-keystore-volume",
+                                        "mountPath": "/etc/jgroups-encrypt-secret-volume",
+                                        "readOnly": true
+                                    }
+                                ],
+                                "livenessProbe": {
+                                    "exec": {
+                                        "command": [
+                                            "/bin/bash",
+                                            "-c",
+                                            "/opt/datagrid/bin/livenessProbe.sh"
+                                        ]
+                                    },
+                                    "initialDelaySeconds": 60
+                                },
+                                "readinessProbe": {
+                                    "exec": {
+                                        "command": [
+                                            "/bin/bash",
+                                            "-c",
+                                            "/opt/datagrid/bin/readinessProbe.sh"
+                                        ]
+                                    }
+                                },
+                                "ports": [
+                                    {
+                                        "name": "jolokia",
+                                        "containerPort": 8778,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "http",
+                                        "containerPort": 8080,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "https",
+                                        "containerPort": 8443,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "ping",
+                                        "containerPort": 8888,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "memcached",
+                                        "containerPort": 11211,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "hotrod-internal",
+                                        "containerPort": 11222,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "hotrod",
+                                        "containerPort": 11333,
+                                        "protocol": "TCP"
+                                    }
+                                ],
+                                "env": [
+                                    {
+                                        "name": "USERNAME",
+                                        "value": "${USERNAME}"
+                                    },
+                                    {
+                                        "name": "PASSWORD",
+                                        "value": "${PASSWORD}"
+                                    },
+                                    {
+                                        "name": "ADMIN_GROUP",
+                                        "value": "${ADMIN_GROUP}"
+                                    },
+                                    {
+                                        "name": "HOTROD_AUTHENTICATION",
+                                        "value": "${HOTROD_AUTHENTICATION}"
+                                    },
+                                    {
+                                        "name": "CONTAINER_SECURITY_ROLE_MAPPER",
+                                        "value": "${CONTAINER_SECURITY_ROLE_MAPPER}"
+                                    },
+                                    {
+                                        "name": "CONTAINER_SECURITY_ROLES",
+                                        "value": "${CONTAINER_SECURITY_ROLES}"
+                                    },
+                                    {
+                                        "name": "HTTPS_KEYSTORE_DIR",
+                                        "value": "/etc/datagrid-secret-volume"
+                                    },
+                                    {
+                                        "name": "HTTPS_KEYSTORE",
+                                        "value": "${HTTPS_KEYSTORE}"
+                                    },
+                                    {
+                                        "name": "HTTPS_NAME",
+                                        "value": "${HTTPS_NAME}"
+                                    },
+                                    {
+                                        "name": "HTTPS_PASSWORD",
+                                        "value": "${HTTPS_PASSWORD}"
+                                    },
+                                    {
+                                        "name": "JGROUPS_PING_PROTOCOL",
+                                        "value": "openshift.DNS_PING"
+                                    },
+                                    {
+                                        "name": "OPENSHIFT_DNS_PING_SERVICE_NAME",
+                                        "value": "${APPLICATION_NAME}-ping"
+                                    },
+                                    {
+                                        "name": "OPENSHIFT_DNS_PING_SERVICE_PORT",
+                                        "value": "8888"
+                                    },
+                                    {
+                                        "name": "INFINISPAN_CONNECTORS",
+                                        "value": "${INFINISPAN_CONNECTORS}"
+                                    },
+                                    {
+                                        "name": "CACHE_NAMES",
+                                        "value": "${CACHE_NAMES}"
+                                    },
+                                    {
+                                        "name": "DATAVIRT_CACHE_NAMES",
+                                        "value": "${DATAVIRT_CACHE_NAMES}"
+                                    },
+                                    {
+                                        "name": "CACHE_TYPE_DEFAULT",
+                                        "value": "${CACHE_TYPE_DEFAULT}"
+                                    },
+                                    {
+                                        "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+                                        "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+                                    },
+                                    {
+                                        "name": "HOTROD_SERVICE_NAME",
+                                        "value": "${APPLICATION_NAME}-hotrod"
+                                    },
+                                    {
+                                        "name": "HOTROD_ENCRYPTION",
+                                        "value": "${HTTPS_NAME}"
+                                    },
+                                    {
+                                        "name": "MEMCACHED_CACHE",
+                                        "value": "${MEMCACHED_CACHE}"
+                                    },
+                                    {
+                                        "name": "REST_SECURITY_DOMAIN",
+                                        "value": "${REST_SECURITY_DOMAIN}"
+                                    },
+                                    {
+                                        "name": "JGROUPS_ENCRYPT_SECRET",
+                                        "value": "${JGROUPS_ENCRYPT_SECRET}"
+                                    },
+                                    {
+                                        "name": "JGROUPS_ENCRYPT_KEYSTORE",
+                                        "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+                                    },
+                                    {
+                                        "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+                                        "value": "/etc/jgroups-encrypt-secret-volume"
+                                    },
+                                    {
+                                        "name": "JGROUPS_ENCRYPT_NAME",
+                                        "value": "${JGROUPS_ENCRYPT_NAME}"
+                                    },
+                                    {
+                                        "name": "JGROUPS_ENCRYPT_PASSWORD",
+                                        "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+                                    },
+                                    {
+                                        "name": "JGROUPS_CLUSTER_PASSWORD",
+                                        "value": "${JGROUPS_CLUSTER_PASSWORD}"
+                                    }
+                                ]
+                            }
+                        ],
+                        "volumes": [
+                            {
+                                "name": "datagrid-keystore-volume",
+                                "secret": {
+                                    "secretName": "${HTTPS_SECRET}"
+                                }
+                            },
+                            {
+                                "name": "datagrid-jgroups-keystore-volume",
+                                "secret": {
+                                    "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+                                }
+                            }
+                        ]
+                    }
+                }
+            }
+        }
+    ]
+}

The file diff has been suppressed because it is too large
+ 955 - 0
roles/openshift_examples/files/examples/latest/xpaas-templates/datagrid72-mysql-persistent.json


The file diff has been suppressed because it is too large
+ 928 - 0
roles/openshift_examples/files/examples/latest/xpaas-templates/datagrid72-mysql.json


+ 527 - 0
roles/openshift_examples/files/examples/latest/xpaas-templates/datagrid72-partition.json

@@ -0,0 +1,527 @@
+{
+    "kind": "Template",
+    "apiVersion": "v1",
+    "metadata": {
+        "annotations": {
+            "iconClass": "icon-datagrid",
+            "tags": "datagrid,jboss,hidden",
+            "version": "1.1",
+            "openshift.io/display-name": "Red Hat JBoss Data Grid 7.2 (Ephemeral, no https)",
+            "openshift.io/provider-display-name": "Red Hat, Inc.",
+            "description": "An example Red Hat JBoss Data Grid application. For more information about using this template, see https://github.com/jboss-openshift/application-templates.",
+            "template.openshift.io/long-description": "This template defines resources needed to develop Red Hat JBoss Data Grid 7.2 based applications, including a deployment configuration, using ephemeral (temporary) storage and communication using http.",
+            "template.openshift.io/documentation-url": "https://access.redhat.com/documentation/en/red-hat-jboss-data-grid/",
+            "template.openshift.io/support-url": "https://access.redhat.com"
+        },
+        "name": "datagrid72-partition"
+    },
+    "labels": {
+        "template": "datagrid72-partition"
+    },
+    "message": "A new data grid service has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\".",
+    "parameters": [
+        {
+            "displayName": "Application Name",
+            "description": "The name for the application.",
+            "name": "APPLICATION_NAME",
+            "value": "datagrid-app",
+            "required": true
+        },
+        {
+            "displayName": "Custom http Route Hostname",
+            "description": "Custom hostname for http service route.  Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+            "name": "HOSTNAME_HTTP",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Username",
+            "description": "User name for JDG user.",
+            "name": "USERNAME",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Password",
+            "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
+            "name": "PASSWORD",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "ImageStream Namespace",
+            "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+            "name": "IMAGE_STREAM_NAMESPACE",
+            "value": "openshift",
+            "required": true
+        },
+        {
+            "displayName": "Infinispan Connectors",
+            "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+            "name": "INFINISPAN_CONNECTORS",
+            "value": "hotrod,memcached,rest",
+            "required": false
+        },
+        {
+            "displayName": "Cache Names",
+            "description": "Comma-separated list of caches to configure.  By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+            "name": "CACHE_NAMES",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Datavirt Cache Names",
+            "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views.  Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+            "name": "DATAVIRT_CACHE_NAMES",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Default Cache Type",
+            "description": "Default cache type for all caches. If empty then distributed will be the default",
+            "name": "CACHE_TYPE_DEFAULT",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "Memcached Cache Name",
+            "description": "The name of the cache to expose through this memcached connector",
+            "name": "MEMCACHED_CACHE",
+            "value": "default_memcached",
+            "required": false
+        },
+        {
+            "displayName": "ADMIN_GROUP",
+            "description": "Comma delimited list of groups/roles for the Application Realm User",
+            "name": "ADMIN_GROUP",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "HOTROD_AUTHENTICATION",
+            "description": "True/False for HotRod Authentication",
+            "name": "HOTROD_AUTHENTICATION",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "CONTAINER_SECURITY_ROLE_MAPPER",
+            "description": "Container Role Mapper",
+            "name": "CONTAINER_SECURITY_ROLE_MAPPER",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "CONTAINER_SECURITY_ROLES",
+            "description": "Comma Delimited List of Container Roles",
+            "name": "CONTAINER_SECURITY_ROLES",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "REST Security Domain",
+            "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+            "name": "REST_SECURITY_DOMAIN",
+            "value": "",
+            "required": false
+        },
+        {
+            "displayName": "JGroups Cluster Password",
+            "description": "JGroups cluster password",
+            "name": "JGROUPS_CLUSTER_PASSWORD",
+            "from": "[a-zA-Z0-9]{8}",
+            "generate": "expression",
+            "required": true
+        },
+        {
+            "displayName": "Datagrid Volume Size",
+            "description": "Size of the volume used by Datagrid for persisting metadata.",
+            "name": "VOLUME_CAPACITY",
+            "value": "1Gi",
+            "required": true
+        },
+        {
+            "displayName": "Split Data?",
+            "description": "Split the data directory for each node in a mesh, this is now the default behaviour.",
+            "name": "DATAGRID_SPLIT",
+            "value": "true",
+            "required": false
+        },
+        {
+            "description": "Container memory limit",
+            "name": "MEMORY_LIMIT",
+            "value": "1Gi",
+            "required": false
+        }
+    ],
+    "objects": [
+        {
+            "kind": "Service",
+            "apiVersion": "v1",
+            "spec": {
+                "ports": [
+                    {
+                        "port": 8080,
+                        "targetPort": 8080
+                    }
+                ],
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                }
+            },
+            "metadata": {
+                "name": "${APPLICATION_NAME}",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "The web server's HTTP port."
+                }
+            }
+        },
+        {
+            "kind": "Service",
+            "apiVersion": "v1",
+            "spec": {
+                "ports": [
+                    {
+                        "port": 11211,
+                        "targetPort": 11211
+                    }
+                ],
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                }
+            },
+            "metadata": {
+                "name": "${APPLICATION_NAME}-memcached",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "Memcached service for clustered applications."
+                }
+            }
+        },
+        {
+            "kind": "Service",
+            "apiVersion": "v1",
+            "spec": {
+                "ports": [
+                    {
+                        "port": 11333,
+                        "targetPort": 11333
+                    }
+                ],
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                }
+            },
+            "metadata": {
+                "name": "${APPLICATION_NAME}-hotrod",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "Hot Rod service for clustered applications."
+                }
+            }
+        },
+        {
+            "kind": "Service",
+            "apiVersion": "v1",
+            "spec": {
+                "ports": [
+                    {
+                        "port": 8787,
+                        "targetPort": 8787
+                    }
+                ],
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                }
+            },
+            "metadata": {
+                "name": "${APPLICATION_NAME}-debug",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "Hot Rod service for clustered applications."
+                }
+            }
+        },
+        {
+            "kind": "Service",
+            "apiVersion": "v1",
+            "spec": {
+                "clusterIP": "None",
+                "ports": [
+                    {
+                        "name": "ping",
+                        "port": 8888
+                    }
+                ],
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                }
+            },
+            "metadata": {
+                "name": "${APPLICATION_NAME}-ping",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "service.alpha.kubernetes.io/tolerate-unready-endpoints": "true",
+                    "description": "The JGroups ping port for clustering."
+                }
+            }
+        },
+        {
+            "kind": "Route",
+            "apiVersion": "v1",
+            "id": "${APPLICATION_NAME}-http",
+            "metadata": {
+                "name": "${APPLICATION_NAME}",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                },
+                "annotations": {
+                    "description": "Route for application's HTTP service."
+                }
+            },
+            "spec": {
+                "host": "${HOSTNAME_HTTP}",
+                "to": {
+                    "name": "${APPLICATION_NAME}"
+                }
+            }
+        },
+        {
+            "kind": "DeploymentConfig",
+            "apiVersion": "v1",
+            "metadata": {
+                "name": "${APPLICATION_NAME}",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                }
+            },
+            "spec": {
+                "strategy": {
+                    "type": "Recreate"
+                },
+                "triggers": [
+                    {
+                        "type": "ImageChange",
+                        "imageChangeParams": {
+                            "automatic": true,
+                            "containerNames": [
+                                "${APPLICATION_NAME}"
+                            ],
+                            "from": {
+                                "kind": "ImageStreamTag",
+                                "namespace": "${IMAGE_STREAM_NAMESPACE}",
+                                "name": "jboss-datagrid72-openshift:1.1"
+                            }
+                        }
+                    },
+                    {
+                        "type": "ConfigChange"
+                    }
+                ],
+                "replicas": 1,
+                "selector": {
+                    "deploymentConfig": "${APPLICATION_NAME}"
+                },
+                "template": {
+                    "metadata": {
+                        "name": "${APPLICATION_NAME}",
+                        "labels": {
+                            "deploymentConfig": "${APPLICATION_NAME}",
+                            "application": "${APPLICATION_NAME}"
+                        }
+                    },
+                    "spec": {
+                        "terminationGracePeriodSeconds": 60,
+                        "containers": [
+                            {
+                                "name": "${APPLICATION_NAME}",
+                                "image": "jboss-datagrid72-openshift",
+                                "imagePullPolicy": "Always",
+                                "resources": {
+                                    "limits": {
+                                        "memory": "${MEMORY_LIMIT}"
+                                    }
+                                },
+                                "volumeMounts": [
+                                    {
+                                        "mountPath": "/opt/datagrid/standalone/partitioned_data",
+                                        "name": "${APPLICATION_NAME}-datagrid-pvol"
+                                    }
+                                ],
+                                "livenessProbe": {
+                                    "exec": {
+                                        "command": [
+                                            "/bin/bash",
+                                            "-c",
+                                            "/opt/datagrid/bin/livenessProbe.sh"
+                                        ]
+                                    },
+                                    "initialDelaySeconds": 60
+                                },
+                                "readinessProbe": {
+                                    "exec": {
+                                        "command": [
+                                            "/bin/bash",
+                                            "-c",
+                                            "/opt/datagrid/bin/readinessProbe.sh"
+                                        ]
+                                    }
+                                },
+                                "ports": [
+                                    {
+                                        "name": "jolokia",
+                                        "containerPort": 8778,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "http",
+                                        "containerPort": 8080,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "ping",
+                                        "containerPort": 8888,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "memcached",
+                                        "containerPort": 11211,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "hotrod-internal",
+                                        "containerPort": 11222,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "hotrod",
+                                        "containerPort": 11333,
+                                        "protocol": "TCP"
+                                    },
+                                    {
+                                        "name": "debug",
+                                        "containerPort": 8787,
+                                        "protocol": "TCP"
+                                    }
+                                ],
+                                "env": [
+                                    {
+                                        "name": "USERNAME",
+                                        "value": "${USERNAME}"
+                                    },
+                                    {
+                                        "name": "PASSWORD",
+                                        "value": "${PASSWORD}"
+                                    },
+                                    {
+                                        "name": "JGROUPS_PING_PROTOCOL",
+                                        "value": "openshift.DNS_PING"
+                                    },
+                                    {
+                                        "name": "OPENSHIFT_DNS_PING_SERVICE_NAME",
+                                        "value": "${APPLICATION_NAME}-ping"
+                                    },
+                                    {
+                                        "name": "OPENSHIFT_DNS_PING_SERVICE_PORT",
+                                        "value": "8888"
+                                    },
+                                    {
+                                        "name": "INFINISPAN_CONNECTORS",
+                                        "value": "${INFINISPAN_CONNECTORS}"
+                                    },
+                                    {
+                                        "name": "CACHE_NAMES",
+                                        "value": "${CACHE_NAMES}"
+                                    },
+                                    {
+                                        "name": "DATAVIRT_CACHE_NAMES",
+                                        "value": "${DATAVIRT_CACHE_NAMES}"
+                                    },
+                                    {
+                                        "name": "CACHE_TYPE_DEFAULT",
+                                        "value": "${CACHE_TYPE_DEFAULT}"
+                                    },
+                                    {
+                                        "name": "HOTROD_SERVICE_NAME",
+                                        "value": "${APPLICATION_NAME}-hotrod"
+                                    },
+                                    {
+                                        "name": "MEMCACHED_CACHE",
+                                        "value": "${MEMCACHED_CACHE}"
+                                    },
+                                    {
+                                        "name": "REST_SECURITY_DOMAIN",
+                                        "value": "${REST_SECURITY_DOMAIN}"
+                                    },
+                                    {
+                                        "name": "JGROUPS_CLUSTER_PASSWORD",
+                                        "value": "${JGROUPS_CLUSTER_PASSWORD}"
+                                    },
+                                    {
+                                        "name": "ADMIN_GROUP",
+                                        "value": "${ADMIN_GROUP}"
+                                    },
+                                    {
+                                        "name": "HOTROD_AUTHENTICATION",
+                                        "value": "${HOTROD_AUTHENTICATION}"
+                                    },
+                                    {
+                                        "name": "CONTAINER_SECURITY_ROLE_MAPPER",
+                                        "value": "${CONTAINER_SECURITY_ROLE_MAPPER}"
+                                    },
+                                    {
+                                        "name": "CONTAINER_SECURITY_ROLES",
+                                        "value": "${CONTAINER_SECURITY_ROLES}"
+                                    },
+                                    {
+                                        "name": "DATAGRID_SPLIT",
+                                        "value": "${DATAGRID_SPLIT}"
+                                    }
+                                ]
+                            }
+                        ],
+                        "volumes": [
+                            {
+                                "name": "${APPLICATION_NAME}-datagrid-pvol",
+                                "persistentVolumeClaim": {
+                                    "claimName": "${APPLICATION_NAME}-datagrid-claim"
+                                }
+                            }
+                        ]
+                    }
+                }
+            }
+        },
+        {
+            "apiVersion": "v1",
+            "kind": "PersistentVolumeClaim",
+            "metadata": {
+                "name": "${APPLICATION_NAME}-datagrid-claim",
+                "labels": {
+                    "application": "${APPLICATION_NAME}"
+                }
+            },
+            "spec": {
+                "accessModes": [
+                    "ReadWriteMany"
+                ],
+                "resources": {
+                    "requests": {
+                        "storage": "${VOLUME_CAPACITY}"
+                    }
+                }
+            }
+        }
+    ]
+}

The file diff has been suppressed because it is too large
+ 926 - 0
roles/openshift_examples/files/examples/latest/xpaas-templates/datagrid72-postgresql-persistent.json


The file diff has been suppressed because it is too large
+ 899 - 0
roles/openshift_examples/files/examples/latest/xpaas-templates/datagrid72-postgresql.json


+ 15 - 2
roles/openshift_facts/defaults/main.yml

@@ -48,7 +48,7 @@ repoquery_installed: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --l
 openshift_use_crio: False
 openshift_use_crio_only: False
 openshift_crio_enable_docker_gc: False
-openshift_crio_var_sock: "unix:///var/run/crio/crio.sock"
+openshift_crio_var_sock: "/var/run/crio/crio.sock"
 openshift_crio_pause_image: "{{ l_os_registry_url | regex_replace('${component}' | regex_escape, 'pod') }}"
 openshift_container_cli: "{{ openshift_use_crio | bool | ternary('crictl', 'docker') }}"
 openshift_crio_docker_gc_node_selector:
@@ -87,7 +87,20 @@ openshift_hosted_registry_storage_glusterfs_ips: []
 openshift_hosted_registry_storage_hostpath_path: /var/lib/openshift_volumes
 # Default to ReadWriteOnce if using hostpath, else default to ReadWriteMany
 openshift_hosted_registry_storage_access_modes:
-  - "{{ (openshift_hosted_registry_storage_kind == 'hostpath') | ternary('ReadWriteOnce', 'ReadWriteMany') }}"
+  - "{{ (openshift_hosted_registry_storage_kind | default(none) == 'hostpath') | ternary('ReadWriteOnce', 'ReadWriteMany') }}"
+
+openshift_hosted_registry_glusterfs_namespace: "{{ openshift_hosted_registry_namespace }}"
+openshift_hosted_registry_glusterfs_storage_kind: 'glusterfs'
+openshift_hosted_registry_glusterfs_storage_volume_name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs"
+openshift_hosted_registry_glusterfs_storage_volume_size: "{{ openshift_hosted_registry_storage_volume_size }}"
+openshift_hosted_registry_glusterfs_storage_create_pv: False
+openshift_hosted_registry_glusterfs_storage_create_pvc: False
+openshift_hosted_registry_glusterfs_storage_glusterfs_endpoints: "{{ openshift_hosted_registry_storage_glusterfs_endpoints }}"
+openshift_hosted_registry_glusterfs_storage_glusterfs_path: "{{ openshift_hosted_registry_storage_glusterfs_path }}"
+openshift_hosted_registry_glusterfs_storage_glusterfs_readOnly: "{{ openshift_hosted_registry_storage_glusterfs_readOnly }}"
+openshift_hosted_registry_glusterfs_storage_glusterfs_ips: "{{ openshift_hosted_registry_storage_glusterfs_ips }}"
+openshift_hosted_registry_glusterfs_storage_access_modes:
+  - 'ReadWriteMany'
 
 openshift_logging_storage_nfs_directory: '/exports'
 openshift_logging_storage_nfs_options: '*(rw,root_squash)'

+ 1 - 1
roles/openshift_logging_fluentd/templates/fluentd.j2

@@ -149,7 +149,7 @@ spec:
         - name: "MUX_CLIENT_MODE"
           value: "{{ openshift_logging_mux_client_mode }}"
 {% endif %}
-{% if openshift_logging_install_eventrouter is defined and openshift_logging_install_eventrouter %}
+{% if openshift_logging_install_eventrouter is defined and (openshift_logging_install_eventrouter | bool) %}
         - name: "TRANSFORM_EVENTS"
           value: "true"
 {% endif %}

+ 4 - 0
roles/openshift_logging_mux/templates/mux.j2

@@ -128,6 +128,10 @@ spec:
               resource: limits.memory
         - name: "FILE_BUFFER_LIMIT"
           value: "{{ openshift_logging_mux_file_buffer_limit | default('2Gi') }}"
+{% if openshift_logging_install_eventrouter is defined and (openshift_logging_install_eventrouter | bool) %}
+        - name: "TRANSFORM_EVENTS"
+          value: "true"
+{% endif %}
 
 {% if openshift_logging_mux_remote_syslog is defined and openshift_logging_mux_remote_syslog %}
         - name: USE_REMOTE_SYSLOG

+ 1 - 1
roles/openshift_manage_node/tasks/config.yml

@@ -22,7 +22,7 @@
     - node_status.results.results | length > 0
     - node_status.results.results[0]['items']
         | map(attribute='metadata.annotations') | map('list') | flatten
-        | select('match', '[\"node.openshift.io/md5sum\"]') | list | length ==
+        | select('match', 'node.openshift.io/md5sum') | list | length ==
       node_status.results.results[0]['items'] | length
   retries: 60
   delay: 10

+ 14 - 0
roles/openshift_node_group/files/sync.yaml

@@ -114,6 +114,7 @@ spec:
             # detect whether the node-config.yaml has changed, and if so trigger a restart of the kubelet.
             md5sum /etc/origin/node/node-config.yaml > /tmp/.new
             if [[ "$( cat /tmp/.old )" != "$( cat /tmp/.new )" ]]; then
+              SYSTEMD_IGNORE_CHROOT=1 systemctl restart tuned || :
               echo "info: Configuration changed, restarting kubelet" 2>&1
               # TODO: kubelet doesn't relabel nodes, best effort for now
               # https://github.com/kubernetes/kubernetes/issues/59314
@@ -164,6 +165,13 @@ spec:
         - mountPath: /etc/sysconfig
           name: host-sysconfig-node
           readOnly: true
+        - mountPath: /var/run/dbus
+          name: var-run-dbus
+          readOnly: true
+        - mountPath: /run/systemd/system
+          name: run-systemd-system
+          readOnly: true
+
 
       volumes:
       # In bootstrap mode, the host config contains information not easily available
@@ -174,3 +182,9 @@ spec:
       - name: host-sysconfig-node
         hostPath:
           path: /etc/sysconfig
+      - hostPath:
+          path: /var/run/dbus
+        name: var-run-dbus
+      - hostPath:
+          path: /run/systemd/system
+        name: run-systemd-system

+ 1 - 1
roles/openshift_node_group/tasks/sync.yml

@@ -84,7 +84,7 @@
     - node_status.results.results | length > 0
     - node_status.results.results[0]['items']
         | map(attribute='metadata.annotations') | map('list') | flatten
-        | select('match', '[\"node.openshift.io/md5sum\"]') | list | length ==
+        | select('match', 'node.openshift.io/md5sum') | list | length ==
       node_status.results.results[0]['items'] | length
   retries: 60
   delay: 10

+ 3 - 0
roles/openshift_node_group/templates/node-config.yaml.j2

@@ -73,6 +73,9 @@ servingInfo:
   bindAddress: 0.0.0.0:10250
   bindNetwork: tcp4
   clientCA: client-ca.crt
+proxyArguments:
+  cluster-cidr:
+    - {{ openshift_cluster_network_cidr }}
 volumeConfig:
   localQuota:
     perFSGroup: null

+ 1 - 10
roles/openshift_node_problem_detector/defaults/main.yaml

@@ -5,17 +5,8 @@ openshift_node_problem_detector_tmp_location: /tmp
 openshift_node_problem_detector_delete_tempfiles: True
 
 # node-problem-detector image setup
-openshift_node_problem_detector_image_dict:
-  origin:
-    prefix: "docker.io/openshift/"
-    version: "{{ openshift_image_tag }}"
-  openshift-enterprise:
-    prefix: "registry.redhat.io/openshift3/ose-"
-    version: "{{ openshift_image_tag }}"
-
-openshift_node_problem_detector_image_prefix: "{{ openshift_node_problem_detector_image_dict[openshift_deployment_type]['prefix'] }}"
-openshift_node_problem_detector_image_version: "{{ openshift_node_problem_detector_image_dict[openshift_deployment_type]['version'] }}"
 
+openshift_node_problem_detector_image: "{{ l_osm_registry_url | regex_replace('${component}' | regex_escape, 'node-problem-detector') }}"
 
 # node_problem_detector daemonset setup
 openshift_node_problem_detector_daemonset_name: node-problem-detector

+ 1 - 1
roles/openshift_node_problem_detector/templates/node-problem-detector-daemonset.yaml.j2

@@ -21,7 +21,7 @@ spec:
             fieldRef:
               apiVersion: v1
               fieldPath: spec.nodeName
-        image: {{ openshift_node_problem_detector_image_prefix }}node-problem-detector:{{ openshift_node_problem_detector_image_version }}
+        image: {{ openshift_node_problem_detector_image }}
         imagePullPolicy: {{ openshift_node_problem_detector_image_pull_policy }}
         name: {{ openshift_node_problem_detector_daemonset_name }}
         resources: {}

+ 12 - 1
roles/openshift_openstack/defaults/main.yml

@@ -1,4 +1,16 @@
 ---
+openshift_use_all_in_one_cluster_deployment: False
+openshift_use_cinder_persistent_volume: False
+openshift_use_cinder_registry: False
+openshift_use_kuryr: False
+openshift_use_openstack_ssl: False
+openshift_use_swift_registry: False
+
+openshift_openstack_use_neutron_internal_dns: False
+openshift_openstack_use_no_floating_ip: False
+openshift_openstack_use_nsupdate: True
+openshift_openstack_use_provider_network: False
+
 openshift_openstack_stack_state: 'present'
 
 openshift_openstack_ssh_ingress_cidr: 0.0.0.0/0
@@ -16,7 +28,6 @@ openshift_openstack_use_lbaas_load_balancer: false
 openshift_openstack_lbaasv2_provider: Octavia
 openshift_openstack_use_vm_load_balancer: false
 
-
 # container-storage-setup
 openshift_openstack_container_storage_setup:
   docker_dev: "/dev/sdb"

+ 34 - 1
roles/openshift_openstack/tasks/check-prerequisites.yml

@@ -17,6 +17,39 @@
     that: 'shade_result.rc == 0'
     msg: "Python module shade is not installed"
 
+- include_tasks: prerequisites/provider-network-check.yml
+  when: openshift_openstack_use_provider_network
+
+- include_tasks: prerequisites/neutron-internal-dns-check.yml
+  when: openshift_openstack_use_neutron_internal_dns
+
+- include_tasks: prerequisites/nsupdate-check.yml
+  when: openshift_openstack_use_nsupdate
+
+- include_tasks: prerequisites/no-floating-ip-check.yml
+  when: openshift_openstack_use_no_floating_ip
+
+- include_tasks: prerequisites/cloud-provider-check.yml
+  when: openshift_use_cinder_persistent_volume or openshift_use_cinder_registry or openshift_use_kuryr
+
+- include_tasks: prerequisites/openstack-ssl-check.yml
+  when: openshift_use_openstack_ssl
+
+- include_tasks: prerequisites/kuryr-check.yml
+  when: openshift_use_kuryr
+
+- include_tasks: prerequisites/all-in-one-cluster-deployment-check.yml
+  when: openshift_use_all_in_one_cluster_deployment
+
+- include_tasks: prerequisites/cinder-persistent-volume-check.yml
+  when: openshift_use_cinder_persistent_volume
+
+- include_tasks: prerequisites/cinder-registry-check.yml
+  when: openshift_use_cinder_registry
+
+- include_tasks: prerequisites/swift-registry-check.yml
+  when: openshift_use_swift_registry
+
 # Gather Neutron extension facts
 - name: Check for Neutron trunk support
   os_network_extensions:
@@ -91,7 +124,7 @@
     msg: "Keypair {{ openshift_openstack_keypair_name }} is not available"
 
 # Check flavors and images
-- include_tasks: image-and-flavor-check.yml
+- include_tasks: prerequisites/image-and-flavor-check.yml
   with_items:
   - { image: "{{ openshift_openstack_default_image_name }}", flavor: "{{ openshift_openstack_default_flavor }}" }
   - { image: "{{ openshift_openstack_master_image }}", flavor: "{{ openshift_openstack_master_flavor }}" }

+ 3 - 3
roles/openshift_openstack/tasks/populate-dns.yml

@@ -6,7 +6,7 @@
     key_algorithm: "{{ openshift_openstack_external_nsupdate_keys['private']['key_algorithm'] | lower }}"
     server: "{{ openshift_openstack_external_nsupdate_keys['private']['server'] }}"
     zone: "{{ openshift_openstack_nsupdate_zone }}"
-    record: "{{ hostvars[item]['ansible_hostname'] + openshift_openstack_private_hostname_suffix + '.' + openshift_openstack_full_dns_domain | replace('.' + openshift_openstack_nsupdate_zone, '') }}"
+    record: "{{ (hostvars[item]['ansible_hostname'] + openshift_openstack_private_hostname_suffix + '.' + openshift_openstack_full_dns_domain) | replace('.' + openshift_openstack_nsupdate_zone, '') }}"
     value: "{{ hostvars[item]['private_v4'] }}"
     type: "A"
     state: "{{ l_dns_record_state | default('present') }}"
@@ -29,7 +29,7 @@
     key_algorithm: "{{ openshift_openstack_external_nsupdate_keys['public']['key_algorithm'] | lower }}"
     server: "{{ openshift_openstack_external_nsupdate_keys['public']['server'] }}"
     zone: "{{ openshift_openstack_nsupdate_zone }}"
-    record: "{{ hostvars[item]['ansible_hostname'] + openshift_openstack_public_hostname_suffix + '.' + openshift_openstack_full_dns_domain | replace('.' + openshift_openstack_nsupdate_zone, '') }}"
+    record: "{{ (hostvars[item]['ansible_hostname'] + openshift_openstack_public_hostname_suffix + '.' + openshift_openstack_full_dns_domain) | replace('.' + openshift_openstack_nsupdate_zone, '') }}"
     value: "{{ hostvars[item]['public_v4'] }}"
     type: "A"
     state: "{{ l_dns_record_state | default('present') }}"
@@ -51,7 +51,7 @@
     key_algorithm: "{{ openshift_openstack_external_nsupdate_keys['public']['key_algorithm'] | lower }}"
     server: "{{ openshift_openstack_external_nsupdate_keys['public']['server'] }}"
     zone: "{{ openshift_openstack_nsupdate_zone }}"
-    record: "{{ '*.' + hostvars[groups.masters[0]].openshift_master_default_subdomain | replace('.' + openshift_openstack_nsupdate_zone, '') }}"
+    record: "{{ ('*.' + hostvars[groups.masters[0]].openshift_master_default_subdomain) | replace('.' + openshift_openstack_nsupdate_zone, '') }}"
     value: "{{ openshift_openstack_public_router_ip }}"
     type: "A"
     state: "{{ l_dns_record_state | default('present') }}"

+ 34 - 0
roles/openshift_openstack/tasks/prerequisites/all-in-one-cluster-deployment-check.yml

@@ -0,0 +1,34 @@
+---
+# add localhost to OSEv3 so we can access OSEv3 inventory variables
+- name: Add localhost to OSEv3
+  add_host:
+    hostname: 'localhost'
+    groupname: 'OSEv3'
+
+- name: Check number of master nodes is greater than 0
+  assert:
+    that: openshift_openstack_num_masters > 0
+    msg: "openshift_use_all_in_one_cluster_deployment: openshift_openstack_num_masters must be greater than 0"
+
+- name: Check number of infra nodes is equal to 0
+  assert:
+    that: openshift_openstack_num_infra == 0
+    msg: "openshift_use_all_in_one_cluster_deployment: openshift_openstack_num_infra must be 0"
+
+- name: Check number of app nodes is equal to 0
+  assert:
+    that: openshift_openstack_num_nodes == 0
+    msg: "openshift_use_all_in_one_cluster_deployment: openshift_openstack_num_nodes must be 0"
+
+- name: Check openshift_openstack_master_group_name is set to node-config-all-in-one
+  assert:
+    that: openshift_openstack_master_group_name == 'node-config-all-in-one'
+    msg: "openshift_use_all_in_one_cluster_deployment: openshift_openstack_num_nodes must be set to node-config-all-in-one"
+
+- name: Check openshift_node_groups contains node-config-all-in-one entry
+  assert:
+    that: openshift_node_groups | selectattr('name', 'equalto', 'node-config-all-in-one') | list | count > 0
+    msg: "openshift_use_all_in_one_cluster_deployment: openshift_node_groups must contain a node-config-all-in-one entry"
+
+- name: Clear inventory
+  meta: refresh_inventory

+ 14 - 0
roles/openshift_openstack/tasks/prerequisites/cinder-persistent-volume-check.yml

@@ -0,0 +1,14 @@
+---
+# add localhost to OSEv3 so we can access OSEv3 inventory variables
+- name: Add localhost to OSEv3
+  add_host:
+    hostname: 'localhost'
+    groupname: 'OSEv3'
+
+- name: Check openshift_cloudprovider_openstack_blockstorage_version
+  assert:
+    that: openshift_cloudprovider_openstack_blockstorage_version == 'v2'
+    msg: "openshift_use_cinder_persistent_volume: openshift_cloudprovider_openstack_blockstorage_version must be set to v2"
+
+- name: Clear inventory
+  meta: refresh_inventory

+ 34 - 0
roles/openshift_openstack/tasks/prerequisites/cinder-registry-check.yml

@@ -0,0 +1,34 @@
+---
+# add localhost to OSEv3 so we can access OSEv3 inventory variables
+- name: Add localhost to OSEv3
+  add_host:
+    hostname: 'localhost'
+    groupname: 'OSEv3'
+
+- name: Check openshift_hosted_registry_storage_kind value
+  assert:
+    that: openshift_hosted_registry_storage_kind == 'openstack'
+    msg: "openshift_use_cinder_registry: openshift_hosted_registry_storage_kind must be set to openstack"
+
+- name: Check openshift_hosted_registry_storage_access_modes is set
+  assert:
+    that: openshift_hosted_registry_storage_access_modes is defined
+    msg: "openshift_use_cinder_registry: openshift_hosted_registry_storage_access_modes must be defined"
+
+- name: Check openshift_hosted_registry_storage_openstack_filesystem is set
+  assert:
+    that: openshift_hosted_registry_storage_openstack_filesystem is defined
+    msg: "openshift_use_cinder_registry: openshift_hosted_registry_storage_openstack_filesystem must be defined"
+
+- name: Check openshift_hosted_registry_storage_volume_size is set
+  assert:
+    that: openshift_hosted_registry_storage_volume_size is defined
+    msg: "openshift_use_cinder_registry: openshift_hosted_registry_storage_volume_size must be defined"
+
+- name: Either openshift_hosted_registry_storage_openstack_volumeID or openshift_hosted_registry_storage_volume_name must be defined
+  assert:
+    that: openshift_hosted_registry_storage_openstack_volumeID is defined or openshift_hosted_registry_storage_volume_name is defined
+    msg: "openshift_use_cinder_registry: Either openshift_hosted_registry_storage_openstack_volumeID or openshift_hosted_registry_storage_volume_name must be defined"
+
+- name: Clear inventory
+  meta: refresh_inventory

+ 39 - 0
roles/openshift_openstack/tasks/prerequisites/cloud-provider-check.yml

@@ -0,0 +1,39 @@
+---
+# add localhost to OSEv3 so we can access OSEv3 inventory variables
+- name: Add localhost to OSEv3
+  add_host:
+    hostname: 'localhost'
+    groupname: 'OSEv3'
+
+- name: Check openshift_cloudprovider_kind value
+  assert:
+    that: openshift_cloudprovider_kind == 'openstack'
+    msg: "openshift_use_cloud_provider: openshift_cloudprovider_kind must be set to openstack"
+  when: openshift_cloudprovider_openstack_conf_file is not defined
+
+- name: Check openshift_cloudprovider_openstack_auth_url is defined
+  assert:
+    that: openshift_cloudprovider_openstack_auth_url is defined
+    msg: "openshift_use_cloud_provider: openshift_cloudprovider_openstack_auth_url must be defined"
+  when: openshift_cloudprovider_openstack_conf_file is not defined
+
+- name: Check openshift_cloudprovider_openstack_username is defined
+  assert:
+    that: openshift_cloudprovider_openstack_username is defined
+    msg: "openshift_use_cloud_provider: openshift_cloudprovider_openstack_username must be defined"
+  when: openshift_cloudprovider_openstack_conf_file is not defined
+
+- name: Check openshift_cloudprovider_openstack_password is defined
+  assert:
+    that: openshift_cloudprovider_openstack_password is defined
+    msg: "openshift_use_cloud_provider: openshift_cloudprovider_openstack_password must be defined"
+  when: openshift_cloudprovider_openstack_conf_file is not defined
+
+- name: Check that an openshift_cloudprovider_openstack tenant parameter is defined
+  assert:
+    that: openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined
+    msg: "openshift_use_cloud_provider: either openshift_cloudprovider_openstack_tenant_id or openshift_cloudprovider_openstack_tenant_name must be defined"
+  when: openshift_cloudprovider_openstack_conf_file is not defined
+
+- name: Clear inventory
+  meta: refresh_inventory

roles/openshift_openstack/tasks/image-and-flavor-check.yml → roles/openshift_openstack/tasks/prerequisites/image-and-flavor-check.yml


+ 35 - 0
roles/openshift_openstack/tasks/prerequisites/kuryr-check.yml

@@ -0,0 +1,35 @@
+---
+- name: Check openshift_use_openshift_sdn is false
+  assert:
+    that: not openshift_use_openshift_sdn
+    msg: "openshift_use_kuryr: openshift_use_openshift_sdn must be false"
+
+- name: Check use_trunk_ports is true
+  assert:
+    that: use_trunk_ports
+    msg: "openshift_use_kuryr: use_trunk_ports must be true"
+
+- name: Check os_sdn_network_plugin_name is set to cni
+  assert:
+    that: os_sdn_network_plugin_name == 'cni'
+    msg: "openshift_use_kuryr: os_sdn_network_plugin_name must be set to cni"
+
+- name: Check openshift_node_proxy_mode is set to userspace
+  assert:
+    that: openshift_node_proxy_mode == 'userspace'
+    msg: "openshift_use_kuryr: openshift_node_proxy_mode must be set to userspace"
+
+- name: Check openshift_master_open_ports is set
+  assert:
+    that: openshift_master_open_ports is defined
+    msg: "openshift_use_kuryr: openshift_master_open_ports must be defined"
+
+- name: Check openshift_node_open_ports is set
+  assert:
+    that: openshift_node_open_ports is defined
+    msg: "openshift_use_kuryr: openshift_node_open_ports must be defined"
+
+- name: Check kuryr_openstack_public_net_id is set
+  assert:
+    that: kuryr_openstack_public_net_id is defined
+    msg: "openshift_use_kuryr: kuryr_openstack_public_net_id must be defined"

+ 15 - 0
roles/openshift_openstack/tasks/prerequisites/neutron-internal-dns-check.yml

@@ -0,0 +1,15 @@
+---
+- name: Check openshift_openstack_fqdn_nodes is false
+  assert:
+    that: not openshift_openstack_fqdn_nodes
+    msg: "openshift_openstack_use_neutron_internal_dns: openshift_openstack_fqdn_nodes must be false"
+
+- name: Check openshift_openstack_dns_nameservers is empty
+  assert:
+    that: openshift_openstack_dns_nameservers | count == 0
+    msg: "openshift_openstack_use_neutron_internal_dns: openshift_openstack_dns_nameservers must be empty"
+
+- name: Check openshift_openstack_external_nsupdate_keys does not contain private entry
+  assert:
+    that: openshift_openstack_external_nsupdate_keys.private is not defined
+    msg: "openshift_openstack_use_neutron_internal_dns: openshift_openstack_external_nsupdate_keys must not contain a private entry"

+ 10 - 0
roles/openshift_openstack/tasks/prerequisites/no-floating-ip-check.yml

@@ -0,0 +1,10 @@
+---
+- name: Check openshift_openstack_router_name is defined
+  assert:
+    that: openshift_openstack_router_name is defined and openshift_openstack_router_name
+    msg: "openshift_openstack_use_no_floating_ip: openshift_openstack_router_name must be defined"
+
+- name: Check openshift_openstack_node_subnet_name is defined
+  assert:
+    that: openshift_openstack_node_subnet_name is defined and openshift_openstack_node_subnet_name
+    msg: "openshift_openstack_use_no_floating_ip: openshift_openstack_node_subnet_name must be defined"

+ 16 - 0
roles/openshift_openstack/tasks/prerequisites/nsupdate-check.yml

@@ -0,0 +1,16 @@
+---
+- name: Check openshift_openstack_nsupdate_zone is defined
+  assert:
+    that: openshift_openstack_nsupdate_zone is defined and openshift_openstack_nsupdate_zone
+    msg: "openshift_openstack_use_nsupdate: openshift_openstack_nsupdate_zone must be defined"
+
+- name: Check that there is a public or private entry in openshift_openstack_external_nsupdate_keys
+  assert:
+    that: openshift_openstack_external_nsupdate_keys.private is defined or openshift_openstack_external_nsupdate_keys.public is defined
+    msg: "openshift_openstack_use_nsupdate: openshift_openstack_external_nsupdate_keys must have at least one of a public or private entry"
+
+- name: Check that either openshift_openstack_public_hostname_suffix or openshift_openstack_private_hostname_suffix is defined
+  assert:
+    that: (openshift_openstack_public_hostname_suffix is defined and openshift_openstack_public_hostname_suffix) or (openshift_openstack_private_hostname_suffix is defined and openshift_openstack_private_hostname_suffix)
+    msg: "openshift_openstack_use_nsupdate: either openshift_openstack_public_hostname_suffix or openshift_openstack_private_hostname_suffix must be defined"
+  when: openshift_openstack_external_nsupdate_keys.private is defined and openshift_openstack_external_nsupdate_keys.public is defined

+ 30 - 0
roles/openshift_openstack/tasks/prerequisites/openstack-ssl-check.yml

@@ -0,0 +1,30 @@
+---
+# add localhost to OSEv3 so we can access OSEv3 inventory variables
+- name: Add localhost to OSEv3
+  add_host:
+    hostname: 'localhost'
+    groupname: 'OSEv3'
+
+- name: Check openshift_certificates_redeploy is true
+  assert:
+    that: openshift_certificates_redeploy
+    msg: "openshift_use_openstack_ssl: openshift_certificates_redeploy must be true"
+
+- name: Check openshift_additional_ca is defined
+  assert:
+    that: openshift_additional_ca is defined and openshift_additional_ca
+    msg: "openshift_use_openstack_ssl: openshift_additional_ca must be defined"
+
+- name: Check kuryr_openstack_ca is defined
+  assert:
+    that: kuryr_openstack_ca is defined and kuryr_openstack_ca
+    msg: "openshift_use_openstack_ssl: kuryr_openstack_ca must be defined"
+  when: openshift_use_kuryr
+
+- name: Check openshift_cloudprovider_openstack_ca_file is defined
+  assert:
+    that: openshift_cloudprovider_openstack_ca_file is defined and openshift_cloudprovider_openstack_ca_file
+    msg: "openshift_use_openstack_ssl: openshift_cloudprovider_openstack_ca_file must be defined"
+
+- name: Clear inventory
+  meta: refresh_inventory

+ 15 - 0
roles/openshift_openstack/tasks/prerequisites/provider-network-check.yml

@@ -0,0 +1,15 @@
+---
+- name: Check openshift_openstack_provider_network_name is defined
+  assert:
+    that: openshift_openstack_provider_network_name is defined and openshift_openstack_provider_network_name
+    msg: "openshift_openstack_use_provider_network: openshift_openstack_provider_network_name must be defined"
+
+- name: Check openshift_openstack_external_network_name is undefined
+  assert:
+    that: openshift_openstack_external_network_name is undefined or not openshift_openstack_external_network_name
+    msg: "openshift_openstack_use_provider_network: openshift_openstack_external_network_name must not be defined"
+
+- name: Check openshift_openstack_private_network_name is undefined
+  assert:
+    that: openshift_openstack_private_network_name is undefined or not openshift_openstack_private_network_name
+    msg: "openshift_openstack_use_provider_network: openshift_openstack_private_network_name must not be defined"

+ 24 - 0
roles/openshift_openstack/tasks/prerequisites/swift-registry-check.yml

@@ -0,0 +1,24 @@
+---
+# add localhost to OSEv3 so we can access OSEv3 inventory variables
+- name: Add localhost to OSEv3
+  add_host:
+    hostname: 'localhost'
+    groupname: 'OSEv3'
+
+- name: Check openshift_hosted_registry_storage_kind value
+  assert:
+    that: openshift_hosted_registry_storage_kind == 'object'
+    msg: "openshift_use_swift_registry: openshift_hosted_registry_storage_kind must be set to object"
+
+- name: Check openshift_hosted_registry_storage_provider value
+  assert:
+    that: openshift_hosted_registry_storage_provider == 'swift'
+    msg: "openshift_use_swift_registry: openshift_hosted_registry_storage_provider must be set to swift"
+
+- name: Check openshift_hosted_registry_storage_swift_container is set
+  assert:
+    that: openshift_hosted_registry_storage_swift_container is defined
+    msg: "openshift_use_swift_registry: openshift_hosted_registry_storage_swift_container must be defined"
+
+- name: Clear inventory
+  meta: refresh_inventory
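
A minimal inventory sketch that satisfies these Swift registry checks; the container name is a placeholder.

```
openshift_hosted_registry_storage_kind: object
openshift_hosted_registry_storage_provider: swift
openshift_hosted_registry_storage_swift_container: 'registry'   # placeholder container name
```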

+ 5 - 2
roles/openshift_openstack/templates/heat_stack.yaml.j2

@@ -391,7 +391,7 @@ resources:
       gateway_ip: null
 {% endif %}
 
-{% if not openshift_openstack_router_name %}
+{% if not openshift_openstack_router_name and not openshift_openstack_node_subnet_name %}
   router:
     type: OS::Neutron::Router
     properties:
@@ -580,7 +580,7 @@ resources:
           params:
             cluster_id: {{ openshift_openstack_full_dns_domain }}
       rules: {{ openshift_openstack_infra_secgroup_rules|to_json }}
-
+  {% if openshift_openstack_num_cns > 0 %}
   cns-secgrp:
     type: OS::Neutron::SecurityGroup
     properties:
@@ -595,6 +595,7 @@ resources:
           params:
             cluster_id: {{ openshift_openstack_full_dns_domain }}
       rules: {{ openshift_openstack_cns_secgroup_rules|to_json }}
+  {% endif %}
 {% endif %}
 
   lb-secgrp:
@@ -1093,7 +1094,9 @@ resources:
             - { get_resource: flat-secgrp }
 {% else %}
             - { get_resource: node-secgrp }
+{% if openshift_openstack_num_cns > 0 %}
             - { get_resource: cns-secgrp }
+{% endif %}
 {% if openshift_use_kuryr|default(false)|bool %}
           pod_secgrp:
             - { get_resource: pod_access_sg }

+ 117 - 9
roles/openshift_ovirt/README.md

@@ -1,16 +1,13 @@
-OpenShift oVirt
-=============
+# OpenShift oVirt
 
 OpenShift Provisioned on Red Hat Virtualization and oVirt
 
-Role Tasks
-----------
+## Role Tasks
 
 * `build_vm_list.yml`: Creates a list of virtual machine definitions and
   affinity groups based on a simple manifest (below)
 
-Role Variables
---------------
+## Role Variables
 
 For documentation on virtual machine profile options, see the [oVirt Ansible VM-Infra Documentation](https://github.com/oVirt/ovirt-ansible-vm-infra)
 
@@ -19,6 +16,28 @@ For documentation on virtual machine profile options, see the [oVirt Ansible VM-
 | openshift_ovirt_vm_profile  | See below.    | Dictionary of dictionaries providing common VM parameters for virtual machine creation. |
 | openshift_ovirt_vm_manifest | See below.    | List of dictionaries specifying node base name, count, and which of the above profiles to apply. The default creates three master nodes, three infrastructure nodes, one application node, and a load balancer. |
 
+The `openshift_ovirt_vm_manifest` variable can contain the following attributes:
+
+| Name      | Type | Default value | Description                                                                                        |
+|-----------|------|---------------|----------------------------------------------------------------------------------------------------|
+| nic_mode  | Dict | UNDEF         | If defined, the interface on the VM is configured with a static address instead of a dynamic one. |
+
+Within `nic_mode`, the following parameters are available:
+
+| Name            |  Type  | Default value | Description                                  |
+|-----------------|--------|---------------|----------------------------------------------|
+| nic_ip_address  | String | UNDEF         | Static IP address for the VM interface.      |
+| nic_netmask     | String | UNDEF         | Static netmask for the VM interface.         |
+| nic_gateway     | String | UNDEF         | Static gateway address for the VM interface. |
+| nic_on_boot     | Bool   | True          | Whether the interface comes up on boot.      |
+| nic_name        | String | 'eth0'        | The interface name on the VM.                |
+| dns_servers     | String | UNDEF         | The DNS servers set on the VM.               |
+
+
+## Examples
+
+- **openshift_ovirt_vm_profile**
+
 ```
 openshift_ovirt_vm_profile:
   master:
@@ -59,25 +78,114 @@ openshift_ovirt_vm_profile:
     state: running
 ```
 
+
+- **openshift_ovirt_vm_manifest**
 ```
 openshift_ovirt_vm_manifest:
+#######################################
+# Multiple nodes with static IP addresses
+#######################################
 - name: 'master'
   count: 3
   profile: 'master'
+  nic_mode:
+      # The key must match the VM base name plus its index, e.g. for name 'test' the first entry is 'test0'
+      master0:
+        nic_ip_address: '192.168.123.160'
+        nic_netmask: '255.255.255.0'
+        nic_gateway: '192.168.123.1'
+        nic_on_boot: True
+        nic_name: 'eth0'
+        dns_servers: "192.168.1.100"
+      master1:
+        nic_ip_address: '192.168.123.161'
+        nic_netmask: '255.255.255.0'
+        nic_gateway: '192.168.123.1'
+        nic_on_boot: True
+        nic_name: 'nic0'
+        dns_servers: "192.168.1.100"
+      master2:
+        nic_ip_address: '192.168.123.162'
+        nic_netmask: '255.255.255.0'
+        nic_gateway: '192.168.123.1'
+        nic_on_boot: True
+        dns_servers: "192.168.1.100"
 - name: 'infra'
-  count: 3
+  count: 2
   profile: 'node'
+  nic_mode:
+      infra0:
+        nic_ip_address: '192.168.123.163'
+        nic_netmask: '255.255.255.0'
+        nic_gateway: '192.168.123.1'
+        nic_on_boot: True
+        dns_servers: "192.168.1.100"
+      infra1:
+        nic_ip_address: '192.168.123.164'
+        nic_netmask: '255.255.255.0'
+        nic_gateway: '192.168.123.1'
+        nic_on_boot: True
+        dns_servers: "192.168.1.100"
+
+################################################
+# Multiple/single nodes with dynamic IP addresses
+################################################
 - name: 'compute'
-  count: 1
+  count: 2
   profile: 'node'
+
+######################################
+# Single node with static IP addresses
+######################################
 - name: 'lb'
   count: 1
-  profile: 'node'
+  profile: 'node_vm'
+  nic_mode:
+      lb0:
+        nic_ip_address: '192.168.123.170'
+        nic_netmask: '255.255.255.0'
+        nic_gateway: '192.168.123.1'
+        dns_servers: "192.168.1.100"
 ```
 
 Example Playbook
 ----------------
 
+```
+---
+- name: Deploy oVirt template and virtual machines
+  hosts: localhost
+  connection: local
+  gather_facts: false
+
+  pre_tasks:
+    - name: Log in to oVirt
+      ovirt_auth:
+        url: "{{ engine_url }}"
+        username: "{{ engine_user }}"
+        password: "{{ engine_password }}"
+        ca_file: "{{ engine_cafile | default(omit) }}"
+        insecure: "{{ engine_insecure | default(true) }}"
+      tags:
+        - always
+    - name: Build virtual machine facts
+      import_role:
+        name: openshift_ovirt
+        tasks_from: build_vm_list.yml
+
+  roles:
+    - oVirt.image-template
+    - oVirt.vm-infra
+
+  post_tasks:
+    - name: Logout from oVirt
+      ovirt_auth:
+        state: absent
+        ovirt_auth: "{{ ovirt_auth }}"
+      tags:
+        - always
+```
+
 License
 -------
 

+ 9 - 0
roles/openshift_ovirt/tasks/build_vm_list.yml

@@ -17,6 +17,15 @@
       'cloud_init':
       {
       'host_name': '{{ item.name }}{{ iter }}.{{ openshift_ovirt_dns_zone }}',
+      {% if item.nic_mode is defined -%}
+      'nic_boot_protocol': 'static',
+      'nic_ip_address': '{{ item["nic_mode"][item["name"] + iter | string ]["nic_ip_address"] }}',
+      'nic_netmask': '{{ item["nic_mode"][item["name"] + iter | string ]["nic_netmask"] }}',
+      'nic_gateway': '{{ item["nic_mode"][item["name"] + iter | string ]["nic_gateway"] }}',
+      'nic_on_boot': {{ item["nic_mode"][item["name"] + iter | string ]["nic_on_boot"] | default(true) | bool }},
+      'nic_name': '{{ item["nic_mode"][item["name"] + iter | string ]["nic_name"] | default("eth0") }}',
+      'dns_servers': '{{ item["nic_mode"][item["name"] + iter | string ]["dns_servers"] }}',
+      {% endif -%}
       'authorized_ssh_keys': '{{ openshift_ovirt_ssh_key }}'
       },
       'profile':  {{ openshift_ovirt_vm_profile[ item.profile ] }} ,

+ 2 - 19
roles/openshift_persistent_volumes/tasks/main.yml

@@ -9,23 +9,6 @@
     cp /etc/origin/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
   changed_when: False
 
-- set_fact:
-    glusterfs_pv:
-    - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-volume"
-      capacity: "{{ openshift_hosted_registry_storage_volume_size }}"
-      access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"
-      storage:
-        glusterfs:
-          endpoints: "{{ openshift_hosted_registry_storage_glusterfs_endpoints }}"
-          path: "{{ openshift_hosted_registry_storage_glusterfs_path }}"
-          readOnly: "{{ openshift_hosted_registry_storage_glusterfs_readOnly }}"
-    glusterfs_pvc:
-    - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-claim"
-      capacity: "{{ openshift_hosted_registry_storage_volume_size }}"
-      access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"
-      storageclass: ""
-  when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
-
 - name: create standard pv and pvc lists
   # generate_pv_pvcs_list is a custom action module defined in
   # roles/lib_utils/action_plugins/generate_pv_pvcs_list.py
@@ -34,12 +17,12 @@
 
 - include_tasks: pv.yml
   vars:
-    l_extra_persistent_volumes: "{{ openshift_persistent_volume_extras | union(glusterfs_pv) }}"
+    l_extra_persistent_volumes: "{{ openshift_persistent_volume_extras }}"
     persistent_volumes: "{{ l_pv_pvcs_list.persistent_volumes | union(l_extra_persistent_volumes) }}"
 
 - include_tasks: pvc.yml
   vars:
-    l_extra_persistent_volume_claims: "{{ openshift_persistent_volume_claims_extras | union(glusterfs_pvc) }}"
+    l_extra_persistent_volume_claims: "{{ openshift_persistent_volume_claims_extras }}"
     persistent_volume_claims: "{{ l_pv_pvcs_list.persistent_volume_claims | union(l_extra_persistent_volume_claims) }}"
 
 - name: Delete temp directory

+ 4 - 3
roles/openshift_sdn/files/sdn-ovs.yaml

@@ -63,16 +63,17 @@ spec:
               exit 0
           }
           trap quit SIGTERM
-          /usr/share/openvswitch/scripts/ovs-ctl start --system-id=random
+          /usr/share/openvswitch/scripts/ovs-ctl start --no-ovs-vswitchd --system-id=random
 
           # Restrict the number of pthreads ovs-vswitchd creates to reduce the
           # amount of RSS it uses on hosts with many cores
           # https://bugzilla.redhat.com/show_bug.cgi?id=1571379
           # https://bugzilla.redhat.com/show_bug.cgi?id=1572797
           if [[ `nproc` -gt 12 ]]; then
-              ovs-vsctl set Open_vSwitch . other_config:n-revalidator-threads=4
-              ovs-vsctl set Open_vSwitch . other_config:n-handler-threads=10
+              ovs-vsctl --no-wait set Open_vSwitch . other_config:n-revalidator-threads=4
+              ovs-vsctl --no-wait set Open_vSwitch . other_config:n-handler-threads=10
           fi
+          /usr/share/openvswitch/scripts/ovs-ctl start --no-ovsdb-server --system-id=random
           while true; do sleep 5; done
         securityContext:
           runAsUser: 0

+ 4 - 1
test/ci/launch.yml

@@ -47,6 +47,7 @@
             "Name": "{{ item.name }}",
             "ansible-groups": "{{ item.ansible_groups | join(',') }}",
             "ansible-node-group": "{{ item.node_group }}",
+            "expirationDate": "{{ item.aws_expiration_date | default(aws_expiration_date) }}"
           }
 
     - name: Add machine to inventory
@@ -75,7 +76,9 @@
   tasks:
     - wait_for_connection: {}
     - setup: {}
-
+    - name: Make sure hostname is set to public ansible host
+      hostname:
+        name: "{{ ansible_host }}"
 
 - import_playbook: ../../playbooks/openshift-node/network_manager.yml
 - import_playbook: ../../playbooks/prerequisites.yml

+ 4 - 0
test/ci/vars.yml.sample

@@ -14,6 +14,8 @@ aws_cluster_id: "ci"
 # us-east-1d
 aws_subnet: "subnet-cf57c596"
 
+aws_expiration_date: "{{ lookup('pipe','date -d \"4 hours\" --iso=minutes --utc') }}"
+
 aws_ami_tags:
   "tag:operating_system": "rhel"
   "tag:image_stage": "base"
@@ -40,3 +42,5 @@ aws_instances:
   # - device_name: /dev/sdb
   #   volume_size: 50
   #   delete_on_termination: yes
  # Set expiration date for instances in the CI namespace
+  #aws_expiration_date: "{{ lookup('pipe','date -d \"4 hours\" --iso=minutes --utc') }}"