
Merge remote-tracking branch 'upstream/master' into ports

Emilio Garcia, 6 years ago
parent
commit 4013779688
86 changed files with 454 additions and 173 deletions
  1. .tito/packages/openshift-ansible (+1 -1)
  2. README.md (+1 -1)
  3. hack/ci-build-rpm.sh (+9 -0)
  4. hack/ci-build-unittests.sh (+6 -0)
  5. hack/ci-run-unittests.sh (+5 -0)
  6. images/installer/Dockerfile (+1 -3)
  7. images/installer/Dockerfile.ci (+1 -1)
  8. images/installer/origin-extra-root/etc/yum.repos.d/centos-ansible26.repo (+6 -0)
  9. images/installer/root/usr/local/bin/generate (+1 -1)
  10. inventory/hosts.example (+1 -1)
  11. inventory/hosts.openstack (+3 -3)
  12. openshift-ansible.spec (+137 -2)
  13. playbooks/azure/openshift-cluster/tasks/create_blob_from_vm.yml (+1 -1)
  14. playbooks/byo/calico/legacy_upgrade.yml (+1 -1)
  15. playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml (+3 -3)
  16. playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml (+2 -2)
  17. playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml (+2 -2)
  18. playbooks/init/cluster_facts.yml (+4 -1)
  19. playbooks/init/validate_hostnames.yml (+4 -3)
  20. playbooks/openshift-autoheal/private/config.yml (+1 -0)
  21. playbooks/openshift-etcd/private/redeploy-ca.yml (+1 -1)
  22. playbooks/openshift-etcd/private/scaleup.yml (+4 -1)
  23. playbooks/openshift-glusterfs/private/add_hosts.yml (+1 -1)
  24. playbooks/openshift-master/private/create_service_signer_cert.yml (+1 -1)
  25. playbooks/openshift-master/private/redeploy-openshift-ca.yml (+1 -1)
  26. playbooks/openshift-node/private/join.yml (+1 -1)
  27. playbooks/openshift-node/private/registry_auth.yml (+1 -1)
  28. playbooks/openshift-node/private/restart.yml (+1 -1)
  29. playbooks/openstack/configuration.md (+33 -0)
  30. playbooks/openstack/openshift-cluster/build_image.yml (+2 -0)
  31. playbooks/openstack/openshift-cluster/install.yml (+3 -0)
  32. playbooks/openstack/resources.py (+7 -11)
  33. playbooks/openstack/sample-inventory/group_vars/OSEv3.yml (+1 -0)
  34. playbooks/prerequisites.yml (+7 -0)
  35. requirements.txt (+1 -1)
  36. roles/ansible_service_broker/defaults/main.yml (+6 -0)
  37. roles/ansible_service_broker/templates/configmap.yaml.j2 (+1 -1)
  38. roles/calico/templates/calicov3.yml.j2 (+2 -0)
  39. roles/container_runtime/tasks/common/setup_docker_symlink.yml (+5 -5)
  40. roles/etcd/tasks/add_new_member.yml (+1 -4)
  41. roles/etcd/tasks/set_facts.yml (+1 -0)
  42. roles/etcd/tasks/static.yml (+1 -2)
  43. roles/lib_openshift/library/oc_group.py (+1 -1)
  44. roles/lib_openshift/src/class/oc_group.py (+1 -1)
  45. roles/lib_openshift/src/test/unit/test_oc_group.py (+3 -3)
  46. roles/lib_utils/action_plugins/sanity_checks.py (+9 -24)
  47. roles/lib_utils/callback_plugins/aa_version_requirement.py (+1 -1)
  48. roles/lib_utils/library/openshift_cert_expiry.py (+2 -1)
  49. roles/lib_utils/test/test_sanity_checks.py (+4 -0)
  50. roles/openshift_autoheal/defaults/main.yml (+2 -11)
  51. roles/openshift_certificate_expiry/tasks/main.yml (+5 -0)
  52. roles/openshift_cloud_provider/tasks/vsphere-svc.yml (+2 -2)
  53. roles/openshift_cluster_monitoring_operator/defaults/main.yml (+2 -0)
  54. roles/openshift_cluster_monitoring_operator/tasks/install.yaml (+4 -4)
  55. roles/openshift_cluster_monitoring_operator/templates/cluster-monitoring-operator-config.j2 (+6 -0)
  56. roles/openshift_control_plane/files/apiserver.yaml (+5 -0)
  57. roles/openshift_control_plane/tasks/check_master_api_is_ready.yml (+6 -6)
  58. roles/openshift_control_plane/tasks/main.yml (+4 -3)
  59. roles/openshift_control_plane/tasks/pre_pull.yml (+2 -3)
  60. roles/openshift_control_plane/tasks/upgrade.yml (+3 -2)
  61. roles/openshift_examples/files/examples/latest/image-streams/image-streams-centos7.json (+1 -1)
  62. roles/openshift_examples/files/examples/latest/image-streams/image-streams-rhel7.json (+1 -1)
  63. roles/openshift_examples/tasks/main.yml (+2 -2)
  64. roles/openshift_hosted/tasks/migrate_default_registry_var.yml (+0 -22)
  65. roles/openshift_hosted/tasks/storage/glusterfs.yml (+1 -1)
  66. roles/openshift_hosted/templates/registry_config.j2 (+4 -1)
  67. roles/openshift_logging/README.md (+1 -1)
  68. roles/openshift_manage_node/tasks/config.yml (+2 -2)
  69. roles/openshift_manage_node/tasks/main.yml (+1 -1)
  70. roles/openshift_master_certificates/tasks/main.yml (+1 -1)
  71. roles/openshift_node/tasks/prepull.yml (+1 -2)
  72. roles/openshift_node/tasks/upgrade.yml (+1 -1)
  73. roles/openshift_node_group/files/sync.yaml (+11 -3)
  74. roles/openshift_node_group/tasks/sync.yml (+1 -1)
  75. roles/openshift_openstack/tasks/node-configuration.yml (+1 -1)
  76. roles/openshift_openstack/templates/heat_stack_server.yaml.j2 (+1 -1)
  77. roles/openshift_ovirt/README.md (+3 -1)
  78. roles/openshift_ovirt/tasks/build_vm_list.yml (+43 -1)
  79. roles/openshift_storage_glusterfs/README.md (+1 -1)
  80. roles/openshift_storage_glusterfs/files/glusterfs-template.yml (+9 -2)
  81. roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml (+2 -2)
  82. roles/openshift_storage_glusterfs/tasks/glusterfs_uninstall.yml (+1 -1)
  83. roles/openshift_storage_glusterfs/tasks/label_nodes.yml (+1 -1)
  84. roles/openshift_storage_glusterfs/templates/topology.json.j2 (+1 -1)
  85. test-requirements.txt (+1 -0)
  86. test/ci/launch.yml (+28 -1)

+ 1 - 1
.tito/packages/openshift-ansible

@@ -1 +1 @@
-4.0.0-0.7.0 ./
+4.0.0-0.33.0 ./

+ 1 - 1
README.md

@@ -61,7 +61,7 @@ Install base dependencies:
 
 Requirements:
 
-- Ansible >= 2.6.2
+- Ansible >= 2.6.5, Ansible 2.7 is not yet supported and known to fail
 - Jinja >= 2.7
 - pyOpenSSL
 - python-lxml

+ 9 - 0
hack/ci-build-rpm.sh

@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# This script builds RPMs for Prow CI
+tito tag --offline --accept-auto-changelog --use-release '9999%{?dist}'
+tito build --output="_output/local/releases" --rpm --test --offline --quiet
+
+mkdir _output/local/releases/rpms
+mv _output/local/releases/noarch/* _output/local/releases/rpms
+createrepo _output/local/releases/rpms

+ 6 - 0
hack/ci-build-unittests.sh

@@ -0,0 +1,6 @@
+#!/bin/bash
+
+# This script installs tox dependencies in the test container
+yum install -y gcc libffi-devel python-devel openssl-devel python-pip
+pip install tox
+chmod uga+w /etc/passwd

+ 5 - 0
hack/ci-run-unittests.sh

@@ -0,0 +1,5 @@
+#!/bin/bash
+
+# This script runs tox tests in test container
+echo "${USER:-default}:x:$(id -u):$(id -g):Default User:${HOME:-/tmp}:/sbin/nologin" >> /etc/passwd
+tox 2>&1 | tee /tmp/artifacts/output.log

+ 1 - 3
images/installer/Dockerfile

@@ -10,11 +10,9 @@ COPY images/installer/origin-extra-root /
 # install ansible and deps
 RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl python2-passlib httpd-tools openssh-clients origin-clients iproute patch" \
  && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
- && EPEL_PKGS="python2-boto python2-boto3 python2-crypto which python2-pip.noarch python2-scandir python2-packaging azure-cli" \
- && EPEL_TESTING_PKGS="ansible" \
+ && EPEL_PKGS="ansible-2.6.5 python2-boto python2-boto3 python2-crypto which python2-pip.noarch python2-scandir python2-packaging azure-cli-2.0.47" \
 && yum install -y epel-release \
 && yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
- && yum install -y --setopt=tsflags=nodocs --enablerepo=epel-testing $EPEL_TESTING_PKGS \
 && if [ "$(uname -m)" == "x86_64" ]; then yum install -y https://sdodson.fedorapeople.org/google-cloud-sdk-183.0.0-3.el7.x86_64.rpm ; fi \
 && yum install -y java-1.8.0-openjdk-headless \
 && rpm -V $INSTALL_PKGS $EPEL_PKGS $EPEL_TESTING_PKGS \

+ 1 - 1
images/installer/Dockerfile.ci

@@ -27,7 +27,7 @@ COPY images/installer/origin-extra-root /
 RUN yum install -y epel-release && \
     rm -rf /etc/yum.repos.d/centos-openshift-origin.repo && \
     yum-config-manager --enable built > /dev/null && \
-    INSTALL_PKGS="openssh google-cloud-sdk azure-cli" \
+    INSTALL_PKGS="openssh google-cloud-sdk azure-cli-2.0.47" \
     yum install --setopt=tsflags=nodocs -y $INSTALL_PKGS openshift-ansible-test && \
     yum clean all
 

+ 6 - 0
images/installer/origin-extra-root/etc/yum.repos.d/centos-ansible26.repo

@@ -0,0 +1,6 @@
+
+[centos-ansible26-testing]
+name=CentOS Ansible 2.6 testing repo
+baseurl=https://cbs.centos.org/repos/configmanagement7-ansible-26-testing/x86_64/os/
+enabled=1
+gpgcheck=0

+ 1 - 1
images/installer/root/usr/local/bin/generate

@@ -186,7 +186,7 @@ class Host:
         if self.public_ip_addr:
             info += "openshift_public_ip=" + self.public_ip_addr + " "
         if self.hostname:
-            info += "openshift_hostname=" + self.hostname + " "
+            info += "openshift_kubelet_name_override=" + self.hostname + " "
         if self.public_hostname:
             info += "openshift_public_hostname=" + self.public_hostname
 

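The change above only renames the inventory variable that the generate script writes. As a reading aid, here is a small hedged sketch (not the actual installer script, whose Host class is only excerpted in the diff) of how such an inventory line is assembled with the new openshift_kubelet_name_override name; the helper and sample values are hypothetical.

    # Hypothetical helper mirroring the renamed variable in the generated inventory line.
    def build_host_line(name, public_ip=None, hostname=None, public_hostname=None):
        info = name + " "
        if public_ip:
            info += "openshift_public_ip=" + public_ip + " "
        if hostname:
            # previously emitted as openshift_hostname=...
            info += "openshift_kubelet_name_override=" + hostname + " "
        if public_hostname:
            info += "openshift_public_hostname=" + public_hostname
        return info.strip()

    print(build_host_line("master0", public_ip="10.0.0.5", hostname="master0.internal"))
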
+ 1 - 1
inventory/hosts.example

@@ -825,7 +825,7 @@ debug_level=2
 #logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
 
 # The OpenShift-Ansible installer will fail when it detects that the
-# value of openshift_hostname resolves to an IP address not bound to any local
+# value of openshift_kubelet_name_override resolves to an IP address not bound to any local
 # interfaces. This mis-configuration is problematic for any pod leveraging host
 # networking and liveness or readiness probes.
 # Setting this variable to false will override that check.

+ 3 - 3
inventory/hosts.openstack

@@ -24,7 +24,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_pkg_version=-3.0.0.0
 
 [masters]
-jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}"
+jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}"
 
 [etcd]
 jdetiber-etcd.usersys.redhat.com
@@ -33,5 +33,5 @@ jdetiber-etcd.usersys.redhat.com
 #ose3-lb-ansible.test.example.com
 
 [nodes]
-jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_group_name="node-config-master"
-jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_group_name="node-config-compute"
+jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_node_group_name="node-config-master"
+jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_node_group_name="node-config-compute"

+ 137 - 2
openshift-ansible.spec

@@ -10,14 +10,14 @@
 
 Name:           openshift-ansible
 Version:        4.0.0
-Release:        0.7.0%{?dist}
+Release:        0.33.0%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
 URL:            https://github.com/openshift/openshift-ansible
 Source0:        https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
 BuildArch:      noarch
 
-Requires:      ansible >= 2.6.2
+Requires:      ansible >= 2.5.7
 Requires:      python2
 Requires:      python-six
 Requires:      tar
@@ -189,6 +189,141 @@ BuildArch:     noarch
 %{_datadir}/ansible/%{name}/test
 
 %changelog
+* Tue Oct 23 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.33.0
+- Remove hostname override from OpenStack inventory (tomas@sedovic.cz)
+- Fixing Typo (jparrill@redhat.com)
+- quick fix for formatting of error messages, bz# 1640823 (pruan@redhat.com)
+- Mount /etc/pki into apiserver pod (sdodson@redhat.com)
+- Set openshift_hosted_registry_storage_swift_insecureskipverify's default
+  (mickael.canevet@camptocamp.com)
+- Document openshift_hosted_registry_storage_swift_insecureskipverify
+  (mickael.canevet@camptocamp.com)
+- Added capability to add dns_search and dns_server even without static
+  configuration (jparrill@redhat.com)
+- Fixes #10415 maintains the name and host_name when vm count field are 1.
+  (jparrill@redhat.com)
+- Add openshift_hosted_registry_storage_swift_insecureskipverify parameter
+  (mickael.canevet@camptocamp.com)
+- Updated logging namespace name (andy.block@gmail.com)
+- Update oc_group.py in src (camabeh@gmail.com)
+- cluster-monitoring: Adds storageclass name variable (davivcgarcia@gmail.com)
+- Update tests (camabeh@gmail.com)
+- Fix oc group get (camabeh@gmail.com)
+
+* Mon Oct 22 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.32.0
+- Allow Ansible 2.5.7 (tomas@sedovic.cz)
+- Remove value rather than replacing it with an empty string
+  (sdodson@redhat.com)
+
+* Sun Oct 21 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.31.0
+- 
+
+* Sat Oct 20 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.30.0
+- 
+
+* Fri Oct 19 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.29.0
+- 
+
+* Thu Oct 18 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.28.0
+- Fix scaleup failure for hostname override (mgugino@redhat.com)
+- Fail on openshift_kubelet_name_override for new hosts. (mgugino@redhat.com)
+
+* Thu Oct 18 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.27.0
+- Make sure images are prepulled when CRIO is used (vrutkovs@redhat.com)
+- pin azure cli to version 2.0.47 and fix start copy playbook task
+  (akalugwu@redhat.com)
+
+* Wed Oct 17 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.26.0
+- 
+
+* Wed Oct 17 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.25.0
+- 
+
+* Tue Oct 16 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.24.0
+- 
+
+* Mon Oct 15 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.23.0
+- Add ansible 2.6 repo (vrutkovs@redhat.com)
+
+* Sun Oct 14 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.22.0
+- 
+
+* Sun Oct 14 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.21.0
+- 
+
+* Fri Oct 12 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.20.0
+- Require ansible 2.6.5 (vrutkovs@redhat.com)
+- Dockerfile: install ansible 2.6 and remove epel-testing (vrutkovs@redhat.com)
+- Dockerfile: install ansible 2.6 (vrutkovs@redhat.com)
+
+* Fri Oct 12 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.19.0
+- README: ansible 2.7 is not supported (vrutkovs@redhat.com)
+- Modify sync pod to check for KUBELET_HOSTNAME_OVERRIDE (mgugino@redhat.com)
+- Configure Ansible service broker secrets (simon.ruegg@vshn.ch)
+
+* Wed Oct 10 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.18.0
+- Update main.yml (sgaikwad@redhat.com)
+- Openshift autoheal fails to pull images even if oreg_url is specified
+  (sgaikwad@redhat.com)
+
+* Tue Oct 09 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.17.0
+- Add missing option in Openstack documentation and sample file.
+  (juriarte@redhat.com)
+- Replace openshift.node.nodename with l_kubelet_node_name (mgugino@redhat.com)
+- Increase number of retries in sync DS (vrutkovs@redhat.com)
+- test/ci: update atomic hosts and restart only when necessary
+  (vrutkovs@redhat.com)
+- test/ci: make sure all packages are updated before starting install
+  (vrutkovs@redhat.com)
+- test/ci: set hostname before collecting facts (vrutkovs@redhat.com)
+- Fix etcd scaleup on standalone hosts (rteague@redhat.com)
+
+* Mon Oct 08 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.16.0
+- Fail on openshift_hostname defined; add openshift_kubelet_name_override
+  (mgugino@redhat.com)
+
+* Sun Oct 07 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.15.0
+- 
+
+* Sun Oct 07 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.14.0
+- 
+
+* Sat Oct 06 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.13.0
+- unmount just before removing (rmeggins@redhat.com)
+
+* Fri Oct 05 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.12.0
+- 
+
+* Fri Oct 05 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.11.0
+- prelim/partial update to jenkins imagestream to enable tests (while we wait
+  for global PR in openshift/origin to merge) (gmontero@redhat.com)
+- Remove unused registry migration task (vrutkovs@redhat.com)
+
+* Thu Oct 04 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.10.0
+- glusterfs: add probe script for liveness and readiness checks
+  (jmulligan@redhat.com)
+
+* Thu Oct 04 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.9.0
+- 
+
+* Wed Oct 03 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.8.0
+- roles/cluster_monitoring: minor wording improvement (pgier@redhat.com)
+- Remove unlicensed code from internet in sanity checks (mgugino@redhat.com)
+- Use clusterid attribute to filter servers in dynamic inventory
+  (rusichen@redhat.com)
+- Add CI scripts in hack/ (vrutkovs@redhat.com)
+- Replace 'command chmod' with 'file mode=...' (vrutkovs@redhat.com)
+- Start only the ovsdb so we can add the config safely (bbennett@redhat.com)
+- Add pyOpenSSL and iproute to RPM dependencies (sdodson@redhat.com)
+- Fixes #8267 (mavazque@redhat.com)
+- Node problem detector always pull images from registry.redhat.io for
+  openshift-enterprise (sgaikwad@redhat.com)
+- Replace undefined {{ item }} by filename (info@theothersolution.nl)
+- Pass admin kubeconfig (sdodson@redhat.com)
+- typo correction (i.am.emilio@gmail.com)
+- no longer creates cns security group when number of cns is 0
+  (i.am.emilio@gmail.com)
+
 * Fri Sep 28 2018 AOS Automation Release Team <aos-team-art@redhat.com> 4.0.0-0.7.0
 - Add OpenStack pre-requisites check for various features (tzumainn@redhat.com)
 - [openstack] Add configuration note for all-in-one and DNS (pep@redhat.com)

+ 1 - 1
playbooks/azure/openshift-cluster/tasks/create_blob_from_vm.yml

@@ -23,7 +23,7 @@
 - name: start copy
   command: >
     az storage blob copy start
-    --source-uri "{{ (sas.stdout | from_json).properties.output.accessSAS }}"
+    --source-uri "{{ (sas.stdout | from_json).accessSas }}"
     --account-name "{{ openshift_azure_storage_account }}"
     --account-key "{{ (keys.stdout | from_json)[0].value }}"
     --destination-container "{{ openshift_azure_container }}"

+ 1 - 1
playbooks/byo/calico/legacy_upgrade.yml

@@ -100,7 +100,7 @@
   - name: Apply node label
     delegate_to: "{{ groups.oo_first_master.0 }}"
     command: >
-      {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig label node {{ openshift.node.nodename | lower }} --overwrite projectcalico.org/ds-ready=true
+      {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig label node {{ l_kubelet_node_name | lower }} --overwrite projectcalico.org/ds-ready=true
   - name: Wait for node running
     uri:
       url: http://localhost:9099/readiness

+ 3 - 3
playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml

@@ -56,7 +56,7 @@
   tasks:
   - name: Mark node unschedulable
     oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
+      node: "{{ l_kubelet_node_name | lower }}"
       schedulable: False
     delegate_to: "{{ groups.oo_first_master.0 }}"
     retries: 10
@@ -70,7 +70,7 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ l_kubelet_node_name | lower }}
       --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       --force --delete-local-data --ignore-daemonsets
       --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
@@ -94,7 +94,7 @@
 
   - name: Set node schedulability
     oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
+      node: "{{ l_kubelet_node_name | lower }}"
       schedulable: True
     delegate_to: "{{ groups.oo_first_master.0 }}"
     retries: 10

+ 2 - 2
playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml

@@ -23,7 +23,7 @@
   # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
   - name: Mark node unschedulable
     oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
+      node: "{{ l_kubelet_node_name | lower }}"
       schedulable: False
     delegate_to: "{{ groups.oo_first_master.0 }}"
     retries: 10
@@ -33,7 +33,7 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ l_kubelet_node_name | lower }}
       --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       --force --delete-local-data --ignore-daemonsets
       --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s

+ 2 - 2
playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml

@@ -28,7 +28,7 @@
 
   - name: Mark node unschedulable
     oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
+      node: "{{ l_kubelet_node_name | lower }}"
       schedulable: False
     delegate_to: "{{ groups.oo_first_master.0 }}"
     retries: 10
@@ -45,7 +45,7 @@
   tasks:
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ l_kubelet_node_name | lower }}
      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       --force --delete-local-data --ignore-daemonsets
       --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s

+ 4 - 1
playbooks/init/cluster_facts.yml

@@ -28,7 +28,7 @@
     openshift_facts:
       role: common
       local_facts:
-        hostname: "{{ openshift_hostname | default(None) }}"
+        hostname: "{{ openshift_kubelet_name_override | default(None) }}"
         ip: "{{ openshift_ip | default(None) }}"
         public_hostname: "{{ openshift_public_hostname | default(None) }}"
         public_ip: "{{ openshift_public_ip | default(None) }}"
@@ -62,6 +62,9 @@
       role: node
       local_facts:
         sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
+  - name: set_fact l_kubelet_node_name
+    set_fact:
+      l_kubelet_node_name: "{{ openshift_kubelet_name_override | default(openshift.node.nodename) }}"
 
 - name: Initialize etcd host variables
   hosts: oo_masters_to_config

+ 4 - 3
playbooks/init/validate_hostnames.yml

@@ -10,19 +10,20 @@
     changed_when: false
     failed_when: false
 
-  - name: Validate openshift_hostname when defined
+  - name: Validate openshift_kubelet_name_override when defined
     fail:
       msg: >
         The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
         doesn't resolve to an IP address owned by this host. Please set
-        openshift_hostname variable to a hostname that when resolved on the host
+        openshift_kubelet_name_override variable to a hostname that when resolved on the host
         in question resolves to an IP address matching an interface on this host.
         This will ensure proper functionality of OpenShift networking features.
-        Inventory setting: openshift_hostname={{ openshift_hostname | default ('undefined') }}
+        Inventory setting: openshift_kubelet_name_override={{ openshift_kubelet_name_override | default ('undefined') }}
         This check can be overridden by setting openshift_hostname_check=false in
         the inventory.
         See https://docs.okd.io/latest/install_config/install/advanced_install.html#configuring-host-variables
     when:
+    - openshift_kubelet_name_override is defined
     - lookupip.stdout != '127.0.0.1'
     - lookupip.stdout not in ansible_all_ipv4_addresses
     - openshift_hostname_check | default(true) | bool

+ 1 - 0
playbooks/openshift-autoheal/private/config.yml

@@ -16,6 +16,7 @@
 - name: Auto-heal
   hosts: oo_first_master
   roles:
+  - role: openshift_facts
   - role: openshift_autoheal
 
 - name: Auto-heal Install Checkpoint End

+ 1 - 1
playbooks/openshift-etcd/private/redeploy-ca.yml

@@ -34,7 +34,7 @@
     changed_when: false
 
   - name: Chmod local temp directory for syncing certs
-    local_action: command chmod 777 "{{ g_etcd_mktemp.stdout }}"
+    local_action: file path="{{ g_etcd_mktemp.stdout }}" mode=0777
     changed_when: false
 
 - name: Distribute etcd CA to etcd hosts

+ 4 - 1
playbooks/openshift-etcd/private/scaleup.yml

@@ -1,5 +1,5 @@
 ---
-- name: Configure etcd
+- name: Check for etcd stand-alone hosts on atomic
   hosts: oo_etcd_to_config
   any_errors_fatal: true
   tasks:
@@ -11,6 +11,9 @@
     - openshift_is_atomic | bool
     - not inventory_hostname in groups['oo_masters']
 
+- name: Set etcd facts for all hosts
+  hosts: oo_etcd_to_config:oo_new_etcd_to_config
+  tasks:
   - import_role:
       name: etcd
       tasks_from: set_facts.yml

+ 1 - 1
playbooks/openshift-glusterfs/private/add_hosts.yml

@@ -2,7 +2,7 @@
 # This play runs when new gluster hosts are part of new_nodes group during
 # master or node scaleup.
 
-# Need to gather facts on glusterfs hosts to ensure we collect openshift.node.nodename
+# Need to gather facts on glusterfs hosts to ensure we collect l_kubelet_node_name
 # for topology file.
 - import_playbook: ../../init/basic_facts.yml
   vars:

+ 1 - 1
playbooks/openshift-master/private/create_service_signer_cert.yml

@@ -11,7 +11,7 @@
     when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
 
   - name: Chmod local temp directory
-    local_action: command chmod 777 "{{ local_cert_sync_tmpdir.stdout }}"
+    local_action: file path="{{ local_cert_sync_tmpdir.stdout }}" mode=0777
     changed_when: false
     when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
 

+ 1 - 1
playbooks/openshift-master/private/redeploy-openshift-ca.yml

@@ -132,7 +132,7 @@
     changed_when: false
 
   - name: Chmod local temp directory for syncing certs
-    local_action: command chmod 777 "{{ g_master_mktemp.stdout }}"
+    local_action: file path="{{ g_master_mktemp.stdout }}" mode=0777
     changed_when: false
 
 - name: Retrieve OpenShift CA

+ 1 - 1
playbooks/openshift-node/private/join.yml

@@ -31,7 +31,7 @@
 
   - name: Find all hostnames for bootstrapping
     set_fact:
-      l_nodes_to_join: "{{ groups['oo_nodes_to_config'] | default([]) | map('extract', hostvars) | map(attribute='openshift.node.nodename') | list }}"
+      l_nodes_to_join: "{{ groups['oo_nodes_to_config'] | default([]) | map('extract', hostvars) | map(attribute='l_kubelet_node_name') | list }}"
 
   - name: Dump the bootstrap hostnames
     debug:

+ 1 - 1
playbooks/openshift-node/private/registry_auth.yml

@@ -28,7 +28,7 @@
     oc_obj:
       state: list
       kind: node
-      name: "{{ openshift.node.nodename | lower }}"
+      name: "{{ l_kubelet_node_name | lower }}"
     register: node_output
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_config

+ 1 - 1
playbooks/openshift-node/private/restart.yml

@@ -36,7 +36,7 @@
     oc_obj:
       state: list
       kind: node
-      name: "{{ openshift.node.nodename | lower }}"
+      name: "{{ l_kubelet_node_name | lower }}"
     register: node_output
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_config

+ 33 - 0
playbooks/openstack/configuration.md

@@ -18,6 +18,7 @@ Environment variables may also be used.
 * [DNS Configuration](#dns-configuration)
 * [Floating IP Address Configuration](#floating-ip-address-configuration)
 * [All-in-one Deployment Configuration](#all-in-one-deployment-configuration)
+* [Multi-env Deployment Configuration](#multi-env-deployment-configuration)
 * [Building Node Images](#building-node-images)
 * [Kuryr Networking Configuration](#kuryr-networking-configuration)
 * [Provider Network Configuration](#provider-network-configuration)
@@ -546,6 +547,36 @@ added, because there are no dedicated infra nodes, so you will have to add it
 manually. See
 [Custom DNS Records Configuration](#custom-dns-records-configuration).
 
+## Multi-env Deployment Configuration
+
+If you want to deploy multiple OpenShift environments in the same OpenStack
+project, you can do so with a few configuration changes.
+
+First, set the `openshift_openstack_clusterid` option in the
+`inventory/group_vars/all.yml` file with specific unique name for cluster.
+
+```
+vi inventory/group_vars/all.yml
+
+openshift_openstack_clusterid: foobar
+openshift_openstack_public_dns_domain: example.com
+```
+
+Second, set `OPENSHIFT_CLUSTER` environment variables. The `OPENSHIFT_CLUSTER`
+environment variable has to consist of `openshift_openstack_clusterid` and
+`openshift_openstack_public_dns_domain`, that's required because cluster_id
+variable stored in the instance metadata is concatanated in the same way.
+If value will be different then instances won't be accessible in ansible inventory.
+
+```
+export OPENSHIFT_CLUSTER='foobar.example.com'
+```
+
+Then run the deployment playbooks as usual. When you finish deployment of first
+environment, please update above options that correspond to a new environment
+and run the deployment playbooks.
+
+
 ## Building Node Images
 
 It is possible to build the OpenShift images in advance (instead of installing
@@ -704,6 +735,7 @@ openshift_node_groups:
   - name: node-config-master
     labels:
       - 'node-role.kubernetes.io/master=true'
+      - 'pod_vif=nested-vlan'
     edits: []
   - name: node-config-infra
     labels:
@@ -943,6 +975,7 @@ And the following in `inventory/group_vars/OSEv3.yml`:
 * `openshift_hosted_registry_storage_swift_tenantid`: "{{ lookup('env','OS_PROJECT_ID') }}" _# can also specify tenant_
 * `openshift_hosted_registry_storage_swift_domain`: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" _# optional; can also specifiy domainid_
 * `openshift_hosted_registry_storage_swift_domainid`: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" _# optional; can also specifiy domain_
+* `openshift_hosted_registry_storage_swift_insecureskipverify`: "false" # optional; true to skip TLS verification
 
 Note that the exact environment variable names may vary depending on the contents of
 your OpenStack RC file. If you use Keystone v2, you may not need to set all of these

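As a reading aid for the Multi-env Deployment Configuration section added above: the OPENSHIFT_CLUSTER value is expected to be the clusterid and the public DNS domain joined with a dot, matching how the cluster_id metadata on the instances is concatenated. A minimal hedged sketch, assuming only the variable names shown in the documentation; the helper itself is illustrative and not part of the playbooks.

    import os

    def expected_openshift_cluster(clusterid, public_dns_domain):
        # The docs above require OPENSHIFT_CLUSTER == "<clusterid>.<public_dns_domain>",
        # otherwise the dynamic inventory will not match the instances.
        return "{}.{}".format(clusterid, public_dns_domain)

    value = expected_openshift_cluster("foobar", "example.com")
    os.environ["OPENSHIFT_CLUSTER"] = value  # same effect as: export OPENSHIFT_CLUSTER='foobar.example.com'
    print(value)
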
+ 2 - 0
playbooks/openstack/openshift-cluster/build_image.yml

@@ -137,6 +137,8 @@
       - cloud-init
       - cloud-utils-growpart
 
+- name: run the init
+  import_playbook: ../../init/main.yml
 
 # This is the part that installs all of the software and configs for the instance
 # to become a node.

+ 3 - 0
playbooks/openstack/openshift-cluster/install.yml

@@ -13,6 +13,9 @@
 
 - import_playbook: ../../prerequisites.yml
 
+- name: Run the init
+  import_playbook: ../../init/main.yml
+
 - name: Prepare the Nodes in the cluster for installation
   any_errors_fatal: true
   hosts: oo_all_hosts

+ 7 - 11
playbooks/openstack/resources.py

@@ -19,6 +19,8 @@ except ImportError:
 from keystoneauth1.exceptions.catalog import EndpointNotFound
 import shade
 
+OPENSHIFT_CLUSTER = os.getenv('OPENSHIFT_CLUSTER')
+
 
 def base_openshift_inventory(cluster_hosts):
     '''Set the base openshift inventory.'''
@@ -94,13 +96,6 @@ def _get_hostvars(server, docker_storage_mountpoints):
         hostvars['private_v4'] = private_v4
         hostvars['openshift_ip'] = private_v4
 
-        # NOTE(shadower): Yes, we set both hostname and IP to the private
-        # IP address for each node. OpenStack doesn't resolve nodes by
-        # name at all, so using a hostname here would require an internal
-        # DNS which would complicate the setup and potentially introduce
-        # performance issues.
-        hostvars['openshift_hostname'] = server.metadata.get(
-            'openshift_hostname', private_v4)
     hostvars['openshift_public_hostname'] = server.name
 
     if server.metadata['host-type'] == 'cns':
@@ -124,11 +119,12 @@ def build_inventory():
     # Use an environment variable to optionally skip returning the app nodes.
     show_compute_nodes = os.environ.get('OPENSTACK_SHOW_COMPUTE_NODES', 'true').lower() == "true"
 
-    # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
-    # environment variable.
+    # If `OPENSHIFT_CLUSTER` env variable is defined then it's used to
+    # filter servers by metadata.clusterid attribute value.
     cluster_hosts = [
         server for server in cloud.list_servers()
-        if 'metadata' in server and 'clusterid' in server.metadata and
+        if 'clusterid' in server.get('metadata', []) and
+        (OPENSHIFT_CLUSTER is None or server.metadata.clusterid == OPENSHIFT_CLUSTER) and
         (show_compute_nodes or server.metadata.get('sub-host-type') != 'app')]
 
     inventory = base_openshift_inventory(cluster_hosts)
@@ -183,7 +179,7 @@ def build_inventory():
 
 def _get_stack_outputs(cloud_client):
     """Returns a dictionary with the stack outputs"""
-    cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')
+    cluster_name = OPENSHIFT_CLUSTER or 'openshift-cluster'
 
     stack = cloud_client.get_stack(cluster_name)
     if stack is None or stack['stack_status'] not in (

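A hedged sketch of the server filter that resources.py gains above: a server is kept only if it carries clusterid metadata and, when OPENSHIFT_CLUSTER is set, only if that clusterid matches it. The sample records below are invented for illustration; the real code applies the same test inside a list comprehension over shade's cloud.list_servers() results, which expose metadata as attributes rather than plain dicts.

    import os

    OPENSHIFT_CLUSTER = os.getenv('OPENSHIFT_CLUSTER')

    def keep_server(server, show_compute_nodes=True):
        # Require clusterid metadata, optionally match it against OPENSHIFT_CLUSTER,
        # and optionally hide app (compute) nodes, as in the diff above.
        metadata = server.get('metadata', {})
        if 'clusterid' not in metadata:
            return False
        if OPENSHIFT_CLUSTER is not None and metadata['clusterid'] != OPENSHIFT_CLUSTER:
            return False
        return show_compute_nodes or metadata.get('sub-host-type') != 'app'

    sample = [
        {'name': 'master-0', 'metadata': {'clusterid': 'foobar.example.com', 'host-type': 'master'}},
        {'name': 'stray-vm', 'metadata': {}},
    ]
    print([s['name'] for s in sample if keep_server(s)])
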
+ 1 - 0
playbooks/openstack/sample-inventory/group_vars/OSEv3.yml

@@ -33,6 +33,7 @@ openshift_hosted_registry_wait: True
 #  - name: node-config-master
 #    labels:
 #      - 'node-role.kubernetes.io/master=true'
+#      - 'pod_vif=nested-vlan'
 #    edits: []
 #  - name: node-config-infra
 #    labels:

+ 7 - 0
playbooks/prerequisites.yml

@@ -1,5 +1,12 @@
 ---
 # l_scale_up_hosts may be passed in via various scaleup plays.
+- name: Fail openshift_kubelet_name_override for new hosts
+  hosts: "{{ l_scale_up_hosts | default('nodes') }}"
+  tasks:
+  - name: Fail when openshift_kubelet_name_override is defined
+    fail:
+      msg: "openshift_kubelet_name_override Cannot be defined for new hosts"
+    when: openshift_kubelet_name_override is defined
 
 - import_playbook: init/main.yml
   vars:

+ 1 - 1
requirements.txt

@@ -1,6 +1,6 @@
 # Versions are pinned to prevent pypi releases arbitrarily breaking
 # tests with new APIs/semantics. We want to update versions deliberately.
-ansible==2.6.2
+ansible==2.6.5
 boto==2.44.0
 click==6.7
 pyOpenSSL==17.5.0

+ 6 - 0
roles/ansible_service_broker/defaults/main.yml

@@ -31,3 +31,9 @@ l_asb_default_images_default: "{{ l_asb_default_images_dict[openshift_deployment
 l_asb_image_url: "{{ oreg_url | default(l_asb_default_images_default) | regex_replace('${version}' | regex_escape, openshift_image_tag) }}"
 
 ansible_service_broker_image: "{{ l_asb_image_url | regex_replace('${component}' | regex_escape, 'ansible-service-broker') }}"
+# Secrets to be mounted for APBs. Format:
+# - title: Database credentials
+#   secret: db_creds
+#   apb_name: dh-rhscl-postgresql-apb
+# https://github.com/openshift/ansible-service-broker/blob/master/docs/config.md#secrets-configuration
+ansible_service_broker_secrets: []

+ 1 - 1
roles/ansible_service_broker/templates/configmap.yaml.j2

@@ -53,4 +53,4 @@ data:
       auth:
         - type: basic
           enabled: false
-
+    secrets: {{ ansible_service_broker_secrets | to_yaml }}

+ 2 - 0
roles/calico/templates/calicov3.yml.j2

@@ -14,6 +14,7 @@ rules:
       - namespaces
       - networkpolicies
       - nodes
+      - serviceaccounts
     verbs:
       - watch
       - list
@@ -49,6 +50,7 @@ rules:
   - apiGroups: [""]
     resources:
       - pods
+      - namespaces
       - nodes
     verbs:
       - get

+ 5 - 5
roles/container_runtime/tasks/common/setup_docker_symlink.yml

@@ -11,11 +11,6 @@
       failed_when:
         - results.rc != 0
 
-    - name: ensure the unmount of top level mount point
-      mount:
-        path: "{{ docker_default_storage_path }}"
-        state: unmounted
-
     - name: "Set the selinux context on {{ docker_alt_storage_path }}"
       command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
       environment:
@@ -28,6 +23,11 @@
     - name: "restorecon the {{ docker_alt_storage_path }}"
       command: "restorecon -r {{ docker_alt_storage_path }}"
 
+    - name: ensure the unmount of top level mount point
+      mount:
+        path: "{{ docker_default_storage_path }}"
+        state: unmounted
+
     - name: Remove the old docker location
       file:
         state: absent

+ 1 - 4
roles/etcd/tasks/add_new_member.yml

@@ -3,11 +3,8 @@
 - import_tasks: set_facts.yml
 
 - name: Add new etcd members to cluster
-  command: "{{ etcdctlv2 }} member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
+  command: "{{ hostvars[etcd_ca_host].etcdctlv2 }} member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
   delegate_to: "{{ etcd_ca_host }}"
-  failed_when:
-  - etcd_add_check.rc == 1
-  - ("peerURL exists" not in etcd_add_check.stderr)
   register: etcd_add_check
   retries: 3
   delay: 10

+ 1 - 0
roles/etcd/tasks/set_facts.yml

@@ -3,3 +3,4 @@
   set_fact:
     etcd_ip: "{{ etcd_ip }}"
     etcd_hostname: "{{ etcd_hostname }}"
+    etcdctlv2: "{{ etcdctlv2 }}"

+ 1 - 2
roles/etcd/tasks/static.yml

@@ -7,8 +7,7 @@
   register: etcd_image_exists
 
 - name: Pre-pull etcd image
-  docker_image:
-    name: "{{ etcd_image }}"
+  command: "{{ openshift_container_cli }} pull {{ etcd_image }}"
   environment:
     NO_PROXY: "{{ openshift.common.no_proxy | default('') }}"
   when: etcd_image_exists.stdout_lines == []

+ 1 - 1
roles/lib_openshift/library/oc_group.py

@@ -1522,7 +1522,7 @@ class OCGroup(OpenShiftCLI):
         result = self._get(self.kind, self.config.name)
         if result['returncode'] == 0:
             self.group = Group(content=result['results'][0])
-        elif 'groups \"{}\" not found'.format(self.config.name) in result['stderr']:
+        elif 'groups.user.openshift.io \"{}\" not found'.format(self.config.name) in result['stderr']:
             result['returncode'] = 0
             result['results'] = [{}]
 

+ 1 - 1
roles/lib_openshift/src/class/oc_group.py

@@ -39,7 +39,7 @@ class OCGroup(OpenShiftCLI):
         result = self._get(self.kind, self.config.name)
         if result['returncode'] == 0:
             self.group = Group(content=result['results'][0])
-        elif 'groups \"{}\" not found'.format(self.config.name) in result['stderr']:
+        elif 'groups.user.openshift.io \"{}\" not found'.format(self.config.name) in result['stderr']:
             result['returncode'] = 0
             result['results'] = [{}]
 

+ 3 - 3
roles/lib_openshift/src/test/unit/test_oc_group.py

@@ -46,8 +46,8 @@ class OCGroupTest(unittest.TestCase):
         }'''
 
         mock_run.side_effect = [
-            (1, '', 'Error from server: groups "acme" not found'),
-            (1, '', 'Error from server: groups "acme" not found'),
+            (1, '', 'Error from server: groups.user.openshift.io "acme" not found'),
+            (1, '', 'Error from server: groups.user.openshift.io "acme" not found'),
            (0, '', ''),
            (0, group, ''),
         ]
@@ -70,7 +70,7 @@ class OCGroupTest(unittest.TestCase):
         params['name'] = 'noexist'
 
         mock_run.side_effect = [
-            (1, '', 'Error from server: groups "acme" not found'),
+            (1, '', 'Error from server: groups.user.openshift.io "acme" not found'),
         ]
 
         mock_tmpfile_copy.side_effect = [

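The three oc_group changes above all track the same server-side change in the error text: a missing group is now reported as groups.user.openshift.io "name" not found. A standalone hedged sketch of the treat-missing-as-empty pattern the module relies on (the real OCGroup.get() is only excerpted in the diffs; this helper and its sample input are illustrative):

    def normalize_missing_group(result, group_name):
        # If the CLI failed only because the group does not exist, report success
        # with an empty result instead of propagating the error.
        not_found = 'groups.user.openshift.io "{}" not found'.format(group_name)
        if result['returncode'] != 0 and not_found in result['stderr']:
            return {'returncode': 0, 'results': [{}], 'stderr': result['stderr']}
        return result

    raw = {'returncode': 1, 'results': [],
           'stderr': 'Error from server: groups.user.openshift.io "acme" not found'}
    print(normalize_missing_group(raw, 'acme'))
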
+ 9 - 24
roles/lib_utils/action_plugins/sanity_checks.py

@@ -2,6 +2,7 @@
 Ansible action plugin to ensure inventory variables are set
 appropriately and no conflicting options have been provided.
 """
+import fnmatch
 import json
 import re
 
@@ -63,6 +64,7 @@ IMAGE_POLICY_CONFIG_VAR = "openshift_master_image_policy_config"
 ALLOWED_REGISTRIES_VAR = "openshift_master_image_policy_allowed_registries_for_import"
 
 REMOVED_VARIABLES = (
+    ('openshift_hostname', 'Removed: See documentation'),
     # TODO(michaelgugino): Remove in 3.12
     ('oreg_auth_credentials_replace', 'Removed: Credentials are now always updated'),
     ('oreg_url_master', 'oreg_url'),
@@ -330,10 +332,10 @@ class ActionModule(ActionBase):
             raise errors.AnsibleModuleError(msg)
 
     def check_hostname_vars(self, hostvars, host):
-        """Checks to ensure openshift_hostname
+        """Checks to ensure openshift_kubelet_name_override
           and openshift_public_hostname
           conform to the proper length of 63 characters or less"""
-        for varname in ('openshift_public_hostname', 'openshift_hostname'):
+        for varname in ('openshift_public_hostname', 'openshift_kubelet_name_override'):
             var_value = self.template_var(hostvars, host, varname)
             if var_value and len(var_value) > 63:
                 msg = '{} must be 63 characters or less'.format(varname)
@@ -442,9 +444,9 @@ class ActionModule(ActionBase):
     def check_for_oreg_password(self, hostvars, host, odt):
         """Ensure oreg_password is defined when using registry.redhat.io"""
         reg_to_check = 'registry.redhat.io'
-        err_msg = ("oreg_auth_user and oreg_auth_password must be provided when"
+        err_msg = ("oreg_auth_user and oreg_auth_password must be provided when "
                    "deploying openshift-enterprise")
-        err_msg2 = ("oreg_auth_user and oreg_auth_password must be provided when using"
+        err_msg2 = ("oreg_auth_user and oreg_auth_password must be provided when using "
                     "{}".format(reg_to_check))
 
         oreg_password = self.template_var(hostvars, host, 'oreg_auth_password')
@@ -534,23 +536,6 @@ def is_registry_match(item, pattern):
     of the registries will be listed without the port and insecure flag.
     """
     item = "schema://" + item.split('://', 1)[-1]
-    return is_match(urlparse(item).hostname, pattern.rsplit(':', 1)[0])
-
-
-# taken from https://leetcode.com/problems/wildcard-matching/discuss/17845/python-dp-solution
-# (the same source as for openshift/origin/pkg/util/strings/wildcard.go)
-def is_match(item, pattern):
-    """implements DP algorithm for string matching"""
-    length = len(item)
-    if len(pattern) - pattern.count('*') > length:
-        return False
-    matches = [True] + [False] * length
-    for i in pattern:
-        if i != '*':
-            for index in reversed(range(length)):
-                matches[index + 1] = matches[index] and (i == item[index] or i == '?')
-        else:
-            for index in range(1, length + 1):
-                matches[index] = matches[index - 1] or matches[index]
-        matches[0] = matches[0] and i == '*'
-    return matches[-1]
+    pat = pattern.rsplit(':', 1)[0]
+    name = urlparse(item).hostname
+    return fnmatch.fnmatch(name, pat)
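The removed dynamic-programming matcher is replaced by the standard library's fnmatch, which supports the same `*` and `?` wildcards (plus character classes), so existing patterns keep their meaning. A rough standalone rendition of the new helper, assuming Python 3's urllib.parse in place of the plugin's six.moves import:

```python
import fnmatch
from urllib.parse import urlparse

def is_registry_match(item, pattern):
    # Normalize the item into something urlparse can handle, drop any :port
    # from the pattern, then let fnmatch do the wildcard comparison.
    item = "schema://" + item.split('://', 1)[-1]
    pat = pattern.rsplit(':', 1)[0]
    name = urlparse(item).hostname
    return fnmatch.fnmatch(name, pat)

print(is_registry_match("docker.io/library/python", "*.io"))               # True
print(is_registry_match("https://example.com:443/app", "example.*"))       # True
print(is_registry_match("https://hello.registry/myapp", "*.example.com"))  # False
```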

+ 1 - 1
roles/lib_utils/callback_plugins/aa_version_requirement.py

@@ -29,7 +29,7 @@ else:
 
 
 
 
 # Set to minimum required Ansible version
 # Set to minimum required Ansible version
-REQUIRED_VERSION = '2.6.2'
+REQUIRED_VERSION = '2.5.7'
 DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION
 DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION
 
 
 
 

+ 2 - 1
roles/lib_utils/library/openshift_cert_expiry.py

@@ -10,6 +10,7 @@ import io
 import os
 import os
 import subprocess
 import subprocess
 import yaml
 import yaml
+import dateutil.parser
 
 
 # pylint import-error disabled because pylint cannot find the package
 # pylint import-error disabled because pylint cannot find the package
 # when installed in a virtualenv
 # when installed in a virtualenv
@@ -145,7 +146,7 @@ platforms missing the Python OpenSSL library.
                 # => 20190207181935Z
                 # => 20190207181935Z
                 not_after_raw = l.partition(' : ')[-1]
                 not_after_raw = l.partition(' : ')[-1]
                 # Last item: ('Not After', ' : ', 'Feb  7 18:19:35 2019 GMT')
                 # Last item: ('Not After', ' : ', 'Feb  7 18:19:35 2019 GMT')
-                not_after_parsed = datetime.datetime.strptime(not_after_raw, '%b %d %H:%M:%S %Y %Z')
+                not_after_parsed = dateutil.parser.parse(not_after_raw)
                 self.not_after = not_after_parsed.strftime('%Y%m%d%H%M%SZ')
                 self.not_after = not_after_parsed.strftime('%Y%m%d%H%M%SZ')
 
 
             elif l.startswith('X509v3 Subject Alternative Name:'):
             elif l.startswith('X509v3 Subject Alternative Name:'):
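With python-dateutil added to test-requirements.txt and installed by the new openshift_certificate_expiry task further down, the strict strptime format is no longer needed; dateutil's parser is presumably more forgiving about spacing, locale, and timezone abbreviations. A small sketch of the parsing path, using the sample value from the comment in the diff:

```python
import dateutil.parser

# "Not After" value as emitted by `openssl x509 -text` (taken from the comment
# in the diff above); dateutil infers the layout without an explicit format.
not_after_raw = 'Feb  7 18:19:35 2019 GMT'
not_after_parsed = dateutil.parser.parse(not_after_raw)
print(not_after_parsed.strftime('%Y%m%d%H%M%SZ'))  # 20190207181935Z
```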

+ 4 - 0
roles/lib_utils/test/test_sanity_checks.py

@@ -46,3 +46,7 @@ def test_is_registry_match():
     assert not is_registry_match("https://example.com:443/prefix", pat_matchport)
     assert not is_registry_match("https://example.com:443/prefix", pat_matchport)
     assert not is_registry_match("docker.io/library/my", pat_matchport)
     assert not is_registry_match("docker.io/library/my", pat_matchport)
     assert not is_registry_match("https://hello.registry/myapp", pat_matchport)
     assert not is_registry_match("https://hello.registry/myapp", pat_matchport)
+
+
+if __name__ == '__main__':
+    test_is_registry_match()

+ 2 - 11
roles/openshift_autoheal/defaults/main.yml

@@ -2,17 +2,8 @@
 
 
 #
 #
 # Image name:
 # Image name:
-#
-openshift_autoheal_image_dict:
-  origin:
-    prefix: "docker.io/openshift/"
-    version: v0.0.1
-  openshift-enterprise:
-    prefix: "registry.redhat.io/openshift3/ose-"
-    version: "{{ openshift_image_tag }}"
-openshift_autoheal_image_prefix: "{{ openshift_autoheal_image_dict[openshift_deployment_type]['prefix'] }}"
-openshift_autoheal_image_version: "{{ openshift_autoheal_image_dict[openshift_deployment_type]['version'] }}"
-openshift_autoheal_image: "{{ openshift_autoheal_image_prefix }}autoheal:{{ openshift_autoheal_image_version }}"
+
+openshift_autoheal_image: "{{ l_osm_registry_url | regex_replace('${component}' | regex_escape, 'autoheal') }}"
 
 
 #
 #
 # Content of the configuration file of the auto-heal service. Note that this is
 # Content of the configuration file of the auto-heal service. Note that this is
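The autoheal image name is now derived from the shared registry URL template rather than a role-local dict. Roughly, the regex_replace filter chain does the equivalent of the following, where the example value of l_osm_registry_url is only an assumption for illustration:

```python
import re

l_osm_registry_url = "registry.redhat.io/openshift3/ose-${component}:v3.11"  # assumed example value
openshift_autoheal_image = re.sub(re.escape("${component}"), "autoheal", l_osm_registry_url)
print(openshift_autoheal_image)  # registry.redhat.io/openshift3/ose-autoheal:v3.11
```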

+ 5 - 0
roles/openshift_certificate_expiry/tasks/main.yml

@@ -1,4 +1,9 @@
 ---
 ---
+- name: Ensure python dateutil library is present
+  package:
+    name: "{{ 'python3-dateutil' if ansible_distribution == 'Fedora' else 'python-dateutil' }}"
+    state: present
+
 - name: Check cert expirys on host
 - name: Check cert expirys on host
   openshift_cert_expiry:
   openshift_cert_expiry:
     warning_days: "{{ openshift_certificate_expiry_warning_days|int }}"
     warning_days: "{{ openshift_certificate_expiry_warning_days|int }}"

+ 2 - 2
roles/openshift_cloud_provider/tasks/vsphere-svc.yml

@@ -1,6 +1,6 @@
 ---
 ---
 - name: Check to see if the vsphere cluster role already exists
 - name: Check to see if the vsphere cluster role already exists
-  command: "{{ openshift_client_binary}}  get clusterrole"
+  command: "{{ openshift_client_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get clusterrole"
   register: cluster_role
   register: cluster_role
 
 
 - block:
 - block:
@@ -13,7 +13,7 @@
 
 
   - name: Create vsphere-svc on cluster
   - name: Create vsphere-svc on cluster
     run_once: true
     run_once: true
-    command: "{{ openshift_client_binary}} create -f /tmp/vsphere-svc.yml"
+    command: "{{ openshift_client_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig create -f /tmp/vsphere-svc.yml"
 
 
   - name: Remove vsphere-svc file
   - name: Remove vsphere-svc file
     run_once: true
     run_once: true

+ 2 - 0
roles/openshift_cluster_monitoring_operator/defaults/main.yml

@@ -45,7 +45,9 @@ openshift_cluster_monitoring_operator_proxy_image: "{{l_openshift_cluster_monito
 
 
 openshift_cluster_monitoring_operator_prometheus_storage_enabled: false
 openshift_cluster_monitoring_operator_prometheus_storage_enabled: false
 openshift_cluster_monitoring_operator_prometheus_storage_capacity: "50Gi"
 openshift_cluster_monitoring_operator_prometheus_storage_capacity: "50Gi"
+openshift_cluster_monitoring_operator_prometheus_storage_class_name: ""
 openshift_cluster_monitoring_operator_alertmanager_storage_enabled: false
 openshift_cluster_monitoring_operator_alertmanager_storage_enabled: false
+openshift_cluster_monitoring_operator_alertmanager_storage_class_name: ""
 openshift_cluster_monitoring_operator_alertmanager_storage_capacity: "2Gi"
 openshift_cluster_monitoring_operator_alertmanager_storage_capacity: "2Gi"
 
 
 openshift_cluster_monitoring_operator_cluster_id: "{{ openshift_clusterid | default(openshift_master_cluster_public_hostname, true) | default(openshift_master_cluster_hostname, true) | default('openshift', true) }}"
 openshift_cluster_monitoring_operator_cluster_id: "{{ openshift_clusterid | default(openshift_master_cluster_public_hostname, true) | default(openshift_master_cluster_hostname, true) | default('openshift', true) }}"

+ 4 - 4
roles/openshift_cluster_monitoring_operator/tasks/install.yaml

@@ -79,13 +79,13 @@
   with_items:
   with_items:
   - cluster-monitoring-operator.yaml
   - cluster-monitoring-operator.yaml
 
 
-- name: Set cluster-monitoring-operator configmap template
+- name: Process cluster-monitoring-operator configmap template
   template:
   template:
     src: cluster-monitoring-operator-config.j2
     src: cluster-monitoring-operator-config.j2
     dest: "{{ tempdir }}/templates/cluster-monitoring-operator-config.yaml"
     dest: "{{ tempdir }}/templates/cluster-monitoring-operator-config.yaml"
   changed_when: no
   changed_when: no
 
 
-- name: Set cluster-monitoring-operator configmap
+- name: Create cluster-monitoring-operator configmap
   oc_obj:
   oc_obj:
     state: present
     state: present
     name: "cluster-monitoring-config"
     name: "cluster-monitoring-config"
@@ -95,14 +95,14 @@
     - "{{ tempdir }}/templates/cluster-monitoring-operator-config.yaml"
     - "{{ tempdir }}/templates/cluster-monitoring-operator-config.yaml"
     delete_after: true
     delete_after: true
 
 
-- name: Set cluster-monitoring-operator template
+- name: Process cluster-monitoring-operator deployment template
   template:
   template:
     src: cluster-monitoring-operator-deployment.j2
     src: cluster-monitoring-operator-deployment.j2
     dest: "{{ tempdir }}/templates/cluster-monitoring-operator-deployment.yaml"
     dest: "{{ tempdir }}/templates/cluster-monitoring-operator-deployment.yaml"
   vars:
   vars:
     namespace: "{{ openshift_cluster_monitoring_operator_namespace }}"
     namespace: "{{ openshift_cluster_monitoring_operator_namespace }}"
 
 
-- name: Set cluster-monitoring-operator template
+- name: Create cluster-monitoring-operator deployment
   oc_obj:
   oc_obj:
     state: present
     state: present
     name: "cluster-monitoring-operator"
     name: "cluster-monitoring-operator"

+ 6 - 0
roles/openshift_cluster_monitoring_operator/templates/cluster-monitoring-operator-config.j2

@@ -28,6 +28,9 @@ data:
 {% if openshift_cluster_monitoring_operator_prometheus_storage_enabled | bool %}
 {% if openshift_cluster_monitoring_operator_prometheus_storage_enabled | bool %}
       volumeClaimTemplate:
       volumeClaimTemplate:
         spec:
         spec:
+{% if openshift_cluster_monitoring_operator_prometheus_storage_class_name %}
+          storageClassName: {{ openshift_cluster_monitoring_operator_prometheus_storage_class_name }}
+{% endif %}
           resources:
           resources:
             requests:
             requests:
               storage: {{ openshift_cluster_monitoring_operator_prometheus_storage_capacity }}
               storage: {{ openshift_cluster_monitoring_operator_prometheus_storage_capacity }}
@@ -43,6 +46,9 @@ data:
 {% if openshift_cluster_monitoring_operator_alertmanager_storage_enabled | bool %}
 {% if openshift_cluster_monitoring_operator_alertmanager_storage_enabled | bool %}
       volumeClaimTemplate:
       volumeClaimTemplate:
         spec:
         spec:
+{% if openshift_cluster_monitoring_operator_alertmanager_storage_class_name %}
+          storageClassName: {{ openshift_cluster_monitoring_operator_alertmanager_storage_class_name }}
+{% endif %}
           resources:
           resources:
             requests:
             requests:
               storage: {{ openshift_cluster_monitoring_operator_alertmanager_storage_capacity }}
               storage: {{ openshift_cluster_monitoring_operator_alertmanager_storage_capacity }}
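The two new storage class defaults (empty strings) only emit a `storageClassName` when set, so existing inventories keep the cluster's default storage class. A toy Jinja2 rendering with shortened, hypothetical variable names shows the effect:

```python
from jinja2 import Template

snippet = Template(
    "spec:\n"
    "{% if storage_class_name %}"
    "  storageClassName: {{ storage_class_name }}\n"
    "{% endif %}"
    "  resources:\n"
    "    requests:\n"
    "      storage: {{ capacity }}\n"
)
print(snippet.render(storage_class_name="", capacity="50Gi"))     # no storageClassName line
print(snippet.render(storage_class_name="gp2", capacity="50Gi"))  # includes storageClassName: gp2
```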

+ 5 - 0
roles/openshift_control_plane/files/apiserver.yaml

@@ -34,6 +34,8 @@ spec:
        name: master-cloud-provider
        name: master-cloud-provider
      - mountPath: /var/lib/origin/
      - mountPath: /var/lib/origin/
        name: master-data
        name: master-data
+     - mountPath: /etc/pki
+       name: master-pki
     livenessProbe:
     livenessProbe:
       httpGet:
       httpGet:
         scheme: HTTPS
         scheme: HTTPS
@@ -58,3 +60,6 @@ spec:
   - name: master-data
   - name: master-data
     hostPath:
     hostPath:
       path: /var/lib/origin
       path: /var/lib/origin
+  - name: master-pki
+    hostPath:
+      path: /etc/pki

+ 6 - 6
roles/openshift_control_plane/tasks/check_master_api_is_ready.yml

@@ -1,7 +1,7 @@
 ---
 ---
 - name: Wait for APIs to become available
 - name: Wait for APIs to become available
   command: >
   command: >
-    {{ openshift_client_binary }} get --raw /apis/{{ item }}/v1
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get --raw /apis/{{ item }}/v1
   register: openshift_apis
   register: openshift_apis
   until: openshift_apis.rc == 0
   until: openshift_apis.rc == 0
   with_items: "{{ l_core_api_list }}"
   with_items: "{{ l_core_api_list }}"
@@ -27,7 +27,7 @@
 
 
 - name: Check for apiservices/v1beta1.metrics.k8s.io registration
 - name: Check for apiservices/v1beta1.metrics.k8s.io registration
   command: >
   command: >
-    {{ openshift_client_binary }} get apiservices/v1beta1.metrics.k8s.io
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get apiservices/v1beta1.metrics.k8s.io
   register: metrics_service_registration
   register: metrics_service_registration
   failed_when: metrics_service_registration.rc != 0 and 'NotFound' not in metrics_service_registration.stderr
   failed_when: metrics_service_registration.rc != 0 and 'NotFound' not in metrics_service_registration.stderr
   retries: 30
   retries: 30
@@ -36,7 +36,7 @@
 
 
 - name: Wait for /apis/metrics.k8s.io/v1beta1 when registered
 - name: Wait for /apis/metrics.k8s.io/v1beta1 when registered
   command: >
   command: >
-    {{ openshift_client_binary }} get --raw /apis/metrics.k8s.io/v1beta1
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get --raw /apis/metrics.k8s.io/v1beta1
   register: metrics_api
   register: metrics_api
   until: metrics_api is succeeded
   until: metrics_api is succeeded
   retries: 30
   retries: 30
@@ -45,17 +45,17 @@
 
 
 - name: Check for apiservices/v1beta1.servicecatalog.k8s.io registration
 - name: Check for apiservices/v1beta1.servicecatalog.k8s.io registration
   command: >
   command: >
-    {{ openshift_client_binary }} get apiservices/v1beta1.servicecatalog.k8s.io
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get apiservices/v1beta1.servicecatalog.k8s.io
   register: servicecatalog_service_registration
   register: servicecatalog_service_registration
   failed_when: servicecatalog_service_registration.rc != 0 and 'NotFound' not in servicecatalog_service_registration.stderr
   failed_when: servicecatalog_service_registration.rc != 0 and 'NotFound' not in servicecatalog_service_registration.stderr
   retries: 30
   retries: 30
   delay: 5
   delay: 5
-  until: metrics_service_registration is succeeded
+  until: servicecatalog_service_registration is succeeded
 
 
 
 
 - name: Wait for /apis/servicecatalog.k8s.io/v1beta1 when registered
 - name: Wait for /apis/servicecatalog.k8s.io/v1beta1 when registered
   command: >
   command: >
-    {{ openshift_client_binary }} get --raw /apis/servicecatalog.k8s.io/v1beta1
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get --raw /apis/servicecatalog.k8s.io/v1beta1
   register: servicecatalog_api
   register: servicecatalog_api
   until: servicecatalog_api is succeeded
   until: servicecatalog_api is succeeded
   retries: 30
   retries: 30

+ 4 - 3
roles/openshift_control_plane/tasks/main.yml

@@ -54,7 +54,8 @@
 
 
 - name: Create the policy file if it does not already exist
 - name: Create the policy file if it does not already exist
   command: >
   command: >
-    {{ openshift_client_binary }} adm create-bootstrap-policy-file
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      adm create-bootstrap-policy-file
       --filename={{ openshift_master_policy }}
       --filename={{ openshift_master_policy }}
   args:
   args:
     creates: "{{ openshift_master_policy }}"
     creates: "{{ openshift_master_policy }}"
@@ -175,7 +176,7 @@
   oc_obj:
   oc_obj:
     state: list
     state: list
     kind: pod
     kind: pod
-    name: "master-{{ item }}-{{ openshift.node.nodename | lower }}"
+    name: "master-{{ item }}-{{ l_kubelet_node_name | lower }}"
     namespace: kube-system
     namespace: kube-system
   register: control_plane_pods
   register: control_plane_pods
   until:
   until:
@@ -227,7 +228,7 @@
   oc_obj:
   oc_obj:
     state: list
     state: list
     kind: pod
     kind: pod
-    name: "master-{{ item }}-{{ openshift.node.nodename | lower }}"
+    name: "master-{{ item }}-{{ l_kubelet_node_name | lower }}"
     namespace: kube-system
     namespace: kube-system
   register: control_plane_health
   register: control_plane_health
   until:
   until:

+ 2 - 3
roles/openshift_control_plane/tasks/pre_pull.yml

@@ -4,9 +4,8 @@
   register: control_plane_image
   register: control_plane_image
 
 
 # This task runs async to save time while the master is being configured
 # This task runs async to save time while the master is being configured
-- name: Pre-pull Origin image
-  docker_image:
-    name: "{{ osm_image }}"
+- name: Pre-pull Origin image (docker)
+  command: "{{ openshift_container_cli }} pull {{ osm_image }}"
   environment:
   environment:
     NO_PROXY: "{{ openshift.common.no_proxy | default('') }}"
     NO_PROXY: "{{ openshift.common.no_proxy | default('') }}"
   when: control_plane_image.stdout_lines == []
   when: control_plane_image.stdout_lines == []

+ 3 - 2
roles/openshift_control_plane/tasks/upgrade.yml

@@ -78,8 +78,9 @@
   yedit:
   yedit:
     src: "{{ openshift.common.config_base }}/master/master-config.yaml"
     src: "{{ openshift.common.config_base }}/master/master-config.yaml"
     key: "kubernetesMasterConfig.apiServerArguments.runtime-config"
     key: "kubernetesMasterConfig.apiServerArguments.runtime-config"
-    value: "{{ runtime_config.result | join(',') | regex_replace('(?:,)*apis/settings\\.k8s\\.io/v1alpha1=true','') }}"
-  when: runtime_config.result
+    value: "{{ r_c | difference(['apis/settings.k8s.io/v1alpha1=true']) }}"
+  vars:
+    r_c: "{{ runtime_config.result | default([]) }}"
 - name: Copy recyler pod to config directory
 - name: Copy recyler pod to config directory
   template:
   template:
     src: "recycler_pod.yaml.j2"
     src: "recycler_pod.yaml.j2"

+ 1 - 1
roles/openshift_examples/files/examples/latest/image-streams/image-streams-centos7.json

@@ -161,7 +161,7 @@
             },
             },
             "from": {
             "from": {
               "kind": "DockerImage",
               "kind": "DockerImage",
-              "name": "docker.io/openshift/jenkins-2-centos7:v3.11"
+              "name": "docker.io/openshift/jenkins-2-centos7:v4.0"
             },
             },
             "name": "2",
             "name": "2",
             "referencePolicy": {
             "referencePolicy": {

+ 1 - 1
roles/openshift_examples/files/examples/latest/image-streams/image-streams-rhel7.json

@@ -112,7 +112,7 @@
             },
             },
             "from": {
             "from": {
               "kind": "DockerImage",
               "kind": "DockerImage",
-              "name": "registry.redhat.io/openshift3/jenkins-2-rhel7:v3.11"
+              "name": "registry.redhat.io/openshift3/jenkins-2-rhel7:v4.0"
             },
             },
             "name": "2",
             "name": "2",
             "referencePolicy": {
             "referencePolicy": {

+ 2 - 2
roles/openshift_examples/tasks/main.yml

@@ -17,7 +17,7 @@
   run_once: True
   run_once: True
 
 
 - name: Chmod local temp dir for OpenShift examples copy
 - name: Chmod local temp dir for OpenShift examples copy
-  local_action: command chmod 777 "{{ copy_examples_mktemp.stdout }}"
+  local_action: file path="{{ copy_examples_mktemp.stdout }}" mode=0777
   run_once: True
   run_once: True
 
 
 - name: Create tar of OpenShift examples
 - name: Create tar of OpenShift examples
@@ -28,7 +28,7 @@
     warn: no
     warn: no
 
 
 - name: Chmod local temp dir for OpenShift examples copy
 - name: Chmod local temp dir for OpenShift examples copy
-  local_action: command chmod 744 "{{ copy_examples_mktemp.stdout }}/openshift-examples.tar"
+  local_action: file path="{{ copy_examples_mktemp.stdout }}/openshift-examples.tar" mode=0744
   run_once: True
   run_once: True
 
 
 - name: Create the remote OpenShift examples directory
 - name: Create the remote OpenShift examples directory

+ 0 - 22
roles/openshift_hosted/tasks/migrate_default_registry_var.yml

@@ -1,22 +0,0 @@
----
-# This work is to migrate the OPENSHIFT_DEFAULT_REGISTRY variable
-# inside of the docker-registry's dc to REGISTRY_OPENSHIFT_SERVER_ADDR
-- name: migrate docker registry env var
-  oc_edit:
-    kind: dc
-    name: "{{ openshift_hosted_registry_name }}"
-    namespace: "{{ openshift_hosted_registry_namespace }}"
-    edits:
-    - action: update
-      key: spec.template.spec.containers[0].env
-      value:
-        name: REGISTRY_OPENSHIFT_SERVER_ADDR
-        value: docker-registry.default.svc:5000
-      curr_value:
-        name: OPENSHIFT_DEFAULT_REGISTRY
-        value: docker-registry.default.svc:5000
-  register: editout
-
-- debug:
-    var: editout
-    verbosity: 1

Changes are not shown because the file is too large.
+ 1 - 1
roles/openshift_hosted/tasks/storage/glusterfs.yml


+ 4 - 1
roles/openshift_hosted/templates/registry_config.j2

@@ -1,6 +1,6 @@
 version: 0.1
 version: 0.1
 log:
 log:
-  level: {{ openshift_hosted_registry_log_level }} 
+  level: {{ openshift_hosted_registry_log_level }}
 http:
 http:
   addr: :5000
   addr: :5000
 storage:
 storage:
@@ -56,6 +56,9 @@ storage:
 {%   if openshift_hosted_registry_storage_swift_domainid is defined %}
 {%   if openshift_hosted_registry_storage_swift_domainid is defined %}
     domainid: {{ openshift_hosted_registry_storage_swift_domainid }}
     domainid: {{ openshift_hosted_registry_storage_swift_domainid }}
 {%   endif -%}
 {%   endif -%}
+{%   if openshift_hosted_registry_storage_swift_insecureskipverify | default(false) | bool %}
+    insecureskipverify: {{ openshift_hosted_registry_storage_swift_insecureskipverify }}
+{%   endif -%}
 {% elif openshift_hosted_registry_storage_provider | default('') == 'gcs' %}
 {% elif openshift_hosted_registry_storage_provider | default('') == 'gcs' %}
   gcs:
   gcs:
     bucket: {{ openshift_hosted_registry_storage_gcs_bucket }}
     bucket: {{ openshift_hosted_registry_storage_gcs_bucket }}

+ 1 - 1
roles/openshift_logging/README.md

@@ -23,7 +23,7 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
 - `openshift_logging_use_ops`: If 'True', set up a second ES and Kibana cluster for infrastructure logs. Defaults to 'False'.
 - `openshift_logging_use_ops`: If 'True', set up a second ES and Kibana cluster for infrastructure logs. Defaults to 'False'.
 - `openshift_logging_master_url`: The URL for the Kubernetes master, this does not need to be public facing but should be accessible from within the cluster. Defaults to 'https://kubernetes.default.svc.{{openshift.common.dns_domain}}'.
 - `openshift_logging_master_url`: The URL for the Kubernetes master, this does not need to be public facing but should be accessible from within the cluster. Defaults to 'https://kubernetes.default.svc.{{openshift.common.dns_domain}}'.
 - `openshift_logging_master_public_url`: The public facing URL for the Kubernetes master, this is used for Authentication redirection. Defaults to 'https://{{openshift.common.public_hostname}}:{{openshift_master_api_port}}'.
 - `openshift_logging_master_public_url`: The public facing URL for the Kubernetes master, this is used for Authentication redirection. Defaults to 'https://{{openshift.common.public_hostname}}:{{openshift_master_api_port}}'.
-- `openshift_logging_namespace`: The namespace that Aggregated Logging will be installed in. Defaults to 'logging'.
+- `openshift_logging_namespace`: The namespace that Aggregated Logging will be installed in. Defaults to 'openshift-logging'.
 - `openshift_logging_curator_default_days`: The default minimum age (in days) Curator uses for deleting log records. Defaults to '30'.
 - `openshift_logging_curator_default_days`: The default minimum age (in days) Curator uses for deleting log records. Defaults to '30'.
 - `openshift_logging_curator_run_hour`: The hour of the day that Curator will run at. Defaults to '0'.
 - `openshift_logging_curator_run_hour`: The hour of the day that Curator will run at. Defaults to '0'.
 - `openshift_logging_curator_run_minute`: The minute of the hour that Curator will run at. Defaults to '0'.
 - `openshift_logging_curator_run_minute`: The minute of the hour that Curator will run at. Defaults to '0'.

+ 2 - 2
roles/openshift_manage_node/tasks/config.yml

@@ -1,7 +1,7 @@
 ---
 ---
 - name: Set node schedulability
 - name: Set node schedulability
   oc_adm_manage_node:
   oc_adm_manage_node:
-    node: "{{ openshift.node.nodename | lower }}"
+    node: "{{ l_kubelet_node_name | lower }}"
     schedulable: "{{ 'true' if openshift_schedulable | default(true) | bool else 'false' }}"
     schedulable: "{{ 'true' if openshift_schedulable | default(true) | bool else 'false' }}"
   retries: 10
   retries: 10
   delay: 5
   delay: 5
@@ -24,6 +24,6 @@
         | map(attribute='metadata.annotations') | map('list') | flatten
         | map(attribute='metadata.annotations') | map('list') | flatten
         | select('match', 'node.openshift.io/md5sum') | list | length ==
         | select('match', 'node.openshift.io/md5sum') | list | length ==
       node_status.results.results[0]['items'] | length
       node_status.results.results[0]['items'] | length
-  retries: 60
+  retries: 180
   delay: 10
   delay: 10
   delegate_to: "{{ openshift_master_host }}"
   delegate_to: "{{ openshift_master_host }}"

+ 1 - 1
roles/openshift_manage_node/tasks/main.yml

@@ -24,7 +24,7 @@
 
 
 - name: Wait for Node Registration
 - name: Wait for Node Registration
   oc_obj:
   oc_obj:
-    name: "{{ openshift.node.nodename }}"
+    name: "{{ l_kubelet_node_name | lower }}"
     kind: node
     kind: node
     state: list
     state: list
   register: get_node
   register: get_node

+ 1 - 1
roles/openshift_master_certificates/tasks/main.yml

@@ -137,7 +137,7 @@
   when: master_certs_missing | bool
   when: master_certs_missing | bool
 
 
 - name: Chmod local temp directory for syncing certs
 - name: Chmod local temp directory for syncing certs
-  local_action: command chmod 777 "{{ g_master_certs_mktemp.stdout }}"
+  local_action: file path="{{ g_master_certs_mktemp.stdout }}" mode=0777
   changed_when: False
   changed_when: False
   when: master_certs_missing | bool
   when: master_certs_missing | bool
 
 

+ 1 - 2
roles/openshift_node/tasks/prepull.yml

@@ -21,8 +21,7 @@
 
 
 # This task runs async to save time while other downloads proceed
 # This task runs async to save time while other downloads proceed
 - name: pre-pull pod image
 - name: pre-pull pod image
-  docker_image:
-    name: "{{ osn_pod_image }}"
+  command: "{{ openshift_container_cli }} pull {{ osn_pod_image }}"
   environment:
   environment:
     NO_PROXY: "{{ openshift.common.no_proxy | default('') }}"
     NO_PROXY: "{{ openshift.common.no_proxy | default('') }}"
   when: pod_image.stdout_lines == []
   when: pod_image.stdout_lines == []

+ 1 - 1
roles/openshift_node/tasks/upgrade.yml

@@ -84,7 +84,7 @@
     oc_bin: "{{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }}"
     oc_bin: "{{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }}"
     oc_conf: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
     oc_conf: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
     node_list:
     node_list:
-    - "{{ openshift.node.nodename | lower }}"
+    - "{{ l_kubelet_node_name | lower }}"
   delegate_to: "{{ groups.oo_first_master.0 }}"
   delegate_to: "{{ groups.oo_first_master.0 }}"
   register: node_upgrade_oc_csr_approve
   register: node_upgrade_oc_csr_approve
   retries: 30
   retries: 30

+ 11 - 3
roles/openshift_node_group/files/sync.yaml

@@ -102,18 +102,26 @@ spec:
             ) &
             ) &
             break
             break
           done
           done
-
+          mkdir -p /etc/origin/node/tmp
           # periodically refresh both node-config.yaml and relabel the node
           # periodically refresh both node-config.yaml and relabel the node
           while true; do
           while true; do
-            if ! oc extract "configmaps/${name}" -n openshift-node --to=/etc/origin/node --confirm --request-timeout=10s --config /etc/origin/node/node.kubeconfig "--token=$( cat /var/run/secrets/kubernetes.io/serviceaccount/token )"  > /dev/null; then
+            if ! oc extract "configmaps/${name}" -n openshift-node --to=/etc/origin/node/tmp --confirm --request-timeout=10s --config /etc/origin/node/node.kubeconfig "--token=$( cat /var/run/secrets/kubernetes.io/serviceaccount/token )"  > /dev/null; then
               echo "error: Unable to retrieve latest config for node" 2>&1
               echo "error: Unable to retrieve latest config for node" 2>&1
               sleep 15 &
               sleep 15 &
               wait $!
               wait $!
               continue
               continue
             fi
             fi
+
+            KUBELET_HOSTNAME_OVERRIDE=$(cat /etc/sysconfig/KUBELET_HOSTNAME_OVERRIDE) || :
+            if ! [[ -z "$KUBELET_HOSTNAME_OVERRIDE" ]]; then
+                  #Patching node-config for hostname override
+                  echo "nodeName: $KUBELET_HOSTNAME_OVERRIDE" >> /etc/origin/node/tmp/node-config.yaml
+            fi
+
             # detect whether the node-config.yaml has changed, and if so trigger a restart of the kubelet.
             # detect whether the node-config.yaml has changed, and if so trigger a restart of the kubelet.
-            md5sum /etc/origin/node/node-config.yaml > /tmp/.new
+            md5sum /etc/origin/node/tmp/node-config.yaml > /tmp/.new
             if [[ "$( cat /tmp/.old )" != "$( cat /tmp/.new )" ]]; then
             if [[ "$( cat /tmp/.old )" != "$( cat /tmp/.new )" ]]; then
+              mv /etc/origin/node/tmp/node-config.yaml /etc/origin/node/node-config.yaml
               SYSTEMD_IGNORE_CHROOT=1 systemctl restart tuned || :
               SYSTEMD_IGNORE_CHROOT=1 systemctl restart tuned || :
               echo "info: Configuration changed, restarting kubelet" 2>&1
               echo "info: Configuration changed, restarting kubelet" 2>&1
               # TODO: kubelet doesn't relabel nodes, best effort for now
               # TODO: kubelet doesn't relabel nodes, best effort for now

+ 1 - 1
roles/openshift_node_group/tasks/sync.yml

@@ -86,7 +86,7 @@
         | map(attribute='metadata.annotations') | map('list') | flatten
         | map(attribute='metadata.annotations') | map('list') | flatten
         | select('match', 'node.openshift.io/md5sum') | list | length ==
         | select('match', 'node.openshift.io/md5sum') | list | length ==
       node_status.results.results[0]['items'] | length
       node_status.results.results[0]['items'] | length
-  retries: 60
+  retries: 180
   delay: 10
   delay: 10
 
 
 # Sync DS may have restarted masters
 # Sync DS may have restarted masters

+ 1 - 1
roles/openshift_openstack/tasks/node-configuration.yml

@@ -1,6 +1,6 @@
 ---
 ---
 # NOTE(shadower): we need to do this because some of the install tasks seem to
 # NOTE(shadower): we need to do this because some of the install tasks seem to
-# ignore openshift_hostname and rely on the actual system's hostname
+# ignore openshift_kubelet_name_override and rely on the actual system's hostname
 - name: Update hostname to match the OpenStack name
 - name: Update hostname to match the OpenStack name
   hostname:
   hostname:
     name: "{{ inventory_hostname }}"
     name: "{{ inventory_hostname }}"

+ 1 - 1
roles/openshift_openstack/templates/heat_stack_server.yaml.j2

@@ -245,7 +245,7 @@ resources:
         sub-host-type:    { get_param: subtype }
         sub-host-type:    { get_param: subtype }
         openshift_node_group_name: { get_param: openshift_node_group_name }
         openshift_node_group_name: { get_param: openshift_node_group_name }
 {% if openshift_openstack_dns_nameservers %}
 {% if openshift_openstack_dns_nameservers %}
-        openshift_hostname: { get_param: name }
+        openshift_kubelet_name_override: { get_param: name }
 {% endif %}
 {% endif %}
       scheduler_hints: { get_param: scheduler_hints }
       scheduler_hints: { get_param: scheduler_hints }
 
 

+ 3 - 1
roles/openshift_ovirt/README.md

@@ -141,7 +141,7 @@ openshift_ovirt_vm_manifest:
   count: 1
   count: 1
   profile: 'node_vm'
   profile: 'node_vm'
   nic_mode:
   nic_mode:
-      lb0:
+      lb:
         nic_ip_address: '192.168.123.170'
         nic_ip_address: '192.168.123.170'
         nic_netmask: '255.255.255.0'
         nic_netmask: '255.255.255.0'
         nic_gateway: '192.168.123.1'
         nic_gateway: '192.168.123.1'
@@ -186,6 +186,8 @@ Example Playbook
         - always
         - always
 ```
 ```
 
 
+**Side Note:** Regarding the behaviour, of the iterations, If we have a `count: 1` in our vm definition, the name that you put in the proper field will be preserved, but if we have more than 1 a counter will be raised and the vm name will be `name + iteration` (EG) _node0_, _node1_, _node2_ in case of `count: 3`
+
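A toy illustration of the naming rule the side note describes (not the role's Jinja template; the DNS zone value is made up):

```python
def vm_names(name, count, dns_zone="example.com"):
    # count == 1 keeps the given name; count > 1 appends the iteration index.
    if count == 1:
        return ["{}.{}".format(name, dns_zone)]
    return ["{}{}.{}".format(name, i, dns_zone) for i in range(count)]

print(vm_names("lb", 1))    # ['lb.example.com']
print(vm_names("node", 3))  # ['node0.example.com', 'node1.example.com', 'node2.example.com']
```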
 License
 License
 -------
 -------
 
 

+ 43 - 1
roles/openshift_ovirt/tasks/build_vm_list.yml

@@ -1,6 +1,10 @@
 ---
 ---
 # Creates a dictionary for use with oVirt.vm-infra role
 # Creates a dictionary for use with oVirt.vm-infra role
 # https://github.com/oVirt/ovirt-ansible-vm-infra
 # https://github.com/oVirt/ovirt-ansible-vm-infra
+# Side note:
+# If we have a count: 1 in our vm definition, the name that you put in the proper field will be preserved
+# if we have more than 1 a counter will be raised and the vm name will be name + iteration (EG) node0, node1, node2
+# in case of count: 3
 - fail:
 - fail:
     msg: "The openshift_ovirt_dns_zone variable is required."
     msg: "The openshift_ovirt_dns_zone variable is required."
   when:
   when:
@@ -12,19 +16,57 @@
       {% for iter in range(item.count) -%}
       {% for iter in range(item.count) -%}
       {% if iter > 0 -%},{% endif -%}
       {% if iter > 0 -%},{% endif -%}
       {
       {
+      {% if item.count == 1 -%}
+      'name': '{{ item.name }}.{{ openshift_ovirt_dns_zone }}',
+      {% elif item.count > 1 -%}
       'name': '{{ item.name }}{{ iter }}.{{ openshift_ovirt_dns_zone }}',
       'name': '{{ item.name }}{{ iter }}.{{ openshift_ovirt_dns_zone }}',
+      {% endif -%}
       'tag': 'openshift_{{ item.profile }}',
       'tag': 'openshift_{{ item.profile }}',
+      'description': '{{ item.description | default("") }}',
       'cloud_init':
       'cloud_init':
       {
       {
+      {% if item.count == 1 -%}
+      'host_name': '{{ item.name }}.{{ openshift_ovirt_dns_zone }}',
+        {% if item.nic_mode is defined -%}
+      'nic_boot_protocol': 'static',
+      'nic_ip_address': '{{ item["nic_mode"][item["name"]]["nic_ip_address"] }}',
+      'nic_netmask': '{{ item["nic_mode"][item["name"]]["nic_netmask"] }}',
+      'nic_gateway': '{{ item["nic_mode"][item["name"]]["nic_gateway"] }}',
+      'nic_on_boot': {{ item["nic_mode"][item["name"]]["nic_on_boot"] | default(true) | bool }},
+      'nic_name': '{{ item["nic_mode"][item["name"]]["nic_name"] | default("eth0") }}',
+          {% if item["nic_mode"][item["name"]]["dns_servers"] is defined -%}
+      'dns_servers': '{{ item["nic_mode"][item["name"]]["dns_servers"] }}',
+      'dns_search': '{{ item["nic_mode"][item["name"]]["dns_search"] }}',
+          {% endif -%}
+        {% endif -%}
+        {% if item.dns_servers is defined -%}
+      'dns_servers': '{{ item["dns_servers"] }}',
+        {% endif -%}
+        {% if item.dns_search is defined -%}
+      'dns_search': '{{ item["dns_search"] }}',
+        {% endif -%}
+      {% elif item.count > 1 -%}
       'host_name': '{{ item.name }}{{ iter }}.{{ openshift_ovirt_dns_zone }}',
       'host_name': '{{ item.name }}{{ iter }}.{{ openshift_ovirt_dns_zone }}',
-      {% if item.nic_mode is defined -%}
+        {% if item.nic_mode is defined -%}
       'nic_boot_protocol': 'static',
       'nic_boot_protocol': 'static',
       'nic_ip_address': '{{ item["nic_mode"][item["name"] + iter | string ]["nic_ip_address"] }}',
       'nic_ip_address': '{{ item["nic_mode"][item["name"] + iter | string ]["nic_ip_address"] }}',
       'nic_netmask': '{{ item["nic_mode"][item["name"] + iter | string ]["nic_netmask"] }}',
       'nic_netmask': '{{ item["nic_mode"][item["name"] + iter | string ]["nic_netmask"] }}',
       'nic_gateway': '{{ item["nic_mode"][item["name"] + iter | string ]["nic_gateway"] }}',
       'nic_gateway': '{{ item["nic_mode"][item["name"] + iter | string ]["nic_gateway"] }}',
       'nic_on_boot': {{ item["nic_mode"][item["name"] + iter | string ]["nic_on_boot"] | default(true) | bool }},
       'nic_on_boot': {{ item["nic_mode"][item["name"] + iter | string ]["nic_on_boot"] | default(true) | bool }},
       'nic_name': '{{ item["nic_mode"][item["name"] + iter | string ]["nic_name"] | default("eth0") }}',
       'nic_name': '{{ item["nic_mode"][item["name"] + iter | string ]["nic_name"] | default("eth0") }}',
+          {% if item["nic_mode"][item["name"] + iter | string ]["dns_servers"] is defined -%}
       'dns_servers': '{{ item["nic_mode"][item["name"] + iter | string ]["dns_servers"] }}',
       'dns_servers': '{{ item["nic_mode"][item["name"] + iter | string ]["dns_servers"] }}',
+          {% endif -%}
+          {% if item["nic_mode"][item["name"] + iter | string ]["dns_search"] is defined -%}
+      'dns_search': '{{ item["nic_mode"][item["name"] + iter | string ]["dns_search"] }}',
+          {% endif -%}
+        {% endif -%}
+        {% if item.dns_servers is defined -%}
+      'dns_servers': '{{ item["dns_servers"] }}',
+        {% endif -%}
+        {% if item.dns_search is defined -%}
+      'dns_search': '{{ item["dns_search"] }}',
+        {% endif -%}
       {% endif -%}
       {% endif -%}
       'authorized_ssh_keys': '{{ openshift_ovirt_ssh_key }}'
       'authorized_ssh_keys': '{{ openshift_ovirt_ssh_key }}'
       },
       },

+ 1 - 1
roles/openshift_storage_glusterfs/README.md

@@ -63,7 +63,7 @@ their configuration as GlusterFS nodes:
 | Name               | Default value             | Description                             |
 | Name               | Default value             | Description                             |
 |--------------------|---------------------------|-----------------------------------------|
 |--------------------|---------------------------|-----------------------------------------|
 | glusterfs_cluster  | 1                         | The ID of the cluster this node should belong to. This is useful when a single heketi service is expected to manage multiple distinct clusters. **NOTE:** For natively-hosted clusters, all pods will be in the same OpenShift namespace
 | glusterfs_cluster  | 1                         | The ID of the cluster this node should belong to. This is useful when a single heketi service is expected to manage multiple distinct clusters. **NOTE:** For natively-hosted clusters, all pods will be in the same OpenShift namespace
-| glusterfs_hostname | openshift.node.nodename   | A hostname (or IP address) that will be used for internal GlusterFS communication
+| glusterfs_hostname | l_kubelet_node_name  | A hostname (or IP address) that will be used for internal GlusterFS communication
 | glusterfs_ip       | openshift.common.ip       | An IP address that will be used by pods to communicate with the GlusterFS node. **NOTE:** Required for external GlusterFS nodes
 | glusterfs_ip       | openshift.common.ip       | An IP address that will be used by pods to communicate with the GlusterFS node. **NOTE:** Required for external GlusterFS nodes
 | glusterfs_zone     | 1                         | A zone number for the node. Zones are used within the cluster for determining how to distribute the bricks of GlusterFS volumes. heketi will try to spread each volumes' bricks as evenly as possible across all zones
 | glusterfs_zone     | 1                         | A zone number for the node. Zones are used within the cluster for determining how to distribute the bricks of GlusterFS volumes. heketi will try to spread each volumes' bricks as evenly as possible across all zones
 
 

+ 9 - 2
roles/openshift_storage_glusterfs/files/glusterfs-template.yml

@@ -38,6 +38,8 @@ objects:
           image: ${IMAGE_NAME}
           image: ${IMAGE_NAME}
           imagePullPolicy: IfNotPresent
           imagePullPolicy: IfNotPresent
           env:
           env:
+          - name: GLUSTER_BLOCKD_STATUS_PROBE_ENABLE
+            value: "${GLUSTER_BLOCKD_STATUS_PROBE_ENABLE}"
           - name: GB_GLFS_LRU_COUNT
           - name: GB_GLFS_LRU_COUNT
             value: "${GB_GLFS_LRU_COUNT}"
             value: "${GB_GLFS_LRU_COUNT}"
           - name: TCMU_LOGDIR
           - name: TCMU_LOGDIR
@@ -86,7 +88,7 @@ objects:
               command:
               command:
               - "/bin/bash"
               - "/bin/bash"
               - "-c"
               - "-c"
-              - systemctl status glusterd.service
+              - "if command -v /usr/local/bin/status-probe.sh; then /usr/local/bin/status-probe.sh readiness; else systemctl status glusterd.service; fi"
             periodSeconds: 25
             periodSeconds: 25
             successThreshold: 1
             successThreshold: 1
             failureThreshold: 50
             failureThreshold: 50
@@ -97,7 +99,7 @@ objects:
               command:
               command:
               - "/bin/bash"
               - "/bin/bash"
               - "-c"
               - "-c"
-              - systemctl status glusterd.service
+              - "if command -v /usr/local/bin/status-probe.sh; then /usr/local/bin/status-probe.sh liveness; else systemctl status glusterd.service; fi"
             periodSeconds: 25
             periodSeconds: 25
             successThreshold: 1
             successThreshold: 1
             failureThreshold: 50
             failureThreshold: 50
@@ -147,6 +149,11 @@ parameters:
   displayName: Daemonset Node Labels
   displayName: Daemonset Node Labels
   description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
   description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
   value: '{ "glusterfs": "storage-host" }'
   value: '{ "glusterfs": "storage-host" }'
+- name: GLUSTER_BLOCKD_STATUS_PROBE_ENABLE
+  displayName: Enable readiness/liveness probe for gluster-blockd
+  description: Setting the value to "1" enables the readiness/liveness probe for gluster-blockd.
+  value: "1"
+  required: false
 - name: IMAGE_NAME
 - name: IMAGE_NAME
   displayName: GlusterFS container image name
   displayName: GlusterFS container image name
   required: True
   required: True

+ 2 - 2
roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml

@@ -47,7 +47,7 @@
 - name: Copy gluster-s3 PVCs template file
 - name: Copy gluster-s3 PVCs template file
   copy:
   copy:
     src: "gluster-s3-pvcs-template.yml"
     src: "gluster-s3-pvcs-template.yml"
-    dest: "{{ mktemp.stdout }}/{{ item }}"
+    dest: "{{ mktemp.stdout }}/gluster-s3-pvcs-template.yml"
 
 
 - name: Create gluster-s3 PVCs template
 - name: Create gluster-s3 PVCs template
   oc_obj:
   oc_obj:
@@ -88,7 +88,7 @@
 - name: Copy gluster-s3 template file
 - name: Copy gluster-s3 template file
   copy:
   copy:
     src: "gluster-s3-template.yml"
     src: "gluster-s3-template.yml"
-    dest: "{{ mktemp.stdout }}/{{ item }}"
+    dest: "{{ mktemp.stdout }}/gluster-s3-template.yml"
 
 
 - name: Create gluster-s3 template
 - name: Create gluster-s3 template
   oc_obj:
   oc_obj:

+ 1 - 1
roles/openshift_storage_glusterfs/tasks/glusterfs_uninstall.yml

@@ -44,7 +44,7 @@
 
 
 - name: Unlabel any existing GlusterFS nodes
 - name: Unlabel any existing GlusterFS nodes
   oc_label:
   oc_label:
-    name: "{{ hostvars[item].openshift.node.nodename }}"
+    name: "{{ hostvars[item].l_kubelet_node_name }}"
     kind: node
     kind: node
     state: absent
     state: absent
     labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
     labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"

+ 1 - 1
roles/openshift_storage_glusterfs/tasks/label_nodes.yml

@@ -1,7 +1,7 @@
 ---
 ---
 - name: Label GlusterFS nodes
 - name: Label GlusterFS nodes
   oc_label:
   oc_label:
-    name: "{{ hostvars[item].openshift.node.nodename }}"
+    name: "{{ hostvars[item].l_kubelet_node_name }}"
     kind: node
     kind: node
     state: add
     state: add
     labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
     labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"

+ 1 - 1
roles/openshift_storage_glusterfs/templates/topology.json.j2

@@ -20,7 +20,7 @@
 {%- if 'glusterfs_hostname' in hostvars[node] -%}
 {%- if 'glusterfs_hostname' in hostvars[node] -%}
                 "{{ hostvars[node].glusterfs_hostname }}"
                 "{{ hostvars[node].glusterfs_hostname }}"
 {%- elif 'openshift' in hostvars[node] -%}
 {%- elif 'openshift' in hostvars[node] -%}
-                "{{ hostvars[node].openshift.node.nodename }}"
+                "{{ hostvars[node].l_kubelet_node_name }}"
 {%- else -%}
 {%- else -%}
                 "{{ node }}"
                 "{{ node }}"
 {%- endif -%}
 {%- endif -%}

+ 1 - 0
test-requirements.txt

@@ -12,3 +12,4 @@ coverage==4.3.4
 mock==2.0.0
 mock==2.0.0
 pytest==3.0.7
 pytest==3.0.7
 pytest-cov==2.4.0
 pytest-cov==2.4.0
+python-dateutil==2.7.3

+ 28 - 1
test/ci/launch.yml

@@ -75,10 +75,37 @@
   become: true
   become: true
   tasks:
   tasks:
     - wait_for_connection: {}
     - wait_for_connection: {}
-    - setup: {}
     - name: Make sure hostname is set to public ansible host
     - name: Make sure hostname is set to public ansible host
       hostname:
       hostname:
         name: "{{ ansible_host }}"
         name: "{{ ansible_host }}"
+    - name: Detecting Operating System
+      shell: ls /run/ostree-booted
+      ignore_errors: yes
+      failed_when: false
+      register: ostree_output
+    - name: Update all packages
+      package:
+        name: '*'
+        state: latest
+      when: ostree_output.rc != 0
+      register: yum_update
+    - name: Update Atomic system
+      command: atomic host upgrade
+      when: ostree_output.rc == 0
+      register: ostree_update
+    - name: Reboot machines
+      shell: sleep 5 && systemctl reboot
+      async: 1
+      poll: 0
+      ignore_errors: true
+      when: yum_update | changed or ostree_update | changed
+    - name: Wait for connection
+      wait_for_connection:
+        connect_timeout: 20
+        sleep: 5
+        delay: 5
+        timeout: 300
+    - setup: {}
 
 
 - import_playbook: ../../playbooks/openshift-node/network_manager.yml
 - import_playbook: ../../playbooks/openshift-node/network_manager.yml
 - import_playbook: ../../playbooks/prerequisites.yml
 - import_playbook: ../../playbooks/prerequisites.yml