
Merge branch 'master' into patch-3

Mateus Caruccio 6 years ago
parent
commit
cca605f4f5
100 changed files with 1208 additions and 3248 deletions
1. .tito/packages/openshift-ansible (+1 -1)
2. README.md (+3 -3)
3. README_CONTAINERIZED_INSTALLATION.md (+1 -1)
4. README_CONTAINER_IMAGE.md (+1 -1)
5. examples/README.md (+4 -4)
6. images/installer/root/etc/inventory-generator-config.yaml (+1 -1)
7. images/installer/root/usr/local/bin/generate (+4 -4)
8. inventory/hosts.example (+13 -20)
9. openshift-ansible.spec (+151 -2)
10. playbooks/adhoc/uninstall_docker.yml (+1 -1)
11. playbooks/adhoc/uninstall_openshift.yml (+3 -1)
12. playbooks/aws/README.md (+1 -0)
13. playbooks/aws/openshift-cluster/provision.yml (+2 -0)
14. playbooks/aws/openshift-cluster/provision_dns.yml (+9 -0)
15. playbooks/azure/openshift-cluster/build_node_image.yml (+2 -2)
16. playbooks/azure/openshift-cluster/group_vars/all/yum_repos.yml (+0 -7)
17. playbooks/byo/README.md (+1 -1)
18. playbooks/byo/openshift-cluster/upgrades/README.md (+1 -1)
19. playbooks/byo/openshift-cluster/upgrades/v3_10/README.md (+0 -20)
20. playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml (+0 -5)
21. playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml (+0 -16)
22. playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml (+0 -7)
23. playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_scale_groups.yml (+0 -7)
24. playbooks/common/openshift-cluster/upgrades/post_control_plane.yml (+1 -4)
25. playbooks/common/openshift-cluster/upgrades/v3_10/label_nodes.yml (+0 -13)
26. playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml (+0 -1)
27. playbooks/common/openshift-cluster/upgrades/v3_10/roles (+0 -1)
28. playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml (+0 -7)
29. playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml (+0 -121)
30. playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml (+0 -40)
31. playbooks/common/openshift-cluster/upgrades/v3_11/upgrade_control_plane.yml (+23 -0)
32. playbooks/gcp/openshift-cluster/upgrade.yml (+1 -1)
33. playbooks/init/validate_hostnames.yml (+2 -2)
34. playbooks/openshift-glusterfs/private/upgrade.yml (+8 -0)
35. playbooks/openshift-glusterfs/upgrade.yml (+10 -0)
36. playbooks/openshift-hosted/private/redeploy-registry-certificates.yml (+1 -1)
37. playbooks/openshift-hosted/private/redeploy-router-certificates.yml (+1 -1)
38. playbooks/openshift-logging/private/config.yml (+2 -2)
39. playbooks/openshift-master/private/additional_config.yml (+0 -1)
40. playbooks/openshift-master/private/config.yml (+1 -0)
41. playbooks/openshift-master/private/upgrade.yml (+0 -23)
42. playbooks/openshift-metering/README.md (+6 -11)
43. playbooks/openshift-node/certificates.yml (+0 -4)
44. playbooks/openshift-node/private/certificates-backup.yml (+0 -24)
45. playbooks/openshift-node/private/certificates.yml (+0 -6)
46. playbooks/openshift-node/private/join.yml (+1 -0)
47. playbooks/openshift-node/private/redeploy-certificates.yml (+0 -6)
48. playbooks/openshift-node/private/restart.yml (+4 -1)
49. playbooks/openshift-node/redeploy-certificates.yml (+0 -8)
50. playbooks/openstack/README.md (+2 -2)
51. playbooks/openstack/configuration.md (+13 -6)
52. playbooks/openstack/inventory.py (+8 -271)
53. playbooks/openstack/resources.py (+297 -0)
54. playbooks/openstack/sample-inventory/group_vars/all.yml (+8 -6)
55. playbooks/openstack/scaleup_inventory.py (+15 -0)
56. playbooks/redeploy-certificates.yml (+0 -2)
57. playbooks/rhv/README.md (+85 -0)
58. playbooks/rhv/inventory.example (+61 -0)
59. playbooks/rhv/openshift-cluster/ovirt-vm-infra.yml (+33 -0)
60. playbooks/rhv/openshift-cluster/ovirt-vm-uninstall.yml (+37 -0)
61. playbooks/rhv/openshift-cluster/roles (+1 -0)
62. playbooks/rhv/openshift-cluster/unregister-vms.yml (+11 -0)
63. playbooks/rhv/provisioning-vars.yaml.example (+82 -0)
64. requirements.txt (+1 -1)
65. roles/ansible_service_broker/defaults/main.yml (+1 -0)
66. roles/ansible_service_broker/tasks/facts.yml (+1 -0)
67. roles/ansible_service_broker/tasks/install.yml (+3 -3)
68. roles/ansible_service_broker/tasks/remove.yml (+6 -1)
69. roles/ansible_service_broker/templates/configmap.yaml.j2 (+2 -1)
70. roles/ansible_service_broker/vars/default_images.yml (+2 -0)
71. roles/ansible_service_broker/vars/openshift-enterprise.yml (+2 -0)
72. roles/cockpit-ui/defaults/main.yml (+18 -1)
73. roles/openshift_hosted_templates/files/v3.10/origin/registry-console.yaml (+4 -10)
74. roles/cockpit-ui/tasks/main.yml (+61 -55)
75. roles/container_runtime/tasks/package_docker.yml (+0 -2)
76. roles/etcd/defaults/main.yaml (+6 -0)
77. roles/kuryr/README.md (+1 -0)
78. roles/kuryr/templates/configmap.yaml.j2 (+8 -2)
79. roles/lib_openshift/library/oc_service.py (+1 -1)
80. roles/lib_openshift/src/doc/service (+1 -1)
81. roles/lib_utils/action_plugins/sanity_checks.py (+49 -4)
82. roles/lib_utils/callback_plugins/aa_version_requirement.py (+1 -1)
83. roles/lib_utils/filter_plugins/oo_filters.py (+15 -0)
84. roles/nuage_master/handlers/main.yaml (+24 -11)
85. roles/nuage_master/tasks/main.yaml (+1 -2)
86. roles/olm/files/08-ocs.configmap.yaml (+1 -4)
87. roles/olm/files/08-tectonicocs.configmap.yaml (+0 -1810)
88. roles/olm/files/10-tectonicocs.catalogsource.yaml (+0 -19)
89. roles/olm/files/14-alm-servicebroker.clusterservicebroker.yaml (+0 -20)
90. roles/olm/files/15-alm-servicebroker.service.yaml (+0 -21)
91. roles/olm/files/16-almservicebroker-client.secret.yaml (+0 -13)
92. roles/olm/files/17-alm-servicebroker.deployment.yaml (+0 -44)
93. roles/olm/files/18-upstreamcomponents.configmap.yaml (+0 -462)
94. roles/olm/files/19-upstreamcomponents.catalogsource.yaml (+0 -19)
95. roles/olm/files/20-aggregated.clusterrole.yaml (+0 -26)
96. roles/olm/tasks/install.yaml (+0 -9)
97. roles/olm/tasks/remove.yaml (+2 -2)
98. roles/olm/tasks/remove_components.yaml (+33 -33)
99. roles/openshift_aws/defaults/main.yml (+51 -0)
100. roles/openshift_aws/tasks/build_elb_dict.yml (+0 -0)

+ 1 - 1
.tito/packages/openshift-ansible

@@ -1 +1 @@
-3.11.0-0.13.0 ./
+3.11.0-0.20.0 ./

+ 3 - 3
README.md

@@ -61,7 +61,7 @@ Install base dependencies:
 
 Requirements:
 
-- Ansible >= 2.6.0
+- Ansible >= 2.6.2
 - Jinja >= 2.7
 - pyOpenSSL
 - python-lxml
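
The hunk above raises the minimum supported Ansible from 2.6.0 to 2.6.2. As a purely editorial illustration (not part of this commit; the file name is hypothetical), a minimal pre-flight play that asserts the new floor on the control host could look like this:

```yaml
# check_ansible_version.yml -- hypothetical helper, not shipped in this repo.
# It only encodes the requirement stated in README.md: Ansible >= 2.6.2.
- hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    - name: Require Ansible >= 2.6.2
      assert:
        that:
          - ansible_version.full is version('2.6.2', '>=')
        msg: "openshift-ansible now requires Ansible >= 2.6.2"
```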
@@ -153,8 +153,8 @@ created for you automatically.
 
 ## Complete Production Installation Documentation:
 
-- [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
-- [OpenShift Origin](https://docs.openshift.org/latest/install/index.html)
+- [OpenShift Container Platform](https://docs.openshift.com/container-platform/latest/install_config/install/advanced_install.html)
+- [OpenShift Origin](https://docs.okd.io/latest/install/index.html)
 
 ## Containerized OpenShift Ansible
 

+ 1 - 1
README_CONTAINERIZED_INSTALLATION.md

@@ -48,7 +48,7 @@ before attempting to pull any of the following images.
         docker.io/openshift/node (node + openshift-sdn + openvswitch rpm for client tools)
         docker.io/openshift/openvswitch (centos7 + openvswitch rpm, runs ovsdb ovsctl processes)
         registry.redhat.io/rhel7/etcd
-    OpenShift Enterprise
+    OpenShift Container Platform
         registry.access.redhat.com/openshift3/ose
         registry.access.redhat.com/openshift3/node
         registry.access.redhat.com/openshift3/openvswitch

File diff suppressed because it is too large
+ 1 - 1
README_CONTAINER_IMAGE.md


+ 4 - 4
examples/README.md

@@ -2,7 +2,7 @@
 
 The primary use of `openshift-ansible` is to install, configure and upgrade OpenShift clusters.
 
-This is typically done by direct invocation of Ansible tools like `ansible-playbook`. This use case is covered in detail in the [OpenShift advanced installation documentation](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
+This is typically done by direct invocation of Ansible tools like `ansible-playbook`. This use case is covered in detail in the [OpenShift advanced installation documentation](https://docs.okd.io/latest/install_config/install/advanced_install.html)
 
 For OpenShift Container Platform there's also an installation utility that wraps `openshift-ansible`. This usage case is covered in the [Quick Installation](https://docs.openshift.com/container-platform/latest/install_config/install/quick_install.html) section of the documentation.
 
@@ -16,11 +16,11 @@ You can find more details about the certificate expiration check roles and examp
 
 ### Job to upload certificate expiration reports
 
-The example `Job` in [certificate-check-upload.yaml](certificate-check-upload.yaml) executes a [Job](https://docs.openshift.org/latest/dev_guide/jobs.html) that checks the expiration dates of the internal certificates of the cluster and uploads HTML and JSON reports to `/etc/origin/certificate_expiration_report` in the masters.
+The example `Job` in [certificate-check-upload.yaml](certificate-check-upload.yaml) executes a [Job](https://docs.okd.io/latest/dev_guide/jobs.html) that checks the expiration dates of the internal certificates of the cluster and uploads HTML and JSON reports to `/etc/origin/certificate_expiration_report` in the masters.
 
 This example uses the [`easy-mode-upload.yaml`](../playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml) example playbook, which generates reports and uploads them to the masters. The playbook can be customized via environment variables to control the length of the warning period (`CERT_EXPIRY_WARN_DAYS`) and the location in the masters where the reports are uploaded (`COPY_TO_PATH`).
 
-The job expects the inventory to be provided via the *hosts* key of a [ConfigMap](https://docs.openshift.org/latest/dev_guide/configmaps.html) named *inventory*, and the passwordless ssh key that allows connecting to the hosts to be availalbe as *ssh-privatekey* from a [Secret](https://docs.openshift.org/latest/dev_guide/secrets.html) named *sshkey*, so these are created first:
+The job expects the inventory to be provided via the *hosts* key of a [ConfigMap](https://docs.okd.io/latest/dev_guide/configmaps.html) named *inventory*, and the passwordless ssh key that allows connecting to the hosts to be available as *ssh-privatekey* from a [Secret](https://docs.okd.io/latest/dev_guide/secrets.html) named *sshkey*, so these are created first:
 
     oc new-project certcheck
     oc create configmap inventory --from-file=hosts=/etc/ansible/hosts
@@ -57,7 +57,7 @@ There are two additional examples:
  - A `Job` [certificate-check-volume.yaml](certificate-check-volume.yaml)
  - A `CronJob` [scheduled-certcheck-upload.yaml](scheduled-certcheck-upload.yaml)
 
-These perform the same work as the two examples above, but instead of uploading the generated reports to the masters they store them in a custom path within the container that is expected to be backed by a [PersistentVolumeClaim](https://docs.openshift.org/latest/dev_guide/persistent_volumes.html), so that the reports are actually written to storage external to the container.
+These perform the same work as the two examples above, but instead of uploading the generated reports to the masters they store them in a custom path within the container that is expected to be backed by a [PersistentVolumeClaim](https://docs.okd.io/latest/dev_guide/persistent_volumes.html), so that the reports are actually written to storage external to the container.
 
 These examples assume that there is an existing `PersistentVolumeClaim` called `certcheck-reports` and they use the  [`html_and_json_timestamp.yaml`](../playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml) example playbook to write timestamped reports into it.
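
For orientation only, a claim matching the name these examples expect might look like the sketch below; the access mode and size are assumptions, not values taken from this repository.

```yaml
# Illustrative PersistentVolumeClaim: the examples above only assume that a
# claim named "certcheck-reports" already exists in the project.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: certcheck-reports
spec:
  accessModes:
    - ReadWriteOnce   # assumed access mode
  resources:
    requests:
      storage: 1Gi    # assumed size
```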
 

+ 1 - 1
images/installer/root/etc/inventory-generator-config.yaml

@@ -15,6 +15,6 @@ openshift_deployment_type: origin
 
 openshift_release: 3.6
 openshift_image_tag: v3.6.0
-openshift_hosted_logging_deploy: null  # defaults to "true" if loggingPublicURL is set in master-config.yaml
+openshift_logging_install_logging: null  # defaults to "true" if loggingPublicURL is set in master-config.yaml
 openshift_logging_image_version: v3.6.0
 openshift_disable_check: ""

+ 4 - 4
images/installer/root/usr/local/bin/generate

@@ -298,13 +298,13 @@ def main():
 
     # contains host types (e.g. masters, nodes, etcd)
     host_groups = dict()
-    openshift_hosted_logging_deploy = False
+    openshift_logging_install_logging = False
     is_etcd_deployed = master_config.get("storage-backend", "") in ["etcd3", "etcd2", "etcd"]
 
     if asset_config and asset_config.get('loggingPublicURL'):
-        openshift_hosted_logging_deploy = True
+        openshift_logging_install_logging = True
 
-    openshift_hosted_logging_deploy = user_config.get("openshift_hosted_logging_deploy", openshift_hosted_logging_deploy)
+    openshift_logging_install_logging = user_config.get("openshift_logging_install_logging", openshift_logging_install_logging)
 
     m = Host("masters")
     m.address(master_config["masterIP"])
@@ -382,7 +382,7 @@ def main():
         inv_file_obj.write("openshift_disable_check={}\n".format(str(openshift_disable_check)))
     inv_file_obj.write("\n")
 
-    inv_file_obj.write("openshift_hosted_logging_deploy={}\n".format(str(openshift_hosted_logging_deploy)))
+    inv_file_obj.write("openshift_logging_install_logging={}\n".format(str(openshift_logging_install_logging)))
     inv_file_obj.write("\n")
 
     for group in host_groups:

+ 13 - 20
inventory/hosts.example

@@ -51,7 +51,7 @@ openshift_deployment_type=origin
 # use this to lookup the latest exact version of the container images, which is the tag actually used to configure
 # the cluster. For RPM installations we just verify the version detected in your configured repos matches this
 # release.
-openshift_release="3.9"
+#openshift_release="3.11"
 
 # default subdomain to use for exposed routes, you should have wildcard dns
 # for *.apps.test.example.com that points at your infra nodes which will run
@@ -73,12 +73,12 @@ debug_level=2
 # Specify an exact container image tag to install or configure.
 # WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
 # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.9.0
+#openshift_image_tag=v3.11.0
 
 # Specify an exact rpm version to install or configure.
 # WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
 # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.9.0
+#openshift_pkg_version=-3.11.0
 
 # If using Atomic Host, you may specify system container image registry for the nodes:
 #system_images_registry="docker.io"
@@ -89,17 +89,17 @@ debug_level=2
 #openshift_install_examples=true
 
 # Configure logoutURL in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
+# See: https://docs.okd.io/latest/install_config/web_console_customization.html#changing-the-logout-url
 #openshift_master_logout_url=http://example.com
 
 # Configure extensions in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+# See: https://docs.okd.io/latest/install_config/web_console_customization.html#serving-static-files
 #openshift_master_oauth_templates={'login': '/path/to/login-template.html'}
 # openshift_master_oauth_template is deprecated.  Use openshift_master_oauth_templates instead.
 #openshift_master_oauth_template=/path/to/login-template.html
 
 # Configure imagePolicyConfig in the master config
-# See: https://docs.openshift.org/latest/admin_guide/image_policy.html
+# See: https://docs.okd.io/latest/admin_guide/image_policy.html
 #openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
 # This setting overrides allowedRegistriesForImport in openshift_master_image_policy_config. By default, all registries are allowed.
 #openshift_master_image_policy_allowed_registries_for_import=["docker.io", "*.docker.io", "*.redhat.com", "gcr.io", "quay.io", "registry.centos.org", "registry.redhat.io", "*.amazonaws.com"]
@@ -552,7 +552,7 @@ debug_level=2
 #openshift_hosted_registry_storage_gcs_rootdirectory=/registry
 
 # Metrics deployment
-# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
+# See: https://docs.openshift.com/container-platform/latest/install_config/cluster_metrics.html
 #
 # By default metrics are not automatically deployed, set this to enable them
 #openshift_metrics_install_metrics=true
@@ -763,9 +763,9 @@ debug_level=2
 #openshift_master_console_port=8443
 
 # set exact RPM version (include - prefix)
-#openshift_pkg_version=-3.9.0
+#openshift_pkg_version=-3.11.0
 # you may also specify version and release, ie:
-#openshift_pkg_version=-3.9.0-0.126.0.git.0.9351aae.el7
+#openshift_pkg_version=-3.11.0-0.126.0.git.0.9351aae.el7
 
 # Configure custom ca certificate
 #openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
@@ -777,8 +777,8 @@ debug_level=2
 
 # Configure custom named certificates (SNI certificates)
 #
-# https://docs.openshift.org/latest/install_config/certificate_customization.html
-# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
+# https://docs.okd.io/latest/install_config/certificate_customization.html
+# https://docs.openshift.com/container-platform/latest/install_config/certificate_customization.html
 #
 # NOTE: openshift_master_named_certificates is cached on masters and is an
 # additive fact, meaning that each run with a different set of certificates
@@ -839,10 +839,6 @@ debug_level=2
 # Setting this variable to false will override that check.
 #openshift_hostname_check=true
 
-# openshift_use_dnsmasq is deprecated.  This must be true, or installs will fail
-# in versions >= 3.6
-#openshift_use_dnsmasq=False
-
 # Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
 # This is useful for POC environments where DNS may not actually be available yet or to set
 # options like 'strict-order' to alter dnsmasq configuration.
@@ -873,7 +869,7 @@ debug_level=2
 # configuration into Builds. Proxy related values will default to the global proxy
 # config values. You only need to set these if they differ from the global proxy settings.
 # See BuildDefaults documentation at
-# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+# https://docs.okd.io/latest/admin_guide/build_defaults_overrides.html
 #openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
 #openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
 #openshift_builddefaults_no_proxy=mycorp.com
@@ -894,7 +890,7 @@ debug_level=2
 # These options configure the BuildOverrides admission controller which injects
 # configuration into Builds.
 # See BuildOverrides documentation at
-# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+# https://docs.okd.io/latest/admin_guide/build_defaults_overrides.html
 #openshift_buildoverrides_force_pull=true
 #openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
 #openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
@@ -915,9 +911,6 @@ debug_level=2
 #openshift_service_catalog_image="docker.io/openshift/origin-service-catalog:{{ openshift_image_tag }}""
 #openshift_service_catalog_image="registry.redhat.io/openshift3/ose-service-catalog:{{ openshift_image_tag }}"
 
-# TSB image tag
-#template_service_broker_version='v3.9'
-
 # Configure one of more namespaces whose templates will be served by the TSB
 #openshift_template_service_broker_namespaces=['openshift']
 

+ 151 - 2
openshift-ansible.spec

@@ -10,14 +10,14 @@
 
 Name:           openshift-ansible
 Version:        3.11.0
-Release:        0.13.0
+Release:        0.20.0
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
 URL:            https://github.com/openshift/openshift-ansible
 Source0:        https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
 BuildArch:      noarch
 
-Requires:      ansible >= 2.6
+Requires:      ansible >= 2.6.2
 Requires:      python2
 Requires:      python-six
 Requires:      tar
@@ -163,6 +163,155 @@ BuildArch:     noarch
 
 
 %changelog
+* Tue Aug 21 2018 AOS Automation Release Team <aos-team-art@redhat.com> 3.11.0-0.20.0
+- Pass region to AWS az lookup (cewong@redhat.com)
+- SDN check: Use openshift_client_binary (miciah.masters@gmail.com)
+- RHV Provider Role and Playbooks (cwilkers@redhat.com)
+- Fix backcompat with OpenStack inventory (tomas@sedovic.cz)
+- update v3.9 to v3.11 used in the example hosts (gpei@redhat.com)
+- GlusterFS: Remove domain from heketi URL (jarrpa@redhat.com)
+- Bug 1615787 - Blacklist broker-apb (david.j.zager@gmail.com)
+- openshift-metering: Update playbook instructions (chance.zibolski@coreos.com)
+- openshift-metering: Update role to use new metering CRD group and schemas and
+  images helm operator image (chance.zibolski@coreos.com)
+- openshift-metering: Update role to allow creating routes
+  (chance.zibolski@coreos.com)
+- Removing unnecessary fail task (ewolinet@redhat.com)
+- Remove correct duplicated SCC check (vrutkovs@redhat.com)
+- Revert "Remove duplicated bootstrapped SCC check" (vrutkovs@redhat.com)
+- Revert "Skip base package check for openshift_ca role" (roignac@gmail.com)
+- Adding file rollover size and max count policies (ewolinet@redhat.com)
+- Rework node initialization procedure to prepull images earlier
+  (vrutkovs@redhat.com)
+- [RHPAM-1241] - Include RHPAM templates in OpenShift release
+  (fspolti@redhat.com)
+- Cleanup old sanitize inventory warnings (mgugino@redhat.com)
+- Override configmap directly on the install role
+  (alberto.rodriguez.peon@cern.ch)
+- Correct typo in config variable (AlbertoPeon@users.noreply.github.com)
+- Allow to override full Ansible Service Broker config map
+  (alberto.rodriguez.peon@cern.ch)
+- Changed sample inventory to reflect vars used in heat_stack.yaml.j2
+  (dluong@redhat.com)
+- Add kuryr namespace isolation support (ltomasbo@redhat.com)
+
+* Mon Aug 20 2018 AOS Automation Release Team <aos-team-art@redhat.com> 3.11.0-0.19.0
+- 
+
+* Sun Aug 19 2018 AOS Automation Release Team <aos-team-art@redhat.com> 3.11.0-0.18.0
+- Require Ansible 2.6.2 (rteague@redhat.com)
+- Remove 3.10 upgrade playbooks (rteague@redhat.com)
+- Use openshift_image_tag for registry-console upgrade (rteague@redhat.com)
+- Clean up GCP disks during deprovision (ironcladlou@gmail.com)
+- Skip base package check for openshift_ca role (vrutkovs@redhat.com)
+- Update search string for registry console (mgugino@redhat.com)
+- Revert "Set correct vars for registry console" (gugino.michael@yahoo.com)
+- service-catalog: use K8s NamespaceLifecycle admission controller
+  (jaboyd@redhat.com)
+- remove name from tag (m.judeikis@gmail.com)
+- Update sanity_checks.py (cwilkers@redhat.com)
+- Provide better error message for json sanity check (cwilkers@redhat.com)
+- Remove asb-user-access cluster-role when uninstalling ASB
+  (jmontleo@redhat.com)
+- Increase maximum number of open file descriptors for dnsmasq
+  (ichavero@redhat.com)
+
+* Thu Aug 16 2018 AOS Automation Release Team <aos-team-art@redhat.com> 3.11.0-0.17.0
+- Update for Bugzilla 1580256 (mazzystr@gmail.com)
+- Remove duplicated bootstrapped SCC check (vrutkovs@redhat.com)
+- cluster_monitoring_operator: update ClusterRole (lserven@gmail.com)
+- Default CFME nodeselector should be a list of str, not a dict
+  (vrutkovs@redhat.com)
+- Added support for ak when registering hosts (e.minguez@gmail.com)
+- Fix audit config interpolation (denis@gladkikh.email)
+- SDN check: Ignore node's canonical name (miciah.masters@gmail.com)
+- fix 1616278. Modify the default logging namespace (jcantril@redhat.com)
+- The file name has changed to heketi_get_key.yml (mbruzek@gmail.com)
+- Bug 1615275. Regenerate session_secret if it can't be used with oauth-proxy
+  (asherkho@redhat.com)
+- Set correct vars for registry console (vrutkovs@redhat.com)
+- Updating to only iterate over oo_nodes_to_config list for
+  oo_elasticsearch_nodes (ewolinet@redhat.com)
+- The l_glusterfs_count is a string need to cast to int for comparison.
+  (mbruzek@gmail.com)
+- Specify external URL for Prometheus (pat2man@gmail.com)
+- Remove unused/broken node cert plays (mgugino@redhat.com)
+
+* Wed Aug 15 2018 AOS Automation Release Team <aos-team-art@redhat.com> 3.11.0-0.16.0
+- remove the olm project (jiazha@redhat.com)
+- fix ASB ClusterServiceBroker removal (jmontleo@redhat.com)
+- Cleanup logging and metrics deprecations (mgugino@redhat.com)
+- Adding default value for openshift_logging_storage_kind (ewolinet@redhat.com)
+- change default sc nam (davis.phillips@gmail.com)
+- update the commands to restart master api and controller
+  (siva_teja.areti@nokia.com)
+- fixing image defaults for logging (ewolinet@redhat.com)
+- node restart: check that all vars are defined (vrutkovs@redhat.com)
+- Revert "loopback_cluster_name: use api_hostname" (roignac@gmail.com)
+- CFME: set default value for openshift_hosted_infra_selector
+  (vrutkovs@redhat.com)
+- vgchange before vgremove update. (sarumuga@redhat.com)
+- To avoid I/O errors, carry out vg deactivate (using vgchange -an) and dmsetup
+  remove device. (sarumuga@redhat.com)
+
+* Tue Aug 14 2018 AOS Automation Release Team <aos-team-art@redhat.com> 3.11.0-0.15.0
+- Update old documentation links (mchappel@redhat.com)
+- Replace OpenShift Enterprise references with OpenShift Container Platform
+  (mchappel@redhat.com)
+- cluster-monitoring: pass through no_proxy setting
+  (sergiusz.urbaniak@gmail.com)
+- Add CentoOS Origin repo for 310 release (dani_comnea@yahoo.com)
+- cluster-monitoring: Fix OCP image names (fbranczyk@gmail.com)
+- Update documentation links, docs.openshift.org -> docs.okd.io
+  (vrutkovs@redhat.com)
+- Require -hyperkube RPMs instead of -master (vrutkovs@redhat.com)
+- [uninstall] Remove hyperkube package (norito.agetsuma@gmail.com)
+- Don't require etcd RPM to be installable on masters (vrutkovs@redhat.com)
+- Don't require fast-datapath channel on RHEL (vrutkovs@redhat.com)
+- No longer require SDN to be installed on nodes (vrutkovs@redhat.com)
+- Update release artifacts for OLM (cordell.evan@gmail.com)
+- GlusterFS: Upgrade playbook (jarrpa@redhat.com)
+- Ensure docker package always installed (mgugino@redhat.com)
+- re-order and required values (rcook@redhat.com)
+- Update route53 dns tasks (mgugino@redhat.com)
+- Refactor registry-console template and vars (mgugino@redhat.com)
+- Fix the ansible-service-broker URL (jmontleo@redhat.com)
+- [bz1552516] set the external url of prometheus (pgier@redhat.com)
+- Update console branding and doc URL for OKD (spadgett@redhat.com)
+- SCC recouncilation has to run with older oc, before node upgrade
+  (vrutkovs@redhat.com)
+- Switch to oc set env, since oc env is now removed (maszulik@redhat.com)
+- Add functionality for AWS DNS framework and route53 provider
+  (mazzystr@gmail.com)
+- matching the name values (rcook@redhat.com)
+- openshift_cluster_monitoring_operator: Fix enterprise images
+  (fbranczyk@gmail.com)
+- adding parameters to allow for load balancer creation (rcook@redhat.com)
+- Limiting additional fact collection to non-masters since we already collect
+  that information for masters (ewolinet@redhat.com)
+- Remove unnecessary passlib check (jkr@adorsys.de)
+
+* Sun Aug 12 2018 AOS Automation Release Team <aos-team-art@redhat.com> 3.11.0-0.14.0
+- Revert "Remove several unused vars" (sdodson@redhat.com)
+- Making the app nodes an optional return. (mbruzek@gmail.com)
+- 'Wait for node to be ready' task should check that all vars are defined
+  (vrutkovs@redhat.com)
+- Ensure kernel-modules not installed on atomic (mgugino@redhat.com)
+- Remove extra namespaces field on configmap (dymurray@redhat.com)
+- Adding min-port to dnsmasq configuration. (rhowe@redhat.com)
+- pull in origin imagestream+template updates (bparees@redhat.com)
+- Revert "openshift_loadbalancer: remove unused vars" (vrutkovs@redhat.com)
+- Remove node CSR approval from upgrade in 3.11 (rteague@redhat.com)
+- loopback_cluster_name: use api_hostname (vrutkovs@redhat.com)
+- Add quotes to node selector (rteague@redhat.com)
+- Bug 1543129 - Add configuration option for ASB local registry namespaces
+  (dymurray@redhat.com)
+- Omit resetting openshift_logging_elasticsearch_pvc_dynamic if volume is NFS
+  (vrutkovs@redhat.com)
+- Set claimRef for logging PVC when NFS volume is created previously
+  (vrutkovs@redhat.com)
+- Fix prometheus annotations typo (vrutkovs@redhat.com)
+
 * Thu Aug 09 2018 AOS Automation Release Team <aos-team-art@redhat.com> 3.11.0-0.13.0
 - SDN check: Fix parsing time stamp's time zone (miciah.masters@gmail.com)
 

+ 1 - 1
playbooks/adhoc/uninstall_docker.yml

@@ -1,4 +1,4 @@
-# This deletes *ALL* Origin and OpenShift Enterprise content installed by
+# This deletes *ALL* Origin and OpenShift Container Platform content installed by
 # ansible.  This includes:
 #
 #    configuration

+ 3 - 1
playbooks/adhoc/uninstall_openshift.yml

@@ -1,4 +1,4 @@
-# This deletes *ALL* Origin and OpenShift Enterprise content installed by
+# This deletes *ALL* Origin and OpenShift Container Platform content installed by
 # ansible.  This includes:
 #
 #    configuration
@@ -109,6 +109,7 @@
         - atomic-openshift-docker-excluder
         - atomic-openshift-node
         - atomic-openshift-sdn-ovs
+        - atomic-openshift-hyperkube
         - cockpit-bridge
         - cockpit-docker
         - cockpit-system
@@ -124,6 +125,7 @@
         - origin-clients
         - origin-node
         - origin-sdn-ovs
+        - origin-hyperkube
         - tuned-profiles-atomic-openshift-node
         - tuned-profiles-origin-node
         register: result

+ 1 - 0
playbooks/aws/README.md

@@ -63,6 +63,7 @@ under the appropriate groups.  Most variables can exist in the 'all' group.
 openshift_deployment_type: # 'origin' or 'openshift-enterprise'
 openshift_release: # example: v3.7
 openshift_pkg_version: # example: -3.7.0
+openshift_aws_clusterid: # example: example
 openshift_aws_ssh_key_name: # example: myuser_key
 openshift_aws_base_ami: # example: ami-12345678
 # These are required when doing SSL on the ELBs
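
Taken together with the new `openshift_aws_clusterid` entry, a hypothetical inventory fragment for the 'all' group might look like the sketch below; every value, and the exact file location, is a placeholder rather than a default from this repository.

```yaml
# Hypothetical group_vars/all.yml fragment; values are illustrative only.
openshift_deployment_type: origin
openshift_release: v3.11            # assumed target release
openshift_aws_clusterid: demo       # new variable documented in this diff
openshift_aws_ssh_key_name: myuser_key
openshift_aws_base_ami: ami-12345678
```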

+ 2 - 0
playbooks/aws/openshift-cluster/provision.yml

@@ -21,3 +21,5 @@
     import_role:
       name: openshift_aws
       tasks_from: provision.yml
+
+- import_playbook: provision_dns.yml

+ 9 - 0
playbooks/aws/openshift-cluster/provision_dns.yml

@@ -0,0 +1,9 @@
+---
+- name: provision dns
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: provision dns
+    import_role:
+      name: openshift_aws
+      tasks_from: provision_dns.yml

+ 2 - 2
playbooks/azure/openshift-cluster/build_node_image.yml

@@ -109,7 +109,7 @@
   gather_facts: no
   tasks:
   - set_fact:
-      openshift_rpm: "{{ hostvars[groups['nodes'][0]]['yum'].results | selectattr('name', 'match', '^(origin|atomic-openshift)$') | first }}"
+      openshift_rpm: "{{ hostvars[groups['nodes'][0]]['yum'].results | selectattr('name', 'match', '^(origin-hyperkube|atomic-openshift-hyperkube)$') | first }}"
   - name: create image
     import_tasks: tasks/create_image_from_vm.yml
     vars:
@@ -118,7 +118,7 @@
       image_tags:
         base_image: "{{ (input_image.stdout | from_json).name }}"
         kernel: "{{ hostvars[groups['nodes'][0]]['ansible_kernel'] }}"
-        openshift: "{{ openshift_rpm.name }}-{{ openshift_rpm.version }}-{{ openshift_rpm.release }}.{{ openshift_rpm.arch }}"
+        openshift: "{{ openshift_rpm.version }}-{{ openshift_rpm.release }}.{{ openshift_rpm.arch }}"
 
   - name: create blob
     import_tasks: tasks/create_blob_from_vm.yml

+ 0 - 7
playbooks/azure/openshift-cluster/group_vars/all/yum_repos.yml

@@ -33,13 +33,6 @@ azure_node_repos:
     sslclientkey: /var/lib/yum/client-key.pem
     enabled: yes
 
-  - name: rhel-7-fast-datapath-rpms
-    baseurl: https://mirror.openshift.com/enterprise/rhel/rhel-7-fast-datapath-rpms/
-    gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-    sslclientcert: /var/lib/yum/client-cert.pem
-    sslclientkey: /var/lib/yum/client-key.pem
-    enabled: yes
-
   - name: rhel-7-server-ansible-2.4-rpms
     baseurl: https://mirror.openshift.com/enterprise/rhel/rhel-7-server-ansible-2.4-rpms/
     gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release

+ 1 - 1
playbooks/byo/README.md

@@ -7,5 +7,5 @@ clusters.
 Usage is documented in the official OpenShift documentation pages, under the
 Advanced Installation topic:
 
-- [OpenShift Origin: Advanced Installation](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
+- [OpenShift Origin: Advanced Installation](https://docs.okd.io/latest/install_config/install/advanced_install.html)
 - [OpenShift Container Platform: Advanced Installation](https://docs.openshift.com/container-platform/latest/install_config/install/advanced_install.html)

+ 1 - 1
playbooks/byo/openshift-cluster/upgrades/README.md

@@ -4,4 +4,4 @@ cluster. Additional notes for the associated upgrade playbooks are
 provided in their respective directories.
 
 # Upgrades available
-- [OpenShift Container Platform 3.9 to 3.10](v3_10/README.md) (upgrade OpenShift Origin from 3.9.x to 3.10.x)
+- [OpenShift Container Platform 3.10 to 3.11](v3_11/README.md) (upgrade OpenShift Origin from 3.10.x to 3.11.x)

+ 0 - 20
playbooks/byo/openshift-cluster/upgrades/v3_10/README.md

@@ -1,20 +0,0 @@
-# v3.10 Major and Minor Upgrade Playbook
-
-## Overview
-This playbook currently performs the following steps.
-
- * Upgrade and restart master services
- * Unschedule node
- * Upgrade and restart docker
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-
-```
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml
-```

+ 0 - 5
playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml

@@ -1,5 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade.yml

+ 0 - 16
playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml

@@ -1,16 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
-
-- import_playbook: ../../../../openshift-master/private/restart.yml

+ 0 - 7
playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml

@@ -1,7 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml

+ 0 - 7
playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_scale_groups.yml

@@ -1,7 +0,0 @@
----
-#
-# Node Scale Group Upgrade Playbook
-#
-# Upgrades scale group nodes only.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml

+ 1 - 4
playbooks/common/openshift-cluster/upgrades/post_control_plane.yml

@@ -34,13 +34,10 @@
   # create and update in one step.
   - role: openshift_examples
     when: openshift_install_examples | default(true) | bool
-  - openshift_hosted_templates
   # Update the existing templates
   - role: openshift_examples
     when: openshift_install_examples | default(true) | bool
     openshift_examples_import_command: replace
-  - role: openshift_hosted_templates
-    openshift_hosted_templates_import_command: replace
 
 # Poll for registry and router pods, redeploy registry certs if needed.
 - import_playbook: ../../../openshift-hosted/private/upgrade_poll_and_check_certs.yml
@@ -65,7 +62,7 @@
 
   - name: Warn if pluginOrderOverride is in use in master-config.yaml
     debug:
-      msg: "WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
+      msg: "WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/container-platform/latest/architecture/additional_concepts/admission_controllers.html for more information."
     when:
     - not (grep_plugin_order_override is skipped)
     - grep_plugin_order_override.rc == 0

+ 0 - 13
playbooks/common/openshift-cluster/upgrades/v3_10/label_nodes.yml

@@ -1,13 +0,0 @@
----
-
-- name: Set node schedulability
-  hosts: oo_masters_to_config
-  roles:
-    - openshift_facts
-    - lib_openshift
-  tasks:
-    - import_role:
-        name: openshift_manage_node
-        tasks_from: config.yml
-      vars:
-        openshift_master_host: '{{ groups.oo_first_master.0 }}'

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml

@@ -1 +0,0 @@
----

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_10/roles

@@ -1 +0,0 @@
-../../../../../roles/

+ 0 - 7
playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml

@@ -1,7 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- import_playbook: upgrade_control_plane.yml
-
-- import_playbook: upgrade_nodes.yml

+ 0 - 121
playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml

@@ -1,121 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- import_playbook: ../init.yml
-  vars:
-    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
-    l_base_packages_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_cert_check_hosts: "oo_masters_to_config:oo_etcd_to_config"
-
-- name: Configure the upgrade target for the common upgrade tasks 3.10
-  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
-  tasks:
-  - set_fact:
-      openshift_upgrade_target: '3.10'
-      openshift_upgrade_min: '3.9'
-      openshift_release: '3.10'
-
-- import_playbook: ../pre/config.yml
-  # These vars a meant to exclude oo_nodes from plays that would otherwise include
-  # them by default.
-  vars:
-    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
-    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
-    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
-    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
-    l_upgrade_excluder_hosts: "oo_masters_to_config"
-    openshift_protect_installed_version: False
-
-# Need to run sanity checks after version has been run.
-- import_playbook: ../../../../init/sanity_checks.yml
-  vars:
-    # oo_lb_to_config might not be present; Can't use !oo_nodes because masters are nodes.
-    l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_lb_to_config'] | default([]) ) }}"
-
-- name: Flag pre-upgrade checks complete for hosts without errors
-  hosts: oo_masters_to_config:oo_etcd_to_config
-  tasks:
-  - set_fact:
-      pre_upgrade_complete: True
-
-- import_playbook: label_nodes.yml
-
-# To upgrade, we need masters to be capable of signing certificates
-- hosts: oo_masters
-  serial: 1
-  tasks:
-  - name: Enable core bootstrapping components
-    include_tasks: ../../../../openshift-master/private/tasks/enable_bootstrap.yml
-  - name: Place shim commands on the masters before we begin the upgrade
-    import_role:
-      name: openshift_control_plane
-      tasks_from: static_shim.yml
-
-# TODO: need to verify settings about the bootstrap configs
-# 1. Does network policy match the master config
-
-- name: Configure components that must be available prior to upgrade
-  hosts: oo_first_master
-  pre_tasks:
-  - name: Enable core bootstrapping components
-    include_tasks: ../../../../openshift-master/private/tasks/enable_bootstrap_config.yml
-  - name: Ensure the master bootstrap config has bootstrapping config
-    import_role:
-      name: openshift_node_group
-      tasks_from: upgrade.yml
-  - name: Enable node configuration reconciliation
-    import_role:
-      name: openshift_node_group
-      tasks_from: sync.yml
-  roles:
-  - role: openshift_sdn
-    when: openshift_use_openshift_sdn | default(True) | bool
-
-- name: Update master nodes
-  hosts: oo_masters
-  serial: 1
-  tasks:
-  - import_role:
-      name: openshift_node
-      tasks_from: upgrade_pre.yml
-  - import_role:
-      name: openshift_node
-      tasks_from: upgrade.yml
-
-- import_playbook: ../upgrade_control_plane.yml
-  vars:
-    openshift_release: '3.10'
-
-- import_playbook: ../post_control_plane.yml
-
-- hosts: oo_masters
-  tasks:
-  - import_role:
-      name: openshift_web_console
-      tasks_from: remove_old_asset_config.yml
-
-# This is a one time migration. No need to save it in the 3.11.
-# https://bugzilla.redhat.com/show_bug.cgi?id=1565736
-- hosts: oo_first_master
-  tasks:
-  - import_role:
-      name: openshift_hosted
-      tasks_from: registry_service_account.yml
-    when: openshift_hosted_manage_registry | default(True) | bool
-  - import_role:
-      name: openshift_hosted
-      tasks_from: remove_legacy_env_variables.yml
-    when: openshift_hosted_manage_registry | default(True) | bool

+ 0 - 40
playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml

@@ -1,40 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- import_playbook: ../init.yml
-  vars:
-    l_upgrade_cert_check_hosts: "oo_nodes_to_config"
-
-- name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_all_hosts
-  tasks:
-  - set_fact:
-      openshift_upgrade_target: '3.10'
-      openshift_upgrade_min: '3.9'
-      openshift_release: '3.10'
-
-- import_playbook: ../pre/config.yml
-  vars:
-    l_upgrade_repo_hosts: "oo_nodes_to_config"
-    l_upgrade_no_proxy_hosts: "oo_all_hosts"
-    l_upgrade_health_check_hosts: "oo_nodes_to_config"
-    l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
-    l_upgrade_docker_target_hosts: "oo_nodes_to_config"
-    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
-    l_upgrade_nodes_only: True
-
-# Need to run sanity checks after version has been run.
-- import_playbook: ../../../../init/sanity_checks.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - set_fact:
-      pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_nodes.yml

+ 23 - 0
playbooks/common/openshift-cluster/upgrades/v3_11/upgrade_control_plane.yml

@@ -45,6 +45,29 @@
     # oo_lb_to_config might not be present; Can't use !oo_nodes because masters are nodes.
     l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_lb_to_config'] | default([]) ) }}"
 
+# Some change makes critical outage on current cluster.
+- name: Confirm upgrade will not make critical changes
+  hosts: oo_first_master
+  tasks:
+  - name: Confirm Reconcile Security Context Constraints will not change current SCCs
+    command: >
+      {{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --additive-only=true -o name
+    register: check_reconcile_scc_result
+    when: openshift_reconcile_sccs_reject_change | default(true) | bool
+    until: check_reconcile_scc_result.rc == 0
+    retries: 3
+
+  - fail:
+      msg: >
+        Changes to bootstrapped SCCs have been detected. Please review the changes by running
+        "{{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --additive-only=true"
+        After reviewing the changes please apply those changes by adding the '--confirm' flag.
+        Do not modify the default SCCs. Customizing the default SCCs will cause this check to fail when upgrading.
+        If you require non standard SCCs please refer to https://docs.okd.io/latest/admin_guide/manage_scc.html
+    when:
+    - openshift_reconcile_sccs_reject_change | default(true) | bool
+    - check_reconcile_scc_result.stdout != '' or check_reconcile_scc_result.rc != 0
+
 - name: Flag pre-upgrade checks complete for hosts without errors
   hosts: oo_masters_to_config:oo_etcd_to_config
   tasks:

+ 1 - 1
playbooks/gcp/openshift-cluster/upgrade.yml

@@ -11,4 +11,4 @@
       all_nodes: true
 
 - name: run the upgrade
-  import_playbook: ../../common/openshift-cluster/upgrades/v3_10/upgrade.yml
+  import_playbook: ../../common/openshift-cluster/upgrades/v3_11/upgrade.yml

+ 2 - 2
playbooks/init/validate_hostnames.yml

@@ -21,7 +21,7 @@
         Inventory setting: openshift_hostname={{ openshift_hostname | default ('undefined') }}
         This check can be overridden by setting openshift_hostname_check=false in
         the inventory.
-        See https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-host-variables
+        See https://docs.okd.io/latest/install_config/install/advanced_install.html#configuring-host-variables
     when:
     - lookupip.stdout != '127.0.0.1'
     - lookupip.stdout not in ansible_all_ipv4_addresses
@@ -36,7 +36,7 @@
         Inventory setting: openshift_ip={{ openshift_ip }}
         This check can be overridden by setting openshift_ip_check=false in
         the inventory.
-        See https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-host-variables
+        See https://docs.okd.io/latest/install_config/install/advanced_install.html#configuring-host-variables
     when:
     - openshift_ip is defined
     - openshift_ip not in ansible_all_ipv4_addresses

+ 8 - 0
playbooks/openshift-glusterfs/private/upgrade.yml

@@ -0,0 +1,8 @@
+---
+- name: Upgrade GlusterFS
+  hosts: oo_first_master
+  tasks:
+  - name: Run glusterfs upgrade role
+    import_role:
+      name: openshift_storage_glusterfs
+      tasks_from: upgrade.yml

+ 10 - 0
playbooks/openshift-glusterfs/upgrade.yml

@@ -0,0 +1,10 @@
+---
+- import_playbook: ../init/main.yml
+  vars:
+    l_init_fact_hosts: "oo_masters_to_config:oo_glusterfs_to_config"
+    l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
+    l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] | union(groups['oo_glusterfs_to_config']) }}"
+    l_install_base_packages: False
+    l_base_packages_hosts: "all:!all"
+
+- import_playbook: private/upgrade.yml

+ 1 - 1
playbooks/openshift-hosted/private/redeploy-registry-certificates.yml

@@ -39,7 +39,7 @@
   # Replace dc/docker-registry environment variable certificate data if set.
   - name: Update docker-registry environment variables
     shell: >
-      {{ openshift_client_binary }} env dc/docker-registry
+      {{ openshift_client_binary }} set env dc/docker-registry
       OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
       OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-registry.crt)"
       OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-registry.key)"

+ 1 - 1
playbooks/openshift-hosted/private/redeploy-router-certificates.yml

@@ -52,7 +52,7 @@
 
   - name: Update router environment variables
     shell: >
-      {{ openshift_client_binary }} env dc/router
+      {{ openshift_client_binary }} set env dc/router
       OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)"
       OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-router.crt)"
       OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-router.key)"

+ 2 - 2
playbooks/openshift-logging/private/config.yml

@@ -16,7 +16,7 @@
 # Normally we only collect this information for our master group entries
 # we want to also collect this for nodes so we can match group entries to nodes
 - name: Get common IP facts when necessary
-  hosts: all
+  hosts: oo_nodes_to_config:!oo_masters
   gather_facts: false
   tasks:
   - name: Gather Cluster facts
@@ -85,7 +85,7 @@
         groups: oo_elasticsearch_nodes
         ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
         ansible_become: "{{ g_sudo | default(omit) }}"
-      with_items: "{{ groups['OSEv3'] }}"
+      with_items: "{{ groups['oo_nodes_to_config'] }}"
       changed_when: no
       run_once: true
       delegate_to: localhost

+ 0 - 1
playbooks/openshift-master/private/additional_config.yml

@@ -30,7 +30,6 @@
     when: openshift_install_examples | default(true) | bool
   - role: openshift_cluster_autoscaler
     when: openshift_cluster_autoscaler_deploy | default(false) | bool
-  - role: openshift_hosted_templates
   - role: openshift_manageiq
     when: openshift_use_manageiq | default(true) | bool
   - role: cockpit

+ 1 - 0
playbooks/openshift-master/private/config.yml

@@ -138,6 +138,7 @@
   roles:
   - role: openshift_manage_node
     openshift_master_host: "{{ groups.oo_first_master.0 }}"
+    openshift_manage_node_is_master: "{{ ('oo_masters_to_config' in group_names) | bool }}"
     openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
 
 - name: Master Install Checkpoint End

+ 0 - 23
playbooks/openshift-master/private/upgrade.yml

@@ -3,29 +3,6 @@
 # Upgrade Masters
 ###############################################################################
 
-# Some change makes critical outage on current cluster.
-- name: Confirm upgrade will not make critical changes
-  hosts: oo_first_master
-  tasks:
-  - name: Confirm Reconcile Security Context Constraints will not change current SCCs
-    command: >
-      {{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --additive-only=true -o name
-    register: check_reconcile_scc_result
-    when: openshift_reconcile_sccs_reject_change | default(true) | bool
-    until: check_reconcile_scc_result.rc == 0
-    retries: 3
-
-  - fail:
-      msg: >
-        Changes to bootstrapped SCCs have been detected. Please review the changes by running
-        "{{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --additive-only=true"
-        After reviewing the changes please apply those changes by adding the '--confirm' flag.
-        Do not modify the default SCCs. Customizing the default SCCs will cause this check to fail when upgrading.
-        If you require non standard SCCs please refer to https://docs.openshift.org/latest/admin_guide/manage_scc.html
-    when:
-    - openshift_reconcile_sccs_reject_change | default(true) | bool
-    - check_reconcile_scc_result.stdout != '' or check_reconcile_scc_result.rc != 0
-
 # Create service signer cert when missing. Service signer certificate
 # is added to master config in the master_config_upgrade hook.
 - name: Determine if service signer cert must be created

+ 6 - 11
playbooks/openshift-metering/README.md

@@ -13,20 +13,15 @@ openshift_monitoring_deploy: true
 
 ## Installation
 
-To install Openshift Metering, set this variable:
+To install Openshift Metering, run the install playbook:
 
-```yaml
-openshift_metering_install: true
-```
-
-To uninstall, set:
-
-```yaml
-openshift_metering_install: false
+```bash
+ansible-playbook playbooks/openshift-metering/config.yml
 ```
 
-Then run:
+To uninstall, run the uninstall playbook:
 
 ```bash
-ansible-playbook playbooks/openshift-metering/config.yml
+ansible-playbook playbooks/openshift-metering/uninstall.yml
 ```
+

+ 0 - 4
playbooks/openshift-node/certificates.yml

@@ -1,4 +0,0 @@
----
-- import_playbook: ../init/main.yml
-
-- import_playbook: private/certificates.yml

+ 0 - 24
playbooks/openshift-node/private/certificates-backup.yml

@@ -1,24 +0,0 @@
----
-- name: Ensure node directory is absent from generated configs
-  hosts: oo_first_master
-  tasks:
-  # The generated configs directory (/etc/origin/generated-configs) is
-  # backed up during redeployment of the control plane certificates.
-  # We need to ensure that the generated config directory for
-  # individual nodes has been deleted before continuing, so verify
-  # that it is missing here.
-  - name: Ensure node directories and tarballs are absent from generated configs
-    shell: >
-      rm -rf {{ openshift.common.config_base }}/generated-configs/node-*
-    args:
-      warn: no
-
-- name: Redeploy node certificates
-  hosts: oo_nodes_to_config
-  pre_tasks:
-  - name: Remove CA certificate
-    file:
-      path: "{{ item }}"
-      state: absent
-    with_items:
-    - "{{ openshift.common.config_base }}/node/ca.crt"

+ 0 - 6
playbooks/openshift-node/private/certificates.yml

@@ -1,6 +0,0 @@
----
-- name: Create OpenShift certificates for node hosts
-  hosts: oo_nodes_to_config
-  gather_facts: no
-  roles:
-  - role: openshift_node_certificates

+ 1 - 0
playbooks/openshift-node/private/join.yml

@@ -63,6 +63,7 @@
   roles:
   - role: openshift_manage_node
     openshift_master_host: "{{ groups.oo_first_master.0 }}"
+    openshift_manage_node_is_master: "{{ ('oo_masters_to_config' in group_names) | bool }}"
 
 - name: Node Join Checkpoint End
   hosts: all

+ 0 - 6
playbooks/openshift-node/private/redeploy-certificates.yml

@@ -1,6 +0,0 @@
----
-- import_playbook: certificates-backup.yml
-
-- import_playbook: certificates.yml
-  vars:
-    openshift_certificates_redeploy: true

+ 4 - 1
playbooks/openshift-node/private/restart.yml

@@ -43,8 +43,11 @@
     until:
     - node_output.results is defined
     - node_output.results.returncode is defined
-    - node_output.results.results is defined
     - node_output.results.returncode == 0
+    - node_output.results.results is defined
+    - node_output.results.results | length > 0
+    - node_output.results.results[0].status is defined
+    - node_output.results.results[0].status.conditions is defined
     - node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
     # Give the node three minutes to come back online.
     retries: 36

+ 0 - 8
playbooks/openshift-node/redeploy-certificates.yml

@@ -1,8 +0,0 @@
----
-- import_playbook: ../init/main.yml
-
-- import_playbook: private/redeploy-certificates.yml
-
-- import_playbook: private/restart.yml
-  vars:
-    openshift_node_restart_docker_required: False

+ 2 - 2
playbooks/openstack/README.md

@@ -230,8 +230,8 @@ $ ansible-playbook --user openshift \
 [devstack]: https://docs.openstack.org/devstack/
 [tripleo]: http://tripleo.org/
 [packstack]: https://www.rdoproject.org/install/packstack/
-[configure-authentication]: https://docs.openshift.org/latest/install_config/configuring_authentication.html
-[hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware
+[configure-authentication]: https://docs.okd.io/latest/install_config/configuring_authentication.html
+[hardware-requirements]: https://docs.okd.io/latest/install_config/install/prerequisites.html#hardware
 [origin]: https://www.openshift.org/
 [centos7]: https://www.centos.org/
 [sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example

+ 13 - 6
playbooks/openstack/configuration.md

@@ -120,7 +120,7 @@ https://github.com/openshift/openshift-ansible/blob/master/roles/openshift_cloud
 
 For more information, consult the [Configuring for OpenStack page in the OpenShift documentation][openstack-credentials].
 
-[openstack-credentials]: https://docs.openshift.org/latest/install_config/configuring_openstack.html#install-config-configuring-openstack
+[openstack-credentials]: https://docs.okd.io/latest/install_config/configuring_openstack.html#install-config-configuring-openstack
 
 If you would like to use additional parameters, create a custom cloud provider
 configuration file locally and specify it in `inventory/group_vars/OSEv3.yml`:
@@ -620,16 +620,20 @@ openshift_node_groups:
 ```
 
 
-### Namespace Subnet driver
+### Namespace Isolation drivers
 
 By default, kuryr is configured with the default subnet driver where all the
 pods are deployed on the same Neutron subnet. However, there is an option of
 enabling a different subnet driver, named namespace, which makes pods to be
-allocated on different subnets depending on the namespace they belong to. To
-enable this new kuryr subnet driver you need to uncomment:
+allocated on different subnets depending on the namespace they belong to.
+In addition to the subnet driver, properly isolating the different namespaces
+(through OpenStack security groups) also requires enabling the related
+security group driver for namespaces.
+To enable this new kuryr namespace isolation capability you need to uncomment:
 
 ```yaml
 openshift_kuryr_subnet_driver: namespace
+openshift_kuryr_sg_driver: namespace
 ```
 
 
@@ -839,6 +843,9 @@ Adding more nodes to the cluster is a simple process: we need to update the
 node count in `inventory/group_vars/all.yml`, then run the appropriate
 scaleup playbook.
 
+**NOTE**: the dynamic inventory used for scaling is different. Make sure you
+use `scaleup_inventory.py` for all the operations below.
+
 
 ### 1. Update The Inventory
 
@@ -856,7 +863,7 @@ openshift_openstack_num_nodes: 8  # 5 existing and 3 new
 
 ### 2. Scale the Cluster
 
-Next, run the appropriate playbook - either 
+Next, run the appropriate playbook - either
 `openshift-ansible/playbooks/openstack/openshift-cluster/master-scaleup.yml`
 for master nodes or
 `openshift-ansible/playbooks/openstack/openshift-cluster/node-scaleup.yml`
@@ -864,7 +871,7 @@ for other nodes. For example:
 
 ```
 $ ansible-playbook --user openshift \
-  -i openshift-ansible/playbooks/openstack/inventory.py \
+  -i openshift-ansible/playbooks/openstack/scaleup_inventory.py \
   -i inventory \
   openshift-ansible/playbooks/openstack/openshift-cluster/master-scaleup.yml
 ```

+ 8 - 271
playbooks/openstack/inventory.py

@@ -7,281 +7,18 @@ environment.
 
 """
 
-from __future__ import print_function
-
-import argparse
-import json
-import os
-try:
-    import ConfigParser
-except ImportError:
-    import configparser as ConfigParser
-
-from keystoneauth1.exceptions.catalog import EndpointNotFound
-import shade
-
-
-def base_openshift_inventory(cluster_hosts):
-    '''Set the base openshift inventory.'''
-    inventory = {}
-
-    masters = [server.name for server in cluster_hosts
-               if server.metadata['host-type'] == 'master']
-
-    etcd = [server.name for server in cluster_hosts
-            if server.metadata['host-type'] == 'etcd']
-    if not etcd:
-        etcd = masters
-
-    infra_hosts = [server.name for server in cluster_hosts
-                   if server.metadata['host-type'] == 'node' and
-                   server.metadata['sub-host-type'] == 'infra']
-
-    app = [server.name for server in cluster_hosts
-           if server.metadata['host-type'] == 'node' and
-           server.metadata['sub-host-type'] == 'app']
-
-    cns = [server.name for server in cluster_hosts
-           if server.metadata['host-type'] == 'cns']
-
-    load_balancers = [server.name for server in cluster_hosts
-                      if server.metadata['host-type'] == 'lb']
-
-    # NOTE: everything that should go to the `[nodes]` group:
-    nodes = list(set(masters + etcd + infra_hosts + app + cns))
-
-    # NOTE: all OpenShift nodes, including `[lb]`, `[nfs]`, etc.:
-    osev3 = list(set(nodes + load_balancers))
-
-    inventory['OSEv3'] = {'hosts': osev3, 'vars': {}}
-    inventory['openstack_nodes'] = {'hosts': nodes}
-    inventory['openstack_master_nodes'] = {'hosts': masters}
-    inventory['openstack_etcd_nodes'] = {'hosts': etcd}
-    inventory['openstack_infra_nodes'] = {'hosts': infra_hosts}
-    inventory['openstack_compute_nodes'] = {'hosts': app}
-    inventory['openstack_cns_nodes'] = {'hosts': cns}
-    inventory['lb'] = {'hosts': load_balancers}
-    inventory['localhost'] = {'ansible_connection': 'local'}
-
-    return inventory
-
-
-def get_docker_storage_mountpoints(volumes):
-    '''Check volumes to see if they're being used for docker storage'''
-    docker_storage_mountpoints = {}
-    for volume in volumes:
-        if volume.metadata.get('purpose') == "openshift_docker_storage":
-            for attachment in volume.attachments:
-                if attachment.server_id in docker_storage_mountpoints:
-                    docker_storage_mountpoints[attachment.server_id].append(attachment.device)
-                else:
-                    docker_storage_mountpoints[attachment.server_id] = [attachment.device]
-    return docker_storage_mountpoints
-
-
-def _get_hostvars(server, docker_storage_mountpoints):
-    ssh_ip_address = server.public_v4 or server.private_v4
-    hostvars = {
-        'ansible_host': ssh_ip_address
-    }
-
-    public_v4 = server.public_v4 or server.private_v4
-    if public_v4:
-        hostvars['public_v4'] = server.public_v4
-        hostvars['openshift_public_ip'] = server.public_v4
-    # TODO(shadower): what about multiple networks?
-    if server.private_v4:
-        hostvars['private_v4'] = server.private_v4
-        hostvars['openshift_ip'] = server.private_v4
-
-        # NOTE(shadower): Yes, we set both hostname and IP to the private
-        # IP address for each node. OpenStack doesn't resolve nodes by
-        # name at all, so using a hostname here would require an internal
-        # DNS which would complicate the setup and potentially introduce
-        # performance issues.
-        hostvars['openshift_hostname'] = server.metadata.get(
-            'openshift_hostname', server.private_v4)
-    hostvars['openshift_public_hostname'] = server.name
-
-    if server.metadata['host-type'] == 'cns':
-        hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
-
-    group_name = server.metadata.get('openshift_node_group_name')
-    hostvars['openshift_node_group_name'] = group_name
-
-    # check for attached docker storage volumes
-    if 'os-extended-volumes:volumes_attached' in server:
-        if server.id in docker_storage_mountpoints:
-            hostvars['docker_storage_mountpoints'] = ' '.join(
-                docker_storage_mountpoints[server.id])
-    return hostvars
+import resources
 
 
 def build_inventory():
-    '''Build the dynamic inventory.'''
-    cloud = shade.openstack_cloud()
-
-    # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
-    # environment variable.
-    cluster_hosts = [
-        server for server in cloud.list_servers()
-        if 'metadata' in server and 'clusterid' in server.metadata]
-
-    inventory = base_openshift_inventory(cluster_hosts)
-
-    inventory['_meta'] = {'hostvars': {}}
-
-    # Some clouds don't have Cinder. That's okay:
-    try:
-        volumes = cloud.list_volumes()
-    except EndpointNotFound:
-        volumes = []
-
-    # cinder volumes used for docker storage
-    docker_storage_mountpoints = get_docker_storage_mountpoints(volumes)
-    for server in cluster_hosts:
-        inventory['_meta']['hostvars'][server.name] = _get_hostvars(
-            server,
-            docker_storage_mountpoints)
-
-    stout = _get_stack_outputs(cloud)
-    if stout is not None:
-        try:
-            inventory['localhost'].update({
-                'openshift_openstack_api_lb_provider':
-                stout['api_lb_provider'],
-                'openshift_openstack_api_lb_port_id':
-                stout['api_lb_vip_port_id'],
-                'openshift_openstack_api_lb_sg_id':
-                stout['api_lb_sg_id']})
-        except KeyError:
-            pass  # Not an API load balanced deployment
-
-        try:
-            inventory['OSEv3']['vars'][
-                'openshift_master_cluster_hostname'] = stout['private_api_ip']
-        except KeyError:
-            pass  # Internal LB not specified
-
-        inventory['localhost']['openshift_openstack_private_api_ip'] = \
-            stout.get('private_api_ip')
-        inventory['localhost']['openshift_openstack_public_api_ip'] = \
-            stout.get('public_api_ip')
-        inventory['localhost']['openshift_openstack_public_router_ip'] = \
-            stout.get('public_router_ip')
-
-        try:
-            inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)
-        except KeyError:
-            pass  # Not a kuryr deployment
+    """Build the Ansible inventory for the current environment."""
+    inventory = resources.build_inventory()
+    inventory['nodes'] = inventory['openstack_nodes']
+    inventory['masters'] = inventory['openstack_master_nodes']
+    inventory['etcd'] = inventory['openstack_etcd_nodes']
+    inventory['glusterfs'] = inventory['openstack_cns_nodes']
     return inventory
 
 
-def _get_stack_outputs(cloud_client):
-    """Returns a dictionary with the stack outputs"""
-    cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')
-
-    stack = cloud_client.get_stack(cluster_name)
-    if stack is None or stack['stack_status'] not in (
-            'CREATE_COMPLETE', 'UPDATE_COMPLETE'):
-        return None
-
-    data = {}
-    for output in stack['outputs']:
-        data[output['output_key']] = output['output_value']
-    return data
-
-
-def _get_kuryr_vars(cloud_client, data):
-    """Returns a dictionary of Kuryr variables resulting of heat stacking"""
-    settings = {}
-    settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']
-    if 'pod_subnet_pool' in data:
-        settings['kuryr_openstack_pod_subnet_pool_id'] = data[
-            'pod_subnet_pool']
-    settings['kuryr_openstack_pod_router_id'] = data['pod_router']
-    settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']
-    settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']
-    settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']
-    settings['kuryr_openstack_pod_project_id'] = (
-        cloud_client.current_project_id)
-    settings['kuryr_openstack_api_lb_ip'] = data['private_api_ip']
-
-    settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']
-    settings['kuryr_openstack_username'] = cloud_client.auth['username']
-    settings['kuryr_openstack_password'] = cloud_client.auth['password']
-    if 'user_domain_id' in cloud_client.auth:
-        settings['kuryr_openstack_user_domain_name'] = (
-            cloud_client.auth['user_domain_id'])
-    else:
-        settings['kuryr_openstack_user_domain_name'] = (
-            cloud_client.auth['user_domain_name'])
-    # FIXME(apuimedo): consolidate kuryr controller credentials into the same
-    #                  vars the openstack playbook uses.
-    settings['kuryr_openstack_project_id'] = cloud_client.current_project_id
-    if 'project_domain_id' in cloud_client.auth:
-        settings['kuryr_openstack_project_domain_name'] = (
-            cloud_client.auth['project_domain_id'])
-    else:
-        settings['kuryr_openstack_project_domain_name'] = (
-            cloud_client.auth['project_domain_name'])
-    return settings
-
-
-def output_inventory(inventory, output_file):
-    """Outputs inventory into a file in ini format"""
-    config = ConfigParser.ConfigParser(allow_no_value=True)
-
-    host_meta_vars = _get_host_meta_vars_as_dict(inventory)
-
-    for key in sorted(inventory.keys()):
-        if key == 'localhost':
-            config.add_section('localhost')
-            config.set('localhost', 'localhost')
-            config.add_section('localhost:vars')
-            for var, value in inventory['localhost'].items():
-                config.set('localhost:vars', var, value)
-        elif key not in ('localhost', '_meta'):
-            if 'hosts' in inventory[key]:
-                config.add_section(key)
-                for host in inventory[key]['hosts']:
-                    if host in host_meta_vars.keys():
-                        config.set(key, host + " " + host_meta_vars[host])
-                    else:
-                        config.set(key, host)
-            if 'vars' in inventory[key]:
-                config.add_section(key + ":vars")
-                for var, value in inventory[key]['vars'].items():
-                    config.set(key + ":vars", var, value)
-
-    with open(output_file, 'w') as configfile:
-        config.write(configfile)
-
-
-def _get_host_meta_vars_as_dict(inventory):
-    """parse host meta vars from inventory as dict"""
-    host_meta_vars = {}
-    if '_meta' in inventory.keys():
-        if 'hostvars' in inventory['_meta']:
-            for host in inventory['_meta']['hostvars'].keys():
-                host_meta_vars[host] = ' '.join(
-                    '{}={}'.format(key, val) for key, val in inventory['_meta']['hostvars'][host].items())
-    return host_meta_vars
-
-
-def parse_args():
-    """parse arguments to script"""
-    parser = argparse.ArgumentParser(description="Create ansible inventory.")
-    parser.add_argument('--static', type=str, default='',
-                        help='File to store a static inventory in.')
-    parser.add_argument('--list', action="store_true", default=False,
-                        help='List inventory.')
-
-    return parser.parse_args()
-
-
 if __name__ == '__main__':
-    if parse_args().static:
-        output_inventory(build_inventory(), parse_args().static)
-    else:
-        print(json.dumps(build_inventory(), indent=4, sort_keys=True))
+    resources.main(build_inventory)
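
Since the heavy lifting now lives in the shared `resources` module, the slimmed-down
`inventory.py` still behaves like a standard Ansible dynamic inventory. A minimal usage
sketch, assuming OpenStack credentials in `clouds.yaml` or the shell environment and the
`shade` dependency installed (the output filename is illustrative):

```bash
# Print the inventory as JSON, exactly as Ansible would when calling the script:
python playbooks/openstack/inventory.py --list

# Or write a static INI-style inventory file instead:
python playbooks/openstack/inventory.py --static my-static-inventory.ini
```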

+ 297 - 0
playbooks/openstack/resources.py

@@ -0,0 +1,297 @@
+#!/usr/bin/env python
+"""
+This library is used by the OpenStack dynamic inventories.
+
+It produces the inventory in a Python dict structure based on the current
+environment.
+"""
+
+from __future__ import print_function
+
+import argparse
+import json
+import os
+try:
+    import ConfigParser
+except ImportError:
+    import configparser as ConfigParser
+
+from keystoneauth1.exceptions.catalog import EndpointNotFound
+import shade
+
+
+def base_openshift_inventory(cluster_hosts):
+    '''Set the base openshift inventory.'''
+    inventory = {}
+
+    masters = [server.name for server in cluster_hosts
+               if server.metadata['host-type'] == 'master']
+
+    etcd = [server.name for server in cluster_hosts
+            if server.metadata['host-type'] == 'etcd']
+    if not etcd:
+        etcd = masters
+
+    infra_hosts = [server.name for server in cluster_hosts
+                   if server.metadata['host-type'] == 'node' and
+                   server.metadata['sub-host-type'] == 'infra']
+
+    app = [server.name for server in cluster_hosts
+           if server.metadata['host-type'] == 'node' and
+           server.metadata['sub-host-type'] == 'app']
+
+    cns = [server.name for server in cluster_hosts
+           if server.metadata['host-type'] == 'cns']
+
+    load_balancers = [server.name for server in cluster_hosts
+                      if server.metadata['host-type'] == 'lb']
+
+    # NOTE: everything that should go to the `[nodes]` group:
+    nodes = list(set(masters + etcd + infra_hosts + app + cns))
+
+    # NOTE: all OpenShift nodes, including `[lb]`, `[nfs]`, etc.:
+    osev3 = list(set(nodes + load_balancers))
+
+    inventory['OSEv3'] = {'hosts': osev3, 'vars': {}}
+    inventory['openstack_nodes'] = {'hosts': nodes}
+    inventory['openstack_master_nodes'] = {'hosts': masters}
+    inventory['openstack_etcd_nodes'] = {'hosts': etcd}
+    inventory['openstack_infra_nodes'] = {'hosts': infra_hosts}
+    inventory['openstack_compute_nodes'] = {'hosts': app}
+    inventory['openstack_cns_nodes'] = {'hosts': cns}
+    inventory['lb'] = {'hosts': load_balancers}
+    inventory['localhost'] = {'ansible_connection': 'local'}
+
+    return inventory
+
+
+def get_docker_storage_mountpoints(volumes):
+    '''Check volumes to see if they're being used for docker storage'''
+    docker_storage_mountpoints = {}
+    for volume in volumes:
+        if volume.metadata.get('purpose') == "openshift_docker_storage":
+            for attachment in volume.attachments:
+                if attachment.server_id in docker_storage_mountpoints:
+                    docker_storage_mountpoints[attachment.server_id].append(attachment.device)
+                else:
+                    docker_storage_mountpoints[attachment.server_id] = [attachment.device]
+    return docker_storage_mountpoints
+
+
+def _get_hostvars(server, docker_storage_mountpoints):
+    ssh_ip_address = server.public_v4 or server.private_v4
+    hostvars = {
+        'ansible_host': ssh_ip_address
+    }
+
+    public_v4 = server.public_v4 or server.private_v4
+    if public_v4:
+        hostvars['public_v4'] = server.public_v4
+        hostvars['openshift_public_ip'] = server.public_v4
+    # TODO(shadower): what about multiple networks?
+    if server.private_v4:
+        hostvars['private_v4'] = server.private_v4
+        hostvars['openshift_ip'] = server.private_v4
+
+        # NOTE(shadower): Yes, we set both hostname and IP to the private
+        # IP address for each node. OpenStack doesn't resolve nodes by
+        # name at all, so using a hostname here would require an internal
+        # DNS which would complicate the setup and potentially introduce
+        # performance issues.
+        hostvars['openshift_hostname'] = server.metadata.get(
+            'openshift_hostname', server.private_v4)
+    hostvars['openshift_public_hostname'] = server.name
+
+    if server.metadata['host-type'] == 'cns':
+        hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
+
+    group_name = server.metadata.get('openshift_node_group_name')
+    hostvars['openshift_node_group_name'] = group_name
+
+    # check for attached docker storage volumes
+    if 'os-extended-volumes:volumes_attached' in server:
+        if server.id in docker_storage_mountpoints:
+            hostvars['docker_storage_mountpoints'] = ' '.join(
+                docker_storage_mountpoints[server.id])
+    return hostvars
+
+
+def build_inventory():
+    '''Build the dynamic inventory.'''
+    cloud = shade.openstack_cloud()
+
+    # Use an environment variable to optionally skip returning the app nodes.
+    show_compute_nodes = os.environ.get('OPENSTACK_SHOW_COMPUTE_NODES', 'true').lower() == "true"
+
+    # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
+    # environment variable.
+    cluster_hosts = [
+        server for server in cloud.list_servers()
+        if 'metadata' in server and 'clusterid' in server.metadata and
+        (show_compute_nodes or server.metadata.get('sub-host-type') != 'app')]
+
+    inventory = base_openshift_inventory(cluster_hosts)
+
+    inventory['_meta'] = {'hostvars': {}}
+
+    # Some clouds don't have Cinder. That's okay:
+    try:
+        volumes = cloud.list_volumes()
+    except EndpointNotFound:
+        volumes = []
+
+    # cinder volumes used for docker storage
+    docker_storage_mountpoints = get_docker_storage_mountpoints(volumes)
+    for server in cluster_hosts:
+        inventory['_meta']['hostvars'][server.name] = _get_hostvars(
+            server,
+            docker_storage_mountpoints)
+
+    stout = _get_stack_outputs(cloud)
+    if stout is not None:
+        try:
+            inventory['localhost'].update({
+                'openshift_openstack_api_lb_provider':
+                stout['api_lb_provider'],
+                'openshift_openstack_api_lb_port_id':
+                stout['api_lb_vip_port_id'],
+                'openshift_openstack_api_lb_sg_id':
+                stout['api_lb_sg_id']})
+        except KeyError:
+            pass  # Not an API load balanced deployment
+
+        try:
+            inventory['OSEv3']['vars'][
+                'openshift_master_cluster_hostname'] = stout['private_api_ip']
+        except KeyError:
+            pass  # Internal LB not specified
+
+        inventory['localhost']['openshift_openstack_private_api_ip'] = \
+            stout.get('private_api_ip')
+        inventory['localhost']['openshift_openstack_public_api_ip'] = \
+            stout.get('public_api_ip')
+        inventory['localhost']['openshift_openstack_public_router_ip'] = \
+            stout.get('public_router_ip')
+
+        try:
+            inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)
+        except KeyError:
+            pass  # Not a kuryr deployment
+    return inventory
+
+
+def _get_stack_outputs(cloud_client):
+    """Returns a dictionary with the stack outputs"""
+    cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')
+
+    stack = cloud_client.get_stack(cluster_name)
+    if stack is None or stack['stack_status'] not in (
+            'CREATE_COMPLETE', 'UPDATE_COMPLETE'):
+        return None
+
+    data = {}
+    for output in stack['outputs']:
+        data[output['output_key']] = output['output_value']
+    return data
+
+
+def _get_kuryr_vars(cloud_client, data):
+    """Returns a dictionary of Kuryr variables resulting of heat stacking"""
+    settings = {}
+    settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']
+    if 'pod_subnet_pool' in data:
+        settings['kuryr_openstack_pod_subnet_pool_id'] = data[
+            'pod_subnet_pool']
+    if 'sg_allow_from_default' in data:
+        settings['kuryr_openstack_sg_allow_from_default_id'] = data[
+            'sg_allow_from_default']
+    if 'sg_allow_from_namespace' in data:
+        settings['kuryr_openstack_sg_allow_from_namespace_id'] = data[
+            'sg_allow_from_namespace']
+    settings['kuryr_openstack_pod_router_id'] = data['pod_router']
+    settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']
+    settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']
+    settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']
+    settings['kuryr_openstack_pod_project_id'] = (
+        cloud_client.current_project_id)
+    settings['kuryr_openstack_api_lb_ip'] = data['private_api_ip']
+
+    settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']
+    settings['kuryr_openstack_username'] = cloud_client.auth['username']
+    settings['kuryr_openstack_password'] = cloud_client.auth['password']
+    if 'user_domain_id' in cloud_client.auth:
+        settings['kuryr_openstack_user_domain_name'] = (
+            cloud_client.auth['user_domain_id'])
+    else:
+        settings['kuryr_openstack_user_domain_name'] = (
+            cloud_client.auth['user_domain_name'])
+    # FIXME(apuimedo): consolidate kuryr controller credentials into the same
+    #                  vars the openstack playbook uses.
+    settings['kuryr_openstack_project_id'] = cloud_client.current_project_id
+    if 'project_domain_id' in cloud_client.auth:
+        settings['kuryr_openstack_project_domain_name'] = (
+            cloud_client.auth['project_domain_id'])
+    else:
+        settings['kuryr_openstack_project_domain_name'] = (
+            cloud_client.auth['project_domain_name'])
+    return settings
+
+
+def output_inventory(inventory, output_file):
+    """Outputs inventory into a file in ini format"""
+    config = ConfigParser.ConfigParser(allow_no_value=True)
+
+    host_meta_vars = _get_host_meta_vars_as_dict(inventory)
+
+    for key in sorted(inventory.keys()):
+        if key == 'localhost':
+            config.add_section('localhost')
+            config.set('localhost', 'localhost')
+            config.add_section('localhost:vars')
+            for var, value in inventory['localhost'].items():
+                config.set('localhost:vars', var, value)
+        elif key not in ('localhost', '_meta'):
+            if 'hosts' in inventory[key]:
+                config.add_section(key)
+                for host in inventory[key]['hosts']:
+                    if host in host_meta_vars.keys():
+                        config.set(key, host + " " + host_meta_vars[host])
+                    else:
+                        config.set(key, host)
+            if 'vars' in inventory[key]:
+                config.add_section(key + ":vars")
+                for var, value in inventory[key]['vars'].items():
+                    config.set(key + ":vars", var, value)
+
+    with open(output_file, 'w') as configfile:
+        config.write(configfile)
+
+
+def _get_host_meta_vars_as_dict(inventory):
+    """parse host meta vars from inventory as dict"""
+    host_meta_vars = {}
+    if '_meta' in inventory.keys():
+        if 'hostvars' in inventory['_meta']:
+            for host in inventory['_meta']['hostvars'].keys():
+                host_meta_vars[host] = ' '.join(
+                    '{}={}'.format(key, val) for key, val in inventory['_meta']['hostvars'][host].items())
+    return host_meta_vars
+
+
+def parse_args():
+    """parse arguments to script"""
+    parser = argparse.ArgumentParser(description="Create ansible inventory.")
+    parser.add_argument('--static', type=str, default='',
+                        help='File to store a static inventory in.')
+    parser.add_argument('--list', action="store_true", default=False,
+                        help='List inventory.')
+
+    return parser.parse_args()
+
+
+def main(inventory_builder):
+    """Ansible dynamic inventory entry point."""
+    if parse_args().static:
+        output_inventory(inventory_builder(), parse_args().static)
+    else:
+        print(json.dumps(inventory_builder(), indent=4, sort_keys=True))
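
One behavioral addition in `build_inventory()` above is the `OPENSTACK_SHOW_COMPUTE_NODES`
environment variable, which drops the `app` (compute) servers from the generated inventory
when set to anything other than `true`. A quick sketch of exercising it through the dynamic
inventory script (the variable name comes from the code above; the invocation is illustrative):

```bash
# List the cluster inventory without the compute (app) nodes:
OPENSTACK_SHOW_COMPUTE_NODES=false python playbooks/openstack/inventory.py --list
```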

+ 8 - 6
playbooks/openstack/sample-inventory/group_vars/all.yml

@@ -42,6 +42,7 @@ openshift_openstack_external_network_name: "public"
 
 #use_trunk_ports: True
 #os_sdn_network_plugin_name: cni
+#openshift_node_proxy_mode: userspace
 
 # # Kuryr needs to know the network or the subnet you will be taking Floating
 # IPs for the loadbalancer services from.
@@ -53,6 +54,7 @@ openshift_openstack_external_network_name: "public"
 
 # # Kuryr can use a different subnet per namespace
 # openshift_kuryr_subnet_driver: namespace
+# openshift_kuryr_sg_driver: namespace
 
 # If your VM images name the ethernet device something other than 'eth0',
 # override this
@@ -128,12 +130,12 @@ openshift_openstack_default_flavor: "m1.medium"
 # # Docker volume size
 # # - set specific volume size for roles by uncommenting corresponding lines
 # # - note: do not remove docker_default_volume_size definition
-#openshift_openstack_docker_master_volume_size: "15"
-#openshift_openstack_docker_infra_volume_size: "15"
-#openshift_openstack_docker_cns_volume_size: "15"
-#openshift_openstack_docker_node_volume_size: "15"
-#openshift_openstack_docker_etcd_volume_size: "2"
-#openshift_openstack_docker_lb_volume_size: "5"
+#openshift_openstack_master_volume_size: "15"
+#openshift_openstack_infra_volume_size: "15"
+#openshift_openstack_cns_volume_size: "15"
+#openshift_openstack_node_volume_size: "15"
+#openshift_openstack_etcd_volume_size: "2"
+#openshift_openstack_lb_volume_size: "5"
 openshift_openstack_docker_volume_size: "15"
 
 ## Specify server group policies for master and infra nodes. Nova must be configured to

+ 15 - 0
playbooks/openstack/scaleup_inventory.py

@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+"""
+This is an Ansible dynamic inventory for OpenStack, specifically for use with
+the scaling playbooks.
+
+It requires your OpenStack credentials to be set in clouds.yaml or your shell
+environment.
+
+"""
+
+import resources
+
+
+if __name__ == '__main__':
+    resources.main(resources.build_inventory)
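
As the docstring notes, this inventory is intended for the scaling playbooks. A usage
sketch, mirroring the command documented in `playbooks/openstack/configuration.md` above
(the `openshift` user and local `inventory` directory are the documented examples, not
requirements):

```bash
ansible-playbook --user openshift \
  -i openshift-ansible/playbooks/openstack/scaleup_inventory.py \
  -i inventory \
  openshift-ansible/playbooks/openstack/openshift-cluster/node-scaleup.yml
```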

+ 0 - 2
playbooks/redeploy-certificates.yml

@@ -5,8 +5,6 @@
 
 - import_playbook: openshift-master/private/redeploy-certificates.yml
 
-- import_playbook: openshift-node/private/redeploy-certificates.yml
-
 - import_playbook: openshift-etcd/private/restart.yml
   vars:
     g_etcd_certificates_expired: "{{ ('expired' in (hostvars | lib_utils_oo_select_keys(groups['etcd']) | lib_utils_oo_collect('check_results.check_results.etcd') | lib_utils_oo_collect('health'))) | bool }}"

+ 85 - 0
playbooks/rhv/README.md

@@ -0,0 +1,85 @@
+# RHV Playbooks
+## Provisioning
+This subdirectory contains the Ansible playbooks used to deploy
+an OpenShift Container Platform environment on Red Hat Virtualization.
+
+### Where do I start?
+Choose a host from which Ansible plays will be executed. This host must have
+the ability to access the web interface of the RHV cluster engine and the
+network on which the OpenShift nodes will be installed. We will refer to
+this host as the *bastion*.
+
+#### oVirt Ansible Roles
+The oVirt project maintains Ansible roles for managing an oVirt or RHV cluster.
+These should be installed on the *bastion* host according to the instructions
+at the [oVirt Ansible Roles page](https://github.com/ovirt/ovirt-ansible/).
+
+#### DNS Server
+An external DNS server is required to provide name resolution to nodes and
+applications. See the
+[OpenShift Installation Documentation](https://docs.openshift.com/container-platform/latest/install_config/install/prerequisites.html#prereq-dns)
+for details.
+
+### Let's Provision!
+#### High-level overview
+After populating inventory and variables files with the proper values,
+(see [The OpenShift Advanced Installation Documentation](https://docs.openshift.com/container-platform/latest/install_config/install/advanced_install.html)
+) a series of Ansible playbooks from this subdirectory will provision a set of
+nodes on the RHV (or oVirt) cluster, prepare them for OpenShift installation,
+and deploy an OpenShift cluster on them.
+
+#### Step 1 Inventory
+The [`inventory.example`](inventory.example) file here is provided as an example of a three-master, three-infra-node
+environment. It is up to the user to add additional OpenShift-specific variables to this file to configure
+required elements such as the registry, storage, authentication, and networking.
+
+One required variable added for this environment is `openshift_rhv_dns_zone`. Because it is used to construct
+hostnames during VM creation, it must be set to the default DNS zone for those nodes' hostnames.
+
+#### Step 2 RHV Provisioning Variables
+
+Fill out a provisioning variables file (see [`provisioning-vars.yaml.example`](provisioning-vars.yaml.example))
+with values from your RHV environment, making sure to fill in all commented values.
+
+*Red Hat Virtualization Certificate*
+
+A copy of the `/etc/pki/ovirt-engine/ca.pem` from the RHV engine will need to
+be downloaded to the *bastion* and its location set in the `engine_cafile` variable. Replace the
+example server in the following command to download the certificate:
+
+```
+$ curl --output ca.pem 'http://engine.example.com/ovirt-engine/services/pki-resource?resource=ca-certificate&format=X509-PEM-CA'
+
+```
+
+#### Step 3 Provision Virtual Machines in RHV
+Once all the variables in the `provisioning_vars.yaml` file are set, use the
+[`ovirt-vm-infra.yml`](openshift-cluster/ovirt-vm-infra.yml) playbook to begin
+provisioning.
+
+```
+ansible-playbook -i inventory -e@provisioning_vars.yml ${PATH_TO_OPENSHIFT_ANSIBLE}/playbooks/rhv/openshift-cluster/ovirt-vm-infra.yml
+```
+
+#### Step 4 Update DNS
+
+At this stage, ensure DNS is set up properly for the following access:
+
+* Nodes are available to each other by their hostnames.
+* The nodes running router services (typically the infrastructure nodes) are reachable by the wildcard entry.
+* The load balancer node is reachable as the openshift-master host entry for console access.
+
+#### Step 5 Install Prerequisite Services
+```
+ansible-playbook -i inventory ${PATH_TO_OPENSHIFT_ANSIBLE}/playbooks/prerequisites.yml
+```
+
+#### Step 6 Deploy OpenShift
+```
+ansible-playbook -i inventory ${PATH_TO_OPENSHIFT_ANSIBLE}/playbooks/deploy_cluster.yml
+```
+
+### Ready To Work!
+
+## Uninstall / Deprovisioning
+In case of a failed installation due to a missing variable, it is occasionally necessary to start from a fresh set of virtual machines. Uninstalling the virtual machines and reprovisioning them may be performed by running the [`openshift-cluster/unregister-vms.yml`](openshift-cluster/unregister-vms.yml) playbook (to recover RHSM entitlements) followed by the [`openshift-cluster/ovirt-vm-uninstall.yml`](openshift-cluster/ovirt-vm-uninstall.yml) playbook.
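
A sketch of those two uninstall commands, following the same invocation pattern as the
provisioning steps above (the `-e@provisioning_vars.yml` flag assumes the same variables
file from Step 3, which supplies the oVirt engine credentials):

```bash
# Recover RHSM entitlements from the nodes, then remove the virtual machines from RHV:
ansible-playbook -i inventory ${PATH_TO_OPENSHIFT_ANSIBLE}/playbooks/rhv/openshift-cluster/unregister-vms.yml
ansible-playbook -i inventory -e@provisioning_vars.yml ${PATH_TO_OPENSHIFT_ANSIBLE}/playbooks/rhv/openshift-cluster/ovirt-vm-uninstall.yml
```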

+ 61 - 0
playbooks/rhv/inventory.example

@@ -0,0 +1,61 @@
+[all:vars]
+openshift_rhv_dns_zone=  # example.com
+
+[OSEv3:children]
+nodes
+masters
+etcd
+lb
+
+[OSEv3:vars]
+# General variables
+ansible_user=root
+openshift_deployment_type=origin
+#openshift_deployment_type=openshift-enterprise
+openshift_enable_service_catalog=False
+
+# Hostnames
+load_balancer_hostname=lb0.{{openshift_rhv_dns_zone}}
+openshift_master_cluster_hostname="{{ load_balancer_hostname }}"
+openshift_master_cluster_public_hostname=openshift-master.{{ openshift_rhv_dns_zone }}
+openshift_master_default_subdomain=apps.{{ openshift_rhv_dns_zone }}
+openshift_public_hostname="{{openshift_master_cluster_public_hostname}}"
+
+# Docker setup for extra disks on nodes
+container_runtime_docker_storage_setup_device=/dev/vdb
+container_runtime_docker_storage_type=overlay2
+openshift_node_local_quota_per_fsgroup=512Mi
+
+[masters:vars]
+container_runtime_extra_storage="[{'device': '/dev/vdc', 'path': '/var/lib/origin/openshift.local.volumes', 'options': 'gquota', 'filesystem': 'xfs', 'format': 'True'}, {'device': '/dev/vdd', 'path': '/var/lib/etcd', 'hosts': 'masters', 'filesystem': 'xfs', 'format': 'True'}]"
+
+[nodes:vars]
+container_runtime_extra_storage="[{'device': '/dev/vdc', 'path': '/var/lib/origin/openshift.local.volumes', 'options': 'gquota', 'filesystem': 'xfs', 'format': 'True'}]"
+
+[masters]
+master0.example.com
+master1.example.com
+master2.example.com
+
+[etcd]
+master0.example.com
+master1.example.com
+master2.example.com
+
+[infras]
+infra0.example.com
+infra1.example.com
+infra2.example.com
+
+[lb]
+lb0.example.com
+
+[nodes]
+master0.example.com openshift_node_group_name=node-config-master
+master1.example.com openshift_node_group_name=node-config-master
+master2.example.com openshift_node_group_name=node-config-master
+infra0.example.com openshift_node_group_name=node-config-infra
+infra1.example.com openshift_node_group_name=node-config-infra
+infra2.example.com openshift_node_group_name=node-config-infra
+compute0.example.com openshift_node_group_name=node-config-compute
+# vim: set syntax=dosini

+ 33 - 0
playbooks/rhv/openshift-cluster/ovirt-vm-infra.yml

@@ -0,0 +1,33 @@
+---
+- name: Deploy oVirt template and virtual machines
+  hosts: localhost
+  connection: local
+  gather_facts: false
+
+  pre_tasks:
+    - name: Log in to oVirt
+      ovirt_auth:
+        url: "{{ engine_url }}"
+        username: "{{ engine_user }}"
+        password: "{{ engine_password }}"
+        ca_file: "{{ engine_cafile | default(omit) }}"
+        insecure: "{{ engine_insecure | default(true) }}"
+      tags:
+        - always
+    - name: Build virtual machine facts
+      import_role:
+        name: openshift_rhv
+        tasks_from: build_vm_list.yml
+
+  roles:
+    - oVirt.image-template
+    - oVirt.vm-infra
+
+  post_tasks:
+    - name: Logout from oVirt
+      ovirt_auth:
+        state: absent
+        ovirt_auth: "{{ ovirt_auth }}"
+      tags:
+        - always
+...

+ 37 - 0
playbooks/rhv/openshift-cluster/ovirt-vm-uninstall.yml

@@ -0,0 +1,37 @@
+---
+- name: Destroy oVirt VMs
+  hosts: localhost
+  connection: local
+  gather_facts: false
+
+  pre_tasks:
+    - name: Log in to oVirt
+      ovirt_auth:
+        url: "{{ engine_url }}"
+        username: "{{ engine_user }}"
+        password: "{{ engine_password }}"
+        ca_file: "{{ engine_cafile | default(omit) }}"
+        insecure: "{{ engine_insecure | default(true) }}"
+      tags:
+        - always
+    - name: Build virtual machine facts
+      import_role:
+        name: openshift_rhv
+        tasks_from: build_vm_list.yml
+
+  tasks:
+    - name: Erase vms
+      ovirt_vms:
+        auth: "{{ ovirt_auth }}"
+        state: absent
+        name: "{{ item.name }}"
+      with_items:
+        - "{{ ovirt_vm_infra_vms }}"
+
+  post_tasks:
+    - name: Logout from oVirt
+      ovirt_auth:
+        state: absent
+        ovirt_auth: "{{ ovirt_auth }}"
+      tags:
+        - always

+ 1 - 0
playbooks/rhv/openshift-cluster/roles

@@ -0,0 +1 @@
+../../../roles

+ 11 - 0
playbooks/rhv/openshift-cluster/unregister-vms.yml

@@ -0,0 +1,11 @@
+---
+- name: Unregister VMs
+  gather_facts: true
+  hosts:
+    - nodes
+    - lb
+  tasks:
+    - import_role:
+        name: rhel_unsubscribe
+      ignore_errors: yes
+...

+ 82 - 0
playbooks/rhv/provisioning-vars.yaml.example

@@ -0,0 +1,82 @@
+---
+###########################
+# Engine Connection
+###########################
+compatibility_version: 4.2
+engine_url:                     # https://engine.example.com/ovirt-engine/api
+engine_user:                    # admin@internal
+engine_password:                # secret
+
+# CA file copied from engine:/etc/pki/ovirt-engine/ca.pem
+# path is relative to openshift-cluster directory
+engine_cafile:                  # ../ca.pem
+
+data_center_name:               # Default
+openshift_rhv_cluster:          # Default
+openshift_rhv_data_store:       # vmstore
+openshift_rhv_ssh_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
+
+##########################
+# Template Creation
+# https://github.com/oVirt/ovirt-ansible-image-template
+##########################
+qcow_url:                       # https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2c
+image_path: "{{ lookup('env', 'HOME') }}/Downloads/{{ template_name }}.qcow2"
+template_name:                  # rhel75
+template_cluster: "{{ openshift_rhv_cluster }}"
+template_memory: 8GiB
+template_cpu: 1
+template_disk_storage: "{{ openshift_rhv_data_store }}"
+template_disk_size: 60GiB
+template_nics:
+  - name: nic1
+    profile_name:                # ovirtmgmt
+    interface: virtio
+
+##########################
+# Virtual Machines
+##########################
+wait_for_ip: true                # Required to gather IP addresses from machines after creation
+vm_infra_wait_for_ip_retries: 10 # Higher than default to wait for all IPs on all VMs
+vm_infra_wait_for_ip_delay: 8
+
+# Virtual machine profile for master nodes
+# Differs from node profile by 16GiB RAM, extra disk for etcd
+master_vm:
+  cluster: "{{ openshift_rhv_cluster }}"
+  template: "{{ template_name }}"
+  memory: 16GiB
+  cores: 2
+  high_availability: true
+  disks:
+    - size: 15GiB
+      storage_domain: "{{ openshift_rhv_data_store }}"
+      name: docker_disk
+      interface: virtio
+    - size: 30GiB
+      storage_domain: "{{ openshift_rhv_data_store }}"
+      name: localvol_disk
+      interface: virtio
+    - size: 25GiB
+      storage_domain: "{{ openshift_rhv_data_store }}"
+      name: etcd_disk
+      interface: virtio
+  state: running
+
+# Virtual Machine profile for rest of nodes
+node_vm:
+  cluster: "{{ openshift_rhv_cluster }}"
+  template: "{{ template_name }}"
+  memory: 8GiB
+  cores: 2
+  disks:
+    - size: 15GiB
+      storage_domain: "{{ openshift_rhv_data_store }}"
+      name: docker_disk
+      interface: virtio
+    - size: 30GiB
+      storage_domain: "{{ openshift_rhv_data_store }}"
+      name: localvol_disk
+      interface: virtio
+  state: running
+...

+ 1 - 1
requirements.txt

@@ -1,6 +1,6 @@
 # Versions are pinned to prevent pypi releases arbitrarily breaking
 # tests with new APIs/semantics. We want to update versions deliberately.
-ansible==2.6.0.0
+ansible==2.6.2
 boto==2.44.0
 click==6.7
 pyOpenSSL==17.5.0

+ 1 - 0
roles/ansible_service_broker/defaults/main.yml

@@ -21,6 +21,7 @@ ansible_service_broker_image_pull_policy: Always
 ansible_service_broker_sandbox_role: edit
 ansible_service_broker_auto_escalate: false
 ansible_service_broker_local_registry_whitelist: []
+ansible_service_broker_local_registry_namespaces: ["openshift"]
 
 l_asb_default_images_dict:
   origin: 'docker.io/ansibleplaybookbundle/origin-ansible-service-broker:latest'

+ 1 - 0
roles/ansible_service_broker/tasks/facts.yml

@@ -16,5 +16,6 @@
     ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}"
     ansible_service_broker_registry_tag: "{{ ansible_service_broker_registry_tag | default(__ansible_service_broker_registry_tag) }}"
     ansible_service_broker_registry_whitelist: "{{ ansible_service_broker_registry_whitelist | default(__ansible_service_broker_registry_whitelist) }}"
+    ansible_service_broker_registry_blacklist: "{{ ansible_service_broker_registry_blacklist | default(__ansible_service_broker_registry_blacklist) }}"
 
 - include_tasks: validate_facts.yml

+ 3 - 3
roles/ansible_service_broker/tasks/install.yml

@@ -74,7 +74,7 @@
     state: present
     name: asb-access
     rules:
-      - nonResourceURLs: ["/ansible-service-broker", "/ansible-service-broker/*"]
+      - nonResourceURLs: ["/osb", "/osb/*"]
         verbs: ["get", "post", "put", "patch", "delete"]
 
 - name: Bind admin cluster-role to asb serviceaccount
@@ -210,7 +210,7 @@
     kind: ConfigMap
     content:
       path: /tmp/cmout
-      data: "{{ lookup('template', 'configmap.yaml.j2') | from_yaml }}"
+      data: "{{ ansible_service_broker_full_broker_config_map | default(lookup('template', 'configmap.yaml.j2') | from_yaml) }}"
 
 - oc_secret:
     name: asb-registry-auth
@@ -236,7 +236,7 @@
         metadata:
           name: ansible-service-broker
         spec:
-          url: https://asb.openshift-ansible-service-broker.svc:1338/ansible-service-broker
+          url: https://asb.openshift-ansible-service-broker.svc:1338/osb
           authInfo:
             bearer:
               secretRef:

+ 6 - 1
roles/ansible_service_broker/tasks/remove.yml

@@ -46,6 +46,11 @@
     state: absent
     name: asb-access
 
+- name: remove asb-user-access cluster role
+  oc_clusterrole:
+    state: absent
+    name: asb-user-access
+
 - name: remove asb-registry auth secret
   oc_secret:
     state: absent
@@ -118,7 +123,7 @@
     name: ansible-service-broker
     state: absent
     kind: ClusterServiceBroker
-  when: not "'not found' in get_apiservices.stdout"
+  when: not('not found' in get_apiservices.stdout)
 
 - name: remove openshift-ansible-service-broker project
   oc_project:

+ 2 - 1
roles/ansible_service_broker/templates/configmap.yaml.j2

@@ -15,12 +15,13 @@ data:
         org:  {{ ansible_service_broker_registry_organization }}
         tag:  {{ ansible_service_broker_registry_tag }}
         white_list: {{  ansible_service_broker_registry_whitelist | to_yaml }}
+        black_list: {{  ansible_service_broker_registry_blacklist | to_yaml }}
         auth_type: "{{ ansible_service_broker_registry_auth_type | default("") }}"
         auth_name: "{{ ansible_service_broker_registry_auth_name | default("") }}"
       - type: local_openshift
         name: localregistry
-        namespaces: ['openshift']
         white_list: {{ ansible_service_broker_local_registry_whitelist | to_yaml }}
+        namespaces: {{ ansible_service_broker_local_registry_namespaces | to_yaml }}
     dao:
       type: crd
     log:

+ 2 - 0
roles/ansible_service_broker/vars/default_images.yml

@@ -7,3 +7,5 @@ __ansible_service_broker_registry_password: null
 __ansible_service_broker_registry_organization: ansibleplaybookbundle
 __ansible_service_broker_registry_tag: latest
 __ansible_service_broker_registry_whitelist: []
+__ansible_service_broker_registry_blacklist:
+  - '.*automation-broker-apb$'

+ 2 - 0
roles/ansible_service_broker/vars/openshift-enterprise.yml

@@ -8,3 +8,5 @@ __ansible_service_broker_registry_organization: null
 __ansible_service_broker_registry_tag: "{{ openshift_image_tag }}"
 __ansible_service_broker_registry_whitelist:
   - '.*-apb$'
+__ansible_service_broker_registry_blacklist:
+  - '.*automation-broker-apb$'

+ 18 - 1
roles/cockpit-ui/defaults/main.yml

@@ -1,2 +1,19 @@
 ---
-openshift_config_base: "/etc/origin"
+l_os_cockpit_image_version_dict:
+  origin: 'latest'
+  openshift-enterprise: "{{ openshift_image_tag }}"
+l_os_cockpit_image_version: "{{ l_os_cockpit_image_version_dict[openshift_deployment_type] }}"
+
+l_os_cockpit_image_format: "{{ l_os_non_standard_reg_url | regex_replace('${version}' | regex_escape, l_os_cockpit_image_version) }}"
+
+l_openshift_cockit_search_dict:
+  origin: "openshift/origin-${component}"
+  openshift-enterprise: "ose-${component}"
+l_openshift_cockit_search: "{{ l_openshift_cockit_search_dict[openshift_deployment_type] }}"
+
+l_openshift_cockpit_replace_dict:
+  origin: "cockpit/kubernetes"
+  openshift-enterprise: "registry-console"
+l_openshift_cockpit_replace: "{{ l_openshift_cockpit_replace_dict[openshift_deployment_type] }}"
+
+openshift_cockpit_deployer_image: "{{ l_os_cockpit_image_format | regex_replace(l_openshift_cockit_search | regex_escape, l_openshift_cockpit_replace) }}"

+ 4 - 10
roles/openshift_hosted_templates/files/v3.10/origin/registry-console.yaml

@@ -29,7 +29,7 @@ objects:
             node-role.kubernetes.io/master: 'true'
           containers:
             - name: registry-console
-              image: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
+              image: ${IMAGE_NAME}
               ports:
                 - containerPort: 9090
                   protocol: TCP
@@ -89,15 +89,9 @@ objects:
     redirectURIs:
       - "${COCKPIT_KUBE_URL}"
 parameters:
-  - description: 'Specify "registry/namespace" prefix for container image; e.g. for "registry.example.com/cockpit/kubernetes:latest", set prefix "registry.example.com/cockpit/"'
-    name: IMAGE_PREFIX
-    value: "cockpit/"
-  - description: 'Specify component name for container image; e.g. for "registry.example.com/cockpit/kubernetes:latest", use base name "kubernetes"'
-    name: IMAGE_BASENAME
-    value: "kubernetes"
-  - description: 'Specify image version; e.g. for "cockpit/kubernetes:latest", set version "latest"'
-    name: IMAGE_VERSION
-    value: latest
+  - description: 'Specify the fully qualified image name and version; e.g. "registry.access.redhat.com/openshift3/registry-console:v3.11"'
+    name: IMAGE_NAME
+    value: "openshift3/registry-console:latest"
   - description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443"
     name: OPENSHIFT_OAUTH_PROVIDER_URL
     required: true

+ 61 - 55
roles/cockpit-ui/tasks/main.yml

@@ -1,63 +1,69 @@
 ---
-- block:
+- name: Create local temp dir for registry-console template
+  command: mktemp -d /tmp/openshift-ansible-XXXXXXX
+  register: mktemp
+  # AUDIT:changed_when: not set here because this task actually
+  # creates something
 
-  # When openshift_hosted_manage_registry=true the openshift_hosted
-  # role will create the appropriate route for the docker-registry.
-  # When openshift_hosted_manage_registry=false then this code will
-  # not be run.
-  - name: fetch the docker-registry route
-    oc_route:
-      kubeconfig: "/etc/origin/master/admin.kubeconfig"
-      name: docker-registry
-      namespace: default
-      state: list
-    register: docker_registry_route
+- name: Copy the admin client config(s)
+  command: >
+    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+  changed_when: False
 
-  - name: Create passthrough route for registry-console
-    oc_route:
-      kubeconfig: "/etc/origin/master/admin.kubeconfig"
-      name: registry-console
-      namespace: default
-      service_name: registry-console
-      state: present
-      tls_termination: passthrough
-    register: registry_console_cockpit_kube
+- name: Copy registry-console template to tmp dir
+  copy:
+    src: "registry-console.yaml"
+    dest: "{{ mktemp.stdout }}/registry-console.yaml"
 
-  # XXX: Required for items still using command
-  - name: Create temp directory for kubeconfig
-    command: mktemp -d /tmp/openshift-ansible-XXXXXX
-    register: mktemp
-    changed_when: False
+- name: Create registry-console template
+  command: >
+    {{ openshift_client_binary }} create
+    -f {{ mktemp.stdout }}/registry-console.yaml
+    --config={{ mktemp.stdout }}/admin.kubeconfig
+    -n openshift
+  register: oht_import_templates
+  failed_when: "'already exists' not in oht_import_templates.stderr and oht_import_templates.rc != 0"
+  changed_when: "'created' in oht_import_templates.stdout"
 
-  - set_fact:
-      openshift_hosted_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+# When openshift_hosted_manage_registry=true the openshift_hosted
+# role will create the appropriate route for the docker-registry.
+# When openshift_hosted_manage_registry=false then this code will
+# not be run.
+- name: fetch the docker-registry route
+  oc_route:
+    kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+    name: docker-registry
+    namespace: default
+    state: list
+  register: docker_registry_route
 
-  - name: Copy the admin client config(s)
-    command: >
-      cp /etc/origin/master/admin.kubeconfig {{ openshift_hosted_kubeconfig }}
-    changed_when: False
+- name: Create passthrough route for registry-console
+  oc_route:
+    kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+    name: registry-console
+    namespace: default
+    service_name: registry-console
+    state: present
+    tls_termination: passthrough
+  register: registry_console_cockpit_kube
 
-  - name: Deploy registry-console
-    command: >
-      {{ openshift_client_binary }} new-app --template=registry-console
-      {% if openshift_cockpit_deployer_prefix is defined  %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
-      {% if openshift_cockpit_deployer_basename is defined  %}-p IMAGE_BASENAME="{{ openshift_cockpit_deployer_basename }}"{% endif %}
-      {% if openshift_cockpit_deployer_version is defined  %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
-      -p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
-      -p REGISTRY_HOST="{{ docker_registry_route.results[0].spec.host }}"
-      -p COCKPIT_KUBE_URL="https://{{ registry_console_cockpit_kube.results.results[0].spec.host }}"
-      --config={{ openshift_hosted_kubeconfig }}
-      -n default
-    register: deploy_registry_console
-    changed_when: "'already exists' not in deploy_registry_console.stderr"
-    failed_when:
-    - "'already exists' not in deploy_registry_console.stderr"
-    - "deploy_registry_console.rc != 0"
+- name: Deploy registry-console
+  command: >
+    {{ openshift_client_binary }} new-app --template=registry-console
+    -p IMAGE_NAME="{{ openshift_cockpit_deployer_image }}"
+    -p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
+    -p REGISTRY_HOST="{{ docker_registry_route.results[0].spec.host }}"
+    -p COCKPIT_KUBE_URL="https://{{ registry_console_cockpit_kube.results.results[0].spec.host }}"
+    --config={{ mktemp.stdout }}/admin.kubeconfig
+    -n default
+  register: deploy_registry_console
+  changed_when: "'already exists' not in deploy_registry_console.stderr"
+  failed_when:
+  - "'already exists' not in deploy_registry_console.stderr"
+  - "deploy_registry_console.rc != 0"
 
-  - name: Delete temp directory
-    file:
-      name: "{{ mktemp.stdout }}"
-      state: absent
-    changed_when: False
-    # XXX: End required for items still using command
-  run_once: true
+- name: Delete temp directory
+  file:
+    name: "{{ mktemp.stdout }}"
+    state: absent
+  changed_when: False

+ 0 - 2
roles/container_runtime/tasks/package_docker.yml

@@ -26,8 +26,6 @@
     state: present
   when:
   - not (openshift_is_atomic | bool)
-  - not (curr_docker_version is skipped)
-  - not (curr_docker_version.stdout != '')
   register: result
   until: result is succeeded
   vars:

+ 6 - 0
roles/etcd/defaults/main.yaml

@@ -2,6 +2,8 @@
 r_etcd_common_backup_tag: ''
 r_etcd_common_backup_sufix_name: ''
 
+l_etcd_bootstrapped: '{{ openshift.node.bootstrapped }}'
+
 l_etcd_static_pod: "{{ (inventory_hostname in groups['oo_masters']) | bool }}"
 
 # runc, docker, static pod, host
@@ -56,6 +58,10 @@ etcd_ca_serial: "{{ etcd_ca_dir }}/serial"
 etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber"
 etcd_ca_default_days: 1825
 
+r_etcd_common_master_peer_cert_file: /etc/origin/master/master.etcd-client.crt
+r_etcd_common_master_peer_key_file: /etc/origin/master/master.etcd-client.key
+r_etcd_common_master_peer_ca_file: /etc/origin/master/master.etcd-ca.crt
+
 # etcd server & certificate vars
 etcd_hostname: "{{ openshift.common.hostname }}"
 etcd_ip: "{{ openshift.common.ip }}"

+ 1 - 0
roles/kuryr/README.md

@@ -42,6 +42,7 @@ pods. This allows to have interconnectivity between pods and OpenStack VMs.
 * ``openshift_kuryr_precreate_subports=5``
 * ``openshift_kuryr_device_owner=compute:kuryr``
 * ``openshift_kuryr_subnet_driver=default``
+* ``openshift_kuryr_sg_driver=default``
 
 ## OpenShift API loadbalancer
 

+ 8 - 2
roles/kuryr/templates/configmap.yaml.j2

@@ -222,10 +222,10 @@ data:
     service_subnets_driver = default
 
     # The driver to determine Neutron security groups for pods (string value)
-    pod_security_groups_driver = default
+    pod_security_groups_driver = {{ openshift_kuryr_sg_driver|default('default') }}
 
     # The driver to determine Neutron security groups for services (string value)
-    service_security_groups_driver = default
+    service_security_groups_driver = {{ openshift_kuryr_sg_driver|default('default') }}
 
     # The driver that provides VIFs for Kubernetes Pods. (string value)
     pod_vif_driver = nested-vlan
@@ -318,6 +318,12 @@ data:
     pod_router = {{ kuryr_openstack_pod_router_id }}
 {% endif %}
 
+{% if openshift_kuryr_sg_driver|default('default') == 'namespace' %}
+    [namespace_sg]
+    sg_allow_from_namespaces = {{ kuryr_openstack_sg_allow_from_namespace_id }}
+    sg_allow_from_default = {{ kuryr_openstack_sg_allow_from_default_id }}
+{% endif %}
+
     # Time (in seconds) that Kuryr controller waits for LBaaS to be activated
     lbaas_activation_timeout = 1200
 

+ 1 - 1
roles/lib_openshift/library/oc_service.py

@@ -119,7 +119,7 @@ options:
   portalip:
     description:
     - The portal ip(virtual ip) address to use with this service.
-    - "https://docs.openshift.com/enterprise/3.0/architecture/core_concepts/pods_and_services.html#services"
+    - "https://docs.openshift.com/container-platform/latest/architecture/core_concepts/pods_and_services.html#services"
     required: false
     default: None
     aliases: []

+ 1 - 1
roles/lib_openshift/src/doc/service

@@ -66,7 +66,7 @@ options:
   portalip:
     description:
     - The portal ip(virtual ip) address to use with this service.
-    - "https://docs.openshift.com/enterprise/3.0/architecture/core_concepts/pods_and_services.html#services"
+    - "https://docs.openshift.com/container-platform/latest/architecture/core_concepts/pods_and_services.html#services"
     required: false
     default: None
     aliases: []

+ 49 - 4
roles/lib_utils/action_plugins/sanity_checks.py

@@ -66,7 +66,52 @@ REMOVED_VARIABLES = (
     ('oreg_auth_credentials_replace', 'Removed: Credentials are now always updated'),
     ('oreg_url_master', 'oreg_url'),
     ('oreg_url_node', 'oreg_url'),
-
+    ('openshift_cockpit_deployer_prefix', 'openshift_cockpit_deployer_image'),
+    ('openshift_cockpit_deployer_basename', 'openshift_cockpit_deployer_image'),
+    ('openshift_cockpit_deployer_version', 'openshift_cockpit_deployer_image'),
+    ('openshift_hosted_logging_elasticsearch_pvc_prefix', 'openshift_logging_es_pvc_prefix'),
+    ('logging_ops_hostname', 'openshift_logging_kibana_ops_hostname'),
+    ('openshift_hosted_logging_ops_hostname', 'openshift_logging_kibana_ops_hostname'),
+    ('openshift_hosted_logging_elasticsearch_cluster_size', 'logging_elasticsearch_cluster_size'),
+    ('openshift_hosted_logging_elasticsearch_ops_cluster_size', 'logging_elasticsearch_ops_cluster_size'),
+    ('openshift_hosted_logging_storage_kind', 'openshift_logging_storage_kind'),
+    ('openshift_hosted_logging_storage_host', 'openshift_logging_storage_host'),
+    ('openshift_hosted_logging_storage_labels', 'openshift_logging_storage_labels'),
+    ('openshift_hosted_logging_storage_volume_size', 'openshift_logging_storage_volume_size'),
+    ('openshift_hosted_loggingops_storage_kind', 'openshift_loggingops_storage_kind'),
+    ('openshift_hosted_loggingops_storage_host', 'openshift_loggingops_storage_host'),
+    ('openshift_hosted_loggingops_storage_labels', 'openshift_loggingops_storage_labels'),
+    ('openshift_hosted_loggingops_storage_volume_size', 'openshift_loggingops_storage_volume_size'),
+    ('openshift_hosted_logging_enable_ops_cluster', 'openshift_logging_use_ops'),
+    ('openshift_hosted_logging_image_pull_secret', 'openshift_logging_image_pull_secret'),
+    ('openshift_hosted_logging_hostname', 'openshift_logging_kibana_hostname'),
+    ('openshift_hosted_logging_kibana_nodeselector', 'openshift_logging_kibana_nodeselector'),
+    ('openshift_hosted_logging_kibana_ops_nodeselector', 'openshift_logging_kibana_ops_nodeselector'),
+    ('openshift_hosted_logging_journal_source', 'openshift_logging_fluentd_journal_source'),
+    ('openshift_hosted_logging_journal_read_from_head', 'openshift_logging_fluentd_journal_read_from_head'),
+    ('openshift_hosted_logging_fluentd_nodeselector_label', 'openshift_logging_fluentd_nodeselector'),
+    ('openshift_hosted_logging_elasticsearch_instance_ram', 'openshift_logging_es_memory_limit'),
+    ('openshift_hosted_logging_elasticsearch_nodeselector', 'openshift_logging_es_nodeselector'),
+    ('openshift_hosted_logging_elasticsearch_ops_nodeselector', 'openshift_logging_es_ops_nodeselector'),
+    ('openshift_hosted_logging_elasticsearch_ops_instance_ram', 'openshift_logging_es_ops_memory_limit'),
+    ('openshift_hosted_logging_storage_access_modes', 'openshift_logging_storage_access_modes'),
+    ('openshift_hosted_logging_master_public_url', 'openshift_logging_master_public_url'),
+    ('openshift_hosted_logging_deployer_prefix', 'openshift_logging_image_prefix'),
+    ('openshift_hosted_logging_deployer_version', 'openshift_logging_image_version'),
+    ('openshift_hosted_logging_deploy', 'openshift_logging_install_logging'),
+    ('openshift_hosted_logging_curator_nodeselector', 'openshift_logging_curator_nodeselector'),
+    ('openshift_hosted_logging_curator_ops_nodeselector', 'openshift_logging_curator_ops_nodeselector'),
+    ('openshift_hosted_metrics_storage_access_modes', 'openshift_metrics_storage_access_modes'),
+    ('openshift_hosted_metrics_storage_host', 'openshift_metrics_storage_host'),
+    ('openshift_hosted_metrics_storage_nfs_directory', 'openshift_metrics_storage_nfs_directory'),
+    ('openshift_hosted_metrics_storage_volume_name', 'openshift_metrics_storage_volume_name'),
+    ('openshift_hosted_metrics_storage_volume_size', 'openshift_metrics_storage_volume_size'),
+    ('openshift_hosted_metrics_storage_labels', 'openshift_metrics_storage_labels'),
+    ('openshift_hosted_metrics_deployer_prefix', 'openshift_metrics_image_prefix'),
+    ('openshift_hosted_metrics_deployer_version', 'openshift_metrics_image_version'),
+    ('openshift_hosted_metrics_deploy', 'openshift_metrics_install_metrics'),
+    ('openshift_hosted_metrics_storage_kind', 'openshift_metrics_storage_kind'),
+    ('openshift_hosted_metrics_public_url', 'openshift_metrics_hawkular_hostname'),
 )
 
 # JSON_FORMAT_VARIABLES does not intend to cover all json variables, but
@@ -379,15 +424,15 @@ class ActionModule(ActionBase):
                 json_var = self.template_var(hostvars, host, var)
                 try:
                     json.loads(json_var)
-                except ValueError:
-                    found_invalid_json.append([var, json_var])
+                except ValueError as json_err:
+                    found_invalid_json.append([var, json_var, json_err])
                 except BaseException:
                     pass
 
         if found_invalid_json:
             msg = "Found invalid json format variables:\n"
             for item in found_invalid_json:
-                msg += "    {} specified in {} is invalid json format\n".format(item[1], item[0])
+                msg += "    {} specified in {} is invalid json format\n    {}".format(item[1], item[0], item[2])
             raise errors.AnsibleModuleError(msg)
         return None
 

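The functional change in this hunk is that the ValueError raised by json.loads is now carried into the failure message. A standalone sketch of that pattern, assuming nothing beyond what the hunk shows (the helper name and the sample variable are illustrative, not the plugin's API):

    import json

    def check_json_vars(host_vars):
        # Collect [var, value, error] triples for values that fail
        # json.loads, then raise one combined message, mirroring the diff.
        found_invalid_json = []
        for var, json_var in host_vars.items():
            try:
                json.loads(json_var)
            except ValueError as json_err:
                found_invalid_json.append([var, json_var, json_err])
        if found_invalid_json:
            msg = "Found invalid json format variables:\n"
            for item in found_invalid_json:
                msg += "    {} specified in {} is invalid json format\n    {}\n".format(
                    item[1], item[0], item[2])
            raise ValueError(msg)

    check_json_vars({"openshift_builddefaults_json": '{"kind": "BuildDefaultsConfig"}'})  # passes
    try:
        check_json_vars({"openshift_builddefaults_json": '{"kind": '})
    except ValueError as err:
        print(err)  # message now includes the parser detail, e.g. "Expecting value: line 1 column 10"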
+ 1 - 1
roles/lib_utils/callback_plugins/aa_version_requirement.py

@@ -29,7 +29,7 @@ else:
 
 
 # Set to minimum required Ansible version
-REQUIRED_VERSION = '2.6.0'
+REQUIRED_VERSION = '2.6.2'
 DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION
 
 

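For context, a hedged sketch of the gate this bump tightens: abort when the installed Ansible is older than REQUIRED_VERSION. This is not the callback plugin's actual code, only the comparison the version constant implies.

    from distutils.version import LooseVersion

    from ansible import __version__ as ansible_version  # provided by the installed Ansible

    REQUIRED_VERSION = '2.6.2'

    if LooseVersion(ansible_version) < LooseVersion(REQUIRED_VERSION):
        raise SystemExit(
            "Unsupported Ansible {}; supported versions: {} or newer".format(
                ansible_version, REQUIRED_VERSION))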
+ 15 - 0
roles/lib_utils/filter_plugins/oo_filters.py

@@ -668,6 +668,20 @@ def lib_utils_oo_oreg_image(image_default, oreg_url):
     return '/'.join([oreg_parts[0], image_parts[1], image_parts[2]])
 
 
+def lib_utils_oo_list_of_dict_to_dict_from_key(input_list, keyname):
+    '''Converts a list of dictionaries to a dictionary with keyname: dictionary
+
+       Example input: [{'name': 'first', 'url': 'x.com'}, {'name': 'second', 'url': 'y.com'}],
+                      'name'
+       Example output: {'first': {'url': 'x.com', 'name': 'first'}, 'second': {'url': 'y.com', 'name': 'second'}}'''
+    output_dict = {}
+    for item in input_list:
+        retrieved_val = item.get(keyname)
+        if retrieved_val is not None:
+            output_dict[retrieved_val] = item
+    return output_dict
+
+
 class FilterModule(object):
     """ Custom ansible filter mapping """
 
@@ -701,4 +715,5 @@ class FilterModule(object):
             "lib_utils_oo_etcd_host_urls": lib_utils_oo_etcd_host_urls,
             "lib_utils_mutate_htpass_provider": lib_utils_mutate_htpass_provider,
             "lib_utils_oo_oreg_image": lib_utils_oo_oreg_image,
+            "lib_utils_oo_list_of_dict_to_dict_from_key": lib_utils_oo_list_of_dict_to_dict_from_key,
         }
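A standalone copy of the new filter with the docstring's example, runnable outside Ansible to verify the re-keying behaviour (the registry list is illustrative):

    def lib_utils_oo_list_of_dict_to_dict_from_key(input_list, keyname):
        # Re-key a list of dicts by the value each dict holds under
        # `keyname`; entries missing that key are skipped.
        output_dict = {}
        for item in input_list:
            retrieved_val = item.get(keyname)
            if retrieved_val is not None:
                output_dict[retrieved_val] = item
        return output_dict

    registries = [{'name': 'first', 'url': 'x.com'},
                  {'name': 'second', 'url': 'y.com'}]
    by_name = lib_utils_oo_list_of_dict_to_dict_from_key(registries, 'name')
    print(by_name['first']['url'])   # x.com
    print(by_name['second']['url'])  # y.com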

+ 24 - 11
roles/nuage_master/handlers/main.yaml

@@ -1,15 +1,28 @@
 ---
-- name: restart master api
-  systemd: name={{ openshift_service_type }}-master-api state=restarted
-  when: (not master_api_service_status_changed | default(false))
-
-# TODO: need to fix up ignore_errors here
-# We retry the controllers because the API may not be 100% initialized yet.
-- name: restart master controllers
-  command: "systemctl restart {{ openshift_service_type }}-master-controllers"
-  retries: 3
+- name: restart master
+  command: /usr/local/bin/master-restart "{{ item }}"
+  with_items:
+  - api
+  - controllers
+  retries: 5
   delay: 5
   register: result
   until: result.rc == 0
-  when: (not master_controllers_service_status_changed | default(false))
-  ignore_errors: yes
+  notify: verify API server
+
+- name: verify API server
+  # Using curl here since the uri module requires python-httplib2 and
+  # wait_for port doesn't provide health information.
+  command: >
+    curl --silent --tlsv1.2 --max-time 2
+    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+    {{ openshift.master.api_url }}/healthz/ready
+  args:
+    # Disables the following warning:
+    # Consider using get_url or uri module rather than running curl
+    warn: no
+  register: l_api_available_output
+  until: l_api_available_output.stdout == 'ok'
+  retries: 120
+  delay: 1
+  changed_when: false
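The new "verify API server" handler polls {{ openshift.master.api_url }}/healthz/ready with curl until the body is exactly "ok" (up to 120 tries, one second apart). A rough Python equivalent of that wait, with placeholder URL and CA paths, assuming nothing about the handler beyond what is shown above:

    import ssl
    import time
    import urllib.request

    def wait_for_api(api_url, ca_file, retries=120, delay=1):
        # Poll <api_url>/healthz/ready until the response body is "ok".
        ctx = ssl.create_default_context(cafile=ca_file)
        for _ in range(retries):
            try:
                with urllib.request.urlopen(api_url + "/healthz/ready",
                                            timeout=2, context=ctx) as resp:
                    if resp.read().decode() == "ok":
                        return True
            except OSError:
                pass  # API not up yet; retry after a short delay
            time.sleep(delay)
        return False

    # wait_for_api("https://master.example.com:8443", "/etc/origin/master/ca-bundle.crt")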

+ 1 - 2
roles/nuage_master/tasks/main.yaml

@@ -147,6 +147,5 @@
 - name: Restart daemons
   command: /bin/true
   notify:
-    - restart master api
-    - restart master controllers
+    - restart master
   ignore_errors: true

The file diff has been suppressed because it is too large
+ 1 - 4
roles/olm/files/08-ocs.configmap.yaml


The file diff has been suppressed because it is too large
+ 0 - 1810
roles/olm/files/08-tectonicocs.configmap.yaml


+ 0 - 19
roles/olm/files/10-tectonicocs.catalogsource.yaml

@@ -1,19 +0,0 @@
-##---
-# Source: olm/templates/10-tectonicocs.catalogsource.yaml
-
-#! validate-crd: ./deploy/chart/templates/05-catalogsource.crd.yaml
-#! parse-kind: CatalogSource
-apiVersion: app.coreos.com/v1alpha1
-kind: CatalogSource-v1
-metadata:
-  name: tectonic-ocs
-  namespace: operator-lifecycle-manager
-  annotations:
-    tectonic-operators.coreos.com/upgrade-strategy: 'DeleteAndRecreate'
-spec:
-  name: tectonic-ocs
-  sourceType: internal
-  configMap: tectonic-ocs
-  displayName: Tectonic Open Cloud Services
-  publisher: CoreOS, Inc.
-

+ 0 - 20
roles/olm/files/14-alm-servicebroker.clusterservicebroker.yaml

@@ -1,20 +0,0 @@
-##---
-# Source: olm/templates/14-alm-servicebroker.clusterservicebroker.yaml
-
-apiVersion: servicecatalog.k8s.io/v1beta1
-kind: ClusterServiceBroker
-metadata:
-  finalizers:
-  - kubernetes-incubator/service-catalog
-  name: alm-service-broker
-spec:
-  authInfo:
-    bearer:
-      secretRef:
-        name: almservicebroker-client
-        namespace: operator-lifecycle-manager
-  relistBehavior: Duration
-  relistDuration: 15m0s
-  relistRequests: 0
-  url: http://alm-service-broker.operator-lifecycle-manager.svc:8005
-

+ 0 - 21
roles/olm/files/15-alm-servicebroker.service.yaml

@@ -1,21 +0,0 @@
-##---
-# Source: olm/templates/15-alm-servicebroker.service.yaml
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: alm-service-broker
-  namespace: operator-lifecycle-manager
-  labels:
-    app: alm-service-broker
-    tectonic-operators.coreos.com/managed-by: tectonic-x-operator
-spec:
-  type: ClusterIP
-  ports:
-    - name: healthz
-      port: 8080
-    - name: broker
-      port: 8005
-  selector:
-    app: alm-service-broker
-

+ 0 - 13
roles/olm/files/16-almservicebroker-client.secret.yaml

@@ -1,13 +0,0 @@
-##---
-# Source: olm/templates/16-almservicebroker-client.secret.yaml
-
-apiVersion: v1
-kind: Secret
-metadata:
-  name: almservicebroker-client
-  namespace: operator-lifecycle-manager
-  annotations:
-    kubernetes.io/service-account.name: alm-operator-serviceaccount
-    tectonic-operators.coreos.com/managed-by: tectonic-x-operator
-type: kubernetes.io/service-account-token
-

+ 0 - 44
roles/olm/files/17-alm-servicebroker.deployment.yaml

@@ -1,44 +0,0 @@
-##---
-# Source: olm/templates/17-alm-servicebroker.deployment.yaml
-
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: alm-service-broker
-  namespace: operator-lifecycle-manager
-  labels:
-    app: alm-service-broker
-    tectonic-operators.coreos.com/managed-by: tectonic-x-operator
-spec:
-  strategy:
-    type: RollingUpdate
-  replicas: 1
-  selector:
-    matchLabels:
-      app: alm-service-broker
-  template:
-    metadata:
-      labels:
-        app: alm-service-broker
-    spec:
-      serviceAccountName: alm-operator-serviceaccount
-      containers:
-        - name: alm-service-broker
-          command:
-          - /bin/servicebroker
-          - '-debug'
-          image: quay.io/coreos/alm-service-broker@sha256:3f7bc4b3ead4372df6f455f4f4f791c241e6e47b5fbdf2296de257282a8aec80
-          imagePullPolicy: IfNotPresent
-          ports:
-            - containerPort: 8080
-          livenessProbe:
-            httpGet:
-              path: /healthz
-              port: 8080
-          readinessProbe:
-            httpGet:
-              path: /healthz
-              port: 8080
-      imagePullSecrets:
-        - name: coreos-pull-secret
-

+ 0 - 462
roles/olm/files/18-upstreamcomponents.configmap.yaml

@@ -1,462 +0,0 @@
-##---
-# Source: olm/templates/18-upstreamcomponents.configmap.yaml
-
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: upstream-components
-  namespace: operator-lifecycle-manager
-  labels:
-    tectonic-operators.coreos.com/managed-by: tectonic-x-operator
-
-data:
-  customResourceDefinitions: |-
-    - apiVersion: apiextensions.k8s.io/v1beta1
-      kind: CustomResourceDefinition
-      metadata:
-        name: meterings.chargeback.coreos.com
-        annotations:
-          catalog.app.coreos.com/description: An instance of Chargeback
-          catalog.app.coreos.com/displayName: Chargeback
-      spec:
-        group: chargeback.coreos.com
-        version: v1alpha1
-        scope: Namespaced
-        names:
-          plural: meterings
-          singular: metering
-          kind: Metering
-          listKind: MeteringList
-      
-    - apiVersion: apiextensions.k8s.io/v1beta1
-      kind: CustomResourceDefinition
-      metadata:
-        name: prestotables.chargeback.coreos.com
-        annotations:
-          catalog.app.coreos.com/displayName: "Chargeback Presto Table"
-          catalog.app.coreos.com/description: "A table within PrestoDB"
-      spec:
-        group: chargeback.coreos.com
-        version: v1alpha1
-        scope: Namespaced
-        names:
-          plural: prestotables
-          singular: prestotable
-          kind: PrestoTable
-      
-    - apiVersion: apiextensions.k8s.io/v1beta1
-      kind: CustomResourceDefinition
-      metadata:
-        name: reports.chargeback.coreos.com
-        annotations:
-          catalog.app.coreos.com/displayName: "Chargeback Report"
-          catalog.app.coreos.com/description: "A chargeback report for a specific time interval"
-      spec:
-        group: chargeback.coreos.com
-        version: v1alpha1
-        scope: Namespaced
-        names:
-          plural: reports
-          kind: Report
-      
-    - apiVersion: apiextensions.k8s.io/v1beta1
-      kind: CustomResourceDefinition
-      metadata:
-        name: reportdatasources.chargeback.coreos.com
-        annotations:
-          catalog.app.coreos.com/displayName: "Chargeback data source"
-          catalog.app.coreos.com/description: "A resource describing a source of data for usage by Report Generation Queries"
-      spec:
-        group: chargeback.coreos.com
-        version: v1alpha1
-        scope: Namespaced
-        names:
-          plural: reportdatasources
-          singular: reportdatasource
-          kind: ReportDataSource
-      
-    - apiVersion: apiextensions.k8s.io/v1beta1
-      kind: CustomResourceDefinition
-      metadata:
-        name: reportgenerationqueries.chargeback.coreos.com
-        annotations:
-          catalog.app.coreos.com/displayName: "Chargeback generation query"
-          catalog.app.coreos.com/description: "A SQL query used by Chargeback to generate reports"
-      spec:
-        group: chargeback.coreos.com
-        version: v1alpha1
-        scope: Namespaced
-        names:
-          plural: reportgenerationqueries
-          singular: reportgenerationquery
-          kind: ReportGenerationQuery
-      
-    - apiVersion: apiextensions.k8s.io/v1beta1
-      kind: CustomResourceDefinition
-      metadata:
-        name: reportprometheusqueries.chargeback.coreos.com
-        annotations:
-          catalog.app.coreos.com/displayName: "Chargeback prometheus query"
-          catalog.app.coreos.com/description: "A Prometheus query by Chargeback to do metering"
-      spec:
-        group: chargeback.coreos.com
-        version: v1alpha1
-        scope: Namespaced
-        names:
-          plural: reportprometheusqueries
-          singular: reportprometheusquery
-          kind: ReportPrometheusQuery
-      
-    - apiVersion: apiextensions.k8s.io/v1beta1
-      kind: CustomResourceDefinition
-      metadata:
-        name: scheduledreports.chargeback.coreos.com
-        annotations:
-          catalog.app.coreos.com/displayName: "Chargeback Scheduled Report"
-          catalog.app.coreos.com/description: "A chargeback report that runs on a scheduled interval"
-      spec:
-        group: chargeback.coreos.com
-        version: v1alpha1
-        scope: Namespaced
-        names:
-          plural: scheduledreports
-          kind: ScheduledReport
-      
-    - apiVersion: apiextensions.k8s.io/v1beta1
-      kind: CustomResourceDefinition
-      metadata:
-        name: storagelocations.chargeback.coreos.com
-        annotations:
-          catalog.app.coreos.com/displayName: "Chargeback storage location"
-          catalog.app.coreos.com/description: "Represents a configurable storage location for Chargeback to store metering and report data"
-      spec:
-        group: chargeback.coreos.com
-        version: v1alpha1
-        scope: Namespaced
-        names:
-          plural: storagelocations
-          kind: StorageLocation
-      
-  clusterServiceVersions: |-
-    - #! validate-crd: ./deploy/chart/templates/03-clusterserviceversion.crd.yaml
-      #! parse-kind: ClusterServiceVersion
-      apiVersion: app.coreos.com/v1alpha1
-      kind: ClusterServiceVersion-v1
-      metadata:
-        name: metering-helm-operator.v0.6.0
-        namespace: placeholder
-        annotations:
-          tectonic-visibility: tectonic-feature
-        labels:
-          alm-catalog: tectonic-feature
-          operator-metering: "true"
-      spec:
-        displayName: Metering
-        description: Metering can generate reports based on historical usage data from a cluster, providing accountability for how resources have been used.
-        keywords: [metering metrics reporting coreos]
-        version: 0.6.0
-        maturity: alpha
-        maintainers:
-          - email: support@coreos.com
-            name: CoreOS, Inc
-        provider:
-          name: CoreOS, Inc
-        labels:
-          alm-owner-metering: metering-helm-operator
-          alm-status-descriptors: metering-helm-operator.v0.6.0
-        selector:
-          matchLabels:
-            alm-owner-metering: metering-helm-operator
-        install:
-          strategy: deployment
-          spec:
-            permissions:
-              - rules:
-                - apiGroups:
-                  - chargeback.coreos.com
-                  resources:
-                  - '*'
-                  verbs:
-                  - '*'
-                - apiGroups:
-                  - ""
-                  resources:
-                  - pods
-                  - pods/attach
-                  - pods/exec
-                  - pods/portforward
-                  - pods/proxy
-                  verbs:
-                  - create
-                  - delete
-                  - deletecollection
-                  - get
-                  - list
-                  - patch
-                  - update
-                  - watch
-                - apiGroups:
-                  - ""
-                  resources:
-                  - configmaps
-                  - endpoints
-                  - persistentvolumeclaims
-                  - replicationcontrollers
-                  - replicationcontrollers/scale
-                  - secrets
-                  - serviceaccounts
-                  - services
-                  - services/proxy
-                  verbs:
-                  - create
-                  - delete
-                  - deletecollection
-                  - get
-                  - list
-                  - patch
-                  - update
-                  - watch
-                - apiGroups:
-                  - ""
-                  resources:
-                  - bindings
-                  - events
-                  - limitranges
-                  - namespaces/status
-                  - pods/log
-                  - pods/status
-                  - replicationcontrollers/status
-                  - resourcequotas
-                  - resourcequotas/status
-                  verbs:
-                  - get
-                  - list
-                  - watch
-                - apiGroups:
-                  - ""
-                  resources:
-                  - events
-                  verbs:
-                  - create
-                  - update
-                  - patch
-                - apiGroups:
-                  - ""
-                  resources:
-                  - namespaces
-                  verbs:
-                  - get
-                  - list
-                  - watch
-                - apiGroups:
-                  - apps
-                  resources:
-                  - deployments
-                  - deployments/rollback
-                  - deployments/scale
-                  - statefulsets
-                  verbs:
-                  - create
-                  - delete
-                  - deletecollection
-                  - get
-                  - list
-                  - patch
-                  - update
-                  - watch
-                - apiGroups:
-                  - batch
-                  resources:
-                  - cronjobs
-                  - jobs
-                  verbs:
-                  - create
-                  - delete
-                  - deletecollection
-                  - get
-                  - list
-                  - patch
-                  - update
-                  - watch
-                - apiGroups:
-                  - extensions
-                  resources:
-                  - daemonsets
-                  - deployments
-                  - deployments/rollback
-                  - deployments/scale
-                  - replicasets
-                  - replicasets/scale
-                  - replicationcontrollers/scale
-                  verbs:
-                  - create
-                  - delete
-                  - deletecollection
-                  - get
-                  - list
-                  - patch
-                  - update
-                  - watch
-                - apiGroups:
-                  - rbac.authorization.k8s.io
-                  resources:
-                  - rolebindings
-                  - roles
-                  verbs:
-                  - create
-                  - delete
-                  - deletecollection
-                  - get
-                  - list
-                  - patch
-                  - update
-                  - watch
-                serviceAccountName: metering-helm-operator
-            deployments:
-              - name: metering-helm-operator
-                spec:
-                  replicas: 1
-                  selector:
-                    matchLabels:
-                      app: metering-helm-operator
-                  strategy:
-                    type: Recreate
-                  template:
-                    metadata:
-                      labels:
-                        app: metering-helm-operator
-                    spec:
-                      containers:
-                      - args:
-                        - run-operator.sh
-                        env:
-                        - name: HELM_RELEASE_CRD_NAME
-                          value: Metering
-                        - name: HELM_RELEASE_CRD_API_GROUP
-                          value: chargeback.coreos.com
-                        - name: HELM_CHART_PATH
-                          value: /operator-metering-0.1.0.tgz
-                        - name: MY_POD_NAME
-                          valueFrom:
-                            fieldRef:
-                              fieldPath: metadata.name
-                        - name: MY_POD_NAMESPACE
-                          valueFrom:
-                            fieldRef:
-                              fieldPath: metadata.namespace
-                        - name: HELM_HOST
-                          value: 127.0.0.1:44134
-                        - name: HELM_WAIT
-                          value: "false"
-                        - name: HELM_RECONCILE_INTERVAL_SECONDS
-                          value: "30"
-                        - name: RELEASE_HISTORY_LIMIT
-                          value: "3"
-                        image: quay.io/coreos/chargeback-helm-operator:0.6.0
-                        imagePullPolicy: Always
-                        name: metering-helm-operator
-                        resources:
-                          limits:
-                            cpu: 50m
-                            memory: 25Mi
-                          requests:
-                            cpu: 50m
-                            memory: 25Mi
-                      - args:
-                        - /tiller
-                        env:
-                        - name: TILLER_NAMESPACE
-                          valueFrom:
-                            fieldRef:
-                              fieldPath: metadata.namespace
-                        - name: TILLER_HISTORY_MAX
-                          value: "3"
-                        image: quay.io/coreos/chargeback-helm-operator:0.6.0
-                        imagePullPolicy: Always
-                        livenessProbe:
-                          failureThreshold: 3
-                          httpGet:
-                            path: /liveness
-                            port: 44135
-                            scheme: HTTP
-                          initialDelaySeconds: 1
-                          periodSeconds: 10
-                          successThreshold: 1
-                          timeoutSeconds: 1
-                        name: tiller
-                        readinessProbe:
-                          failureThreshold: 3
-                          httpGet:
-                            path: /readiness
-                            port: 44135
-                            scheme: HTTP
-                          initialDelaySeconds: 1
-                          periodSeconds: 10
-                          successThreshold: 1
-                          timeoutSeconds: 1
-                        resources:
-                          limits:
-                            cpu: 50m
-                            memory: 100Mi
-                          requests:
-                            cpu: 50m
-                            memory: 50Mi
-                      imagePullSecrets: []
-                      restartPolicy: Always
-                      securityContext:
-                        runAsNonRoot: true
-                      serviceAccount: metering-helm-operator
-                      terminationGracePeriodSeconds: 30
-        customresourcedefinitions:
-          owned:
-          - description: An instance of Metering
-            displayName: Metering
-            kind: Metering
-            name: meterings.chargeback.coreos.com
-            version: v1alpha1
-          - description: A table within PrestoDB
-            displayName: Chargeback Presto Table
-            kind: PrestoTable
-            name: prestotables.chargeback.coreos.com
-            version: v1alpha1
-          - description: A resource describing a source of data for usage by Report Generation
-              Queries
-            displayName: Chargeback data source
-            kind: ReportDataSource
-            name: reportdatasources.chargeback.coreos.com
-            version: v1alpha1
-          - description: A SQL query used by Chargeback to generate reports
-            displayName: Chargeback generation query
-            kind: ReportGenerationQuery
-            name: reportgenerationqueries.chargeback.coreos.com
-            version: v1alpha1
-          - description: A Prometheus query by Chargeback to do metering
-            displayName: Chargeback prometheus query
-            kind: ReportPrometheusQuery
-            name: reportprometheusqueries.chargeback.coreos.com
-            version: v1alpha1
-          - description: A chargeback report for a specific time interval
-            displayName: Chargeback Report
-            kind: Report
-            name: reports.chargeback.coreos.com
-            version: v1alpha1
-          - description: A chargeback report that runs on a scheduled interval
-            displayName: Chargeback Scheduled Report
-            kind: ScheduledReport
-            name: scheduledreports.chargeback.coreos.com
-            version: v1alpha1
-          - description: Represents a configurable storage location for Chargeback to store
-              metering and report data
-            displayName: Chargeback storage location
-            kind: StorageLocation
-            name: storagelocations.chargeback.coreos.com
-            version: v1alpha1
-      
-  packages: |-
-    - #! package-manifest: ./deploy/chart/catalog_resources/upstream/metering.0.6.0.clusterserviceversion.yaml
-      packageName: metering
-      channels:
-      - currentCSV: metering-helm-operator.v0.6.0
-        name: alpha
-      
-

+ 0 - 19
roles/olm/files/19-upstreamcomponents.catalogsource.yaml

@@ -1,19 +0,0 @@
-##---
-# Source: olm/templates/19-upstreamcomponents.catalogsource.yaml
-
-#! validate-crd: ./deploy/chart/templates/05-catalogsource.crd.yaml
-#! parse-kind: CatalogSource
-apiVersion: app.coreos.com/v1alpha1
-kind: CatalogSource-v1
-metadata:
-  name: upstream-components
-  namespace: operator-lifecycle-manager
-  annotations:
-    tectonic-operators.coreos.com/upgrade-strategy: 'DeleteAndRecreate'
-spec:
-  name: upstream-components
-  sourceType: internal
-  configMap: upstream-components
-  displayName: OLM Upstream Components
-  publisher: CoreOS, Inc.
-

+ 0 - 26
roles/olm/files/20-aggregated.clusterrole.yaml

@@ -1,26 +0,0 @@
-##---
-# Source: olm/templates/20-aggregated.clusterrole.yaml
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: aggregate-olm-edit
-  labels:
-    # Add these permissions to the "admin" and "edit" default roles.
-    rbac.authorization.k8s.io/aggregate-to-admin: "true"
-    rbac.authorization.k8s.io/aggregate-to-edit: "true"
-rules:
-- apiGroups: ["operators.coreos.com"]
-  resources: ["*"]
-  verbs: ["*"]
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: aggregate-olm-view
-  labels:
-    # Add these permissions to the "view" default roles
-    rbac.authorization.k8s.io/aggregate-to-view: "true"
-rules:
-- apiGroups: ["operators.coreos.com"]
-  resources: ["*"]
-  verbs: ["get", "list", "watch"]

+ 0 - 9
roles/olm/tasks/install.yaml

@@ -116,15 +116,6 @@
     files:
       - "{{ mktemp.stdout }}/20-aggregated-edit.clusterrole.yaml"
 
-- name: Apply aggregate-olm-edit ClusterRole manifest
-  oc_obj:
-    state: present
-    kind: ClusterRole
-    name: aggregate-olm-edit
-    namespace: operator-lifecycle-manager
-    files:
-      - "{{ mktemp.stdout }}/20-aggregated.clusterrole.yaml"
-
 - name: Apply aggregate-olm-view ClusterRole manifest
   oc_obj:
     state: present

+ 2 - 2
roles/olm/tasks/remove.yaml

@@ -2,7 +2,7 @@
 
 - import_tasks: remove_components.yaml
 
-- name: remove openshift-ansible-service-broker project
+- name: remove operator-lifecycle-manager project
   oc_project:
-    name: openshift-ansible-service-broker
+    name: operator-lifecycle-manager
     state: absent

+ 33 - 33
roles/olm/tasks/remove_components.yaml

@@ -1,84 +1,84 @@
 ---
-- name: Remove alm-operator-serviceaccount ServiceAccount manifest
+- name: Remove olm-operator-serviceaccount ServiceAccount manifest
   oc_obj:
     state: absent
     kind: ServiceAccount
-    name: alm-operator-serviceaccount
+    name: olm-operator-serviceaccount
     namespace: operator-lifecycle-manager
 
-- name: Remove alm-operator-binding ClusterRoleBinding manifest
+- name: Remove olm-operator-binding-operator-lifecycle-manager ClusterRoleBinding manifest
   oc_obj:
     state: absent
     kind: ClusterRoleBinding
-    name: alm-operator-binding
+    name: olm-operator-binding-operator-lifecycle-manager
     namespace: operator-lifecycle-manager
 
-- name: Remove tectonic-ocs ConfigMap manifest
+- name: Remove clusterserviceversions.operators.coreos.com CustomResourceDefinition manifest
   oc_obj:
     state: absent
-    kind: ConfigMap
-    name: tectonic-ocs
+    kind: CustomResourceDefinition
+    name: clusterserviceversions.operators.coreos.com
     namespace: operator-lifecycle-manager
 
-- name: Remove tectonic-ocs CatalogSource-v1 manifest
+- name: Remove catalogsources.operators.coreos.com CustomResourceDefinition manifest
   oc_obj:
     state: absent
-    kind: CatalogSource-v1
-    name: tectonic-ocs
+    kind: CustomResourceDefinition
+    name: catalogsources.operators.coreos.com
     namespace: operator-lifecycle-manager
 
-- name: Remove alm-operator Deployment manifest
+- name: Remove installplans.operators.coreos.com CustomResourceDefinition manifest
   oc_obj:
     state: absent
-    kind: Deployment
-    name: alm-operator
+    kind: CustomResourceDefinition
+    name: installplans.operators.coreos.com
     namespace: operator-lifecycle-manager
 
-- name: Remove catalog-operator Deployment manifest
+- name: Remove subscriptions.operators.coreos.com CustomResourceDefinition manifest
   oc_obj:
     state: absent
-    kind: Deployment
-    name: catalog-operator
+    kind: CustomResourceDefinition
+    name: subscriptions.operators.coreos.com
     namespace: operator-lifecycle-manager
 
-- name: Remove upstream-components ConfigMap manifest
+- name: Remove ocs ConfigMap manifest
   oc_obj:
     state: absent
     kind: ConfigMap
-    name: upstream-components
+    name: ocs
     namespace: operator-lifecycle-manager
 
-- name: Remove upstream-components CatalogSource-v1 manifest
+- name: Remove ocs CatalogSource manifest
   oc_obj:
     state: absent
-    kind: CatalogSource-v1
-    name: upstream-components
+    kind: CatalogSource
+    name: ocs
     namespace: operator-lifecycle-manager
 
-- name: Remove clusterserviceversion-v1s.app.coreos.com CustomResourceDefinition manifest
+- name: Remove alm-operator Deployment manifest
   oc_obj:
     state: absent
-    kind: CustomResourceDefinition
-    name: clusterserviceversion-v1s.app.coreos.com
+    kind: Deployment
+    name: alm-operator
     namespace: operator-lifecycle-manager
 
-- name: Remove catalogsource-v1s.app.coreos.com CustomResourceDefinition manifest
+- name: Remove catalog-operator Deployment manifest
   oc_obj:
     state: absent
-    kind: CustomResourceDefinition
-    name: catalogsource-v1s.app.coreos.com
+    kind: Deployment
+    name: catalog-operator
     namespace: operator-lifecycle-manager
 
-- name: Remove installplan-v1s.app.coreos.com CustomResourceDefinition manifest
+- name: Remove aggregate-olm-edit ClusterRole manifest
   oc_obj:
     state: absent
-    kind: CustomResourceDefinition
-    name: installplan-v1s.app.coreos.com
+    kind: ClusterRole
+    name: aggregate-olm-edit
     namespace: operator-lifecycle-manager
 
-- name: Remove subscription-v1s.app.coreos.com CustomResourceDefinition manifest
+- name: Remove aggregate-olm-view ClusterRole manifest
   oc_obj:
     state: absent
-    kind: CustomResourceDefinition
-    name: subscription-v1s.app.coreos.com
+    kind: ClusterRole
+    name: aggregate-olm-view
     namespace: operator-lifecycle-manager

+ 51 - 0
roles/openshift_aws/defaults/main.yml

@@ -61,6 +61,57 @@ openshift_aws_vpc:
 #    - cidr: 172.31.16.0/20
 #      az: "us-east-1a"
 
+openshift_aws_create_dns: False
+openshift_aws_dns_provider: "route53"
+# openshift_aws_dns_zone: ""
+# ie. openshift_aws_dns_zone: "{{ openshift_aws_clusterid }}.example.com"
+
+# elb names we want to query to support dns record creation.
+# you don't need to adjust this unless you have modified openshift_aws_elb_dict
+openshift_aws_elb_names:
+- "{{ openshift_aws_elb_master_internal_name }}"
+- "{{ openshift_aws_elb_master_external_name }}"
+- "{{ openshift_aws_elb_infra_name }}"
+
+# l_openshift_aws_elb_facts is created by querying ec2 for all elb names in
+# l_openshift_aws_elb_names via tasks/build_elb_dict.yml
+openshift_aws_dns_records:
+  # Pertains to inventory file key: openshift_master_cluster_public_hostname
+  'api':
+    type: 'CNAME'
+    # A public or private vpc attached Route53 zone will be created based on
+    # private_zone boolean.  Split-tier dns is supported.
+    private_zone: False
+    value: "{{ l_openshift_aws_elb_facts[openshift_aws_elb_master_external_name].dns_name }}"
+  # Pertains to inventory file key: openshift_master_cluster_hostname
+  'internal.api':
+    type: 'CNAME'
+    private_zone: False
+    value: "{{ l_openshift_aws_elb_facts[openshift_aws_elb_master_internal_name].dns_name }}"
+  # Pertains to inventory file key: openshift_master_default_subdomain
+  '*.apps':
+    type: "CNAME"
+    private_zone: False
+    value: "{{ l_openshift_aws_elb_facts[openshift_aws_elb_infra_name].dns_name }}"
+  'logs':
+    type: "CNAME"
+    private_zone: False
+    value: "{{ l_openshift_aws_elb_facts[openshift_aws_elb_infra_name].dns_name }}"
+  'metrics':
+    type: "CNAME"
+    private_zone: False
+    value: "{{ l_openshift_aws_elb_facts[openshift_aws_elb_infra_name].dns_name }}"
+  'registry':
+    type: "CNAME"
+    private_zone: False
+    value: "{{ l_openshift_aws_elb_facts[openshift_aws_elb_infra_name].dns_name }}"
+
+# Allows users to add and recursively override
+# https://docs.ansible.com/ansible/2.5/user_guide/playbooks_filters.html#combining-hashes-dictionaries
+openshift_aws_dns_records_override: {}
+
+l_openshift_aws_dns_records: "{{ openshift_aws_dns_records | combine(openshift_aws_dns_records_override, recursive=True) }}"
+
 openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}"
 openshift_aws_elb_master_external_name: "{{ openshift_aws_elb_basename }}-master-external"
 openshift_aws_elb_master_internal_name: "{{ openshift_aws_elb_basename }}-master-internal"
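l_openshift_aws_dns_records is built by merging openshift_aws_dns_records_override into openshift_aws_dns_records with Ansible's combine(..., recursive=True) filter, as the comment above notes. A plain-Python sketch of that merge semantics (the record values are placeholders, not real ELB hostnames):

    import copy

    def combine_recursive(base, override):
        # Equivalent in spirit to combine(override, recursive=True):
        # nested dicts are merged key by key, scalars in `override` win.
        merged = copy.deepcopy(base)
        for key, value in override.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = combine_recursive(merged[key], value)
            else:
                merged[key] = value
        return merged

    dns_records = {
        'api':    {'type': 'CNAME', 'private_zone': False, 'value': 'master-external.elb.example.com'},
        '*.apps': {'type': 'CNAME', 'private_zone': False, 'value': 'infra.elb.example.com'},
    }
    override = {'api': {'private_zone': True}}  # flip only the api record into the private zone

    print(combine_recursive(dns_records, override)['api'])
    # {'type': 'CNAME', 'private_zone': True, 'value': 'master-external.elb.example.com'}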

+ 0 - 0
roles/openshift_aws/tasks/build_elb_dict.yml


Some files were not shown because too many files have changed