Browse Source

Merge branch 'master' into upgrade33

Devan Goodwin 8 years ago
parent
commit
b85c47761d
22 changed files with 613 additions and 446 deletions
  1. 357 243
      playbooks/adhoc/uninstall.yml
  2. 2 2
      playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
  3. 1 0
      playbooks/common/openshift-cluster/additional_config.yml
  4. 1 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
  5. 12 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
  6. 2 0
      roles/openshift_examples/defaults/main.yml
  7. 1 1
      roles/openshift_examples/examples-sync.sh
  8. 1 1
      roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-ephemeral-template.json
  9. 1 1
      roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json
  10. 1 1
      roles/openshift_examples/files/examples/v1.2/db-templates/mysql-ephemeral-template.json
  11. 12 35
      roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json
  12. 1 1
      roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-ephemeral-template.json
  13. 1 1
      roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json
  14. 9 1
      roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml
  15. 85 61
      roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml
  16. 6 0
      roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml
  17. 1 1
      roles/openshift_examples/files/examples/v1.2/quickstart-templates/django-postgresql.json
  18. 50 44
      roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-ephemeral-template.json
  19. 51 45
      roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json
  20. 5 0
      roles/openshift_examples/tasks/main.yml
  21. 12 7
      roles/openshift_node/tasks/main.yml
  22. 1 1
      roles/openshift_node/templates/openshift.docker.node.dep.service

+ 357 - 243
playbooks/adhoc/uninstall.yml

@@ -7,253 +7,367 @@
 #    images
 #    RPMs
 ---
-- hosts:
-    - OSEv3:children
+- hosts: OSEv3:children
+  become: yes
+  tasks:
+  - name: Detecting Operating System
+    shell: ls /run/ostree-booted
+    ignore_errors: yes
+    failed_when: false
+    register: ostree_output
+
+  # Since we're not calling openshift_facts we'll do this for now
+  - set_fact:
+      is_atomic: "{{ ostree_output.rc == 0 }}"
+  - set_fact:
+      is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
 
+- hosts: nodes
   become: yes
+  tasks:
+  - name: Stop services
+    service: name={{ item }} state=stopped
+    with_items:
+    - atomic-enterprise-node
+    - atomic-openshift-node
+    - openshift-node
+    - openvswitch
+    - origin-node
+    failed_when: false
+
+  - name: unmask services
+    command: systemctl unmask "{{ item }}"
+    changed_when: False
+    failed_when: False
+    with_items:
+    - firewalld
+
+  - name: Remove packages
+    action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+    when: not is_atomic | bool
+    with_items:
+    - atomic-enterprise
+    - atomic-enterprise-node
+    - atomic-enterprise-sdn-ovs
+    - atomic-openshift
+    - atomic-openshift-clients
+    - atomic-openshift-node
+    - atomic-openshift-sdn-ovs
+    - cockpit-bridge
+    - cockpit-docker
+    - cockpit-shell
+    - cockpit-ws
+    - kubernetes-client
+    - openshift
+    - openshift-node
+    - openshift-sdn
+    - openshift-sdn-ovs
+    - openvswitch
+    - origin
+    - origin-clients
+    - origin-node
+    - origin-sdn-ovs
+    - tuned-profiles-atomic-enterprise-node
+    - tuned-profiles-atomic-openshift-node
+    - tuned-profiles-openshift-node
+    - tuned-profiles-origin-node
+
+  - shell: systemctl reset-failed
+    changed_when: False
+
+  - shell: systemctl daemon-reload
+    changed_when: False
+
+  - name: Remove br0 interface
+    shell: ovs-vsctl del-br br0
+    changed_when: False
+    failed_when: False
+
+  - name: Remove linux interfaces
+    shell: ip link del "{{ item }}"
+    changed_when: False
+    failed_when: False
+    with_items:
+    - lbr0
+    - vlinuxbr
+    - vovsbr
+
+  - name: restart docker
+    service: name=docker state=restarted
+
+  - name: restart NetworkManager
+    service: name=NetworkManager state=restarted
+
+  - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+    changed_when: False
+
+  - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+    changed_when: False
+
+  - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+    changed_when: False
+
+  - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+    changed_when: False
+    failed_when: False
+    with_items:
+    - openshift-enterprise
+    - atomic-enterprise
+    - origin
+
+  - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}'
+    changed_when: False
+    failed_when: False
+    register: exited_containers_to_delete
+    with_items:
+    - aep3.*/aep
+    - aep3.*/node
+    - aep3.*/openvswitch
+    - openshift3/ose
+    - openshift3/node
+    - openshift3/openvswitch
+    - openshift/origin
+
+  - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+    changed_when: False
+    failed_when: False
+    with_items: "{{ exited_containers_to_delete.results }}"
+
+  - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
+    changed_when: False
+    failed_when: False
+    register: images_to_delete
+    with_items:
+    - registry\.access\..*redhat\.com/openshift3
+    - registry\.access\..*redhat\.com/aep3
+    - registry\.qe\.openshift\.com/.*
+    - registry\.access\..*redhat\.com/rhel7/etcd
+    - docker.io/openshift
+
+  - shell:  "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+    changed_when: False
+    failed_when: False
+    with_items: "{{ images_to_delete.results }}"
+
+  - name: Remove sdn drop files
+    file:
+      path: /run/openshift-sdn
+      state: absent
+
+  - name: Remove remaining files
+    file: path={{ item }} state=absent
+    with_items:
+    - /etc/ansible/facts.d/openshift.fact
+    - /etc/atomic-enterprise
+    - /etc/openshift
+    - /etc/openshift-sdn
+    - /etc/origin
+    - /etc/systemd/system/atomic-openshift-node.service
+    - /etc/systemd/system/atomic-openshift-node-dep.service
+    - /etc/systemd/system/origin-node.service
+    - /etc/systemd/system/origin-node-dep.service
+    - /etc/systemd/system/openvswitch.service
+    - /etc/sysconfig/atomic-enterprise-node
+    - /etc/sysconfig/atomic-openshift-node
+    - /etc/sysconfig/atomic-openshift-node-dep
+    - /etc/sysconfig/origin-node
+    - /etc/sysconfig/origin-node-dep
+    - /etc/sysconfig/openshift-node
+    - /etc/sysconfig/openshift-node-dep
+    - /etc/sysconfig/openvswitch
+    - /etc/sysconfig/origin-node
+    - /etc/systemd/system/atomic-openshift-node.service.wants
+    - /run/openshift-sdn
+    - /var/lib/atomic-enterprise
+    - /var/lib/openshift
+    - /var/lib/origin
+    - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
+    - /etc/dnsmasq.d/origin-dns.conf
+    - /etc/dnsmasq.d/origin-upstream-dns.conf
 
+- hosts: masters
+  become: yes
   tasks:
-    - name: Detecting Operating System
-      shell: ls /run/ostree-booted
-      ignore_errors: yes
-      failed_when: false
-      register: ostree_output
-
-      # Since we're not calling openshift_facts we'll do this for now
-    - set_fact:
-        is_atomic: "{{ ostree_output.rc == 0 }}"
-    - set_fact:
-        is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
-
-    - name: Stop services
-      service: name={{ item }} state=stopped
-      with_items:
-        - atomic-enterprise-master
-        - atomic-enterprise-node
-        - atomic-openshift-master
-        - atomic-openshift-master-api
-        - atomic-openshift-master-controllers
-        - atomic-openshift-node
-        - etcd
-        - haproxy
-        - openshift-master
-        - openshift-master-api
-        - openshift-master-controllers
-        - openshift-node
-        - openvswitch
-        - origin-master
-        - origin-master-api
-        - origin-master-controllers
-        - origin-node
-        - pcsd
-      failed_when: false
-
-    - name: unmask services
-      command: systemctl unmask "{{ item }}"
-      changed_when: False
-      failed_when: False
-      with_items:
-        - etcd
-        - firewalld
-        - atomic-openshift-master
-
-    - name: Stop additional atomic services
-      service: name={{ item }} state=stopped
-      when: is_containerized | bool
-      with_items:
-        - etcd_container
-      failed_when: false
-
-    - name: Remove packages
-      action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
-      when: not is_atomic | bool
-      with_items:
-        - atomic-enterprise
-        - atomic-enterprise-master
-        - atomic-enterprise-node
-        - atomic-enterprise-sdn-ovs
-        - atomic-openshift
-        - atomic-openshift-clients
-        - atomic-openshift-master
-        - atomic-openshift-node
-        - atomic-openshift-sdn-ovs
-        - cockpit-bridge
-        - cockpit-docker
-        - cockpit-shell
-        - cockpit-ws
-        - corosync
-        - etcd
-        - haproxy
-        - kubernetes-client
-        - openshift
-        - openshift-master
-        - openshift-node
-        - openshift-sdn
-        - openshift-sdn-ovs
-        - openvswitch
-        - origin
-        - origin-clients
-        - origin-master
-        - origin-node
-        - origin-sdn-ovs
-        - pacemaker
-        - pcs
-        - tuned-profiles-atomic-enterprise-node
-        - tuned-profiles-atomic-openshift-node
-        - tuned-profiles-openshift-node
-        - tuned-profiles-origin-node
-
-    - shell: systemctl reset-failed
-      changed_when: False
-
-    - shell: systemctl daemon-reload
-      changed_when: False
-
-    - name: Remove remaining files
-      file: path={{ item }} state=absent
-      with_items:
-        - "~{{ ansible_ssh_user }}/.kube"
-        - /etc/ansible/facts.d/openshift.fact
-        - /etc/atomic-enterprise
-        - /etc/corosync
-        - /etc/etcd
-        - /etc/openshift
-        - /etc/openshift-sdn
-        - /etc/origin
-        - /etc/systemd/system/atomic-openshift-master.service
-        - /etc/systemd/system/atomic-openshift-master-api.service
-        - /etc/systemd/system/atomic-openshift-master-controllers.service
-        - /etc/systemd/system/atomic-openshift-node.service
-        - /etc/systemd/system/atomic-openshift-node-dep.service
-        - /etc/systemd/system/origin-master.service
-        - /etc/systemd/system/origin-master-api.service
-        - /etc/systemd/system/origin-master-controllers.service
-        - /etc/systemd/system/origin-node.service
-        - /etc/systemd/system/origin-node-dep.service
-        - /etc/systemd/system/etcd_container.service
-        - /etc/systemd/system/openvswitch.service
-        - /etc/sysconfig/atomic-enterprise-master
-        - /etc/sysconfig/atomic-enterprise-master-api
-        - /etc/sysconfig/atomic-enterprise-master-controllers
-        - /etc/sysconfig/atomic-enterprise-node
-        - /etc/sysconfig/atomic-openshift-master
-        - /etc/sysconfig/atomic-openshift-master-api
-        - /etc/sysconfig/atomic-openshift-master-controllers
-        - /etc/sysconfig/atomic-openshift-node
-        - /etc/sysconfig/atomic-openshift-node-dep
-        - /etc/sysconfig/origin-master
-        - /etc/sysconfig/origin-master-api
-        - /etc/sysconfig/origin-master-controllers
-        - /etc/sysconfig/origin-node
-        - /etc/sysconfig/origin-node-dep
-        - /etc/sysconfig/openshift-master
-        - /etc/sysconfig/openshift-node
-        - /etc/sysconfig/openshift-node-dep
-        - /etc/sysconfig/openvswitch
-        - /etc/sysconfig/origin-master
-        - /etc/sysconfig/origin-master-api
-        - /etc/sysconfig/origin-master-controllers
-        - /etc/sysconfig/origin-node
-        - /etc/systemd/system/atomic-openshift-node.service.wants
-        - /root/.kube
-        - /run/openshift-sdn
-        - /usr/share/openshift/examples
-        - /var/lib/atomic-enterprise
-        - /var/lib/etcd
-        - /var/lib/openshift
-        - /var/lib/origin
-        - /var/lib/pacemaker
-        - /usr/lib/systemd/system/atomic-openshift-master-api.service
-        - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
-        - /usr/lib/systemd/system/origin-master-api.service
-        - /usr/lib/systemd/system/origin-master-controllers.service
-        - /usr/local/bin/openshift
-        - /usr/local/bin/oadm
-        - /usr/local/bin/oc
-        - /usr/local/bin/kubectl
-        - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
-        - /etc/dnsmasq.d/origin-dns.conf
-        - /etc/dnsmasq.d/origin-upstream-dns.conf
-
-    # Since we are potentially removing the systemd unit files for separated
-    # master-api and master-controllers services, so we need to reload the
-    # systemd configuration manager
-    - name: Reload systemd manager configuration
-      command: systemctl daemon-reload
+  - name: Stop services
+    service: name={{ item }} state=stopped
+    with_items:
+    - atomic-enterprise-master
+    - atomic-openshift-master
+    - atomic-openshift-master-api
+    - atomic-openshift-master-controllers
+    - openshift-master
+    - openshift-master-api
+    - openshift-master-controllers
+    - origin-master
+    - origin-master-api
+    - origin-master-controllers
+    - pcsd
+    failed_when: false
 
-- hosts: nodes
+  - name: unmask services
+    command: systemctl unmask "{{ item }}"
+    changed_when: False
+    failed_when: False
+    with_items:
+    - firewalld
+    - atomic-openshift-master
+
+  - name: Remove packages
+    action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+    when: not is_atomic | bool
+    with_items:
+    - atomic-enterprise
+    - atomic-enterprise-master
+    - atomic-openshift
+    - atomic-openshift-clients
+    - atomic-openshift-master
+    - cockpit-bridge
+    - cockpit-docker
+    - cockpit-shell
+    - cockpit-ws
+    - corosync
+    - kubernetes-client
+    - openshift
+    - openshift-master
+    - origin
+    - origin-clients
+    - origin-master
+    - pacemaker
+    - pcs
+
+  - shell: systemctl reset-failed
+    changed_when: False
+
+  - shell: systemctl daemon-reload
+    changed_when: False
+
+  - name: Remove remaining files
+    file: path={{ item }} state=absent
+    with_items:
+    - "~{{ ansible_ssh_user }}/.kube"
+    - /etc/ansible/facts.d/openshift.fact
+    - /etc/atomic-enterprise
+    - /etc/corosync
+    - /etc/openshift
+    - /etc/openshift-sdn
+    - /etc/origin
+    - /etc/systemd/system/atomic-openshift-master.service
+    - /etc/systemd/system/atomic-openshift-master-api.service
+    - /etc/systemd/system/atomic-openshift-master-controllers.service
+    - /etc/systemd/system/origin-master.service
+    - /etc/systemd/system/origin-master-api.service
+    - /etc/systemd/system/origin-master-controllers.service
+    - /etc/systemd/system/openvswitch.service
+    - /etc/sysconfig/atomic-enterprise-master
+    - /etc/sysconfig/atomic-enterprise-master-api
+    - /etc/sysconfig/atomic-enterprise-master-controllers
+    - /etc/sysconfig/atomic-openshift-master
+    - /etc/sysconfig/atomic-openshift-master-api
+    - /etc/sysconfig/atomic-openshift-master-controllers
+    - /etc/sysconfig/origin-master
+    - /etc/sysconfig/origin-master-api
+    - /etc/sysconfig/origin-master-controllers
+    - /etc/sysconfig/openshift-master
+    - /etc/sysconfig/openvswitch
+    - /etc/sysconfig/origin-master
+    - /etc/sysconfig/origin-master-api
+    - /etc/sysconfig/origin-master-controllers
+    - /root/.kube
+    - /usr/share/openshift/examples
+    - /var/lib/atomic-enterprise
+    - /var/lib/openshift
+    - /var/lib/origin
+    - /var/lib/pacemaker
+    - /var/lib/pcsd
+    - /usr/lib/systemd/system/atomic-openshift-master-api.service
+    - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
+    - /usr/lib/systemd/system/origin-master-api.service
+    - /usr/lib/systemd/system/origin-master-controllers.service
+    - /usr/local/bin/openshift
+    - /usr/local/bin/oadm
+    - /usr/local/bin/oc
+    - /usr/local/bin/kubectl
+
+  # Since we are potentially removing the systemd unit files for the separated
+  # master-api and master-controllers services, we need to reload the
+  # systemd configuration manager
+  - name: Reload systemd manager configuration
+    command: systemctl daemon-reload
+
+- hosts: etcd
+  become: yes
+  tasks:
+  - name: Stop services
+    service: name={{ item }} state=stopped
+    with_items:
+    - etcd
+    failed_when: false
+
+  - name: unmask services
+    command: systemctl unmask "{{ item }}"
+    changed_when: False
+    failed_when: False
+    with_items:
+    - etcd
+    - firewalld
+
+  - name: Stop additional atomic services
+    service: name={{ item }} state=stopped
+    when: is_containerized | bool
+    with_items:
+    - etcd_container
+    failed_when: false
+
+  - name: Remove packages
+    action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+    when: not is_atomic | bool
+    with_items:
+    - etcd
+
+  - shell: systemctl reset-failed
+    changed_when: False
+
+  - shell: systemctl daemon-reload
+    changed_when: False
+
+  - name: Remove remaining files
+    file: path={{ item }} state=absent
+    with_items:
+    - /etc/ansible/facts.d/openshift.fact
+    - /etc/etcd
+    - /etc/systemd/system/etcd_container.service
+    - /var/lib/etcd
+
+- hosts: lb
   become: yes
   tasks:
-    - name: Remove br0 interface
-      shell: ovs-vsctl del-br br0
-      changed_when: False
-      failed_when: False
-    - name: Remove linux interfaces
-      shell: ip link del "{{ item }}"
-      changed_when: False
-      failed_when: False
-      with_items:
-        - lbr0
-        - vlinuxbr
-        - vovsbr
-    - name: restart docker
-      service: name=docker state=restarted
-      
-    - name: restart NetworkManager
-      service: name=NetworkManager state=restarted
-      
-    - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-      changed_when: False
-
-    - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-      changed_when: False
-
-    - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-      changed_when: False
-
-    - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
-      changed_when: False
-      failed_when: False
-      with_items:
-        - openshift-enterprise
-        - atomic-enterprise
-        - origin
-
-    - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}'
-      changed_when: False
-      failed_when: False
-      register: exited_containers_to_delete
-      with_items:
-        - aep3.*/aep
-        - aep3.*/node
-        - aep3.*/openvswitch
-        - openshift3/ose
-        - openshift3/node
-        - openshift3/openvswitch
-        - openshift/origin
-
-    - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
-      changed_when: False
-      failed_when: False
-      with_items: "{{ exited_containers_to_delete.results }}"
-
-    - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
-      changed_when: False
-      failed_when: False
-      register: images_to_delete
-      with_items:
-        - registry\.access\..*redhat\.com/openshift3
-        - registry\.access\..*redhat\.com/aep3
-        - registry\.qe\.openshift\.com/.*
-        - registry\.access\..*redhat\.com/rhel7/etcd
-        - docker.io/openshift
-
-    - shell:  "docker rmi -f {{ item.stdout_lines | join(' ') }}"
-      changed_when: False
-      failed_when: False
-      with_items: "{{ images_to_delete.results }}"
-
-    - name: Remove sdn drop files
-      file:
-        path: /run/openshift-sdn
-        state: absent
+  - name: Stop services
+    service: name={{ item }} state=stopped
+    with_items:
+    - haproxy
+    failed_when: false
+
+  - name: unmask services
+    command: systemctl unmask "{{ item }}"
+    changed_when: False
+    failed_when: False
+    with_items:
+    - firewalld
+
+  - name: Remove packages
+    action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+    when: not is_atomic | bool
+    with_items:
+    - haproxy
+
+  - shell: systemctl reset-failed
+    changed_when: False
+
+  - shell: systemctl daemon-reload
+    changed_when: False
+
+  - name: Remove remaining files
+    file: path={{ item }} state=absent
+    with_items:
+    - /etc/ansible/facts.d/openshift.fact
+    - /var/lib/haproxy

+ 2 - 2
playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml

@@ -59,7 +59,7 @@
       - "{{ openshift.common.service_type }}-master-api"
       - "{{ openshift.common.service_type }}-master-controllers"
       - "{{ openshift.common.service_type }}-node"
-      - etcd
+      - etcd_container
       - openvswitch
     failed_when: false
     when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool
@@ -77,7 +77,7 @@
   - name: Restart containerized services
     service: name={{ item }} state=started
     with_items:
-      - etcd
+      - etcd_container
       - openvswitch
       - "{{ openshift.common.service_type }}-master"
       - "{{ openshift.common.service_type }}-master-api"

+ 1 - 0
playbooks/common/openshift-cluster/additional_config.yml

@@ -17,6 +17,7 @@
   - role: openshift_master_cluster
     when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
   - role: openshift_examples
+    registry_url: "{{ openshift.master.registry_url }}"
     when: openshift.common.install_examples | bool
   - role: openshift_cluster_metrics
     when: openshift.common.use_cluster_metrics | bool

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml

@@ -19,6 +19,7 @@
   - openshift_examples
   # Update the existing templates
   - role: openshift_examples
+    registry_url: "{{ openshift.master.registry_url }}"
     openshift_examples_import_command: replace
   pre_tasks:
   - name: Collect all routers

+ 12 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml

@@ -10,6 +10,18 @@
   roles:
   - openshift_repos
 
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_config
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                    | union(groups['oo_masters_to_config'])
+                                                    | union(groups['oo_etcd_to_config'] | default([])))
+                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                                }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+            openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
 - name: Evaluate additional groups for upgrade
   hosts: localhost
   connection: local

+ 2 - 0
roles/openshift_examples/defaults/main.yml

@@ -20,3 +20,5 @@ infrastructure_origin_base: "{{ examples_base }}/infrastructure-templates/origin
 infrastructure_enterprise_base: "{{ examples_base }}/infrastructure-templates/enterprise"
 
 openshift_examples_import_command: "create"
+registry_url: ""
+registry_host: "{{ registry_url.split('/')[0] if '.' in registry_url.split('/')[0] else '' }}"

+ 1 - 1
roles/openshift_examples/examples-sync.sh

@@ -41,7 +41,7 @@ wget https://raw.githubusercontent.com/jboss-fuse/application-templates/master/f
 
 wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml                            -O ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-deployer.yaml
 wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml                        -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/metrics-deployer.yaml
-wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployment/deployer.yaml     -O ${EXAMPLES_BASE}/infrastructure-templates/origin/logging-deployer.yaml
+wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployer/deployer.yaml     -O ${EXAMPLES_BASE}/infrastructure-templates/origin/logging-deployer.yaml
 wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/logging-deployer.yaml
 
 popd

+ 1 - 1
roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-ephemeral-template.json

@@ -85,7 +85,7 @@
             "containers": [
               {
                 "name": "mongodb",
-                "image": "mongodb",
+                "image": " ",
                 "ports": [
                   {
                     "containerPort": 27017,

+ 1 - 1
roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json

@@ -102,7 +102,7 @@
             "containers": [
               {
                 "name": "mongodb",
-                "image": "mongodb",
+                "image": " ",
                 "ports": [
                   {
                     "containerPort": 27017,

+ 1 - 1
roles/openshift_examples/files/examples/v1.2/db-templates/mysql-ephemeral-template.json

@@ -85,7 +85,7 @@
             "containers": [
               {
                 "name": "mysql",
-                "image": "mysql",
+                "image": " ",
                 "ports": [
                   {
                     "containerPort": 3306,

+ 12 - 35
roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json

@@ -3,7 +3,6 @@
   "apiVersion": "v1",
   "metadata": {
     "name": "mysql-persistent",
-    "creationTimestamp": null,
     "annotations": {
       "description": "MySQL database service, with persistent storage.  Scaling to more than one replica is not supported.  You must have persistent volumes available in your cluster to use this template.",
       "iconClass": "icon-mysql-database",
@@ -15,28 +14,18 @@
       "kind": "Service",
       "apiVersion": "v1",
       "metadata": {
-        "name": "${DATABASE_SERVICE_NAME}",
-        "creationTimestamp": null
+        "name": "${DATABASE_SERVICE_NAME}"
       },
       "spec": {
         "ports": [
           {
             "name": "mysql",
-            "protocol": "TCP",
-            "port": 3306,
-            "targetPort": 3306,
-            "nodePort": 0
+            "port": 3306
           }
         ],
         "selector": {
           "name": "${DATABASE_SERVICE_NAME}"
-        },
-        "portalIP": "",
-        "type": "ClusterIP",
-        "sessionAffinity": "None"
-      },
-      "status": {
-        "loadBalancer": {}
+        }
       }
     },
     {
@@ -60,8 +49,7 @@
       "kind": "DeploymentConfig",
       "apiVersion": "v1",
       "metadata": {
-        "name": "${DATABASE_SERVICE_NAME}",
-        "creationTimestamp": null
+        "name": "${DATABASE_SERVICE_NAME}"
       },
       "spec": {
         "strategy": {
@@ -79,8 +67,7 @@
                 "kind": "ImageStreamTag",
                 "name": "mysql:latest",
                 "namespace": "${NAMESPACE}"
-              },
-              "lastTriggeredImage": ""
+              }
             }
           },
           {
@@ -93,7 +80,6 @@
         },
         "template": {
           "metadata": {
-            "creationTimestamp": null,
             "labels": {
               "name": "${DATABASE_SERVICE_NAME}"
             }
@@ -102,11 +88,10 @@
             "containers": [
               {
                 "name": "mysql",
-                "image": "mysql",
+                "image": " ",
                 "ports": [
                   {
-                    "containerPort": 3306,
-                    "protocol": "TCP"
+                    "containerPort": 3306
                   }
                 ],
                 "readinessProbe": {
@@ -149,13 +134,7 @@
                     "mountPath": "/var/lib/mysql/data"
                   }
                 ],
-                "terminationMessagePath": "/dev/termination-log",
-                "imagePullPolicy": "IfNotPresent",
-                "capabilities": {},
-                "securityContext": {
-                  "capabilities": {},
-                  "privileged": false
-                }
+                "imagePullPolicy": "IfNotPresent"
               }
             ],
             "volumes": [
@@ -165,13 +144,10 @@
                   "claimName": "${DATABASE_SERVICE_NAME}"
                 }
               }
-            ],
-            "restartPolicy": "Always",
-            "dnsPolicy": "ClusterFirst"
+            ]
           }
         }
-      },
-      "status": {}
+      }
     }
   ],
   "parameters": [
@@ -179,7 +155,8 @@
       "name": "MEMORY_LIMIT",
       "displayName": "Memory Limit",
       "description": "Maximum amount of memory the container can use.",
-      "value": "512Mi"
+      "value": "512Mi",
+      "required": true
     },
     {
       "name": "NAMESPACE",

+ 1 - 1
roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-ephemeral-template.json

@@ -85,7 +85,7 @@
             "containers": [
               {
                 "name": "postgresql",
-                "image": "postgresql",
+                "image": " ",
                 "ports": [
                   {
                     "containerPort": 5432,

+ 1 - 1
roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json

@@ -102,7 +102,7 @@
             "containers": [
               {
                 "name": "postgresql",
-                "image": "postgresql",
+                "image": " ",
                 "ports": [
                   {
                     "containerPort": 5432,

+ 9 - 1
roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml

@@ -54,6 +54,8 @@ objects:
           value: ${IMAGE_VERSION}
         - name: MASTER_URL
           value: ${MASTER_URL}
+        - name: MODE
+          value: ${MODE}
         - name: REDEPLOY
           value: ${REDEPLOY}
         - name: USE_PERSISTENT_STORAGE
@@ -66,6 +68,8 @@ objects:
           value: ${CASSANDRA_PV_SIZE}
         - name: METRIC_DURATION
           value: ${METRIC_DURATION}
+        - name: METRIC_RESOLUTION
+          value: ${METRIC_RESOLUTION}
     dnsPolicy: ClusterFirst
     restartPolicy: Never
     serviceAccount: metrics-deployer
@@ -83,7 +87,7 @@ parameters:
 -
   description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
   name: IMAGE_VERSION
-  value: "3.2.0"
+  value: "3.2.1"
 -
   description: "Internal URL for the master, for authentication retrieval"
   name: MASTER_URL
@@ -112,3 +116,7 @@ parameters:
   description: "How many days metrics should be stored for."
   name: METRIC_DURATION
   value: "7"
+-
+  description: "How often metrics should be gathered. Default value of '10s' for 10 seconds"
+  name: METRIC_RESOLUTION
+  value: "10s"

+ 85 - 61
roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml

@@ -10,7 +10,8 @@ items:
       description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
       tags: "infrastructure"
   objects:
-  - apiVersion: v1
+  -
+    apiVersion: v1
     kind: ServiceAccount
     name: logging-deployer
     metadata:
@@ -19,8 +20,6 @@ items:
         logging-infra: deployer
         provider: openshift
         component: deployer
-    secrets:
-    - name: logging-deployer
   -
     apiVersion: v1
     kind: ServiceAccount
@@ -67,18 +66,39 @@ items:
       - watch
       - delete
       - update
+  -
+    apiVersion: v1
+    kind: RoleBinding
+    metadata:
+      name: logging-deployer-edit-role
+    roleRef:
+      kind: ClusterRole
+      name: edit
+    subjects:
+    - kind: ServiceAccount
+      name: logging-deployer
+  -
+    apiVersion: v1
+    kind: RoleBinding
+    metadata:
+      name: logging-deployer-dsadmin-role
+    roleRef:
+      kind: ClusterRole
+      name: daemonset-admin
+    subjects:
+    - kind: ServiceAccount
+      name: logging-deployer
 -
   apiVersion: "v1"
   kind: "Template"
   metadata:
     name: logging-deployer-template
     annotations:
-      description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account and 'logging-deployer' secret."
+      description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
       tags: "infrastructure"
   labels:
     logging-infra: deployer
     provider: openshift
-    component: deployer
   objects:
   -
     apiVersion: v1
@@ -91,9 +111,6 @@ items:
         imagePullPolicy: Always
         name: deployer
         volumeMounts:
-        - name: secret
-          mountPath: /secret
-          readOnly: true
         - name: empty
           mountPath: /etc/deploy
         env:
@@ -125,6 +142,8 @@ items:
             value: ${ES_PVC_SIZE}
           - name: ES_PVC_PREFIX
             value: ${ES_PVC_PREFIX}
+          - name: ES_PVC_DYNAMIC
+            value: ${ES_PVC_DYNAMIC}
           - name: ES_CLUSTER_SIZE
             value: ${ES_CLUSTER_SIZE}
           - name: ES_NODE_QUORUM
@@ -141,6 +160,8 @@ items:
             value: ${ES_OPS_PVC_SIZE}
           - name: ES_OPS_PVC_PREFIX
             value: ${ES_OPS_PVC_PREFIX}
+          - name: ES_OPS_PVC_DYNAMIC
+            value: ${ES_OPS_PVC_DYNAMIC}
           - name: ES_OPS_CLUSTER_SIZE
             value: ${ES_OPS_CLUSTER_SIZE}
           - name: ES_OPS_NODE_QUORUM
@@ -173,130 +194,133 @@ items:
       volumes:
       - name: empty
         emptyDir: {}
-      - name: secret
-        secret:
-          secretName: logging-deployer
   parameters:
   -
-    description: "If true, set up to use a second ES cluster for ops logs."
+    description: "The mode that the deployer runs in."
+    name: MODE
+    value: "install"
+  -
+    description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+    name: IMAGE_PREFIX
+    value: "docker.io/openshift/origin-"
+  -
+    description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+    name: IMAGE_VERSION
+    value: "latest"
+  -
+    description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
+    name: IMAGE_PULL_SECRET
+  -
+    description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
+    name: INSECURE_REGISTRY
+    value: "false"
+  -
+    description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
     name: ENABLE_OPS_CLUSTER
     value: "false"
   -
-    description: "External hostname where clients will reach kibana"
+    description: "(Deprecated) External hostname where clients will reach kibana"
     name: KIBANA_HOSTNAME
-    required: true
+    value: "kibana.example.com"
   -
-    description: "External hostname at which admins will visit the ops Kibana."
+    description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
     name: KIBANA_OPS_HOSTNAME
     value: kibana-ops.example.com
   -
-    description: "External URL for the master, for OAuth purposes"
+    description: "(Deprecated) External URL for the master, for OAuth purposes"
     name: PUBLIC_MASTER_URL
-    required: true
+    value: "https://localhost:8443"
   -
-    description: "Internal URL for the master, for authentication retrieval"
+    description: "(Deprecated) Internal URL for the master, for authentication retrieval"
     name: MASTER_URL
     value: "https://kubernetes.default.svc.cluster.local"
   -
-    description: "How many instances of ElasticSearch to deploy."
+    description: "(Deprecated) How many instances of ElasticSearch to deploy."
     name: ES_CLUSTER_SIZE
-    required: true
+    value: "1"
   -
-    description: "Amount of RAM to reserve per ElasticSearch instance."
+    description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
     name: ES_INSTANCE_RAM
     value: "8G"
   -
-    description: "Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+    description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
     name: ES_PVC_SIZE
   -
-    description: "Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
+    description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
     name: ES_PVC_PREFIX
     value: "logging-es-"
   -
-    description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+    description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC.'
+    name: ES_PVC_DYNAMIC
+  -
+    description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
     name: ES_NODE_QUORUM
   -
-    description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+    description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
     name: ES_RECOVER_AFTER_NODES
   -
-    description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+    description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
     name: ES_RECOVER_EXPECTED_NODES
   -
-    description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+    description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
     name: ES_RECOVER_AFTER_TIME
     value: "5m"
   -
-    description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+    description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
     name: ES_OPS_CLUSTER_SIZE
   -
-    description: "Amount of RAM to reserve per ops ElasticSearch instance."
+    description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
     name: ES_OPS_INSTANCE_RAM
     value: "8G"
   -
-    description: "Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+    description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
     name: ES_OPS_PVC_SIZE
   -
-    description: "Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
+    description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
     name: ES_OPS_PVC_PREFIX
     value: "logging-es-ops-"
   -
-    description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+    description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC.'
+    name: ES_OPS_PVC_DYNAMIC
+  -
+    description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_OPS_CLUSTER_SIZE / 2 + 1."
     name: ES_OPS_NODE_QUORUM
   -
-    description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+    description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
     name: ES_OPS_RECOVER_AFTER_NODES
   -
-    description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+    description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
     name: ES_OPS_RECOVER_EXPECTED_NODES
   -
-    description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+    description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
     name: ES_OPS_RECOVER_AFTER_TIME
     value: "5m"
   -
-    description: "The nodeSelector used for the Fluentd DaemonSet."
+    description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
     name: FLUENTD_NODESELECTOR
     value: "logging-infra-fluentd=true"
   -
-    description: "Node selector Elasticsearch cluster (label=value)."
+    description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
     name: ES_NODESELECTOR
     value: ""
   -
-    description: "Node selector Elasticsearch operations cluster (label=value)."
+    description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
     name: ES_OPS_NODESELECTOR
     value: ""
   -
-    description: "Node selector Kibana cluster (label=value)."
+    description: "(Deprecated) Node selector Kibana cluster (label=value)."
     name: KIBANA_NODESELECTOR
     value: ""
   -
-    description: "Node selector Kibana operations cluster (label=value)."
+    description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
     name: KIBANA_OPS_NODESELECTOR
     value: ""
   -
-    description: "Node selector Curator (label=value)."
+    description: "(Deprecated) Node selector Curator (label=value)."
     name: CURATOR_NODESELECTOR
     value: ""
   -
-    description: "Node selector operations Curator (label=value)."
+    description: "(Deprecated) Node selector operations Curator (label=value)."
     name: CURATOR_OPS_NODESELECTOR
     value: ""
-  -
-    description: "The mode that the deployer runs in."
-    name: MODE
-    value: "install"
-  -
-    description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
-    name: IMAGE_PREFIX
-    value: "docker.io/openshift/origin-"
-  -
-    description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
-    name: IMAGE_VERSION
-    value: "latest"
-  -
-    description: 'Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry.'
-    name: IMAGE_PULL_SECRET
-  -
-    description: 'Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)'
-    name: INSECURE_REGISTRY
-    value: "false"
 

+ 6 - 0
roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml

@@ -34,9 +34,11 @@ objects:
   metadata:
     generateName: metrics-deployer-
   spec:
+    securityContext: {}
     containers:
     - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
       name: deployer
+      securityContext: {}
       volumeMounts:
       - name: secret
         mountPath: /secret
@@ -48,6 +50,10 @@ objects:
           valueFrom:
             fieldRef:
               fieldPath: metadata.namespace
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
         - name: IMAGE_PREFIX
           value: ${IMAGE_PREFIX}
         - name: IMAGE_VERSION

+ 1 - 1
roles/openshift_examples/files/examples/v1.2/quickstart-templates/django-postgresql.json

@@ -437,7 +437,7 @@
     },
     {
       "name": "DJANGO_SECRET_KEY",
-      "displayName": "Djange Secret Key",
+      "displayName": "Django Secret Key",
       "description": "Set this to a long random string.",
       "generate": "expression",
       "from": "[\\w]{50}"

+ 50 - 44
roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-ephemeral-template.json

@@ -12,35 +12,10 @@
   },
   "objects": [
     {
-      "kind": "Service",
-      "apiVersion": "v1",
-      "metadata": {
-        "name": "${JENKINS_SERVICE_NAME}",
-        "creationTimestamp": null
-      },
-      "spec": {
-        "ports": [
-          {
-            "name": "web",
-            "protocol": "TCP",
-            "port": 8080,
-            "targetPort": 8080,
-            "nodePort": 0
-          }
-        ],
-        "selector": {
-          "name": "${JENKINS_SERVICE_NAME}"
-        },
-        "portalIP": "",
-        "type": "ClusterIP",
-        "sessionAffinity": "None"
-      }
-    },
-    {
       "kind": "Route",
       "apiVersion": "v1",
       "metadata": {
-        "name": "jenkins",
+        "name": "${JENKINS_SERVICE_NAME}",
         "creationTimestamp": null
       },
       "spec": {
@@ -77,7 +52,7 @@
               ],
               "from": {
                 "kind": "ImageStreamTag",
-                "name": "jenkins:latest",
+                "name": "${JENKINS_IMAGE_STREAM_TAG}",
                 "namespace": "${NAMESPACE}"
               },
               "lastTriggeredImage": ""
@@ -102,7 +77,7 @@
             "containers": [
               {
                 "name": "jenkins",
-                "image": "JENKINS_IMAGE",
+                "image": " ",
                 "readinessProbe": {
                   "timeoutSeconds": 3,
                   "initialDelaySeconds": 3,
@@ -126,10 +101,10 @@
                   }
                 ],
                 "resources": {
-		    "limits": {
-			"memory": "${MEMORY_LIMIT}"
-		    }
-		},
+                  "limits": {
+                    "memory": "${MEMORY_LIMIT}"
+                  }
+                },
                 "volumeMounts": [
                   {
                     "name": "${JENKINS_SERVICE_NAME}-data",
@@ -158,22 +133,35 @@
           }
         }
       }
+    },
+    {
+      "kind": "Service",
+      "apiVersion": "v1",
+      "metadata": {
+        "name": "${JENKINS_SERVICE_NAME}",
+        "creationTimestamp": null
+      },
+      "spec": {
+        "ports": [
+          {
+            "name": "web",
+            "protocol": "TCP",
+            "port": 8080,
+            "targetPort": 8080,
+            "nodePort": 0
+          }
+        ],
+        "selector": {
+          "name": "${JENKINS_SERVICE_NAME}"
+        },
+        "portalIP": "",
+        "type": "ClusterIP",
+        "sessionAffinity": "None"
+      }
     }
   ],
   "parameters": [
     {
-      "name": "MEMORY_LIMIT",
-      "displayName": "Memory Limit",
-      "description": "Maximum amount of memory the container can use.",
-      "value": "512Mi"
-    },
-    {
-      "name": "NAMESPACE",
-      "displayName": "Namespace",
-      "description": "The OpenShift Namespace where the ImageStream resides.",
-      "value": "openshift"
-    },
-    {
       "name": "JENKINS_SERVICE_NAME",
       "displayName": "Jenkins Service Name",
       "description": "The name of the OpenShift Service exposed for the Jenkins container.",
@@ -185,6 +173,24 @@
       "description": "Password for the Jenkins 'admin' user.",
       "generate": "expression",
       "value": "password"
+    },
+    {
+      "name": "MEMORY_LIMIT",
+      "displayName": "Memory Limit",
+      "description": "Maximum amount of memory the container can use.",
+      "value": "512Mi"
+    },
+    {
+      "name": "NAMESPACE",
+      "displayName": "Jenkins ImageStream Namespace",
+      "description": "The OpenShift Namespace where the Jenkins ImageStream resides.",
+      "value": "openshift"
+    },
+    {
+      "name": "JENKINS_IMAGE_STREAM_TAG",
+      "displayName": "Jenkins ImageStreamTag",
+      "description": "Name of the ImageStreamTag to be used for the Jenkins image.",
+      "value": "jenkins:latest"
     }
   ],
   "labels": {

+ 51 - 45
roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json

@@ -12,35 +12,10 @@
   },
   "objects": [
     {
-      "kind": "Service",
-      "apiVersion": "v1",
-      "metadata": {
-        "name": "${JENKINS_SERVICE_NAME}",
-        "creationTimestamp": null
-      },
-      "spec": {
-        "ports": [
-          {
-            "name": "web",
-            "protocol": "TCP",
-            "port": 8080,
-            "targetPort": 8080,
-            "nodePort": 0
-          }
-        ],
-        "selector": {
-          "name": "${JENKINS_SERVICE_NAME}"
-        },
-        "portalIP": "",
-        "type": "ClusterIP",
-        "sessionAffinity": "None"
-      }
-    },
-    {
       "kind": "Route",
       "apiVersion": "v1",
       "metadata": {
-        "name": "jenkins",
+        "name": "${JENKINS_SERVICE_NAME}",
         "creationTimestamp": null
       },
       "spec": {
@@ -82,7 +57,7 @@
       },
       "spec": {
         "strategy": {
-            "type": "Recreate"
+          "type": "Recreate"
         },
         "triggers": [
           {
@@ -94,7 +69,7 @@
               ],
               "from": {
                 "kind": "ImageStreamTag",
-                "name": "jenkins:latest",
+                "name": "${JENKINS_IMAGE_STREAM_TAG}",
                 "namespace": "${NAMESPACE}"
               },
               "lastTriggeredImage": ""
@@ -119,7 +94,7 @@
             "containers": [
               {
                 "name": "jenkins",
-                "image": "JENKINS_IMAGE",
+                "image": " ",
                 "readinessProbe": {
                   "timeoutSeconds": 3,
                   "initialDelaySeconds": 3,
@@ -143,10 +118,10 @@
                   }
                 ],
                 "resources": {
-		    "limits": {
-			"memory": "${MEMORY_LIMIT}"
-		    }
-		},
+                  "limits": {
+                    "memory": "${MEMORY_LIMIT}"
+                  }
+                },
                 "volumeMounts": [
                   {
                     "name": "${JENKINS_SERVICE_NAME}-data",
@@ -175,22 +150,35 @@
           }
         }
       }
+    },
+    {
+      "kind": "Service",
+      "apiVersion": "v1",
+      "metadata": {
+        "name": "${JENKINS_SERVICE_NAME}",
+        "creationTimestamp": null
+      },
+      "spec": {
+        "ports": [
+          {
+            "name": "web",
+            "protocol": "TCP",
+            "port": 8080,
+            "targetPort": 8080,
+            "nodePort": 0
+          }
+        ],
+        "selector": {
+          "name": "${JENKINS_SERVICE_NAME}"
+        },
+        "portalIP": "",
+        "type": "ClusterIP",
+        "sessionAffinity": "None"
+      }
     }
   ],
   "parameters": [
     {
-      "name": "MEMORY_LIMIT",
-      "displayName": "Memory Limit",
-      "description": "Maximum amount of memory the container can use.",
-      "value": "512Mi"
-    },
-    {
-      "name": "NAMESPACE",
-      "displayName": "Namespace",
-      "description": "The OpenShift Namespace where the ImageStream resides.",
-      "value": "openshift"
-    },
-    {
       "name": "JENKINS_SERVICE_NAME",
       "displayName": "Jenkins Service Name",
       "description": "The name of the OpenShift Service exposed for the Jenkins container.",
@@ -204,11 +192,29 @@
       "value": "password"
     },
     {
+      "name": "MEMORY_LIMIT",
+      "displayName": "Memory Limit",
+      "description": "Maximum amount of memory the container can use.",
+      "value": "512Mi"
+    },
+    {
       "name": "VOLUME_CAPACITY",
       "displayName": "Volume Capacity",
       "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
       "value": "1Gi",
       "required": true
+    },
+    {
+      "name": "NAMESPACE",
+      "displayName": "Jenkins ImageStream Namespace",
+      "description": "The OpenShift Namespace where the Jenkins ImageStream resides.",
+      "value": "openshift"
+    },
+    {
+      "name": "JENKINS_IMAGE_STREAM_TAG",
+      "displayName": "Jenkins ImageStreamTag",
+      "description": "Name of the ImageStreamTag to be used for the Jenkins image.",
+      "value": "jenkins:latest"
     }
   ],
   "labels": {

+ 5 - 0
roles/openshift_examples/tasks/main.yml

@@ -4,6 +4,11 @@
     src: "examples/{{ content_version }}/"
     dest: "{{ examples_base }}/"
 
+- name: Modify registry paths if registry_url is not registry.access.redhat.com
+  shell: >
+    find {{ examples_base }} -type f | xargs -n 1 sed -i 's|registry.access.redhat.com|{{ registry_host | quote }}|g'
+  when: registry_host != ''
+
 # RHEL and Centos image streams are mutually exclusive
 - name: Import RHEL streams
   command: >

+ 12 - 7
roles/openshift_node/tasks/main.yml

@@ -110,18 +110,23 @@
   changed_when: false
   when: openshift.common.is_containerized | bool
 
+- name: Start and enable node dep
+  service: name={{ openshift.common.service_type }}-node-dep enabled=yes state=started
+  when: openshift.common.is_containerized | bool
+
 - name: Start and enable node
   service: name={{ openshift.common.service_type }}-node enabled=yes state=started
   register: node_start_result
   ignore_errors: yes
-
-- name: Check logs on failure
-  command: journalctl -xe
-  register: node_failure
+  
+- name: Wait 30 seconds for docker initialization whenever node has failed
+  pause:
+    seconds: 30
   when: node_start_result | failed
-
-- name: Dump failure information
-  debug: var=node_failure
+  
+- name: Start and enable node again
+  service: name={{ openshift.common.service_type }}-node enabled=yes state=started
+  register: node_start_result
   when: node_start_result | failed
 
 - set_fact:

+ 1 - 1
roles/openshift_node/templates/openshift.docker.node.dep.service

@@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service
 
 
 [Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
 ExecStop=
 SyslogIdentifier={{ openshift.common.service_type }}-node-dep