Przeglądaj źródła

Merge pull request #808 from sdodson/containers

Containerized install with SDN support
Brenton Leanhardt 9 lat temu
rodzic
commit
c473a90f4a
68 zmienionych plików z 827 dodań i 122 usunięć
  1. 101 0
      README_CONTAINERIZED_INSTALLATION.md
  2. 29 3
      playbooks/adhoc/uninstall.yml
  3. 2 0
      playbooks/common/openshift-cluster/config.yml
  4. 1 0
      playbooks/common/openshift-cluster/update_repos_and_packages.yml
  5. 1 0
      playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
  6. 9 0
      playbooks/common/openshift-docker/config.yml
  7. 1 0
      playbooks/common/openshift-docker/filter_plugins
  8. 1 0
      playbooks/common/openshift-docker/lookup_plugins
  9. 1 0
      playbooks/common/openshift-docker/roles
  10. 3 1
      playbooks/common/openshift-etcd/config.yml
  11. 3 1
      playbooks/common/openshift-master/config.yml
  12. 1 0
      playbooks/common/openshift-node/config.yml
  13. 1 0
      roles/ansible/tasks/main.yml
  14. 1 0
      roles/cockpit/tasks/main.yml
  15. 1 0
      roles/copr_cli/tasks/main.yml
  16. 1 1
      roles/docker/README.md
  17. 4 1
      roles/docker/handlers/main.yml
  18. 9 1
      roles/docker/tasks/main.yml
  19. 1 0
      roles/etcd/defaults/main.yaml
  20. 2 1
      roles/etcd/handlers/main.yml
  21. 53 7
      roles/etcd/tasks/main.yml
  22. 11 11
      roles/etcd/templates/etcd.conf.j2
  23. 13 0
      roles/etcd/templates/etcd.docker.service
  24. 1 0
      roles/flannel/tasks/main.yml
  25. 5 0
      roles/fluentd_master/tasks/main.yml
  26. 5 0
      roles/fluentd_node/tasks/main.yml
  27. 1 0
      roles/haproxy/tasks/main.yml
  28. 5 0
      roles/kube_nfs_volumes/tasks/main.yml
  29. 1 0
      roles/kube_nfs_volumes/tasks/nfs.yml
  30. 1 0
      roles/openshift_ansible_inventory/tasks/main.yml
  31. 16 0
      roles/openshift_cli/meta/main.yml
  32. 48 0
      roles/openshift_cli/tasks/main.yml
  33. 16 0
      roles/openshift_cli/templates/openshift.j2
  34. 1 0
      roles/openshift_common/tasks/main.yml
  35. 16 0
      roles/openshift_docker/meta/main.yml
  36. 53 0
      roles/openshift_docker/tasks/main.yml
  37. 4 4
      roles/openshift_examples/defaults/main.yml
  38. 8 0
      roles/openshift_expand_partition/tasks/main.yml
  39. 66 3
      roles/openshift_facts/library/openshift_facts.py
  40. 17 1
      roles/openshift_facts/tasks/main.yml
  41. 1 0
      roles/openshift_manage_node/tasks/main.yml
  42. 1 0
      roles/openshift_master/meta/main.yml
  43. 70 5
      roles/openshift_master/tasks/main.yml
  44. 26 0
      roles/openshift_master/templates/atomic-openshift-master-api.docker.service.j2
  45. 25 0
      roles/openshift_master/templates/atomic-openshift-master-controllers.docker.service.j2
  46. 16 0
      roles/openshift_master/templates/master.docker.service.j2
  47. 1 0
      roles/openshift_master_ca/meta/main.yml
  48. 14 0
      roles/openshift_master_ca/tasks/main.yml
  49. 2 0
      roles/openshift_master_certificates/tasks/main.yml
  50. 4 0
      roles/openshift_master_cluster/tasks/main.yml
  51. 0 1
      roles/openshift_node/meta/main.yml
  52. 66 62
      roles/openshift_node/tasks/main.yml
  53. 2 1
      roles/openshift_node/tasks/storage_plugins/ceph.yml
  54. 1 0
      roles/openshift_node/tasks/storage_plugins/glusterfs.yml
  55. 3 2
      roles/openshift_node/tasks/storage_plugins/main.yml
  56. 20 0
      roles/openshift_node/templates/openshift.docker.node.service
  57. 13 0
      roles/openshift_node/templates/openvswitch.docker.service
  58. 4 4
      roles/openshift_node_certificates/tasks/main.yml
  59. 10 5
      roles/openshift_repos/tasks/main.yaml
  60. 13 5
      roles/openshift_serviceaccounts/tasks/main.yml
  61. 5 0
      roles/openshift_storage_nfs_lvm/tasks/main.yml
  62. 1 0
      roles/openshift_storage_nfs_lvm/tasks/nfs.yml
  63. 2 1
      roles/os_env_extras/tasks/main.yaml
  64. 1 0
      roles/os_firewall/tasks/firewall/firewalld.yml
  65. 1 0
      roles/os_firewall/tasks/firewall/iptables.yml
  66. 6 1
      roles/os_update_latest/tasks/main.yml
  67. 4 0
      roles/os_zabbix/tasks/main.yml
  68. 1 0
      roles/yum_repos/tasks/main.yml

+ 101 - 0
README_CONTAINERIZED_INSTALLATION.md

@@ -0,0 +1,101 @@
+# Overview
+
+Users may now deploy containerized versions of OpenShift Origin, OpenShift
+Enterprise, or Atomic Enterprise Platform on
+[Atomic Host](https://projectatomic.io) or RHEL, CentOS, and Fedora. This
+includes OpenvSwitch based SDN.
+
+
+## Installing on Atomic Host
+
+When installing on Atomic Host you will automatically have containerized
+installation methods selected for you based on detection of _/run/ostree-booted_
+
+## Installing on RHEL, CentOS, or Fedora
+
+Currently the default installation method for traditional operating systems is
+via RPMs. If you wish to deploy using containerized installation you may set the
+ansible variable 'containerized=true' on a per host basis. This means that you
+may easily deploy environments mixing containerized and RPM based installs. At
+this point we suggest deploying heterogeneous environments.
+
+## CLI Wrappers
+
+When using containerized installations openshift-ansible will deploy a wrapper
+script on each master located in _/usr/local/bin/openshift_ and a set of
+symbolic links _/usr/local/bin/oc_, _/usr/local/bin/oadm_, and
+_/usr/local/bin/kubectl_ to ease administrative tasks. The wrapper script spawns
+a new container on each invocation so you may notice it's slightly slower than
+native clients.
+
+The wrapper scripts mount a limited subset of paths, _~/.kube_, _/etc/origin/_,
+and _/tmp_. Be mindful of this when passing in files to be processed by `oc` or
+`oadm`. You may find it easier to redirect input like this:
+
+`oc create -f - < my_file.json`
+
+## Technical Notes
+
+### Requisite Images
+
+Based on your deployment_type the installer will make use of the following
+images. Because you may make use of a private repository we've moved the
+configuration of docker additional, insecure, and blocked registries to the
+beginning of the installation process ensuring that these settings are applied
+before attempting to pull any of the following images.
+
+    Origin
+        openshift/origin
+        openshift/node (node + openshift-sdn + openvswitch rpm for client tools)
+        openshift/openvswitch (centos7 + openvswitch rpm, runs ovsdb ovsctl processes)
+        registry.access.redhat.com/rhel7/etcd
+    OpenShift Enterprise
+        openshift3/ose
+        openshift3/node
+        openshift3/openvswitch
+        registry.access.redhat.com/rhel7/etcd
+    Atomic Enterprise Platform
+        aep3/aep
+        aep3/node
+        aep3/openvswitch
+        registry.access.redhat.com/rhel7/etcd
+        
+  * note openshift3/* and aep3/* images come from registry.access.redhat.com and
+rely on the --additional-repository flag being set appropriately.
+
+### Starting and Stopping Containers
+
+The installer will create relevant systemd units which can be used to start,
+stop, and poll services via normal systemctl commands. These unit names match
+those of an RPM installation with the exception of the etcd service which will
+be named 'etcd_container'. This change is necessary because Atomic Host
+currently ships with the etcd package pre-installed, and we will instead use
+a containerized version. The installer will disable the built-in etcd service.
+etcd is slated to be removed from os-tree in the future.
+
+### File Paths
+
+All configuration files are placed in the same locations as RPM based
+installations and will survive os-tree upgrades.
+
+The examples are installed into _/etc/origin/examples_ rather than
+_/usr/share/openshift/examples_ because that is read-only on Atomic Host.
+
+
+### Storage Requirements
+
+Atomic Host installs normally have a very small root filesystem. However the
+etcd, master, and node containers will persist data in /var/lib. Please ensure
+that you have enough space on the root filesystem.
+
+### OpenvSwitch SDN Initialization
+
+OpenShift SDN initialization requires that the docker bridge be reconfigured and
+docker is restarted. This complicates the situation when the node is running
+within a container. When using the OVS SDN you'll see the node start,
+reconfigure docker, restart docker which will restart all containers, and
+finally start successfully.
+
+The node service may fail to start and be restarted a few times because the
+master services are also restarted along with docker. We currently work around
+this by relying on Restart=always in the docker based systemd units.

+ 29 - 3
playbooks/adhoc/uninstall.yml

@@ -19,15 +19,19 @@
       failed_when: false
       register: ostree_output
 
+      # Since we're not calling openshift_facts we'll do this for now
     - set_fact:
         is_atomic: "{{ ostree_output.rc == 0 }}"
+    - set_fact:
+        is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
 
     - name: Remove br0 interface
       shell: ovs-vsctl del-br br0
       changed_when: False
       failed_when: False
 
-    - service: name={{ item }} state=stopped
+    - name: Stop services
+      service: name={{ item }} state=stopped
       with_items:
         - atomic-enterprise-master
         - atomic-enterprise-node
@@ -46,8 +50,10 @@
         - origin-master-controllers
         - origin-node
         - pcsd
+      failed_when: false
 
-    - action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+    - name: Remove packages
+      action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
       when: not is_atomic | bool
       with_items:
         - atomic-enterprise
@@ -132,14 +138,26 @@
       with_items:
         - registry\.access\..*redhat\.com/openshift3
         - registry\.access\..*redhat\.com/aep3
+        - registry\.access\..*redhat\.com/rhel7/etcd
         - docker.io/openshift
 
     - shell:  "docker rmi -f {{ item.stdout_lines | join(' ') }}"
       changed_when: False
       failed_when: False
       with_items: "{{ images_to_delete.results }}"
+    
+    - name: Remove sdn drop files
+      file: 
+        path: /run/openshift-sdn
+        state: absent
+        
+    - name: restart docker
+      service:
+        name: docker
+        state: restarted
 
-    - file: path={{ item }} state=absent
+    - name: Remove remaining files
+      file: path={{ item }} state=absent
       with_items:
         - "~{{ ansible_ssh_user }}/.kube"
         - /etc/ansible/facts.d/openshift.fact
@@ -149,7 +167,15 @@
         - /etc/openshift
         - /etc/openshift-sdn
         - /etc/origin
+        - /etc/systemd/system/atomic-openshift-master.service
+        - /etc/systemd/system/atomic-openshift-master-api.service
+        - /etc/systemd/system/atomic-openshift-master-controllers.service
+        - /etc/systemd/system/atomic-openshift-node.service
+        - /etc/systemd/system/etcd_container.service
+        - /etc/systemd/system/openvswitch.service
         - /etc/sysconfig/atomic-enterprise-master
+        - /etc/sysconfig/atomic-enterprise-master-api
+        - /etc/sysconfig/atomic-enterprise-master-controllers
         - /etc/sysconfig/atomic-enterprise-node
         - /etc/sysconfig/atomic-openshift-master
         - /etc/sysconfig/atomic-openshift-master-api

+ 2 - 0
playbooks/common/openshift-cluster/config.yml

@@ -1,6 +1,8 @@
 ---
 - include: evaluate_groups.yml
 
+- include: ../openshift-docker/config.yml
+
 - include: ../openshift-etcd/config.yml
 
 - include: ../openshift-master/config.yml

+ 1 - 0
playbooks/common/openshift-cluster/update_repos_and_packages.yml

@@ -8,5 +8,6 @@
           ansible_distribution == "RedHat" and
           lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
             default('no', True) | lower in ['no', 'false']
+          and not openshift.common.is_atomic | bool
   - openshift_repos
   - os_update_latest

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml

@@ -235,6 +235,7 @@
 
   - name: Ensure python-yaml present for config upgrade
     action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+    when: not openshift.common.is_atomic | bool
 
   - name: Upgrade master configuration
     openshift_upgrade_config:

+ 9 - 0
playbooks/common/openshift-docker/config.yml

@@ -0,0 +1,9 @@
+- name: Configure docker hosts
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+  vars:
+    docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
+    docker_insecure_registries: "{{ lookup('oo_option',  'docker_insecure_registries') | oo_split }}"
+    docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
+  roles:
+  - openshift_facts
+  - openshift_docker

+ 1 - 0
playbooks/common/openshift-docker/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/common/openshift-docker/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

+ 1 - 0
playbooks/common/openshift-docker/roles

@@ -0,0 +1 @@
+../../../roles

+ 3 - 1
playbooks/common/openshift-etcd/config.yml

@@ -14,7 +14,8 @@
           public_hostname: "{{ openshift_public_hostname | default(None) }}"
           deployment_type: "{{ openshift_deployment_type }}"
       - role: etcd
-        local_facts: {}
+        local_facts:
+          etcd_image: "{{ osm_etcd_image | default(None) }}"
   - name: Check status of etcd certificates
     stat:
       path: "{{ item }}"
@@ -88,6 +89,7 @@
   roles:
   - etcd
   - role: nickhammond.logrotate
+    when: not openshift.common.is_containerized | bool
 
 - name: Delete temporary directory on localhost
   hosts: localhost

+ 3 - 1
playbooks/common/openshift-master/config.yml

@@ -246,6 +246,7 @@
     when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length)
   - name: Install OpenSSL package
     action: "{{ ansible_pkg_mgr }} name=openssl state=present"
+    when: not openshift.common.is_atomic | bool
   - name: Generate session authentication key
     command: /usr/bin/openssl rand -base64 24
     register: session_auth_output
@@ -328,6 +329,7 @@
   roles:
   - openshift_master
   - role: nickhammond.logrotate
+    when: not openshift.common.is_containerized | bool
   - role: fluentd_master
     when: openshift.common.use_fluentd | bool
   post_tasks:
@@ -356,7 +358,7 @@
     cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
   roles:
   - role: cockpit
-    when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
+    when: not openshift.common.is_containerized and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
       (osm_use_cockpit | bool or osm_use_cockpit is undefined )
 
 - name: Configure flannel

+ 1 - 0
playbooks/common/openshift-node/config.yml

@@ -182,6 +182,7 @@
   - role: flannel
     when: openshift.common.use_flannel | bool
   - role: nickhammond.logrotate
+    when: not openshift.common.is_containerized | bool
   - role: fluentd_node
     when: openshift.common.use_fluentd | bool
   tasks:

+ 1 - 0
roles/ansible/tasks/main.yml

@@ -3,6 +3,7 @@
 
 - name: Install Ansible
   action: "{{ ansible_pkg_mgr }} name=ansible state=present"
+  when: not openshift.common.is_containerized | bool
 
 - include: config.yml
   vars:

+ 1 - 0
roles/cockpit/tasks/main.yml

@@ -6,6 +6,7 @@
     - cockpit-shell
     - cockpit-bridge
     - "{{ cockpit_plugins }}"
+  when: not openshift.common.is_containerized | bool
 
 - name: Enable cockpit-ws
   service:

+ 1 - 0
roles/copr_cli/tasks/main.yml

@@ -1,2 +1,3 @@
 ---
 - action: "{{ ansible_pkg_mgr }} name=copr-cli state=present"
+  when: not openshift.common.is_containerized | bool

+ 1 - 1
roles/docker/README.md

@@ -1,4 +1,4 @@
-Role Name
+Docker
 =========
 
 Ensures docker package is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.

+ 4 - 1
roles/docker/handlers/main.yml

@@ -1,7 +1,10 @@
 ---
 
 - name: restart docker
-  service: name=docker state=restarted
+  service:
+    name: docker
+    state: restarted
+  when: not docker_service_status_changed | default(false)
 
 - name: restart udev
   service:

+ 9 - 1
roles/docker/tasks/main.yml

@@ -2,9 +2,17 @@
 # tasks file for docker
 - name: Install docker
   action: "{{ ansible_pkg_mgr }} name=docker state=present"
+  when: not openshift.common.is_atomic | bool
   
 - name: enable and start the docker service
-  service: name=docker enabled=yes state=started
+  service:
+    name: docker
+    enabled: yes
+    state: started
+  register: start_result
+
+- set_fact:
+    docker_service_status_changed: "{{ start_result | changed }}"
 
 - include: udev_workaround.yml
   when: docker_udev_workaround | default(False)

+ 1 - 0
roles/etcd/defaults/main.yaml

@@ -1,4 +1,5 @@
 ---
+etcd_service: "{{ 'etcd' if not openshift.common.is_containerized else 'etcd_container' }}"
 etcd_interface: "{{ ansible_default_ipv4.interface }}"
 etcd_client_port: 2379
 etcd_peer_port: 2380

+ 2 - 1
roles/etcd/handlers/main.yml

@@ -1,4 +1,5 @@
 ---
+
 - name: restart etcd
-  service: name=etcd state=restarted
+  service: name={{ etcd_service }} state=restarted
   when: not etcd_service_status_changed | default(false)

+ 53 - 7
roles/etcd/tasks/main.yml

@@ -9,21 +9,67 @@
 
 - name: Install etcd
   action: "{{ ansible_pkg_mgr }} name=etcd-2.* state=present"
+  when: not openshift.common.is_containerized | bool
+
+- name: Get docker images
+  command: docker images
+  changed_when: false
+  when: openshift.common.is_containerized | bool
+  register: docker_images
+
+- name: Pull etcd container
+  command: docker pull {{ openshift.etcd.etcd_image }}
+  when: openshift.common.is_containerized | bool and openshift.etcd.etcd_image not in docker_images.stdout
+  
+- name: Wait for etcd image
+  command: >
+      docker images
+  register: docker_images
+  until: openshift.etcd.etcd_image in docker_images.stdout
+  retries: 30
+  delay: 10
+  changed_when: false
+  when: openshift.common.is_containerized | bool
+
+- name: Install etcd container service file
+  template:
+    dest: "/etc/systemd/system/etcd_container.service"
+    src: etcd.docker.service
+  register: install_etcd_result
+  when: openshift.common.is_containerized | bool
+
+- name: Ensure etcd datadir exists
+  when: openshift.common.is_containerized | bool
+  file:
+    path: "{{ etcd_data_dir }}"
+    state: directory
+    mode: 0700
+
+- name: Disable system etcd when containerized
+  when: openshift.common.is_containerized | bool
+  service:
+    name: etcd
+    state: stopped
+    enabled: no
+
+- name: Reload systemd units
+  command: systemctl daemon-reload
+  when: openshift.common.is_containerized and ( install_etcd_result | changed )
 
 - name: Validate permissions on the config dir
   file:
     path: "{{ etcd_conf_dir }}"
     state: directory
-    owner: etcd
-    group: etcd
+    owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+    group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
     mode: 0700
 
 - name: Validate permissions on certificate files
   file:
     path: "{{ item }}"
     mode: 0600
-    group: etcd
-    owner: etcd
+    owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+    group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
   when: etcd_url_scheme == 'https'
   with_items:
   - "{{ etcd_ca_file }}"
@@ -34,8 +80,8 @@
   file:
     path: "{{ item }}"
     mode: 0600
-    group: etcd
-    owner: etcd
+    owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+    group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
   when: etcd_peer_url_scheme == 'https'
   with_items:
   - "{{ etcd_peer_ca_file }}"
@@ -52,7 +98,7 @@
 
 - name: Enable etcd
   service:
-    name: etcd
+    name: "{{ etcd_service }}"
     state: started
     enabled: yes
   register: start_result

+ 11 - 11
roles/etcd/templates/etcd.conf.j2

@@ -15,13 +15,13 @@ ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
 ETCD_NAME=default
 {% endif %}
 ETCD_DATA_DIR={{ etcd_data_dir }}
-#ETCD_SNAPSHOT_COUNTER="10000"
-ETCD_HEARTBEAT_INTERVAL="500"
-ETCD_ELECTION_TIMEOUT="2500"
+#ETCD_SNAPSHOT_COUNTER=10000
+ETCD_HEARTBEAT_INTERVAL=500
+ETCD_ELECTION_TIMEOUT=2500
 ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
-#ETCD_MAX_SNAPSHOTS="5"
-#ETCD_MAX_WALS="5"
-#ETCD_CORS=""
+#ETCD_MAX_SNAPSHOTS=5
+#ETCD_MAX_WALS=5
+#ETCD_CORS=
 
 {% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %}
 #[cluster]
@@ -29,15 +29,15 @@ ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
 ETCD_INITIAL_CLUSTER={{ initial_cluster() }}
 ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
 ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
-#ETCD_DISCOVERY=""
-#ETCD_DISCOVERY_SRV=""
-#ETCD_DISCOVERY_FALLBACK="proxy"
-#ETCD_DISCOVERY_PROXY=""
+#ETCD_DISCOVERY=
+#ETCD_DISCOVERY_SRV=
+#ETCD_DISCOVERY_FALLBACK=proxy
+#ETCD_DISCOVERY_PROXY=
 {% endif %}
 ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
 
 #[proxy]
-#ETCD_PROXY="off"
+#ETCD_PROXY=off
 
 #[security]
 {% if etcd_url_scheme == 'https' -%}

+ 13 - 0
roles/etcd/templates/etcd.docker.service

@@ -0,0 +1,13 @@
+[Unit]
+Description=The Etcd Server container
+After=docker.service
+
+[Service]
+EnvironmentFile=/etc/etcd/etcd.conf
+ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:z --env-file=/etc/etcd/etcd.conf --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStop=/usr/bin/docker stop {{ etcd_service }}
+Restart=always
+
+[Install]
+WantedBy=multi-user.target

+ 1 - 0
roles/flannel/tasks/main.yml

@@ -2,6 +2,7 @@
 - name: Install flannel
   sudo: true
   action: "{{ ansible_pkg_mgr }} name=flannel state=present"
+  when: not openshift.common.is_containerized | bool
 
 - name: Set flannel etcd url
   sudo: true

+ 5 - 0
roles/fluentd_master/tasks/main.yml

@@ -1,7 +1,12 @@
 ---
+- fail:
+    msg: "fluentd master is not yet supported on atomic hosts"
+  when: openshift.common.is_containerized | bool
+
 # TODO: Update fluentd install and configuration when packaging is complete
 - name: download and install td-agent
   action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present"
+  when: not openshift.common.is_containerized | bool
 
 - name: Verify fluentd plugin installed
   command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'

+ 5 - 0
roles/fluentd_node/tasks/main.yml

@@ -1,7 +1,12 @@
 ---
+- fail:
+    msg: "fluentd node is not yet supported on atomic hosts"
+  when: openshift.common.is_containerized | bool
+
 # TODO: Update fluentd install and configuration when packaging is complete
 - name: download and install td-agent
   action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present"
+  when: not openshift.common.is_containerized | bool
 
 - name: Verify fluentd plugin installed
   command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'

+ 1 - 0
roles/haproxy/tasks/main.yml

@@ -1,6 +1,7 @@
 ---
 - name: Install haproxy
   action: "{{ ansible_pkg_mgr }} name=haproxy state=present"
+  when: not openshift.common.is_containerized | bool
 
 - name: Configure haproxy
   template:

+ 5 - 0
roles/kube_nfs_volumes/tasks/main.yml

@@ -1,6 +1,11 @@
 ---
+- fail:
+    msg: "This role is not yet supported on atomic hosts"
+  when: openshift.common.is_atomic | bool
+
 - name: Install pyparted (RedHat/Fedora)
   action: "{{ ansible_pkg_mgr }} name=pyparted,python-httplib2 state=present"
+  when: not openshift.common.is_containerized | bool
 
 - name: partition the drives
   partitionpool: disks={{ disks }} force={{ force }} sizes={{ sizes }}

+ 1 - 0
roles/kube_nfs_volumes/tasks/nfs.yml

@@ -1,6 +1,7 @@
 ---
 - name: Install NFS server
   action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present"
+  when: not openshift.common.is_containerized | bool
 
 - name: Start rpcbind on Fedora/Red Hat
   service: name=rpcbind state=started enabled=yes

+ 1 - 0
roles/openshift_ansible_inventory/tasks/main.yml

@@ -4,6 +4,7 @@
   - openshift-ansible-inventory
   - openshift-ansible-inventory-aws
   - openshift-ansible-inventory-gce
+  when: not openshift.common.is_containerized | bool
 
 - name:
   copy:

+ 16 - 0
roles/openshift_cli/meta/main.yml

@@ -0,0 +1,16 @@
+---
+galaxy_info:
+  author: Jason DeTiberus
+  description: OpenShift Docker
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.9
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+dependencies:
+- { role: openshift_common }
+- { role: docker }

+ 48 - 0
roles/openshift_cli/tasks/main.yml

@@ -0,0 +1,48 @@
+---
+- openshift_facts:
+    role: common
+    local_facts:
+      deployment_type: "{{ openshift_deployment_type }}"
+      
+- name: Install clients
+  yum: pkg={{ openshift.common.service_type }}-clients state=installed
+  when: not openshift.common.is_containerized | bool
+  
+- name: List Docker images
+  command: >
+    docker images
+  register: docker_images
+  
+- name: Pull CLI Image
+  command: >
+    docker pull {{ openshift.common.cli_image }}
+  when: openshift.common.is_containerized | bool and openshift.common.cli_image not in docker_images.stdout
+  
+- name: Wait for CLI image
+  command: >
+      docker images
+  register: docker_images
+  until: openshift.common.cli_image in docker_images.stdout
+  retries: 30
+  delay: 10
+  changed_when: false
+  when: openshift.common.is_containerized | bool
+
+  
+- name: Create /usr/local/bin/openshift cli wrapper
+  template:
+    src: openshift.j2
+    dest: /usr/local/bin/openshift
+    mode: 0755
+  when: openshift.common.is_containerized | bool
+  
+- name: Create client symlinks
+  file: 
+    path: "{{ item }}"
+    state: link
+    src: /usr/local/bin/openshift
+  with_items:
+    - /usr/local/bin/oadm
+    - /usr/local/bin/oc
+    - /usr/local/bin/kubectl
+  when: openshift.common.is_containerized | bool

+ 16 - 0
roles/openshift_cli/templates/openshift.j2

@@ -0,0 +1,16 @@
+#!/bin/bash
+if [ ! -d ~/.kube ]; then
+   mkdir -m 0700 ~/.kube
+fi
+cmd=`basename $0`
+user=`id -u`
+group=`id -g`
+
+# docker can only split stderr and stdin when run without -t
+# https://github.com/docker/docker/issues/725
+# ansible checks various streams DO NOT CROSS THE STREAMS
+if [ -z $TERM ]; then
+  t='-it'
+fi
+
+docker run ${t} -a STDERR -a STDOUT -a STDIN --privileged --net=host --user=${user}:${group} -v ~/.kube:/root/.kube -v /tmp:/tmp -v {{ openshift.common.config_base}}:{{ openshift.common.config_base }} -e KUBECONFIG=/root/.kube/config --entrypoint ${cmd} --rm {{ openshift.common.cli_image }} ${@}

+ 1 - 0
roles/openshift_common/tasks/main.yml

@@ -27,6 +27,7 @@
 
 - name: Install the base package for versioning
   action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') }} state=present"
+  when: not openshift.common.is_containerized | bool
 
 - name: Set version facts
   openshift_facts:

+ 16 - 0
roles/openshift_docker/meta/main.yml

@@ -0,0 +1,16 @@
+---
+galaxy_info:
+  author: Jason DeTiberus
+  description: OpenShift Docker
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.9
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+dependencies:
+- { role: openshift_common }
+- { role: docker }

+ 53 - 0
roles/openshift_docker/tasks/main.yml

@@ -0,0 +1,53 @@
+---
+- name: Set docker facts
+  openshift_facts:
+    role: "{{ item.role }}"
+    local_facts: "{{ item.local_facts }}"
+  with_items:
+  - role: common
+    local_facts:
+      deployment_type: "{{ openshift_deployment_type }}"
+      docker_additional_registries: "{{ docker_additional_registries }}"
+      docker_insecure_registries: "{{ docker_insecure_registries }}"
+      docker_blocked_registries: "{{ docker_blocked_registries }}"
+  - role: node
+    local_facts:
+      portal_net: "{{ openshift_master_portal_net | default(None) }}"
+      docker_log_driver:  "{{ lookup( 'oo_option' , 'docker_log_driver'  )  | default('',True) }}"
+      docker_log_options: "{{ lookup( 'oo_option' , 'docker_log_options' )  | default('',True) }}"
+
+- stat: path=/etc/sysconfig/docker
+  register: docker_check
+  
+- name: Set registry params
+  lineinfile:
+    dest: /etc/sysconfig/docker
+    regexp: '^{{ item.reg_conf_var }}=.*$'
+    line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
+  when: "'docker_additional_registries' in openshift.common and docker_check.stat.isreg"
+  with_items:
+  - reg_conf_var: ADD_REGISTRY
+    reg_fact_val: "{{ openshift.common.docker_additional_registries }}"
+    reg_flag: --add-registry
+  - reg_conf_var: BLOCK_REGISTRY
+    reg_fact_val: "{{ openshift.common.docker_blocked_registries }}"
+    reg_flag: --block-registry
+  - reg_conf_var: INSECURE_REGISTRY
+    reg_fact_val: "{{ openshift.common.docker_insecure_registries }}"
+    reg_flag: --insecure-registry
+  notify:
+  - restart docker
+
+# TODO: Enable secure registry when code available in origin
+# TODO: perhaps move this to openshift_docker?
+- name: Secure Registry and Logs Options
+  lineinfile:
+    dest: /etc/sysconfig/docker
+    regexp: '^OPTIONS=.*$'
+    line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }} \
+      {% if ansible_selinux and ansible_selinux.status == '''enabled''' %}--selinux-enabled{% endif %} \
+      {% if openshift.node.docker_log_driver is defined  %} --log-driver {{ openshift.node.docker_log_driver }}  {% endif %} \
+      {% if openshift.node.docker_log_options is defined %}   {{ openshift.node.docker_log_options |  oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}  {% endif %} '"
+  when: docker_check.stat.isreg
+  notify:
+    - restart docker

+ 4 - 4
roles/openshift_examples/defaults/main.yml

@@ -1,14 +1,14 @@
 ---
 # By default install rhel and xpaas streams on enterprise installs
-openshift_examples_load_centos: "{{ openshift_deployment_type not in ['enterprise','openshift-enterprise','atomic-enterprise','online'] }}"
-openshift_examples_load_rhel: "{{ openshift_deployment_type in ['enterprise','openshift-enterprise','atomic-enterprise','online'] }}"
+openshift_examples_load_centos: "{{ openshift_deployment_type == 'origin' }}"
+openshift_examples_load_rhel: "{{ openshift_deployment_type != 'origin' }}"
 openshift_examples_load_db_templates: true
-openshift_examples_load_xpaas: "{{ openshift_deployment_type in ['enterprise','openshift-enterprise','atomic-enterprise','online']  }}"
+openshift_examples_load_xpaas: "{{ openshift_deployment_type != 'origin' }}"
 openshift_examples_load_quickstarts: true
 
 content_version: "{{ 'v1.1' if openshift.common.version_greater_than_3_1_or_1_1 else 'v1.0' }}"
 
-examples_base: "/usr/share/openshift/examples"
+examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized else '/usr/share/openshift' }}/examples"
 image_streams_base: "{{ examples_base }}/image-streams"
 centos_image_streams: "{{ image_streams_base}}/image-streams-centos7.json"
 rhel_image_streams: "{{ image_streams_base}}/image-streams-rhel7.json"

+ 8 - 0
roles/openshift_expand_partition/tasks/main.yml

@@ -1,6 +1,14 @@
 ---
 - name: Ensure growpart is installed
   action: "{{ ansible_pkg_mgr }} name=cloud-utils-growpart state=present"
+  when: not openshift.common.is_containerized | bool
+
+- name: Determine if growpart is installed
+  command: "rpm -q cloud-utils-growpart"
+  register: has_growpart
+  failed_when: "has_growpart.rc != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout"
+  changed_when: false
+  when: openshift.common.is_containerized | bool
 
 - name: Grow the partitions
   command: "growpart {{oep_drive}} {{oep_partition}}"

+ 66 - 3
roles/openshift_facts/library/openshift_facts.py

@@ -643,6 +643,19 @@ def set_deployment_facts_if_unset(facts):
                 data_dir = '/var/lib/openshift'
             facts['common']['data_dir'] = data_dir
 
+        # remove duplicate and empty strings from registry lists
+        for cat in  ['additional', 'blocked', 'insecure']:
+            key = 'docker_{0}_registries'.format(cat)
+            if key in facts['common']:
+                facts['common'][key] = list(set(facts['common'][key]) - set(['']))
+
+
+        if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
+            addtl_regs = facts['common'].get('docker_additional_registries', [])
+            ent_reg = 'registry.access.redhat.com'
+            if ent_reg not in addtl_regs:
+                facts['common']['docker_additional_registries'] = addtl_regs + [ent_reg]
+
     for role in ('master', 'node'):
         if role in facts:
             deployment_type = facts['common']['deployment_type']
@@ -710,7 +723,8 @@ def set_sdn_facts_if_unset(facts, system_facts):
     if 'common' in facts:
         use_sdn = facts['common']['use_openshift_sdn']
         if not (use_sdn == '' or isinstance(use_sdn, bool)):
-            facts['common']['use_openshift_sdn'] = bool(strtobool(str(use_sdn)))
+            use_sdn = bool(strtobool(str(use_sdn)))
+            facts['common']['use_openshift_sdn'] = use_sdn
         if 'sdn_network_plugin_name' not in facts['common']:
             plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
             facts['common']['sdn_network_plugin_name'] = plugin
@@ -919,6 +933,7 @@ def save_local_facts(filename, facts):
             os.makedirs(fact_dir)
         with open(filename, 'w') as fact_file:
             fact_file.write(module.jsonify(facts))
+        os.chmod(filename, 0o600)
     except (IOError, OSError) as ex:
         raise OpenShiftFactsFileWriteError(
             "Could not create fact file: %s, error: %s" % (filename, ex)
@@ -954,6 +969,53 @@ def get_local_facts_from_file(filename):
     return local_facts
 
 
+def set_container_facts_if_unset(facts):
+    """ Set containerized facts.
+
+        Args:
+            facts (dict): existing facts
+        Returns:
+            dict: the facts dict updated with the generated containerization
+            facts
+    """
+    deployment_type = facts['common']['deployment_type']
+    if deployment_type in ['enterprise', 'openshift-enterprise']:
+        master_image = 'openshift3/ose'
+        cli_image = master_image
+        node_image = 'openshift3/node'
+        ovs_image = 'openshift3/openvswitch'
+        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
+    elif deployment_type == 'atomic-enterprise':
+        master_image = 'aep3_beta/aep'
+        cli_image = master_image
+        node_image = 'aep3_beta/node'
+        ovs_image = 'aep3_beta/openvswitch'
+        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
+    else:
+        master_image = 'openshift/origin'
+        cli_image = master_image
+        node_image = 'openshift/node'
+        ovs_image = 'openshift/openvswitch'
+        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
+
+    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
+    if 'is_containerized' not in facts['common']:
+        facts['common']['is_containerized'] = facts['common']['is_atomic']
+    if 'cli_image' not in facts['common']:
+        facts['common']['cli_image'] = cli_image
+    if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
+        facts['etcd']['etcd_image'] = etcd_image
+    if 'master' in facts and 'master_image' not in facts['master']:
+        facts['master']['master_image'] = master_image
+    if 'node' in facts:
+        if 'node_image' not in facts['node']:
+            facts['node']['node_image'] = node_image
+        if 'ovs_image' not in facts['node']:
+            facts['node']['ovs_image'] = ovs_image
+
+    return facts
+
+
 class OpenShiftFactsUnsupportedRoleError(Exception):
     """Origin Facts Unsupported Role Error"""
     pass
@@ -1031,6 +1093,7 @@ class OpenShiftFacts(object):
         facts = set_version_facts_if_unset(facts)
         facts = set_aggregate_facts(facts)
         facts = set_etcd_facts_if_unset(facts)
+        facts = set_container_facts_if_unset(facts)
         return dict(openshift=facts)
 
     def get_defaults(self, roles):
@@ -1054,8 +1117,8 @@ class OpenShiftFacts(object):
         common = dict(use_openshift_sdn=True, ip=ip_addr, public_ip=ip_addr,
                       deployment_type='origin', hostname=hostname,
                       public_hostname=hostname, use_manageiq=False)
-        common['client_binary'] = 'oc' if os.path.isfile('/usr/bin/oc') else 'osc'
-        common['admin_binary'] = 'oadm' if os.path.isfile('/usr/bin/oadm') else 'osadm'
+        common['client_binary'] = 'oc'
+        common['admin_binary'] = 'oadm'
         common['dns_domain'] = 'cluster.local'
         common['install_examples'] = True
         defaults['common'] = common

+ 17 - 1
roles/openshift_facts/tasks/main.yml

@@ -5,9 +5,25 @@
     - ansible_version | version_compare('1.8.0', 'ge')
     - ansible_version | version_compare('1.9.0', 'ne')
     - ansible_version | version_compare('1.9.0.1', 'ne')
+
+- name: Detecting Operating System
+  shell: ls /run/ostree-booted
+  ignore_errors: yes
+  failed_when: false
+  register: ostree_output
+
+# Locally setup containerized facts for now
+- set_fact:
+    l_is_atomic: "{{ ostree_output.rc == 0 }}"
+- set_fact:
+    l_is_containerized: "{{ l_is_atomic or containerized | default(false) | bool }}"
 
 - name: Ensure PyYaml is installed
   action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+  when: not l_is_atomic | bool
 
-- name: Gather Cluster facts
+- name: Gather Cluster facts and set is_containerized if needed
   openshift_facts:
+    role: common
+    local_facts:
+      is_containerized: "{{ containerized | default(None) }}"

+ 1 - 0
roles/openshift_manage_node/tasks/main.yml

@@ -5,6 +5,7 @@
   until: omd_get_node.rc == 0
   retries: 20
   delay: 5
+  changed_when: false
   with_items: openshift_nodes
 
 - name: Set node schedulability

+ 1 - 0
roles/openshift_master/meta/main.yml

@@ -13,3 +13,4 @@ galaxy_info:
   - cloud
 dependencies:
 - { role: openshift_common }
+- { role: openshift_cli }

+ 70 - 5
roles/openshift_master/tasks/main.yml

@@ -20,6 +20,9 @@
 - fail:
     msg: "openshift_master_cluster_password must be set for multi-master installations"
   when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password)
+- fail:
+    msg: "Pacemaker based HA is not supported at this time when used with containerized installs"
+  when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and openshift.common.is_containerized | bool
 
 - name: Set master facts
   openshift_facts:
@@ -76,9 +79,52 @@
       disabled_features: "{{ osm_disabled_features | default(None) }}"
       master_count: "{{ openshift_master_count | default(None) }}"
       controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
+      master_image: "{{ osm_image | default(None) }}"
 
 - name: Install Master package
   action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version  }} state=present"
+  when: not openshift.common.is_containerized | bool
+
+- name: Get docker images
+  command: docker images
+  changed_when: false
+  when: openshift.common.is_containerized | bool
+  register: docker_images
+
+- name: Pull master image
+  command: >
+    docker pull {{ openshift.master.master_image }}
+  when: openshift.common.is_containerized | bool and openshift.master.master_image not in docker_images.stdout
+  
+- name: Wait for master image
+  command: >
+      docker images
+  register: docker_images
+  until: openshift.master.master_image in docker_images.stdout
+  retries: 30
+  delay: 10
+  changed_when: false
+  when: openshift.common.is_containerized | bool
+
+- name: Install Master docker service file
+  template:
+    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service"
+    src: master.docker.service.j2
+  register: install_result
+  when: openshift.common.is_containerized | bool and not openshift_master_ha | bool
+  
+- name: Create openshift.common.data_dir
+  file: 
+    path: "{{ openshift.common.data_dir }}"
+    state: directory
+    mode: 0755
+    owner: root
+    group: root
+  when: openshift.common.is_containerized | bool
+
+- name: Reload systemd units
+  command: systemctl daemon-reload
+  when: openshift.common.is_containerized | bool and install_result | changed
 
 - name: Re-gather package dependent master facts
   openshift_facts:
@@ -111,7 +157,8 @@
 
 - name: Install httpd-tools if needed
   action: "{{ ansible_pkg_mgr }} name=httpd-tools state=present"
-  when: (item.kind == 'HTPasswdPasswordIdentityProvider')
+  when: (item.kind == 'HTPasswdPasswordIdentityProvider') and
+        not openshift.common.is_atomic | bool
   with_items: openshift.master.identity_providers
 
 - name: Ensure htpasswd directory exists
@@ -130,16 +177,27 @@
   when: item.kind == 'HTPasswdPasswordIdentityProvider'
   with_items: openshift.master.identity_providers
 
+- name: Init HA Service Info
+  set_fact:
+    ha_suffix: ""
+    ha_svcdir: "/usr/lib/systemd/system"
+
+- name: Set HA Service Info for containerized installs
+  set_fact:
+    ha_suffix: ".docker"
+    ha_svcdir: "/etc/systemd/system"
+  when: openshift.common.is_containerized | bool
+
 # workaround for missing systemd unit files for controllers/api
 - name: Create the api service file
   template:
-    src: atomic-openshift-master-api.service.j2
-    dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-api.service
+    src: atomic-openshift-master-api{{ ha_suffix }}.service.j2
+    dest: "{{ ha_svcdir }}/{{ openshift.common.service_type }}-master-api.service"
   when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
 - name: Create the controllers service file
   template:
-    src: atomic-openshift-master-controllers.service.j2
-    dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-controllers.service
+    src: atomic-openshift-master-controllers{{ ha_suffix }}.service.j2
+    dest: "{{ ha_svcdir }}/{{ openshift.common.service_type }}-master-controllers.service"
   when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
 - name: Create the api env file
   template:
@@ -227,6 +285,10 @@
   when: not openshift_master_ha | bool
   register: start_result
 
+- name: Stop and disable non HA master when running HA
+  service: name={{ openshift.common.service_type }}-master enabled=no state=stopped
+  when: openshift_master_ha | bool
+
 - set_fact:
     master_service_status_changed: start_result | changed
   when: not openshift_master_ha | bool
@@ -252,11 +314,13 @@
 - name: Install cluster packages
   action: "{{ ansible_pkg_mgr }} name=pcs state=present"
   when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
+    and not openshift.common.is_containerized | bool
   register: install_result
 
 - name: Start and enable cluster service
   service: name=pcsd enabled=yes state=started
   when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
+    and not openshift.common.is_containerized | bool
 
 - name: Set the cluster user password
   shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
@@ -264,6 +328,7 @@
 
 - name: Lookup default group for ansible_ssh_user
   command: "/usr/bin/id -g {{ ansible_ssh_user }}"
+  changed_when: false
   register: _ansible_ssh_user_gid
 
 - name: Create the client config dir(s)

+ 26 - 0
roles/openshift_master/templates/atomic-openshift-master-api.docker.service.j2

@@ -0,0 +1,26 @@
+[Unit]
+Description=Atomic OpenShift Master API
+Documentation=https://github.com/openshift/origin
+After=network.target
+After=etcd.service
+Before={{ openshift.common.service_type }}-node.service
+Requires=network.target
+Requires=docker.service
+PartOf=docker.service
+
+[Service]
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+Environment=GOTRACEBACK=crash
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {{ openshift.master.master_image }} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStartPost=/usr/bin/sleep 10
+ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory={{ openshift.common.data_dir }}
+SyslogIdentifier=atomic-openshift-master-api
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+WantedBy={{ openshift.common.service_type }}-node.service

+ 25 - 0
roles/openshift_master/templates/atomic-openshift-master-controllers.docker.service.j2

@@ -0,0 +1,25 @@
+[Unit]
+Description=Atomic OpenShift Master Controllers
+Documentation=https://github.com/openshift/origin
+After=network.target
+After={{ openshift.common.service_type }}-master-api.service
+Before={{ openshift.common.service_type }}-node.service
+Requires=docker.service
+PartOf=docker.service
+
+[Service]
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+Environment=GOTRACEBACK=crash
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {{ openshift.master.master_image }} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStartPost=/usr/bin/sleep 10
+ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory={{ openshift.common.data_dir }}
+SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+WantedBy={{ openshift.common.service_type }}-node.service

+ 16 - 0
roles/openshift_master/templates/master.docker.service.j2

@@ -0,0 +1,16 @@
+[Unit]
+After=docker.service
+Before={{ openshift.common.service_type }}-node.service
+Requires=docker.service
+PartOf=docker.service
+
+[Service]
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master
+ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-master
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {{ openshift.master.master_image }} start master --config=${CONFIG_FILE} $OPTIONS
+ExecStartPost=/usr/bin/sleep 10
+ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master
+Restart=always
+
+[Install]
+WantedBy=multi-user.target

+ 1 - 0
roles/openshift_master_ca/meta/main.yml

@@ -14,3 +14,4 @@ galaxy_info:
   - system
 dependencies:
 - { role: openshift_repos }
+- { role: openshift_cli }

+ 14 - 0
roles/openshift_master_ca/tasks/main.yml

@@ -1,15 +1,29 @@
 ---
 - name: Install the base package for admin tooling
   action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version  }} state=present"
+  when: not openshift.common.is_containerized | bool
+  register: install_result
 
 - name: Reload generated facts
   openshift_facts:
+  when: install_result | changed
 
 - name: Create openshift_master_config_dir if it doesn't exist
   file:
     path: "{{ openshift_master_config_dir }}"
     state: directory
 
+- name: Get docker images
+  command: docker images
+  changed_when: false
+  when: openshift.common.is_containerized | bool
+  register: docker_images
+
+- name: Pull required docker image
+  command: >
+    docker pull {{ openshift.common.cli_image }}
+  when: openshift.common.is_containerized | bool and openshift.common.cli_image not in docker_images.stdout
+
 - name: Create the master certificates if they do not already exist
   command: >
     {{ openshift.common.admin_binary }} create-master-certs

+ 2 - 0
roles/openshift_master_certificates/tasks/main.yml

@@ -16,6 +16,8 @@
     - admin.kubeconfig
     - master.kubelet-client.crt
     - master.kubelet-client.key
+    - master.server.crt
+    - master.server.key
     - openshift-master.crt
     - openshift-master.key
     - openshift-master.kubeconfig

+ 4 - 0
roles/openshift_master_cluster/tasks/main.yml

@@ -1,4 +1,8 @@
 ---
+- fail:
+    msg: "Not possible on atomic hosts for now"
+  when: openshift.common.is_containerized | bool
+
 - name: Test if cluster is already configured
   command: pcs status
   register: pcs_status

+ 0 - 1
roles/openshift_node/meta/main.yml

@@ -13,4 +13,3 @@ galaxy_info:
   - cloud
 dependencies:
 - { role: openshift_common }
-- { role: docker }

+ 66 - 62
roles/openshift_node/tasks/main.yml

@@ -22,8 +22,6 @@
     local_facts:
       annotations: "{{ openshift_node_annotations | default(none) }}"
       debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
-      docker_log_driver:  "{{ lookup( 'oo_option' , 'docker_log_driver'  )  | default('',True) }}"
-      docker_log_options: "{{ lookup( 'oo_option' , 'docker_log_options' )  | default('',True) }}"
       iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
       kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
       labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
@@ -33,15 +31,78 @@
       sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
       storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
       set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
+      node_image: "{{ osn_image | default(None) }}"
+      ovs_image: "{{ osn_ovs_image | default(None) }}"
 
 # We have to add tuned-profiles in the same transaction otherwise we run into depsolving
-# problems because the rpms don't pin the version properly.
+# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
 - name: Install Node package
   action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version  }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version  }} state=present"
+  when: not openshift.common.is_containerized | bool
 
 - name: Install sdn-ovs package
   action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present"
-  when: openshift.common.use_openshift_sdn
+  when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool
+
+- name: Get docker images
+  command: docker images
+  changed_when: false
+  when: openshift.common.is_containerized | bool
+  register: docker_images
+
+- name: Pull node image
+  command: >
+    docker pull {{ openshift.node.node_image }}
+  when: openshift.common.is_containerized | bool and openshift.node.node_image not in docker_images.stdout
+  
+- name: Wait for node image
+  command: >
+      docker images
+  register: docker_images
+  until: openshift.node.node_image in docker_images.stdout
+  retries: 30
+  delay: 10
+  changed_when: false
+  when: openshift.common.is_containerized | bool
+    
+- name: Pull OpenVSwitch image
+  command: >
+    docker pull {{ openshift.node.ovs_image }}
+  when: openshift.common.is_containerized | bool and openshift.node.ovs_image not in docker_images.stdout
+    and openshift.common.use_openshift_sdn | bool
+  
+- name: Wait for OpenVSwitch image
+  command: >
+      docker images
+  register: docker_images
+  until: openshift.node.ovs_image in docker_images.stdout
+  retries: 30
+  delay: 10
+  changed_when: false
+  when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
+
+- name: Install Node docker service file
+  template:
+    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+    src: openshift.docker.node.service
+  register: install_node_result
+  when: openshift.common.is_containerized | bool
+
+- name: Install OpenvSwitch docker service file
+  template:
+    dest: "/etc/systemd/system/openvswitch.service"
+    src: openvswitch.docker.service
+  register: install_ovs_result
+  when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
+
+- name: Reload systemd units
+  command: systemctl daemon-reload
+  when: openshift.common.is_containerized | bool and ( ( install_node_result  | changed )
+    or ( install_ovs_result | changed ) )
+
+- name: Start and enable openvswitch docker service
+  service: name=openvswitch.service enabled=yes state=started
+  when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
 
 # TODO: add the validate parameter when there is a validation command to run
 - name: Create the Node config
@@ -57,6 +118,7 @@
     dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
     regexp: "{{ item.regex }}"
     line: "{{ item.line }}"
+    create: true
   with_items:
     - regex: '^OPTIONS='
       line: "OPTIONS=--loglevel={{ openshift.node.debug_level }}"
@@ -65,64 +127,6 @@
   notify:
   - restart node
 
-- stat: path=/etc/sysconfig/docker
-  register: docker_check
-
-  # TODO: Enable secure registry when code available in origin
-- name: Secure Registry and Logs Options
-  lineinfile:
-    dest: /etc/sysconfig/docker
-    regexp: '^OPTIONS=.*$'
-    line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }} \
-{% if ansible_selinux and ansible_selinux.status == '''enabled''' %}--selinux-enabled{% endif %} \
-{% if openshift.node.docker_log_driver is defined  %} --log-driver {{ openshift.node.docker_log_driver }}  {% endif %} \
-{% if openshift.node.docker_log_options is defined %}   {{ openshift.node.docker_log_options |  oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}  {% endif %} '"
-  when: docker_check.stat.isreg
-  notify:
-    - restart docker
-
-- set_fact:
-    docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries')
-                                      | oo_split() | union(['registry.access.redhat.com'])
-                                      | difference(['']) }}"
-  when: openshift.common.deployment_type in ['enterprise', 'openshift-enterprise', 'atomic-enterprise']
-- set_fact:
-    docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries')
-                                      | oo_split() | difference(['']) }}"
-  when: openshift.common.deployment_type not in ['enterprise', 'openshift-enterprise', 'atomic-enterprise']
-
-- name: Add personal registries
-  lineinfile:
-    dest: /etc/sysconfig/docker
-    regexp: '^ADD_REGISTRY=.*$'
-    line: "ADD_REGISTRY='{{ docker_additional_registries
-                            | oo_prepend_strings_in_list('--add-registry ') | join(' ') }}'"
-  when: docker_check.stat.isreg and docker_additional_registries
-  notify:
-    - restart docker
-
-- name: Block registries
-  lineinfile:
-    dest: /etc/sysconfig/docker
-    regexp: '^BLOCK_REGISTRY=.*$'
-    line: "BLOCK_REGISTRY='{{ lookup('oo_option', 'docker_blocked_registries') | oo_split()
-                              | oo_prepend_strings_in_list('--block-registry ') | join(' ') }}'"
-  when: docker_check.stat.isreg and
-        lookup('oo_option', 'docker_blocked_registries') != ''
-  notify:
-    - restart docker
-
-- name: Grant access to additional insecure registries
-  lineinfile:
-    dest: /etc/sysconfig/docker
-    regexp: '^INSECURE_REGISTRY=.*'
-    line: "INSECURE_REGISTRY='{{ lookup('oo_option', 'docker_insecure_registries') | oo_split()
-                              | oo_prepend_strings_in_list('--insecure-registry ') | join(' ') }}'"
-  when: docker_check.stat.isreg and
-        lookup('oo_option', 'docker_insecure_registries') != ''
-  notify:
-    - restart docker
-
 - name: Additional storage plugin configuration
   include: storage_plugins/main.yml
 

+ 2 - 1
roles/openshift_node/tasks/storage_plugins/ceph.yml

@@ -1,3 +1,4 @@
 ---
 - name: Install Ceph storage plugin dependencies
-  action: "{{ ansible_pkg_mgr }} name=ceph-common state=present"
+  action: "{{ ansible_pkg_mgr }} name=ceph-common state=present"
+  when: not openshift.common.is_containerized | bool

+ 1 - 0
roles/openshift_node/tasks/storage_plugins/glusterfs.yml

@@ -1,6 +1,7 @@
 ---
 - name: Install GlusterFS storage plugin dependencies
   action: "{{ ansible_pkg_mgr }} name=glusterfs-fuse state=present"
+  when: not openshift.common.is_containerized | bool
 
 - name: Set sebooleans to allow gluster storage plugin access from containers
   seboolean:

+ 3 - 2
roles/openshift_node/tasks/storage_plugins/main.yml

@@ -3,11 +3,12 @@
 # additional package dependencies
 - name: NFS storage plugin configuration
   include: nfs.yml
+  when: not openshift.common.is_containerized | bool
 
 - name: GlusterFS storage plugin configuration
   include: glusterfs.yml
-  when: "'glusterfs' in openshift.node.storage_plugin_deps"
+  when: "'glusterfs' in openshift.node.storage_plugin_deps and not openshift.common.is_containerized | bool "
 
 - name: Ceph storage plugin configuration
   include: ceph.yml
-  when: "'ceph' in openshift.node.storage_plugin_deps"
+  when: "'ceph' in openshift.node.storage_plugin_deps and not openshift.common.is_containerized | bool"

Plik diff jest za duży
+ 20 - 0
roles/openshift_node/templates/openshift.docker.node.service


+ 13 - 0
roles/openshift_node/templates/openvswitch.docker.service

@@ -0,0 +1,13 @@
+[Unit]
+After=docker.service
+Requires=docker.service
+PartOf=docker.service
+
+[Service]
+ExecStartPre=-/usr/bin/docker rm -f openvswitch
+ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ openshift.node.ovs_image }}
+ExecStop=/usr/bin/docker stop openvswitch
+Restart=always
+
+[Install]
+WantedBy=multi-user.target

+ 4 - 4
roles/openshift_node_certificates/tasks/main.yml

@@ -17,19 +17,19 @@
       --signer-serial={{ openshift_master_ca_serial }}
       --user=system:node:{{ item.openshift.common.hostname }}
   args:
-    chdir: "{{ openshift_generated_configs_dir }}"
     creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}"
   with_items: nodes_needing_certs
 
 - name: Generate the node server certificate
   command: >
-    {{ openshift.common.admin_binary }} create-server-cert
-      --cert=server.crt --key=server.key --overwrite=true
+    {{ openshift.common.admin_binary }} ca create-server-cert
+      --cert={{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.crt
+      --key={{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.key
+      --overwrite=true
       --hostnames={{ item.openshift.common.all_hostnames |join(",") }}
       --signer-cert={{ openshift_master_ca_cert }}
       --signer-key={{ openshift_master_ca_key }}
       --signer-serial={{ openshift_master_ca_serial }}
   args:
-    chdir: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}"
     creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.crt"
   with_items: nodes_needing_certs

+ 10 - 5
roles/openshift_repos/tasks/main.yaml

@@ -8,23 +8,24 @@
 #       proper repos correctly.
 
 - assert:
-    that: openshift.common.deployment_type in known_openshift_deployment_types
+    that: openshift_deployment_type in known_openshift_deployment_types
 
 - name: Ensure libselinux-python is installed
   action: "{{ ansible_pkg_mgr }} name=libselinux-python state=present"
+  when: not openshift.common.is_containerized | bool
 
 - name: Create any additional repos that are defined
   template:
     src: yum_repo.j2
     dest: /etc/yum.repos.d/openshift_additional.repo
-  when: openshift_additional_repos | length > 0
+  when: openshift_additional_repos | length > 0 and not openshift.common.is_containerized | bool
   notify: refresh cache
 
 - name: Remove the additional repos if no longer defined
   file:
     dest: /etc/yum.repos.d/openshift_additional.repo
     state: absent
-  when: openshift_additional_repos | length == 0
+  when: openshift_additional_repos | length == 0 and not openshift.common.is_containerized | bool
   notify: refresh cache
 
 - name: Remove any yum repo files for other deployment types RHEL/CentOS
@@ -35,6 +36,7 @@
   - '*/repos/*'
   when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos")) and
         (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
+        and not openshift.common.is_containerized | bool
   notify: refresh cache
 
 - name: Remove any yum repo files for other deployment types Fedora
@@ -44,7 +46,8 @@
   with_fileglob:
   - '*/repos/*'
   when: not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos")) and
-        (ansible_distribution == "Fedora")
+        (ansible_distribution == "Fedora") 
+        and not openshift.common.is_containerized | bool
   notify: refresh cache
 
 - name: Configure gpg keys if needed
@@ -52,6 +55,7 @@
   with_fileglob:
   - "{{ openshift_deployment_type }}/gpg_keys/*"
   notify: refresh cache
+  when: not openshift.common.is_containerized | bool
 
 - name: Configure yum repositories RHEL/CentOS
   copy: src={{ item }} dest=/etc/yum.repos.d/
@@ -59,10 +63,11 @@
   - "{{ openshift_deployment_type }}/repos/*"
   notify: refresh cache
   when: (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
+        and not openshift.common.is_containerized | bool
 
 - name: Configure yum repositories Fedora
   copy: src={{ item }} dest=/etc/yum.repos.d/
   with_fileglob:
   - "fedora-{{ openshift_deployment_type }}/repos/*"
   notify: refresh cache
-  when: (ansible_distribution == "Fedora")
+  when: (ansible_distribution == "Fedora") and not openshift.common.is_containerized | bool

+ 13 - 5
roles/openshift_serviceaccounts/tasks/main.yml

@@ -1,12 +1,19 @@
+- name: tmp dir for openshift
+  file:
+    path: /tmp/openshift
+    state: directory
+    owner: root
+    mode: "0700"
+
 - name: Create service account configs
   template:
     src: serviceaccount.j2
-    dest: "/tmp/{{ item }}-serviceaccount.yaml"
+    dest: "/tmp/openshift/{{ item }}-serviceaccount.yaml"
   with_items: accounts
 
 - name: Create {{ item }} service account
   command: >
-    {{ openshift.common.client_binary }} create -f "/tmp/{{ item }}-serviceaccount.yaml"
+    {{ openshift.common.client_binary }} create -f "/tmp/openshift/{{ item }}-serviceaccount.yaml"
   with_items: accounts
   register: _sa_result
   failed_when: "'serviceaccounts \"{{ item }}\" already exists' not in _sa_result.stderr and _sa_result.rc != 0"
@@ -15,14 +22,15 @@
 - name: Get current security context constraints
   shell: >
     {{ openshift.common.client_binary }} get scc privileged -o yaml
-    --output-version=v1 > /tmp/scc.yaml
+    --output-version=v1 > /tmp/openshift/scc.yaml
+  changed_when: false
 
 - name: Add security context constraint for {{ item }}
   lineinfile:
-    dest: /tmp/scc.yaml
+    dest: /tmp/openshift/scc.yaml
     line: "- system:serviceaccount:default:{{ item }}"
     insertafter: "^users:$"
   with_items: accounts
 
 - name: Apply new scc rules for service accounts
-  command: "{{ openshift.common.client_binary }} update -f /tmp/scc.yaml --api-version=v1"
+  command: "{{ openshift.common.client_binary }} update -f /tmp/openshift/scc.yaml --api-version=v1"

+ 5 - 0
roles/openshift_storage_nfs_lvm/tasks/main.yml

@@ -1,4 +1,9 @@
 ---
+# TODO -- this may actually work on atomic hosts
+- fail:
+    msg: "openshift_storage_nfs_lvm is not compatible with atomic host"
+  when: openshift.common.is_atomic | bool
+
 - name: Create lvm volumes
   lvol: vg={{osnl_volume_group}} lv={{ item }} size={{osnl_volume_size}}G
   with_sequence: start={{osnl_volume_num_start}} count={{osnl_number_of_volumes}} format={{osnl_volume_prefix}}{{osnl_volume_size}}g%04d

+ 1 - 0
roles/openshift_storage_nfs_lvm/tasks/nfs.yml

@@ -1,6 +1,7 @@
 ---
 - name: Install NFS server
   action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present"
+  when: not openshift.common.is_containerized | bool
   
 - name: Start rpcbind
   service: name=rpcbind state=started enabled=yes

+ 2 - 1
roles/os_env_extras/tasks/main.yaml

@@ -12,4 +12,5 @@
     dest: /root/.vimrc
 
 - name: Bash Completion
-  action: "{{ ansible_pkg_mgr }} name=bash-completion state=present"
+  action: "{{ ansible_pkg_mgr }} name=bash-completion state=present"
+  when: not openshift.common.is_containerized | bool

+ 1 - 0
roles/os_firewall/tasks/firewall/firewalld.yml

@@ -1,6 +1,7 @@
 ---
 - name: Install firewalld packages
   action: "{{ ansible_pkg_mgr }} name=firewalld state=present"
+  when: not openshift.common.is_containerized | bool
   register: install_result
 
 - name: Check if iptables-services is installed

+ 1 - 0
roles/os_firewall/tasks/firewall/iptables.yml

@@ -5,6 +5,7 @@
   - iptables
   - iptables-services
   register: install_result
+  when: not openshift.common.is_containerized | bool
 
 - name: Check if firewalld is installed
   command: rpm -q firewalld

+ 6 - 1
roles/os_update_latest/tasks/main.yml

@@ -1,3 +1,8 @@
 ---
+- fail:
+    msg: "Update is not yet supported by this playbook on atomic hosts"
+  when: openshift.common.is_containerized | bool
+
 - name: Update all packages
-  action: "{{ ansible_pkg_mgr }} name=* state=latest"
+  action: "{{ ansible_pkg_mgr }} name=* state=latest"
+  when: not openshift.common.is_containerized | bool

+ 4 - 0
roles/os_zabbix/tasks/main.yml

@@ -1,4 +1,8 @@
 ---
+- fail:
+    msg: "Zabbix config is not yet supported on atomic hosts"
+  when: openshift.common.is_containerized | bool
+
 - name: Main List all templates
   zbx_template:
     zbx_server: "{{ ozb_server }}"

+ 1 - 0
roles/yum_repos/tasks/main.yml

@@ -45,3 +45,4 @@
     src: yumrepo.j2
     dest: /etc/yum.repos.d/{{ item.id }}.repo
   with_items: repo_files
+  when: not openshift.common.is_containerized | bool