Browse Source

Atomic Enterprise related changes.

Avesh Agarwal 9 years ago
parent
commit
1b3fff6248
34 changed files with 208 additions and 136 deletions
  1. 23 0
      DEPLOYMENT_TYPES.md
  2. 4 0
      bin/cluster
  3. 3 3
      inventory/byo/hosts.example
  4. 1 1
      playbooks/byo/openshift_facts.yml
  5. 9 8
      playbooks/common/openshift-master/config.yml
  6. 2 2
      playbooks/common/openshift-master/service.yml
  7. 5 5
      playbooks/common/openshift-node/config.yml
  8. 2 2
      playbooks/common/openshift-node/service.yml
  9. 1 1
      roles/openshift_common/tasks/main.yml
  10. 1 1
      roles/openshift_common/vars/main.yml
  11. 50 30
      roles/openshift_facts/library/openshift_facts.py
  12. 1 1
      roles/openshift_facts/tasks/main.yml
  13. 5 5
      roles/openshift_master/README.md
  14. 5 5
      roles/openshift_master/defaults/main.yml
  15. 2 2
      roles/openshift_master/handlers/main.yml
  16. 1 1
      roles/openshift_master/meta/main.yml
  17. 38 17
      roles/openshift_master/tasks/main.yml
  18. 1 1
      roles/openshift_master/vars/main.yml
  19. 2 2
      roles/openshift_master_ca/tasks/main.yml
  20. 1 1
      roles/openshift_master_ca/vars/main.yml
  21. 2 2
      roles/openshift_master_certificates/vars/main.yml
  22. 4 4
      roles/openshift_master_cluster/tasks/configure.yml
  23. 2 2
      roles/openshift_master_cluster/tasks/configure_deferred.yml
  24. 8 8
      roles/openshift_node/README.md
  25. 1 1
      roles/openshift_node/defaults/main.yml
  26. 2 2
      roles/openshift_node/handlers/main.yml
  27. 17 17
      roles/openshift_node/tasks/main.yml
  28. 1 1
      roles/openshift_node/vars/main.yml
  29. 2 2
      roles/openshift_node_certificates/README.md
  30. 3 3
      roles/openshift_node_certificates/vars/main.yml
  31. 1 2
      roles/openshift_registry/vars/main.yml
  32. 6 1
      roles/openshift_repos/vars/main.yml
  33. 1 2
      roles/openshift_router/vars/main.yml
  34. 1 1
      roles/openshift_storage_nfs_lvm/tasks/main.yml

+ 23 - 0
DEPLOYMENT_TYPES.md

@@ -0,0 +1,23 @@
+#Deployment Types
+
+This module supports OpenShift Origin, OpenShift Enterprise, and Atomic
+Enterprise Platform. Each deployment type sets various defaults used throughout
+your environment.
+
+The table below outlines the defaults per `deployment_type`.
+
+| deployment_type                                                 | origin                                   | enterprise (< 3.1)                     | atomic-enterprise                | openshift-enterprise (>= 3.1)    |
+|-----------------------------------------------------------------|------------------------------------------|----------------------------------------|----------------------------------|----------------------------------|
+| **openshift.common.service_type** (also used for package names) | origin                                   | openshift                              | atomic-openshift                 | atomic-openshift                 |
+| **openshift.common.config_base**                                | /etc/origin                              | /etc/openshift                         | /etc/origin                      | /etc/origin                      |
+| **openshift.common.data_dir**                                   | /var/lib/origin                          | /var/lib/openshift                     | /var/lib/origin                  | /var/lib/origin                  |
+| **openshift.master.registry_url openshift.node.registry_url**   | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} | aos3/aos-${component}:${version} | aos3/aos-${component}:${version} |
+| **Image Streams**                                               | centos                                   | rhel + xpaas                           | N/A                              | rhel                             |
+
+
+**NOTE** `enterprise` deployment type is used for OpenShift Enterprise version
+3.0.x. OpenShift Enterprise deployments utilizing version 3.1 and later will
+make use of the new `openshift-enterprise` deployment type. Additional work to
+migrate between the two will be forthcoming.
+
+

+ 4 - 0
bin/cluster

@@ -48,6 +48,7 @@ class Cluster(object):
             deployment_type = os.environ['OS_DEPLOYMENT_TYPE']
         return deployment_type
 
+
     def create(self, args):
         """
         Create an OpenShift cluster for given provider
@@ -258,6 +259,9 @@ if __name__ == '__main__':
     meta_parser.add_argument('-t', '--deployment-type',
                              choices=['origin', 'online', 'enterprise'],
                              help='Deployment type. (default: origin)')
+    meta_parser.add_argument('-T', '--product-type',
+                             choices=['openshift', 'atomic-enterprise'],
+                             help='Product type. (default: openshift)')
     meta_parser.add_argument('-o', '--option', action='append',
                              help='options')
 

+ 3 - 3
inventory/byo/hosts.example

@@ -18,7 +18,7 @@ ansible_ssh_user=root
 #ansible_sudo=true
 
 # deployment type valid values are origin, online and enterprise
-deployment_type=enterprise
+deployment_type=atomic-enterprise
 
 # Enable cluster metrics
 #use_cluster_metrics=true
@@ -52,7 +52,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # For installation the value of openshift_master_cluster_hostname must resolve
 # to the first master defined in the inventory.
 # The HA solution must be manually configured after installation and must ensure
-# that openshift-master is running on a single master host.
+# that the master is running on a single master host.
 #openshift_master_cluster_hostname=openshift-ansible.test.example.com
 #openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
 #openshift_master_cluster_defer_ha=True
@@ -61,7 +61,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #osm_default_subdomain=apps.test.example.com
 
 # additional cors origins
-#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] 
+#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
 
 # default project node selector
 #osm_default_node_selector='region=primary'

+ 1 - 1
playbooks/byo/openshift_facts.yml

@@ -1,5 +1,5 @@
 ---
-- name: Gather OpenShift facts
+- name: Gather Cluster facts
   hosts: all
   gather_facts: no
   roles:

+ 9 - 8
playbooks/common/openshift-master/config.yml

@@ -37,7 +37,7 @@
           public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
  - name: Check status of external etcd certificates
     stat:
-      path: "/etc/openshift/master/{{ item }}"
+      path: "{{ openshift.common.config_base }}/master/{{ item }}"
     with_items:
     - master.etcd-client.crt
     - master.etcd-ca.crt
@@ -47,7 +47,7 @@
                                     | map(attribute='stat.exists')
                                     | list | intersect([false])}}"
       etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }}
-      etcd_cert_config_dir: /etc/openshift/master
+      etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
       etcd_cert_prefix: master.etcd-
     when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
 
@@ -96,7 +96,7 @@
   tasks:
   - name: Ensure certificate directory exists
     file:
-      path: /etc/openshift/master
+      path: "{{ openshift.common.config_base }}/master"
       state: directory
     when: etcd_client_certs_missing is defined and etcd_client_certs_missing
   - name: Unarchive the tarball on the master
@@ -134,7 +134,7 @@
 
   - name: Check status of master certificates
     stat:
-      path: "/etc/openshift/master/{{ item }}"
+      path: "{{ openshift.common.config_base }}/master/{{ item }}"
     with_items: openshift_master_certs
     register: g_master_cert_stat_result
   - set_fact:
@@ -142,12 +142,12 @@
                                 | map(attribute='stat.exists')
                                 | list | intersect([false])}}"
       master_cert_subdir: master-{{ openshift.common.hostname }}
-      master_cert_config_dir: /etc/openshift/master
+      master_cert_config_dir: "{{ openshift.common.config_base }}/master"
 
 - name: Configure master certificates
   hosts: oo_first_master
   vars:
-    master_generated_certs_dir: /etc/openshift/generated-configs
+    master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
     masters_needing_certs: "{{ hostvars
                                | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
                                | oo_filter_list(filter_attr='master_certs_missing') }}"
@@ -189,7 +189,7 @@
   pre_tasks:
   - name: Ensure certificate directory exists
     file:
-      path: /etc/openshift/master
+      path: "{{ openshift.common.config_base }}/master"
       state: directory
     when: master_certs_missing and 'oo_first_master' not in group_names
   - name: Unarchive the tarball on the master
@@ -214,7 +214,8 @@
   roles:
   - role: openshift_master_cluster
     when: openshift_master_ha | bool
-  - openshift_examples
+  - role: openshift_examples
+    when: deployment_type in ['enterprise','openshift-enterprise','origin']
   - role: openshift_cluster_metrics
     when: openshift.common.use_cluster_metrics | bool
 

+ 2 - 2
playbooks/common/openshift-master/service.yml

@@ -10,9 +10,9 @@
     add_host: name={{ item }} groups=g_service_masters
     with_items: oo_host_group_exp | default([])
 
-- name: Change openshift-master state on master instance(s)
+- name: Change state on master instance(s)
   hosts: g_service_masters
   connection: ssh
   gather_facts: no
   tasks:
-    - service: name=openshift-master state="{{ new_cluster_state }}"
+    - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"

+ 5 - 5
playbooks/common/openshift-node/config.yml

@@ -22,7 +22,7 @@
           annotations: "{{ openshift_node_annotations | default(None) }}"
   - name: Check status of node certificates
     stat:
-      path: "/etc/openshift/node/{{ item }}"
+      path: "{{ openshift.common.config_base }}/node/{{ item }}"
     with_items:
     - "system:node:{{ openshift.common.hostname }}.crt"
     - "system:node:{{ openshift.common.hostname }}.key"
@@ -35,8 +35,8 @@
       certs_missing: "{{ stat_result.results | map(attribute='stat.exists')
                          | list | intersect([false])}}"
       node_subdir: node-{{ openshift.common.hostname }}
-      config_dir: /etc/openshift/generated-configs/node-{{ openshift.common.hostname }}
-      node_cert_dir: /etc/openshift/node
+      config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
+      node_cert_dir: "{{ openshift.common.config_base }}/node"
 
 - name: Create temp directory for syncing certs
   hosts: localhost
@@ -89,9 +89,9 @@
       path: "{{ node_cert_dir }}"
       state: directory
 
-  # TODO: notify restart openshift-node
+  # TODO: notify restart node
   # possibly test service started time against certificate/config file
-  # timestamps in openshift-node to trigger notify
+  # timestamps in node to trigger notify
   - name: Unarchive the tarball on the node
     unarchive:
       src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"

+ 2 - 2
playbooks/common/openshift-node/service.yml

@@ -10,9 +10,9 @@
     add_host: name={{ item }} groups=g_service_nodes
     with_items: oo_host_group_exp | default([])
 
-- name: Change openshift-node state on node instance(s)
+- name: Change state on node instance(s)
   hosts: g_service_nodes
   connection: ssh
   gather_facts: no
   tasks:
-    - service: name=openshift-node state="{{ new_cluster_state }}"
+    - service: name={{ openshift.common.service_type }}-node state="{{ new_cluster_state }}"

+ 1 - 1
roles/openshift_common/tasks/main.yml

@@ -1,5 +1,5 @@
 ---
-- name: Set common OpenShift facts
+- name: Set common Cluster facts
   openshift_facts:
     role: common
     local_facts:

+ 1 - 1
roles/openshift_common/vars/main.yml

@@ -6,4 +6,4 @@
 # interfaces)
 os_firewall_use_firewalld: False
 
-openshift_data_dir: /var/lib/openshift
+openshift_data_dir: /var/lib/origin

+ 50 - 30
roles/openshift_facts/library/openshift_facts.py

@@ -6,7 +6,7 @@
 DOCUMENTATION = '''
 ---
 module: openshift_facts
-short_description: OpenShift Facts
+short_description: Cluster Facts
 author: Jason DeTiberus
 requirements: [ ]
 '''
@@ -283,28 +283,6 @@ def normalize_provider_facts(provider, metadata):
         facts = normalize_openstack_facts(metadata, facts)
     return facts
 
-def set_registry_url_if_unset(facts):
-    """ Set registry_url fact if not already present in facts dict
-
-        Args:
-            facts (dict): existing facts
-        Returns:
-            dict: the facts dict updated with the generated identity providers
-            facts if they were not already present
-    """
-    for role in ('master', 'node'):
-        if role in facts:
-            deployment_type = facts['common']['deployment_type']
-            if 'registry_url' not in facts[role]:
-                registry_url = "openshift/origin-${component}:${version}"
-                if deployment_type == 'enterprise':
-                    registry_url = "openshift3/ose-${component}:${version}"
-                elif deployment_type == 'online':
-                    registry_url = ("openshift3/ose-${component}:${version}")
-                facts[role]['registry_url'] = registry_url
-
-    return facts
-
 def set_fluentd_facts_if_unset(facts):
     """ Set fluentd facts if not already present in facts dict
             dict: the facts dict updated with the generated fluentd facts if
@@ -448,6 +426,48 @@ def set_aggregate_facts(facts):
 
     return facts
 
+def set_deployment_facts_if_unset(facts):
+    """ Set Facts that vary based on deployment_type. This currently
+        includes common.service_type, common.config_base, master.registry_url,
+        node.registry_url
+
+        Args:
+            facts (dict): existing facts
+        Returns:
+            dict: the facts dict updated with the generated deployment_type
+            facts
+    """
+    if 'common' in facts:
+        deployment_type = facts['common']['deployment_type']
+        if 'service_type' not in facts['common']:
+            service_type = 'atomic-openshift'
+            if deployment_type == 'origin':
+                service_type = 'origin'
+            elif deployment_type in ['enterprise', 'online']:
+                service_type = 'openshift'
+            facts['common']['service_type'] = service_type
+        if 'config_base' not in facts['common']:
+            config_base = '/etc/origin'
+            if deployment_type in ['enterprise', 'online']:
+                config_base = '/etc/openshift'
+            elif deployment_type == 'origin':
+                config_base = '/etc/origin'
+            facts['common']['config_base'] = config_base
+
+    for role in ('master', 'node'):
+        if role in facts:
+            deployment_type = facts['common']['deployment_type']
+            if 'registry_url' not in facts[role]:
+                registry_url = 'aos3/aos-${component}:${version}'
+                if deployment_type in ['enterprise', 'online']:
+                    registry_url = 'openshift3/ose-${component}:${version}'
+                elif deployment_type == 'origin':
+                    registry_url = 'openshift/origin-${component}:${version}'
+                facts[role]['registry_url'] = registry_url
+
+    return facts
+
+
 def set_sdn_facts_if_unset(facts):
     """ Set sdn facts if not already present in facts dict
 
@@ -510,7 +530,7 @@ def get_current_config(facts):
         # anything from working properly as far as I can tell, perhaps because
         # we override the kubeconfig path everywhere we use it?
         # Query kubeconfig settings
-        kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates'
+        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
         if role == 'node':
             kubeconfig_dir = os.path.join(
                 kubeconfig_dir, "node-%s" % facts['common']['hostname']
@@ -657,25 +677,25 @@ def get_local_facts_from_file(filename):
 
 
 class OpenShiftFactsUnsupportedRoleError(Exception):
-    """OpenShift Facts Unsupported Role Error"""
+    """Origin Facts Unsupported Role Error"""
     pass
 
 
 class OpenShiftFactsFileWriteError(Exception):
-    """OpenShift Facts File Write Error"""
+    """Origin Facts File Write Error"""
     pass
 
 
 class OpenShiftFactsMetadataUnavailableError(Exception):
-    """OpenShift Facts Metadata Unavailable Error"""
+    """Origin Facts Metadata Unavailable Error"""
     pass
 
 
 class OpenShiftFacts(object):
-    """ OpenShift Facts
+    """ Origin Facts
 
         Attributes:
-            facts (dict): OpenShift facts for the host
+            facts (dict): facts for the host
 
         Args:
             role (str): role for setting local facts
@@ -720,8 +740,8 @@ class OpenShiftFacts(object):
         facts = set_fluentd_facts_if_unset(facts)
         facts = set_cluster_metrics_facts_if_unset(facts)
         facts = set_identity_providers_if_unset(facts)
-        facts = set_registry_url_if_unset(facts)
         facts = set_sdn_facts_if_unset(facts)
+        facts = set_deployment_facts_if_unset(facts)
         facts = set_aggregate_facts(facts)
         return dict(openshift=facts)
 

+ 1 - 1
roles/openshift_facts/tasks/main.yml

@@ -6,5 +6,5 @@
     - ansible_version | version_compare('1.9.0', 'ne')
     - ansible_version | version_compare('1.9.0.1', 'ne')
 
-- name: Gather OpenShift facts
+- name: Gather Cluster facts
   openshift_facts:

+ 5 - 5
roles/openshift_master/README.md

@@ -1,7 +1,7 @@
-OpenShift Master
-================
+OpenShift/Atomic Enterprise Master
+==================================
 
-OpenShift Master service installation
+Master service installation
 
 Requirements
 ------------
@@ -15,8 +15,8 @@ Role Variables
 From this role:
 | Name                                | Default value         |                                                  |
 |-------------------------------------|-----------------------|--------------------------------------------------|
-| openshift_master_debug_level        | openshift_debug_level | Verbosity of the debug logs for openshift-master |
-| openshift_node_ips                  | []                    | List of the openshift node ip addresses to pre-register when openshift-master starts up |
+| openshift_master_debug_level        | openshift_debug_level | Verbosity of the debug logs for master |
+| openshift_node_ips                  | []                    | List of the openshift node ip addresses to pre-register when master starts up |
 | oreg_url                            | UNDEF                 | Default docker registry to use |
 | openshift_master_api_port           | UNDEF                 | |
 | openshift_master_console_port       | UNDEF                 | |

+ 5 - 5
roles/openshift_master/defaults/main.yml

@@ -5,11 +5,11 @@ openshift_node_ips: []
 os_firewall_allow:
 - service: etcd embedded
   port: 4001/tcp
-- service: OpenShift api https
+- service: api server https
   port: 8443/tcp
-- service: OpenShift dns tcp
+- service: dns tcp
   port: 53/tcp
-- service: OpenShift dns udp
+- service: dns udp
   port: 53/udp
 - service: Fluentd td-agent tcp
   port: 24224/tcp
@@ -22,9 +22,9 @@ os_firewall_allow:
 - service: Corosync UDP
   port: 5405/udp
 os_firewall_deny:
-- service: OpenShift api http
+- service: api server http
   port: 8080/tcp
-- service: former OpenShift web console port
+- service: former web console port
   port: 8444/tcp
 - service: former etcd peer port
   port: 7001/tcp

+ 2 - 2
roles/openshift_master/handlers/main.yml

@@ -1,4 +1,4 @@
 ---
-- name: restart openshift-master
-  service: name=openshift-master state=restarted
+- name: restart master
+  service: name={{ openshift.common.service_type }}-master state=restarted
   when: not openshift_master_ha | bool

+ 1 - 1
roles/openshift_master/meta/main.yml

@@ -1,7 +1,7 @@
 ---
 galaxy_info:
   author: Jhon Honce
-  description: OpenShift Master
+  description: Master
   company: Red Hat, Inc.
   license: Apache License, Version 2.0
   min_ansible_version: 1.7

+ 38 - 17
roles/openshift_master/tasks/main.yml

@@ -12,11 +12,7 @@
     msg: "openshift_master_cluster_password must be set for multi-master installations"
   when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined
 
-- name: Install OpenShift Master package
-  yum: pkg=openshift-master state=present
-  register: install_result
-
-- name: Set master OpenShift facts
+- name: Set master facts
   openshift_facts:
     role: master
     local_facts:
@@ -59,8 +55,26 @@
       api_server_args: "{{ osm_api_server_args | default(None) }}"
       controller_args: "{{ osm_controller_args | default(None) }}"
 
+- name: Install Master package
+  yum: pkg={{ openshift.common.service_type }}-master state=present
+  register: install_result
+
+- name: Check for RPM generated config marker file /etc/origin/.rpmgenerated
+  stat: path=/etc/origin/.rpmgenerated
+  register: rpmgenerated_config
+
+- name: Remove RPM generated config files
+  file:
+    path: "{{ item }}"
+    state: absent
+  when: openshift.common.service_type in ['atomic-enterprise','openshift-enterprise'] and rpmgenerated_config.stat.exists == true
+  with_items:
+    - "{{ openshift.common.config_base }}/master"
+    - "{{ openshift.common.config_base }}/node"
+    - "{{ openshift.common.config_base }}/.rpmgenerated"
+
 # TODO: These values need to be configurable
-- name: Set dns OpenShift facts
+- name: Set dns facts
   openshift_facts:
     role: dns
     local_facts:
@@ -80,20 +94,27 @@
   args:
     creates: "{{ openshift_master_policy }}"
   notify:
-  - restart openshift-master
+  - restart master
 
 - name: Create the scheduler config
   template:
     dest: "{{ openshift_master_scheduler_conf }}"
     src: scheduler.json.j2
   notify:
-  - restart openshift-master
+  - restart master
 
 - name: Install httpd-tools if needed
   yum: pkg=httpd-tools state=present
   when: item.kind == 'HTPasswdPasswordIdentityProvider'
   with_items: openshift.master.identity_providers
 
+- name: Ensure htpasswd directory exists
+  file:
+    path: "{{ item.filename | dirname }}"
+    state: directory
+  when: item.kind == 'HTPasswdPasswordIdentityProvider'
+  with_items: openshift.master.identity_providers
+
 - name: Create the htpasswd file if needed
   copy:
     dest: "{{ item.filename }}"
@@ -109,11 +130,11 @@
     dest: "{{ openshift_master_config_file }}"
     src: master.yaml.v1.j2
   notify:
-  - restart openshift-master
+  - restart master
 
-- name: Configure OpenShift settings
+- name: Configure master settings
   lineinfile:
-    dest: /etc/sysconfig/openshift-master
+    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
     regexp: "{{ item.regex }}"
     line: "{{ item.line }}"
   with_items:
@@ -122,10 +143,10 @@
     - regex: '^CONFIG_FILE='
       line: "CONFIG_FILE={{ openshift_master_config_file }}"
   notify:
-  - restart openshift-master
+  - restart master
 
-- name: Start and enable openshift-master
-  service: name=openshift-master enabled=yes state=started
+- name: Start and enable master
+  service: name={{ openshift.common.service_type }}-master enabled=yes state=started
   when: not openshift_master_ha | bool
   register: start_result
 
@@ -146,7 +167,7 @@
   shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
   when: install_result | changed
 
-- name: Create the OpenShift client config dir(s)
+- name: Create the client config dir(s)
   file:
     path: "~{{ item }}/.kube"
     state: directory
@@ -159,7 +180,7 @@
 
 # TODO: Update this file if the contents of the source file are not present in
 # the dest file, will need to make sure to ignore things that could be added
-- name: Copy the OpenShift admin client config(s)
+- name: Copy the admin client config(s)
   command: cp {{ openshift_master_config_dir }}/admin.kubeconfig ~{{ item }}/.kube/config
   args:
     creates: ~{{ item }}/.kube/config
@@ -167,7 +188,7 @@
   - root
   - "{{ ansible_ssh_user }}"
 
-- name: Update the permissions on the OpenShift admin client config(s)
+- name: Update the permissions on the admin client config(s)
   file:
     path: "~{{ item }}/.kube/config"
     state: file

+ 1 - 1
roles/openshift_master/vars/main.yml

@@ -1,5 +1,5 @@
 ---
-openshift_master_config_dir: /etc/openshift/master
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
 openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
 openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
 openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"

+ 2 - 2
roles/openshift_master_ca/tasks/main.yml

@@ -1,6 +1,6 @@
 ---
-- name: Install the OpenShift package for admin tooling
-  yum: pkg=openshift state=present
+- name: Install the base package for admin tooling
+  yum: pkg={{ openshift.common.service_type }} state=present
   register: install_result
 
 - name: Reload generated facts

+ 1 - 1
roles/openshift_master_ca/vars/main.yml

@@ -1,5 +1,5 @@
 ---
-openshift_master_config_dir: /etc/openshift/master
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
 openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
 openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
 openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"

+ 2 - 2
roles/openshift_master_certificates/vars/main.yml

@@ -1,3 +1,3 @@
 ---
-openshift_generated_configs_dir: /etc/openshift/generated-configs
-openshift_master_config_dir: /etc/openshift/master
+openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs"
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"

+ 4 - 4
roles/openshift_master_cluster/tasks/configure.yml

@@ -22,14 +22,14 @@
   command: pcs resource defaults resource-stickiness=100
 
 - name: Add the cluster VIP resource
-  command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group openshift-master
+  command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group {{ openshift.common.service_type }}-master
 
 - name: Add the cluster public VIP resource
-  command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group openshift-master
+  command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group {{ openshift.common.service_type }}-master
   when: openshift_master_cluster_public_vip != openshift_master_cluster_vip
 
-- name: Add the cluster openshift-master service resource
-  command: pcs resource create master systemd:openshift-master op start timeout=90s stop timeout=90s --group openshift-master
+- name: Add the cluster master service resource
+  command: pcs resource create master systemd:{{ openshift.common.service_type }}-master op start timeout=90s stop timeout=90s --group {{ openshift.common.service_type }}-master
 
 - name: Disable stonith
   command: pcs property set stonith-enabled=false

+ 2 - 2
roles/openshift_master_cluster/tasks/configure_deferred.yml

@@ -1,8 +1,8 @@
 ---
 - debug: msg="Deferring config"
 
-- name: Start and enable openshift-master
+- name: Start and enable the master
   service:
-    name: openshift-master
+    name: "{{ openshift.common.service_type }}-master"
     state: started
     enabled: yes

+ 8 - 8
roles/openshift_node/README.md

@@ -1,12 +1,12 @@
-OpenShift Node
-==============
+OpenShift/Atomic Enterprise Node
+================================
 
-OpenShift Node service installation
+Node service installation
 
 Requirements
 ------------
 
-One or more OpenShift Master servers.
+One or more Master servers.
 
 A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
 rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos.
@@ -14,10 +14,10 @@ rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos.
 Role Variables
 --------------
 From this role:
-| Name                                     | Default value         |                                        |
-|------------------------------------------|-----------------------|----------------------------------------|
-| openshift_node_debug_level               | openshift_debug_level | Verbosity of the debug logs for openshift-node |
-| oreg_url                                 | UNDEF (Optional)      | Default docker registry to use |
+| Name                                     | Default value         |                                                        |
+|------------------------------------------|-----------------------|--------------------------------------------------------|
+| openshift_node_debug_level               | openshift_debug_level | Verbosity of the debug logs for node |
+| oreg_url                                 | UNDEF (Optional)      | Default docker registry to use                         |
 
 From openshift_common:
 | Name                          |  Default Value      |                     |

+ 1 - 1
roles/openshift_node/defaults/main.yml

@@ -1,6 +1,6 @@
 ---
 os_firewall_allow:
-- service: OpenShift kubelet
+- service: Kubernetes kubelet
   port: 10250/tcp
 - service: http
   port: 80/tcp

+ 2 - 2
roles/openshift_node/handlers/main.yml

@@ -1,6 +1,6 @@
 ---
-- name: restart openshift-node
-  service: name=openshift-node state=restarted
+- name: restart node
+  service: name={{ openshift.common.service_type }}-node state=restarted
 
 - name: restart docker
   service: name=docker state=restarted

+ 17 - 17
roles/openshift_node/tasks/main.yml

@@ -10,16 +10,7 @@
     msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
   when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online']
 
-- name: Install OpenShift Node package
-  yum: pkg=openshift-node state=present
-  register: node_install_result
-
-- name: Install openshift-sdn-ovs
-  yum: pkg=openshift-sdn-ovs state=present
-  register: sdn_install_result
-  when: openshift.common.use_openshift_sdn
-
-- name: Set node OpenShift facts
+- name: Set node facts
   openshift_facts:
     role: "{{ item.role }}"
     local_facts: "{{ item.local_facts }}"
@@ -38,17 +29,26 @@
       portal_net: "{{ openshift_master_portal_net | default(None) }}"
       kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
 
+- name: Install Node package
+  yum: pkg={{ openshift.common.service_type }}-node state=present
+  register: node_install_result
+
+- name: Install sdn-ovs package
+  yum: pkg={{ openshift.common.service_type }}-sdn-ovs state=present
+  register: sdn_install_result
+  when: openshift.common.use_openshift_sdn
+
 # TODO: add the validate parameter when there is a validation command to run
 - name: Create the Node config
   template:
     dest: "{{ openshift_node_config_file }}"
     src: node.yaml.v1.j2
   notify:
-  - restart openshift-node
+  - restart node
 
-- name: Configure OpenShift Node settings
+- name: Configure Node settings
   lineinfile:
-    dest: /etc/sysconfig/openshift-node
+    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
     regexp: "{{ item.regex }}"
     line: "{{ item.line }}"
   with_items:
@@ -57,13 +57,13 @@
     - regex: '^CONFIG_FILE='
       line: "CONFIG_FILE={{ openshift_node_config_file }}"
   notify:
-  - restart openshift-node
+  - restart node
 
 - stat: path=/etc/sysconfig/docker
   register: docker_check
 
   # TODO: Enable secure registry when code available in origin
-- name: Secure OpenShift Registry
+- name: Secure Registry
   lineinfile:
     dest: /etc/sysconfig/docker
     regexp: '^OPTIONS=.*'
@@ -117,8 +117,8 @@
   seboolean: name=virt_use_nfs state=yes persistent=yes
   when: ansible_selinux and ansible_selinux.status == "enabled"
 
-- name: Start and enable openshift-node
-  service: name=openshift-node enabled=yes state=started
+- name: Start and enable node
+  service: name={{ openshift.common.service_type }}-node enabled=yes state=started
   register: start_result
 
 - name: pause to prevent service restart from interfering with bootstrapping

+ 1 - 1
roles/openshift_node/vars/main.yml

@@ -1,3 +1,3 @@
 ---
-openshift_node_config_dir: /etc/openshift/node
+openshift_node_config_dir: "{{ openshift.common.config_base }}/node"
 openshift_node_config_file: "{{ openshift_node_config_dir }}/node-config.yaml"

+ 2 - 2
roles/openshift_node_certificates/README.md

@@ -1,5 +1,5 @@
-OpenShift Node Certificates
-========================
+OpenShift/Atomic Enterprise Node Certificates
+=============================================
 
 TODO
 

+ 3 - 3
roles/openshift_node_certificates/vars/main.yml

@@ -1,7 +1,7 @@
 ---
-openshift_node_config_dir: /etc/openshift/node
-openshift_master_config_dir: /etc/openshift/master
-openshift_generated_configs_dir: /etc/openshift/generated-configs
+openshift_node_config_dir: "{{ openshift.common.config_base }}/node"
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs"
 openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
 openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
 openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"

+ 1 - 2
roles/openshift_registry/vars/main.yml

@@ -1,3 +1,2 @@
 ---
-openshift_master_config_dir: /etc/openshift/master
-
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"

+ 6 - 1
roles/openshift_repos/vars/main.yml

@@ -1,2 +1,7 @@
 ---
-known_openshift_deployment_types: ['origin', 'online', 'enterprise']
+# origin uses community packages named 'origin'
+# online currently uses 'openshift' packages
+# enterprise is used for OSE 3.0 < 3.1 which uses packages named 'openshift'
+# atomic-enterprise uses Red Hat packages named 'atomic-openshift'
+# openshift-enterprise uses Red Hat packages named 'atomic-openshift' starting with OSE 3.1
+known_openshift_deployment_types: ['origin', 'online', 'enterprise','atomic-enterprise','openshift-enterprise']

+ 1 - 2
roles/openshift_router/vars/main.yml

@@ -1,3 +1,2 @@
 ---
-openshift_master_config_dir: /etc/openshift/master
-
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"

+ 1 - 1
roles/openshift_storage_nfs_lvm/tasks/main.yml

@@ -21,4 +21,4 @@
   template: src=../templates/nfs.json.j2 dest=/root/persistent-volume.{{ item }}.json
   with_sequence: start={{osnl_volume_num_start}} count={{osnl_number_of_volumes}} format={{osnl_volume_prefix}}{{osnl_volume_size}}g%04d
 
-# TODO - Get the json files to an openshift-master, and load them.
+# TODO - Get the json files to a master, and load them.