Merge pull request #1843 from detiber/remove_deprecated_roles

Remove deprecated roles
Jason DeTiberus, 9 years ago
commit ada49b7ecc
47 changed files with 59 additions and 706 deletions
  1. README_AWS.md (+2 -2)
  2. README_vagrant.md (+1 -1)
  3. bin/cluster (+1 -1)
  4. playbooks/adhoc/noc/filter_plugins (+0 -1)
  5. playbooks/adhoc/noc/roles (+0 -1)
  6. playbooks/aws/ansible-tower/config.yml (+0 -24)
  7. playbooks/aws/ansible-tower/filter_plugins (+0 -1)
  8. playbooks/aws/ansible-tower/launch.yml (+0 -79)
  9. playbooks/aws/ansible-tower/roles (+0 -1)
  10. playbooks/aws/ansible-tower/user_data.txt (+0 -6)
  11. playbooks/aws/ansible-tower/vars.ops.yml (+0 -9)
  12. playbooks/aws/ansible-tower/vars.yml (+0 -1)
  13. playbooks/aws/openshift-cluster/add_nodes.yml (+0 -5)
  14. playbooks/aws/openshift-cluster/config.yml (+3 -1)
  15. playbooks/aws/openshift-cluster/launch.yml (+0 -5)
  16. playbooks/aws/openshift-cluster/tasks/launch_instances.yml (+33 -60)
  17. playbooks/aws/openshift-cluster/templates/user_data.j2 (+0 -31)
  18. playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml (+1 -2)
  19. playbooks/aws/openshift-cluster/vars.defaults.yml (+0 -1)
  20. playbooks/aws/openshift-cluster/vars.online.int.yml (+0 -15)
  21. playbooks/aws/openshift-cluster/vars.online.prod.yml (+0 -15)
  22. playbooks/aws/openshift-cluster/vars.online.stage.yml (+0 -15)
  23. playbooks/aws/openshift-cluster/vars.yml (+17 -28)
  24. playbooks/common/openshift-cluster/additional_config.yml (+0 -4)
  25. playbooks/common/openshift-master/config.yml (+0 -7)
  26. playbooks/common/openshift-node/config.yml (+0 -8)
  27. playbooks/gce/openshift-cluster/vars.yml (+0 -5)
  28. playbooks/libvirt/openshift-cluster/launch.yml (+0 -3)
  29. playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml (+1 -1)
  30. playbooks/libvirt/openshift-cluster/vars.yml (+0 -7)
  31. playbooks/openstack/openshift-cluster/launch.yml (+0 -4)
  32. playbooks/openstack/openshift-cluster/vars.yml (+0 -4)
  33. roles/os_env_extras/files/irbrc (+0 -2)
  34. roles/os_env_extras/files/vimrc (+0 -12)
  35. roles/os_env_extras/tasks/main.yaml (+0 -16)
  36. roles/os_env_extras_node/tasks/main.yml (+0 -5)
  37. roles/pods/README.md (+0 -38)
  38. roles/pods/files/pods/docker-registry.json (+0 -30)
  39. roles/pods/files/pods/fedora_apache.json (+0 -23)
  40. roles/pods/files/pods/frontend-controller.json (+0 -23)
  41. roles/pods/files/pods/redis-master-service.json (+0 -10)
  42. roles/pods/files/pods/redis-master.json (+0 -22)
  43. roles/pods/files/pods/redis-slave-controller.json (+0 -24)
  44. roles/pods/files/pods/redis-slave-service.json (+0 -13)
  45. roles/pods/files/pods/registry-service.json (+0 -10)
  46. roles/pods/meta/main.yml (+0 -124)
  47. roles/pods/tasks/main.yml (+0 -6)

+ 2 - 2
README_AWS.md

@@ -178,9 +178,9 @@ Terminating a cluster
 Specifying a deployment type
 ---------------------------
 The --deployment-type flag can be passed to bin/cluster to specify the deployment type
-1. To launch an online cluster (requires access to private repositories and amis):
+1. To launch an OpenShift Enterprise cluster (requires a valid subscription):
 ```
-  bin/cluster create aws --deployment-type=online <cluster-id>
+  bin/cluster create aws --deployment-type=openshift-enterprise <cluster-id>
 ```
 Note: If no deployment type is specified, then the default is origin.
 
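For comparison, launching with the default deployment type needs no flag at all; per the note above, both of the following should be equivalent (a usage sketch, with `<cluster-id>` as a placeholder):
```
  bin/cluster create aws <cluster-id>
  bin/cluster create aws --deployment-type=origin <cluster-id>
```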

+ 1 - 1
README_vagrant.md

@@ -42,7 +42,7 @@ vagrant provision
 Environment Variables
 ---------------------
 The following environment variables can be overridden:
-- ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, enterprise, online)
+- ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, openshift-enterprise)
 - ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2)
 
 Note that if ``OPENSHIFT_DEPLOYMENT_TYPE`` is ``enterprise`` you should also specify environment variables related to ``subscription-manager`` which are used by the ``rhel_subscribe`` role:

+ 1 - 1
bin/cluster

@@ -316,7 +316,7 @@ This wrapper is overriding the following ansible variables:
         meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
 
     meta_parser.add_argument('-t', '--deployment-type',
-                             choices=['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise'],
+                             choices=['origin', 'atomic-enterprise', 'openshift-enterprise'],
                              help='Deployment type. (default: origin)')
     meta_parser.add_argument('-o', '--option', action='append',
                              help='options')
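With `online` and `enterprise` removed from the argparse choices, any existing automation that still passes them will now fail at argument parsing rather than partway through a run. A sketch of the expected failure (exact usage text depends on the parser configuration; `mycluster` is a hypothetical cluster id):
```
$ bin/cluster create aws --deployment-type=online mycluster
error: argument -t/--deployment-type: invalid choice: 'online'
(choose from 'origin', 'atomic-enterprise', 'openshift-enterprise')
```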

+ 0 - 1
playbooks/adhoc/noc/filter_plugins

@@ -1 +0,0 @@
-../../../filter_plugins

+ 0 - 1
playbooks/adhoc/noc/roles

@@ -1 +0,0 @@
-../../../roles

+ 0 - 24
playbooks/aws/ansible-tower/config.yml

@@ -1,24 +0,0 @@
----
-- name: "populate oo_hosts_to_config host group if needed"
-  hosts: localhost
-  gather_facts: no
-  connection: local
-  become: no
-  tasks:
-  - name: Evaluate oo_host_group_exp if it's set
-    add_host: "name={{ item }} groups=oo_hosts_to_config"
-    with_items: "{{ oo_host_group_exp | default(['']) }}"
-    when: oo_host_group_exp is defined
-
-- name: "Configure instances"
-  hosts: oo_hosts_to_config
-  connection: ssh
-  user: root
-  vars_files:
-    - vars.yml
-    - "vars.{{ oo_env }}.yml"
-  roles:
-    - os_ipv6_disable
-    - ansible
-    - ansible_tower
-    - os_env_extras

+ 0 - 1
playbooks/aws/ansible-tower/filter_plugins

@@ -1 +0,0 @@
-../../../filter_plugins

+ 0 - 79
playbooks/aws/ansible-tower/launch.yml

@@ -1,79 +0,0 @@
----
-- name: Launch instance(s)
-  hosts: localhost
-  connection: local
-  become: no
-  gather_facts: no
-
-  vars:
-    inst_region: us-east-1
-    rhel7_ami: ami-9101c8fa
-    user_data_file: user_data.txt
-
-  vars_files:
-    - vars.yml
-    - "vars.{{ oo_env }}.yml"
-
-  tasks:
-    - name: Launch instances in VPC
-      ec2:
-        state: present
-        region: "{{ inst_region }}"
-        keypair: mmcgrath_libra
-        group_id: "{{ oo_security_group_ids }}"
-        instance_type: c4.xlarge
-        image: "{{ rhel7_ami }}"
-        count: "{{ oo_new_inst_names | length }}"
-        user_data: "{{ lookup('file', user_data_file) }}"
-        wait: yes
-        assign_public_ip: "{{ oo_assign_public_ip }}"
-        vpc_subnet_id: "{{ oo_vpc_subnet_id }}"
-      register: ec2
-
-    - name: Add Name and environment tags to instances
-      ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
-      with_together:
-        - oo_new_inst_names
-        - ec2.instances
-      args:
-        tags:
-          Name: "{{ item.0 }}"
-
-    - name: Add other tags to instances
-      ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
-      with_items: ec2.instances
-      args:
-        tags: "{{ oo_new_inst_tags }}"
-
-    - name: Add new instances public IPs to oo_hosts_to_config
-      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.public_ip }} groupname=oo_hosts_to_config"
-      with_together:
-        - oo_new_inst_names
-        - ec2.instances
-
-    - debug: var=ec2
-
-    - name: Wait for ssh
-      wait_for: "port=22 host={{ item.public_ip }}"
-      with_items: ec2.instances
-
-    - name: Wait for root user setup
-      command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
-      register: result
-      until: result.rc == 0
-      retries: 20
-      delay: 10
-      with_items: ec2.instances
-
-- name: Initial setup
-  hosts: oo_hosts_to_config
-  user: root
-  gather_facts: true
-
-  tasks:
-
-    - name: Update All Things
-      action: "{{ ansible_pkg_mgr }} name=* state=latest"
-
-# Apply the configs, seprate so that just the configs can be run by themselves
-- include: config.yml

+ 0 - 1
playbooks/aws/ansible-tower/roles

@@ -1 +0,0 @@
-../../../roles

+ 0 - 6
playbooks/aws/ansible-tower/user_data.txt

@@ -1,6 +0,0 @@
-#cloud-config
-disable_root: 0
-
-system_info:
-  default_user:
-    name: root

+ 0 - 9
playbooks/aws/ansible-tower/vars.ops.yml

@@ -1,9 +0,0 @@
----
-oo_env_long: operations
-oo_zabbix_hostgroups: ['OPS Environment']
-oo_vpc_subnet_id: subnet-4f0bdd38  # USE OPS
-oo_assign_public_ip: yes
-oo_security_group_ids:
-  - sg-02c2f267 # Libra (vpc)
-  - sg-7fc4f41a # ops (vpc)
-  - sg-4dc26829 # ops_tower (vpc)

+ 0 - 1
playbooks/aws/ansible-tower/vars.yml

@@ -1 +0,0 @@
----

+ 0 - 5
playbooks/aws/openshift-cluster/add_nodes.yml

@@ -6,14 +6,9 @@
   gather_facts: no
   vars_files:
   - vars.yml
-  - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
   vars:
     oo_extend_env: True
   tasks:
-  - fail:
-      msg: Deployment type not supported for aws provider yet
-    when: deployment_type == 'enterprise'
-
   - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "compute"

+ 3 - 1
playbooks/aws/openshift-cluster/config.yml

@@ -13,7 +13,9 @@
     openshift_registry_selector: 'type=infra'
     openshift_hosted_router_selector: 'type=infra'
     openshift_infra_nodes: "{{ g_infra_hosts }}"
-    openshift_node_labels: '{"region": "{{ ec2_region }}", "type": "{{ hostvars[inventory_hostname]["ec2_tag_sub-host-type"] if inventory_hostname in groups["tag_host-type_node"] else hostvars[inventory_hostname]["ec2_tag_host-type"] }}"}'
+    openshift_node_labels:
+      region: "{{ deployment_vars[deployment_type].region }}"
+      type: "{{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] if inventory_hostname in groups['tag_host-type_node'] else hostvars[inventory_hostname]['ec2_tag_host-type'] }}"
     openshift_master_cluster_method: 'native'
     openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
     os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
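Passing `openshift_node_labels` as a native YAML mapping instead of an embedded JSON string removes a layer of quoting and JSON parsing; the rendered value has the same shape either way, e.g. (hypothetical values):
```
openshift_node_labels:
  region: us-east-1
  type: infra
```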

+ 0 - 5
playbooks/aws/openshift-cluster/launch.yml

@@ -6,12 +6,7 @@
   gather_facts: no
   vars_files:
   - vars.yml
-  - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
   tasks:
-  - fail:
-      msg: Deployment type not supported for aws provider yet
-    when: deployment_type == 'enterprise'
-
   - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
   - include: tasks/launch_instances.yml
     vars:

+ 33 - 60
playbooks/aws/openshift-cluster/tasks/launch_instances.yml

@@ -8,80 +8,50 @@
     sub_host_type: "{{ g_sub_host_type }}"
 
 - set_fact:
-    ec2_region: "{{ lookup('env', 'ec2_region')
-                    | default(deployment_vars[deployment_type].region, true) }}"
-  when: ec2_region is not defined
-- set_fact:
-    ec2_image_name: "{{ lookup('env', 'ec2_image_name')
-                        | default(deployment_vars[deployment_type].image_name, true) }}"
-  when: ec2_image_name is not defined and ec2_image is not defined
-- set_fact:
-    ec2_image: "{{ lookup('env', 'ec2_image')
-                   | default(deployment_vars[deployment_type].image, true) }}"
-  when: ec2_image is not defined and not ec2_image_name
-- set_fact:
-    ec2_keypair: "{{ lookup('env', 'ec2_keypair')
-                    | default(deployment_vars[deployment_type].keypair, true) }}"
-  when: ec2_keypair is not defined
-- set_fact:
-    ec2_vpc_subnet: "{{ lookup('env', 'ec2_vpc_subnet')
-                    | default(deployment_vars[deployment_type].vpc_subnet, true) }}"
-  when: ec2_vpc_subnet is not defined
-- set_fact:
-    ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip')
-                    | default(deployment_vars[deployment_type].assign_public_ip, true) }}"
-  when: ec2_assign_public_ip is not defined
-
-- set_fact:
-    ec2_instance_type: "{{ ec2_master_instance_type | default(lookup('env', 'ec2_master_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
-    ec2_security_groups: "{{ ec2_master_security_groups | default(lookup('env', 'ec2_master_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
+    ec2_instance_type: "{{ lookup('env', 'ec2_master_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
+    ec2_security_groups: "{{ lookup('env', 'ec2_master_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
   when: host_type == "master" and sub_host_type == "default"
 
 - set_fact:
-    ec2_instance_type: "{{ ec2_etcd_instance_type | default(lookup('env', 'ec2_etcd_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
-    ec2_security_groups: "{{ ec2_etcd_security_groups | default(lookup('env', 'ec2_etcd_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
+    ec2_instance_type: "{{ lookup('env', 'ec2_etcd_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
+    ec2_security_groups: "{{ lookup('env', 'ec2_etcd_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
   when: host_type == "etcd" and sub_host_type == "default"
 
 - set_fact:
-    ec2_instance_type: "{{ ec2_infra_instance_type | default(lookup('env', 'ec2_infra_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
-    ec2_security_groups: "{{ ec2_infra_security_groups | default(lookup('env', 'ec2_infra_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
+    ec2_instance_type: "{{ lookup('env', 'ec2_infra_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
+    ec2_security_groups: "{{ lookup('env', 'ec2_infra_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
   when: host_type == "node" and sub_host_type == "infra"
 
 - set_fact:
-    ec2_instance_type: "{{ ec2_node_instance_type | default(lookup('env', 'ec2_node_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
-    ec2_security_groups: "{{ ec2_node_security_groups | default(lookup('env', 'ec2_node_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
+    ec2_instance_type: "{{ lookup('env', 'ec2_node_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
+    ec2_security_groups: "{{ lookup('env', 'ec2_node_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
   when: host_type == "node" and sub_host_type == "compute"
 
 - set_fact:
-    ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
-                          | default(deployment_vars[deployment_type].type, true) }}"
+    ec2_instance_type: "{{ deployment_vars[deployment_type].type }}"
   when: ec2_instance_type is not defined
 - set_fact:
-    ec2_security_groups: "{{ lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
+    ec2_security_groups: "{{ deployment_vars[deployment_type].security_groups }}"
   when: ec2_security_groups is not defined
 
 - name: Find amis for deployment_type
   ec2_ami_find:
-    region: "{{ ec2_region }}"
-    ami_id: "{{ ec2_image | default(omit, true) }}"
-    name: "{{ ec2_image_name | default(omit, true) }}"
+    region: "{{ deployment_vars[deployment_type].region }}"
+    ami_id: "{{ deployment_vars[deployment_type].image }}"
+    name: "{{ deployment_vars[deployment_type].image_name }}"
   register: ami_result
 
 - fail: msg="Could not find requested ami"
   when: not ami_result.results
 
 - set_fact:
-    latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
+    latest_ami: "{{ ami_result.results | oo_ami_selector(deployment_vars[deployment_type].image_name) }}"
     volume_defs:
       etcd:
         root:
           volume_size: "{{ lookup('env', 'os_etcd_root_vol_size') | default(25, true) }}"
           device_type: "{{ lookup('env', 'os_etcd_root_vol_type') | default('gp2', true) }}"
           iops: "{{ lookup('env', 'os_etcd_root_vol_iops') | default(500, true) }}"
-        etcd:
-          volume_size: "{{ lookup('env', 'os_etcd_vol_size') | default(32, true) }}"
-          device_type: "{{ lookup('env', 'os_etcd_vol_type') | default('gp2', true) }}"
-          iops: "{{ lookup('env', 'os_etcd_vol_iops') | default(500, true) }}"
       master:
         root:
           volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
@@ -107,14 +77,14 @@
 - name: Launch instance(s)
   ec2:
     state: present
-    region: "{{ ec2_region }}"
-    keypair: "{{ ec2_keypair }}"
-    group: "{{ ec2_security_groups }}"
+    region: "{{ deployment_vars[deployment_type].region }}"
+    keypair: "{{ deployment_vars[deployment_type].keypair }}"
+    group: "{{ deployment_vars[deployment_type].security_groups }}"
     instance_type: "{{ ec2_instance_type }}"
-    image: "{{ latest_ami }}"
+    image: "{{ deployment_vars[deployment_type].image }}"
     count: "{{ instances | length }}"
-    vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}"
-    assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}"
+    vpc_subnet_id: "{{ deployment_vars[deployment_type].vpc_subnet }}"
+    assign_public_ip: "{{ deployment_vars[deployment_type].assign_public_ip }}"
     user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
     wait: yes
     instance_tags:
@@ -127,7 +97,7 @@
   register: ec2
 
 - name: Add Name tag to instances
-  ec2_tag: resource={{ item.1.id }} region={{ ec2_region }} state=present
+  ec2_tag: resource={{ item.1.id }} region={{ deployment_vars[deployment_type].region }} state=present
   with_together:
   - instances
   - ec2.instances
@@ -136,29 +106,32 @@
       Name: "{{ item.0 }}"
 
 - set_fact:
-    instance_groups: "tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }}, tag_environment_{{ cluster_env }},
-                    tag_host-type_{{ host_type }}, tag_sub-host-type_{{ sub_host_type }}"
+    instance_groups: >
+      tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }},
+      tag_environment_{{ cluster_env }}, tag_host-type_{{ host_type }},
+      tag_sub-host-type_{{ sub_host_type }}
 
 - set_fact:
     node_label:
-      region: "{{ec2_region}}"
+      region: "{{ deployment_vars[deployment_type].region }}"
       type: "{{sub_host_type}}"
   when: host_type == "node"
 
 - set_fact:
     node_label:
-      region: "{{ec2_region}}"
+      region: "{{ deployment_vars[deployment_type].region }}"
       type: "{{host_type}}"
   when: host_type != "node"
 
 - set_fact:
     logrotate:
         - name: syslog
-          path: "/var/log/cron
-                 \n/var/log/maillog
-                 \n/var/log/messages
-                 \n/var/log/secure
-                 \n/var/log/spooler \n"
+          path: |
+            /var/log/cron
+            /var/log/maillog
+            /var/log/messages
+            /var/log/secure
+            /var/log/spooler
           options:
             - daily
             - rotate 7
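The surviving lookups rely on `lookup('env', NAME)` returning an empty string when NAME is unset, and on the second argument to the `default` filter treating empty values as undefined. A minimal sketch of the fallback, with a hypothetical variable name:
```
- set_fact:
    # env var wins when set and non-empty; otherwise fall back to the default
    example_type: "{{ lookup('env', 'MY_INSTANCE_TYPE') | default('m4.large', true) }}"
```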

+ 0 - 31
playbooks/aws/openshift-cluster/templates/user_data.j2

@@ -1,24 +1,4 @@
 #cloud-config
-{% if type == 'etcd' and 'etcd' in volume_defs[type] %}
-cloud_config_modules:
-- disk_setup
-- mounts
-
-mounts:
-- [ xvdb, /var/lib/etcd, xfs, "defaults" ]
-
-disk_setup:
-  xvdb:
-    table_type: mbr
-    layout: True
-
-fs_setup:
-- label: etcd_storage
-  filesystem: xfs
-  device: /dev/xvdb
-  partition: auto
-{% endif %}
-
 {% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
 mounts:
 - [ xvdb ]
@@ -33,17 +13,6 @@ write_files:
   permissions: '0644'
 {% endif %}
 
-{% if deployment_type == 'online' %}
-devices: ['/var'] # Workaround for https://bugs.launchpad.net/bugs/1455436
-
-disable_root: 0
-growpart:
-  mode: auto
-  devices: ['/var']
-runcmd:
-- xfs_growfs /var
-{% endif %}
-
 {% if deployment_vars[deployment_type].become %}
 - path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty
   permissions: 440
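The remaining write_files entry (its body is elided by this hunk) drops a sudoers fragment whose name suggests it disables `requiretty` for the SSH user, so Ansible can escalate privileges over a tty-less connection. Such fragments conventionally look like the following (an assumption about the convention, not the actual file content):
```
Defaults:<ssh_user> !requiretty
```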

+ 1 - 2
playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml

@@ -1,7 +1,6 @@
 ---
-# This playbook upgrades an existing AWS cluster, leaving nodes untouched if used with an 'online' deployment type.
 # Usage:
-#  ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=online -e cluster_id=<cluster_id>
+#  ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=<deployment_type> -e cluster_id=<cluster_id>
 - include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
   vars_files:
   - "{{lookup('file', '../../../../aws/openshift-cluster/vars.yml')}}"

+ 0 - 1
playbooks/aws/openshift-cluster/vars.defaults.yml

@@ -1 +0,0 @@
----

+ 0 - 15
playbooks/aws/openshift-cluster/vars.online.int.yml

@@ -1,15 +0,0 @@
----
-ec2_image: ami-9101c8fa
-ec2_image_name: libra-ops-rhel7*
-ec2_region: us-east-1
-ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.medium
-ec2_master_security_groups: [ 'integration', 'integration-master' ]
-ec2_infra_instance_type: c4.large
-ec2_infra_security_groups: [ 'integration', 'integration-infra' ]
-ec2_node_instance_type: m4.large
-ec2_node_security_groups: [ 'integration', 'integration-node' ]
-ec2_etcd_instance_type: m4.large
-ec2_etcd_security_groups: [ 'integration', 'integration-etcd' ]
-ec2_vpc_subnet: subnet-987c0def
-ec2_assign_public_ip: yes

+ 0 - 15
playbooks/aws/openshift-cluster/vars.online.prod.yml

@@ -1,15 +0,0 @@
----
-ec2_image: ami-9101c8fa
-ec2_image_name: libra-ops-rhel7*
-ec2_region: us-east-1
-ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.medium
-ec2_master_security_groups: [ 'production', 'production-master' ]
-ec2_infra_instance_type: c4.large
-ec2_infra_security_groups: [ 'production', 'production-infra' ]
-ec2_node_instance_type: m4.large
-ec2_node_security_groups: [ 'production', 'production-node' ]
-ec2_etcd_instance_type: m4.large
-ec2_etcd_security_groups: [ 'production', 'production-etcd' ]
-ec2_vpc_subnet: subnet-987c0def
-ec2_assign_public_ip: yes

+ 0 - 15
playbooks/aws/openshift-cluster/vars.online.stage.yml

@@ -1,15 +0,0 @@
----
-ec2_image: ami-9101c8fa
-ec2_image_name: libra-ops-rhel7*
-ec2_region: us-east-1
-ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.medium
-ec2_master_security_groups: [ 'stage', 'stage-master' ]
-ec2_infra_instance_type: c4.large
-ec2_infra_security_groups: [ 'stage', 'stage-infra' ]
-ec2_node_instance_type: m4.large
-ec2_node_security_groups: [ 'stage', 'stage-node' ]
-ec2_etcd_instance_type: m4.large
-ec2_etcd_security_groups: [ 'stage', 'stage-etcd' ]
-ec2_vpc_subnet: subnet-987c0def
-ec2_assign_public_ip: yes

+ 17 - 28
playbooks/aws/openshift-cluster/vars.yml

@@ -3,42 +3,31 @@ debug_level: 2
 
 deployment_rhel7_ent_base:
   # rhel-7.1, requires cloud access subscription
-  image: ami-10663b78
-  image_name:
-  region: us-east-1
+  image: "{{ lookup('oo_option', 'ec2_image') | default('ami-10251c7a', True) }}"
+  image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
+  region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
   ssh_user: ec2-user
   become: yes
-  keypair: libra
-  type: m4.large
-  security_groups: [ 'public' ]
-  vpc_subnet:
-  assign_public_ip:
+  keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}"
+  type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}"
+  security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}"
+  vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}"
+  assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"
 
 deployment_vars:
   origin:
     # centos-7, requires marketplace
-    image: ami-61bbf104
-    image_name:
-    region: us-east-1
+    image: "{{ lookup('oo_option', 'ec2_image') | default('ami-61bbf104', True) }}"
+    image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
+    region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
     ssh_user: centos
     become: yes
-    keypair: libra
-    type: m4.large
-    security_groups: [ 'public' ]
-    vpc_subnet:
-    assign_public_ip:
-  online:
-    # private ami
-    image: ami-7a9e9812
-    image_name: openshift-rhel7_*
-    region: us-east-1
-    ssh_user: root
-    become: no
-    keypair: libra
-    type: m4.large
-    security_groups: [ 'public' ]
-    vpc_subnet:
-    assign_public_ip:
+    keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}"
+    type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}"
+    security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}"
+    vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}"
+    assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"
+
   enterprise: "{{ deployment_rhel7_ent_base }}"
   openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
   atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
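Every previously hard-coded value in these tables now routes through the repo's custom `oo_option` lookup. Assuming that lookup is fed by the `-o/--option` flag shown in the bin/cluster excerpt above (an assumption about the wiring, not verified here), an override would look roughly like:
```
  bin/cluster create aws -o ec2_region=us-west-2 -o ec2_keypair=mykey <cluster-id>
```
where `mykey` is a hypothetical keypair name.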

+ 0 - 4
playbooks/common/openshift-cluster/additional_config.yml

@@ -27,10 +27,6 @@
       (osm_use_cockpit | bool or osm_use_cockpit is undefined )
   - role: flannel_register
     when: openshift.common.use_flannel | bool
-  - role: pods
-    when: openshift.common.deployment_type == 'online'
-  - role: os_env_extras
-    when: openshift.common.deployment_type == 'online'
 
 - name: Create persistent volumes and create hosted services
   hosts: oo_first_master

+ 0 - 7
playbooks/common/openshift-master/config.yml

@@ -367,13 +367,6 @@
     group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
     changed_when: False
 
-# Additional instance config for online deployments
-- name: Additional instance config
-  hosts: oo_masters_deployment_type_online
-  roles:
-  - pods
-  - os_env_extras
-
 - name: Delete temporary directory on localhost
   hosts: localhost
   connection: local

+ 0 - 8
playbooks/common/openshift-node/config.yml

@@ -245,14 +245,6 @@
   - file: name={{ mktemp.stdout }} state=absent
     changed_when: False
 
-# Additional config for online type deployments
-- name: Additional instance config
-  hosts: oo_nodes_deployment_type_online
-  gather_facts: no
-  roles:
-  - os_env_extras
-  - os_env_extras_node
-
 - name: Set schedulability
   hosts: oo_first_master
   vars:

+ 0 - 5
playbooks/gce/openshift-cluster/vars.yml

@@ -13,11 +13,6 @@ deployment_vars:
     machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
     ssh_user: "{{ lookup('env', 'gce_ssh_user') |  default(ansible_ssh_user, true) }}"
     become: yes
-  online:
-    image: libra-rhel7
-    machine_type: n1-standard-1
-    ssh_user: root
-    become: no
   enterprise: "{{ deployment_rhel7_ent_base }}"
   openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
   atomic-enterprise: "{{ deployment_rhel7_ent_base }}"

+ 0 - 3
playbooks/libvirt/openshift-cluster/launch.yml

@@ -12,9 +12,6 @@
     image_name: "{{ deployment_vars[deployment_type].image.name }}"
     image_compression: "{{ deployment_vars[deployment_type].image.compression }}"
   tasks:
-  - fail: msg="Deployment type not supported for libvirt provider yet"
-    when: deployment_type == 'online'
-
   - include: tasks/configure_libvirt.yml
 
   - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml

+ 1 - 1
playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml

@@ -1,7 +1,7 @@
 ---
 # TODO: Add support for choosing base image based on deployment_type and os
 # wanted (os wanted needs support added in bin/cluster with sane defaults:
-# fedora/centos for origin, rhel for online/enterprise)
+# fedora/centos for origin, rhel for enterprise)
 
 # TODO: create a role to encapsulate some of this complexity, possibly also
 # create a module to manage the storage tasks, network tasks, and possibly

+ 0 - 7
playbooks/libvirt/openshift-cluster/vars.yml

@@ -35,13 +35,6 @@ deployment_vars:
                   default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}"
     ssh_user: openshift
     become: yes
-  online:
-    image:
-      url:
-      name:
-      sha256:
-    ssh_user: root
-    become: no
   enterprise: "{{ deployment_rhel7_ent_base }}"
   openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
   atomic-enterprise: "{{ deployment_rhel7_ent_base }}"

+ 0 - 4
playbooks/openstack/openshift-cluster/launch.yml

@@ -7,10 +7,6 @@
   vars_files:
   - vars.yml
   tasks:
-  - fail:
-      msg: "Deployment type not supported for OpenStack provider yet"
-    when: deployment_type == 'online'
-
   # TODO: Write an Ansible module for dealing with HEAT stacks
   #       Dealing with the outputs is currently terrible
 

+ 0 - 4
playbooks/openstack/openshift-cluster/vars.yml

@@ -28,10 +28,6 @@ deployment_vars:
     image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}"
     ssh_user: openshift
     become: yes
-  online:
-    image:
-    ssh_user: root
-    become: no
   enterprise: "{{ deployment_rhel7_ent_base }}"
   openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
   atomic-enterprise: "{{ deployment_rhel7_ent_base }}"

+ 0 - 2
roles/os_env_extras/files/irbrc

@@ -1,2 +0,0 @@
-require 'irb/completion'
-IRB.conf[:PROMPT_MODE] = :SIMPLE

+ 0 - 12
roles/os_env_extras/files/vimrc

@@ -1,12 +0,0 @@
-set tabstop=4
-set shiftwidth=4
-set expandtab
-set list
-
-"flag problematic whitespace (trailing and spaces before tabs)
-"Note you get the same by doing let c_space_errors=1 but
-"this rule really applies to everything.
-highlight RedundantSpaces term=standout ctermbg=red guibg=red
-match RedundantSpaces /\s\+$\| \+\ze\t/ "\ze sets end of match so only spaces highlighted
-"use :set list! to toggle visible whitespace on/off
-set listchars=tab:>-,trail:.,extends:>

+ 0 - 16
roles/os_env_extras/tasks/main.yaml

@@ -1,16 +0,0 @@
----
-# environment configuration role, configures irbrc, vimrc
-
-- name: Ensure irbrc is installed for user root
-  copy:
-    src: irbrc
-    dest: /root/.irbrc
-
-- name: Ensure vimrc is installed for user root
-  copy:
-    src: vimrc
-    dest: /root/.vimrc
-
-- name: Bash Completion
-  action: "{{ ansible_pkg_mgr }} name=bash-completion state=present"
-  when: not openshift.common.is_containerized | bool

+ 0 - 5
roles/os_env_extras_node/tasks/main.yml

@@ -1,5 +0,0 @@
----
-# From the origin rpm there exists instructions on how to
-# setup origin properly.  The following steps come from there
-- name: Change root to be in the Docker group
-  user: name=root groups=dockerroot append=yes

+ 0 - 38
roles/pods/README.md

@@ -1,38 +0,0 @@
-Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
-    - hosts: servers
-      roles:
-         - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).

+ 0 - 30
roles/pods/files/pods/docker-registry.json

@@ -1,30 +0,0 @@
-{
-  "kind": "Pod",
-  "version": "v1beta2",
-  "desiredState": {
-    "manifest": {
-      "version": "v1beta1",
-      "containers": [{
-        "name": "docker-registry",
-        "image": "registry",
-        "volumeMounts": [{
-            "name": "data",
-            "mountPath": "/var/lib/docker-registry"
-         }],
-        "ports": [{
-          "containerPort": 5000,
-          "hostPort": 9999
-        }]
-      }],
-      "volumes": [{
-        "name": "data",
-        "source": {
-          "emptyDir": true
-        }
-      }]
-    }
-  },
-  "labels": {
-    "name": "docker-registry"
-  }
-}

+ 0 - 23
roles/pods/files/pods/fedora_apache.json

@@ -1,23 +0,0 @@
-{
-  "id": "apache",
-  "kind": "Pod",
-  "apiVersion": "v1beta2",
-  "desiredState": {
-    "manifest": {
-      "version": "v1beta1",
-      "id": "apache-1",
-      "containers": [{
-        "name": "master",
-        "image": "fedora/apache",
-        "ports": [{
-          "containerPort": 80,
-          "hostPort": 80
-        }]
-      }]
-    }
-  },
-  "labels": {
-    "name": "apache",
-    "distro": "fedora"
-  }
-}

+ 0 - 23
roles/pods/files/pods/frontend-controller.json

@@ -1,23 +0,0 @@
-{
-  "id": "frontendController",
-  "kind": "ReplicationController",
-  "apiVersion": "v1beta2",
-  "desiredState": {
-    "replicas": 2,
-    "replicaSelector": {"name": "frontend"},
-    "podTemplate": {
-      "desiredState": {
-         "manifest": {
-           "version": "v1beta1",
-           "id": "frontendController",
-           "containers": [{
-             "name": "php-redis",
-             "image": "brendanburns/php-redis",
-             "ports": [{"containerPort": 80, "hostPort": 8000}]
-           }]
-         }
-       },
-       "labels": {"name": "frontend"}
-      }},
-  "labels": {"name": "frontend"}
-}

+ 0 - 10
roles/pods/files/pods/redis-master-service.json

@@ -1,10 +0,0 @@
-{
-  "id": "redismaster",
-  "kind": "Service",
-  "apiVersion": "v1beta2",
-  "port": 10000,
-  "containerPort": 6379,
-  "selector": {
-    "name": "redis-master"
-  }
-}

+ 0 - 22
roles/pods/files/pods/redis-master.json

@@ -1,22 +0,0 @@
-{
-  "id": "redis-master-2",
-  "kind": "Pod",
-  "apiVersion": "v1beta2",
-  "desiredState": {
-    "manifest": {
-      "version": "v1beta1",
-      "id": "redis-master-2",
-      "containers": [{
-        "name": "master",
-        "image": "dockerfile/redis",
-        "ports": [{
-          "containerPort": 6379,
-          "hostPort": 6379
-        }]
-      }]
-    }
-  },
-  "labels": {
-    "name": "redis-master"
-  }
-}

+ 0 - 24
roles/pods/files/pods/redis-slave-controller.json

@@ -1,24 +0,0 @@
-{
-  "id": "redisSlaveController",
-  "kind": "ReplicationController",
-  "apiVersion": "v1beta2",
-  "desiredState": {
-    "replicas": 2,
-    "replicaSelector": {"name": "redisslave"},
-    "podTemplate": {
-      "desiredState": {
-         "manifest": {
-           "version": "v1beta1",
-           "id": "redisSlaveController",
-           "containers": [{
-             "name": "slave",
-             "image": "brendanburns/redis-slave",
-             "ports": [{"containerPort": 6379, "hostPort": 6380}]
-           }]
-         }
-       },
-       "labels": {"name": "redisslave"}
-      }},
-  "labels": {"name": "redisslave"}
-}
-

+ 0 - 13
roles/pods/files/pods/redis-slave-service.json

@@ -1,13 +0,0 @@
-{
-  "id": "redisslave",
-  "kind": "Service",
-  "apiVersion": "v1beta2",
-  "port": 10001,
-  "containerPort": 6379,
-  "labels": {
-    "name": "redisslave"
-  },
-  "selector": {
-    "name": "redisslave"
-  }
-}

+ 0 - 10
roles/pods/files/pods/registry-service.json

@@ -1,10 +0,0 @@
-{
-  "id": "dockerregistry",
-  "kind": "Service",
-  "apiVersion": "v1beta2",
-  "port": 8888,
-  "selector": {
-    "name": "docker-registry"
-  }
-}
-

+ 0 - 124
roles/pods/meta/main.yml

@@ -1,124 +0,0 @@
----
-galaxy_info:
-  author: your name
-  description:
-  company: your company (optional)
-  # Some suggested licenses:
-  # - BSD (default)
-  # - MIT
-  # - GPLv2
-  # - GPLv3
-  # - Apache
-  # - CC-BY
-  license: license (GPLv2, CC-BY, etc)
-  min_ansible_version: 1.2
-  #
-  # Below are all platforms currently available. Just uncomment
-  # the ones that apply to your role. If you don't see your
-  # platform on this list, let us know and we'll get it added!
-  #
-  #platforms:
-  #- name: EL
-  #  versions:
-  #  - all
-  #  - 5
-  #  - 6
-  #  - 7
-  #- name: GenericUNIX
-  #  versions:
-  #  - all
-  #  - any
-  #- name: Fedora
-  #  versions:
-  #  - all
-  #  - 16
-  #  - 17
-  #  - 18
-  #  - 19
-  #  - 20
-  #- name: opensuse
-  #  versions:
-  #  - all
-  #  - 12.1
-  #  - 12.2
-  #  - 12.3
-  #  - 13.1
-  #  - 13.2
-  #- name: Amazon
-  #  versions:
-  #  - all
-  #  - 2013.03
-  #  - 2013.09
-  #- name: GenericBSD
-  #  versions:
-  #  - all
-  #  - any
-  #- name: FreeBSD
-  #  versions:
-  #  - all
-  #  - 8.0
-  #  - 8.1
-  #  - 8.2
-  #  - 8.3
-  #  - 8.4
-  #  - 9.0
-  #  - 9.1
-  #  - 9.1
-  #  - 9.2
-  #- name: Ubuntu
-  #  versions:
-  #  - all
-  #  - lucid
-  #  - maverick
-  #  - natty
-  #  - oneiric
-  #  - precise
-  #  - quantal
-  #  - raring
-  #  - saucy
-  #  - trusty
-  #- name: SLES
-  #  versions:
-  #  - all
-  #  - 10SP3
-  #  - 10SP4
-  #  - 11
-  #  - 11SP1
-  #  - 11SP2
-  #  - 11SP3
-  #- name: GenericLinux
-  #  versions:
-  #  - all
-  #  - any
-  #- name: Debian
-  #  versions:
-  #  - all
-  #  - etch
-  #  - lenny
-  #  - squeeze
-  #  - wheezy
-  #
-  # Below are all categories currently available. Just as with
-  # the platforms above, uncomment those that apply to your role.
-  #
-  #categories:
-  #- cloud
-  #- cloud:ec2
-  #- cloud:gce
-  #- cloud:rax
-  #- clustering
-  #- database
-  #- database:nosql
-  #- database:sql
-  #- development
-  #- monitoring
-  #- networking
-  #- packaging
-  #- system
-  #- web
-dependencies: []
-  # List your role dependencies here, one per line. Only
-  # dependencies available via galaxy should be listed here.
-  # Be sure to remove the '[]' above if you add dependencies
-  # to this list.
-

+ 0 - 6
roles/pods/tasks/main.yml

@@ -1,6 +0,0 @@
----
-- name: Transfer the fedora_apache pod template
-  file: path=/usr/local/etc/pods state=directory
-
-- name: Transfer the fedora_apache pod template
-  copy: directory_mode=on src=pods/ dest=/usr/local/etc/pods/