
Merge branch 'master' into tower_install

Conflicts:
	lib/aws_command.rb
Matt Woodson, 10 years ago
commit 41ad58a9b9
71 changed files with 353 additions and 954 deletions
  1. README.md (+1 -1)
  2. README_GCE.md (+1 -1)
  3. cluster.sh (+12 -12)
  4. lib/aws_command.rb (+1 -1)
  5. lib/gce_command.rb (+10 -4)
  6. lib/gce_helper.rb (+25 -7)
  7. playbooks/aws/openshift-master/config.yml (+7 -7)
  8. playbooks/aws/openshift-minion/config.yml (+1 -1)
  9. playbooks/aws/openshift-node/launch.yml (+0 -0)
  10. playbooks/aws/openshift-node/vars.yml (+0 -0)
  11. playbooks/gce/openshift-master/config.yml (+9 -8)
  12. playbooks/gce/openshift-minion/config.yml (+11 -2)
  13. playbooks/gce/openshift-minion/launch.yml (+10 -0)
  14. playbooks/gce/openshift-minion/terminate.yml (+1 -1)
  15. playbooks/gce/openshift-node/vars.yml (+0 -0)
  16. roles/base_os/tasks/main.yaml (+14 -9)
  17. roles/docker/tasks/main.yml (+1 -1)
  18. roles/kubernetes_apiserver/README.md (+0 -38)
  19. roles/kubernetes_apiserver/defaults/main.yml (+0 -2)
  20. roles/kubernetes_apiserver/handlers/main.yml (+0 -4)
  21. roles/kubernetes_apiserver/meta/main.yml (+0 -124)
  22. roles/kubernetes_apiserver/tasks/main.yml (+0 -25)
  23. roles/kubernetes_apiserver/vars/main.yml (+0 -2)
  24. roles/kubernetes_controller_manager/README.md (+0 -38)
  25. roles/kubernetes_controller_manager/defaults/main.yml (+0 -2)
  26. roles/kubernetes_controller_manager/handlers/main.yml (+0 -5)
  27. roles/kubernetes_controller_manager/meta/main.yml (+0 -124)
  28. roles/kubernetes_controller_manager/tasks/main.yml (+0 -7)
  29. roles/kubernetes_controller_manager/vars/main.yml (+0 -2)
  30. roles/kubernetes_kubelet/README.md (+0 -38)
  31. roles/kubernetes_kubelet/defaults/main.yml (+0 -2)
  32. roles/kubernetes_kubelet/files/kubelet.service (+0 -10)
  33. roles/kubernetes_kubelet/handlers/main.yml (+0 -4)
  34. roles/kubernetes_kubelet/meta/main.yml (+0 -124)
  35. roles/kubernetes_kubelet/tasks/main.yml (+0 -31)
  36. roles/kubernetes_kubelet/templates/cadvisor.manifest (+0 -33)
  37. roles/kubernetes_kubelet/templates/kubelet (+0 -3)
  38. roles/kubernetes_kubelet/vars/main.yml (+0 -2)
  39. roles/kubernetes_proxy/README.md (+0 -38)
  40. roles/kubernetes_proxy/defaults/main.yml (+0 -2)
  41. roles/kubernetes_proxy/handlers/main.yml (+0 -4)
  42. roles/kubernetes_proxy/meta/main.yml (+0 -124)
  43. roles/kubernetes_proxy/tasks/main.yml (+0 -17)
  44. roles/kubernetes_proxy/vars/main.yml (+0 -2)
  45. roles/openshift_master/handlers/main.yml (+1 -1)
  46. roles/openshift_master/tasks/main.yml (+19 -16)
  47. roles/openshift_master/vars/main.yml (+1 -1)
  48. roles/openshift_minion/defaults/main.yml (+0 -2)
  49. roles/openshift_minion/handlers/main.yml (+0 -4)
  50. roles/openshift_minion/tasks/main.yml (+0 -29)
  51. roles/openshift_minion/vars/main.yml (+0 -2)
  52. roles/openshift_node/README.md (+0 -0)
  53. roles/openshift_node/defaults/main.yml (+2 -0)
  54. roles/openshift_node/handlers/main.yml (+4 -0)
  55. roles/openshift_node/meta/main.yml (+0 -0)
  56. roles/openshift_node/tasks/main.yml (+45 -0)
  57. roles/openshift_node/vars/main.yml (+2 -0)
  58. roles/repos/defaults/main.yaml (+5 -0)
  59. roles/repos/files/docker.repo (+0 -5)
  60. roles/repos/files/online/RPM-GPG-KEY-redhat-beta (+61 -0)
  61. roles/repos/files/online/RPM-GPG-KEY-redhat-release (+0 -0)
  62. roles/repos/files/online/epel7-kubernetes.repo (+0 -0)
  63. roles/repos/files/online/epel7-openshift.repo (+0 -0)
  64. roles/repos/files/online/oso-rhui-rhel-7-extras.repo (+23 -0)
  65. roles/repos/files/online/oso-rhui-rhel-7-server.repo (+21 -0)
  66. roles/repos/files/online/rhel-7-libra-candidate.repo (+11 -0)
  67. roles/repos/files/oso-rhui-rhel-7-server.repo (+0 -13)
  68. roles/repos/files/rhel-7-libra-candidate.repo (+0 -10)
  69. roles/repos/tasks/main.yaml (+37 -9)
  70. roles/repos/templates/yum_repo.j2 (+15 -0)
  71. roles/repos/vars/main.yml (+2 -0)

+ 1 - 1
README.md

@@ -1,7 +1,7 @@
 openshift-online-ansible
 ========================
 
-This repo houses Ansible code used in OpenShift Online.
+This repo contains OpenShift Ansible code.
 
 Setup
 -----

+ 1 - 1
README_GCE.md

@@ -72,5 +72,5 @@ Test The Setup
 
 3. Try to create an instance:
 ```
-  ./cloud.rb gce launch -n ${USER}-minion1 -e int --type os3-minion
+  ./cloud.rb gce launch -n ${USER}-node1 -e int --type os3-node
 ```

+ 12 - 12
cluster.sh

@@ -1,6 +1,6 @@
 #!/bin/bash -eu
 
-MINIONS=2
+NODES=2
 MASTERS=1
 
 # If the environment variable OO_PROVDER is defined, it used for the provider
@@ -13,10 +13,10 @@ fi
 UPPER_CASE_PROVIDER=$(echo $PROVIDER | tr '[:lower:]' '[:upper:]')
 
 
-# Use OO_MASTER_PLAYBOOK/OO_MINION_PLAYBOOK environment variables for playbooks if defined,
+# Use OO_MASTER_PLAYBOOK/OO_NODE_PLAYBOOK environment variables for playbooks if defined,
 # otherwise use openshift default values.
 MASTER_PLAYBOOK=${OO_MASTER_PLAYBOOK:-'openshift-master'}
-MINION_PLAYBOOK=${OO_MINION_PLAYBOOK:-'openshift-minion'}
+NODE_PLAYBOOK=${OO_NODE_PLAYBOOK:-'openshift-node'}
 
 
 # @formatter:off
@@ -29,10 +29,10 @@ function usage {
         $([ $? -ne 0 ] && echo "No supported environment tags found for ${PROVIDER}")
 
         Optional arguments for create:
-        [-p|--provider, -m|--masters, -n|--minions, --master-playbook, --minion-playbook]
+        [-p|--provider, -m|--masters, -n|--nodes, --master-playbook, --node-playbook]
 
         Optional arguments for terminate|update:
-        [-p|--provider, --master-playbook, --minion-playbook]
+        [-p|--provider, --master-playbook, --node-playbook]
 EOT
 }
 # @formatter:on
@@ -40,21 +40,21 @@ EOT
 function create_cluster {
     ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$MASTER_PLAYBOOK -c $MASTERS
 
-    ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$MINION_PLAYBOOK -c $MINIONS
+    ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$NODE_PLAYBOOK -c $NODES
 
     update_cluster
 
-    echo -e "\nCreated ${MASTERS}/${MASTER_PLAYBOOK} masters and ${MINIONS}/${MINION_PLAYBOOK} minions using ${PROVIDER} provider\n"
+    echo -e "\nCreated ${MASTERS}/${MASTER_PLAYBOOK} masters and ${NODES}/${NODE_PLAYBOOK} nodes using ${PROVIDER} provider\n"
 }
 
 function update_cluster {
     ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$MASTER_PLAYBOOK
-    ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$MINION_PLAYBOOK
+    ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$NODE_PLAYBOOK
 }
 
 function terminate_cluster {
     ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$MASTER_PLAYBOOK
-    ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$MINION_PLAYBOOK
+    ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$NODE_PLAYBOOK
 }
 
 [ -f ./cloud.rb ] || (echo 1>&2 'Cannot find ./cloud.rb' && exit 1)
@@ -68,7 +68,7 @@ function check_argval {
 }
 
 # Using GNU getopt to support both small and long formats
-OPTIONS=`getopt -o p:m:n:h --long provider:,masters:,minions:,master-playbook:,minion-playbook:,help \
+OPTIONS=`getopt -o p:m:n:h --long provider:,masters:,nodes:,master-playbook:,node-playbook:,help \
 	        -n "$0" -- "$@"`
 eval set -- "$OPTIONS"
 
@@ -77,9 +77,9 @@ while true; do
         -h|--help) (usage; exit 1) ; shift ;;
         -p|--provider) PROVIDER="$2" ; check_argval $2 ; shift 2 ;;
         -m|--masters) MASTERS="$2" ; check_argval $2 ; shift 2 ;;
-        -n|--minions) MINIONS="$2" ; check_argval $2 ; shift 2 ;;
+        -n|--nodes) NODES="$2" ; check_argval $2 ; shift 2 ;;
         --master-playbook) MASTER_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;;
-        --minion-playbook) MINION_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;;
+        --node-playbook) NODE_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;;
         --) shift ; break ;;
         *) break ;;
     esac

+ 1 - 1
lib/aws_command.rb

@@ -7,7 +7,7 @@ module OpenShift
   module Ops
     class AwsCommand < Thor
       # WARNING: we do not currently support environments with hyphens in the name
-      SUPPORTED_ENVS = %w(prod stg int ops tint kint test jint amint tdint lint)
+      SUPPORTED_ENVS = %w(prod stg int twiest gshipley kint test jhonce amint tdint lint)
 
       option :type, :required => true, :enum => LaunchHelper.get_aws_host_types,
              :desc => 'The host type of the new instances.'

+ 10 - 4
lib/gce_command.rb

@@ -10,7 +10,7 @@ module OpenShift
   module Ops
     class GceCommand < Thor
       # WARNING: we do not currently support environments with hyphens in the name
-      SUPPORTED_ENVS = %w(prod stg int tint kint test jint amint tdint lint)
+      SUPPORTED_ENVS = %w(prod stg int twiest gshipley kint test jhonce amint tdint lint)
 
       option :type, :required => true, :enum => LaunchHelper.get_gce_host_types,
              :desc => 'The host type of the new instances.'
@@ -120,14 +120,20 @@ module OpenShift
         ah.run_playbook("playbooks/gce/#{host_type}/terminate.yml")
       end
 
+      option :env, :required => false, :aliases => '-e', :enum => SUPPORTED_ENVS,
+             :desc => 'The environment to list.'
       desc "list", "Lists instances."
       def list()
         hosts = GceHelper.get_hosts()
 
+        hosts.delete_if { |h| h.env != options[:env] } unless options[:env].nil?
+
+        fmt_str = "%34s %5s %8s %17s %7s"
+
         puts
-        puts "Instances"
-        puts "---------"
-        hosts.each { |k| puts "  #{k.name}" }
+        puts fmt_str % ['Name','Env', 'State', 'IP Address', 'Created By']
+        puts fmt_str % ['----','---', '-----', '----------', '----------']
+        hosts.each { |h| puts fmt_str % [h.name, h.env, h.state, h.public_ip, h.created_by ] }
         puts
       end
 

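With the new optional `--env` filter, `list` can be limited to a single environment and now prints a formatted table instead of bare names. A minimal usage sketch, assuming `cloud.rb` dispatches to `GceCommand` as shown in README_GCE.md above:

```
./cloud.rb gce list           # tabular listing of all GCE instances
./cloud.rb gce list -e int    # only instances in the 'int' environment
```
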
+ 25 - 7
lib/gce_helper.rb

@@ -5,23 +5,41 @@ module OpenShift
     class GceHelper
       MYDIR = File.expand_path(File.dirname(__FILE__))
 
-      def self.get_hosts()
+      def self.get_list()
         cmd = "#{MYDIR}/../inventory/gce/gce.py --list"
         hosts = %x[#{cmd} 2>&1]
 
         raise "Error: failed to list hosts\n#{hosts}" unless $?.exitstatus == 0
 
-        # invert the hash so that it's key is the host, and values is an array of metadata
-        data = {}
-        JSON.parse(hosts).each do |key,value|
-          value.each { |h| (data[h] ||= []) << key }
+        return JSON.parse(hosts)
+      end
+
+      def self.get_tag(tags, selector)
+        tags.each do |tag|
+          return $1 if tag =~ selector
         end
 
-        # For now, we only care about the name. In the future, we may want the other metadata included.
+        return nil
+      end
+
+      def self.get_hosts()
+        hosts = get_list()
+
         retval = []
-        data.keys.sort.each { |k| retval << OpenStruct.new({ :name => k }) }
+        hosts['_meta']['hostvars'].each do |host, info|
+          retval << OpenStruct.new({
+            :name        => info['gce_name'],
+            :env         => get_tag(info['gce_tags'], /^env-(\w+)$/) || 'UNSET',
+            :public_ip   => info['gce_public_ip'],
+            :state       => info['gce_status'],
+            :created_by  => get_tag(info['gce_tags'], /^created-by-(\w+)$/) || 'UNSET',
+          })
+        end
+
+        retval.sort_by! { |h| [h.env, h.state, h.name] }
 
         return retval
+
       end
 
       def self.get_host_details(host)

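A brief illustration of the tag parsing added above (hypothetical tag values, not part of this commit): `get_tag` returns the first capture group of the first tag matching the selector, and `nil` when nothing matches, which `get_hosts` then maps to `'UNSET'`.

```
GceHelper.get_tag(['env-int', 'created-by-someuser'], /^env-(\w+)$/)
# => "int"
GceHelper.get_tag(['env-int', 'created-by-someuser'], /^created-by-(\w+)$/)
# => "someuser"
```
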
+ 7 - 7
playbooks/aws/openshift-master/config.yml

@@ -7,8 +7,8 @@
     with_items: "{{ oo_host_group_exp | default('') }}"
     when: oo_host_group_exp is defined
 
-- name: "Gather facts for minions in {{ oo_env }}"
-  hosts: "tag_env-host-type_{{ oo_env }}-openshift-minion"
+- name: "Gather facts for nodes in {{ oo_env }}"
+  hosts: "tag_env-host-type_{{ oo_env }}-openshift-node"
   connection: ssh
   user: root
 
@@ -16,12 +16,12 @@
   hosts: localhost
   gather_facts: no
   tasks:
-    - name: Setting oo_minion_ips fact on localhost
+    - name: Setting oo_node_ips fact on localhost
       set_fact:
-        oo_minion_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-minion'])
+        oo_node_ips: "{{ hostvars
+            | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-node'])
             | oo_collect(attribute='ansible_eth0.ipv4.address') }}"
-      when: groups['tag_env-host-type_' + oo_env + '-openshift-minion'] is defined
+      when: groups['tag_env-host-type_' + oo_env + '-openshift-node'] is defined
 
 - name: "Configure instances"
   hosts: oo_hosts_to_config
@@ -34,7 +34,7 @@
     - ../../../roles/repos
     - {
         role: ../../../roles/openshift_master,
-        oo_minion_ips: "{{ hostvars['localhost'].oo_minion_ips | default(['']) }}",
+        oo_node_ips: "{{ hostvars['localhost'].oo_node_ips | default(['']) }}",
         oo_bind_ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address | default(['']) }}"
       }
     - ../../../roles/pods

+ 1 - 1
playbooks/aws/openshift-minion/config.yml

@@ -34,7 +34,7 @@
     - ../../../roles/repos
     - ../../../roles/docker
     - {
-        role: ../../../roles/openshift_minion,
+        role: ../../../roles/openshift_node,
         oo_master_ips: "{{ hostvars['localhost'].oo_master_ips | default(['']) }}",
         oo_bind_ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address | default(['']) }}"
       }

playbooks/aws/openshift-minion/launch.yml → playbooks/aws/openshift-node/launch.yml


playbooks/gce/openshift-minion/vars.yml → playbooks/aws/openshift-node/vars.yml


+ 9 - 8
playbooks/gce/openshift-master/config.yml

@@ -7,8 +7,8 @@
     with_items: "{{ oo_host_group_exp | default('') }}"
     when: oo_host_group_exp is defined
 
-- name: "Gather facts for minions in {{ oo_env }}"
-  hosts: "tag_env-host-type-{{ oo_env }}-openshift-minion"
+- name: "Gather facts for nodes in {{ oo_env }}"
+  hosts: "tag_env-host-type-{{ oo_env }}-openshift-node"
   connection: ssh
   user: root
 
@@ -16,12 +16,12 @@
   hosts: localhost
   gather_facts: no
   tasks:
-    - name: Setting oo_minion_ips fact on localhost
+    - name: Setting oo_node_ips fact on localhost
       set_fact:
-        oo_minion_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-minion'])
+        oo_node_ips: "{{ hostvars
+            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-node'])
             | oo_collect(attribute='ansible_eth0.ipv4.address') }}"
-      when: groups['tag_env-host-type-' + oo_env + '-openshift-minion'] is defined
+      when: groups['tag_env-host-type-' + oo_env + '-openshift-node'] is defined
 
 - name: "Configure instances"
   hosts: oo_hosts_to_config
@@ -34,7 +34,8 @@
     - ../../../roles/repos
     - {
         role: ../../../roles/openshift_master,
-        oo_minion_ips: "{{ hostvars['localhost'].oo_minion_ips | default(['']) }}",
-        oo_bind_ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address | default(['']) }}"
+        oo_node_ips: "{{ hostvars['localhost'].oo_node_ips | default(['']) }}",
+        oo_bind_ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address | default(['']) }}",
+        oo_public_ip: "{{ gce_public_ip }}"
       }
     - ../../../roles/pods

+ 11 - 2
playbooks/gce/openshift-minion/config.yml

@@ -1,6 +1,7 @@
 - name: "populate oo_hosts_to_config host group if needed"
   hosts: localhost
   gather_facts: no
+
   tasks:
   - name: Evaluate oo_host_group_exp
     add_host: "name={{ item }} groups=oo_hosts_to_config"
@@ -22,6 +23,12 @@
             | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
             | oo_collect(attribute='ansible_eth0.ipv4.address') }}"
       when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
+    - name: Setting oo_master_public_ips fact on localhost
+      set_fact:
+        oo_master_public_ips: "{{ hostvars
+            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
+            | oo_collect(attribute='gce_public_ip') }}"
+      when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
 
 - name: "Configure instances"
   hosts: oo_hosts_to_config
@@ -34,7 +41,9 @@
     - ../../../roles/repos
     - ../../../roles/docker
     - {
-        role: ../../../roles/openshift_minion,
+        role: ../../../roles/openshift_node,
         oo_master_ips: "{{ hostvars['localhost'].oo_master_ips | default(['']) }}",
-        oo_bind_ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address | default(['']) }}"
+        oo_master_public_ips: "{{ hostvars['localhost'].oo_master_public_ips | default(['']) }}",
+        oo_bind_ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address | default(['']) }}",
+        oo_public_ip: "{{ hostvars[inventory_hostname].ansible_ssh_host }}"
       }

+ 10 - 0
playbooks/gce/openshift-minion/launch.yml

@@ -45,3 +45,13 @@
 
 # Apply the configs, separate so that just the configs can be run by themselves
 - include: config.yml
+
+# Always bounce service to pick up new credentials
+#- name: "Restart instances"
+#  hosts: oo_hosts_to_config
+#  connection: ssh
+#  user: root
+#  tasks:
+#    - debug: var=groups.oo_hosts_to_config
+#    - name: Restart OpenShift
+#      service: name=openshift-node enabled=yes state=restarted

+ 1 - 1
playbooks/gce/openshift-minion/terminate.yml

@@ -16,7 +16,7 @@
   hosts: localhost
   connection: local
   tasks:
-    - name: Terminate minion instances
+    - name: Terminate node instances
       gce:
         service_account_email: "{{ gce_service_account_email }}"
         pem_file: "{{ gce_pem_file }}"

playbooks/aws/openshift-minion/vars.yml → playbooks/gce/openshift-node/vars.yml


+ 14 - 9
roles/base_os/tasks/main.yaml

@@ -11,10 +11,13 @@
     src: vimrc
     dest: /root/.vimrc
 
-- name: Ensure vimrc is installed for user root
-  copy:
-    src: vimrc
-    dest: /root/.vimrc
+- name: Add KUBECONFIG to .bash_profile for user root
+  lineinfile:
+    dest: /root/.bash_profile
+    regexp: "KUBECONFIG="
+    line: "export KUBECONFIG=/var/lib/openshift/openshift.local.certificates/admin/.kubeconfig"
+    state: present
+    insertafter: EOF
 
 - name: Bash Completion
   yum:
@@ -26,11 +29,13 @@
     pkg: firewalld
     state: installed
 
-- name: enable firewalld service
-  command: /usr/bin/systemctl enable firewalld.service
-
-- name: start firewalld service
-  command: /usr/bin/systemctl start firewalld.service
+- name: start and enable firewalld service
+  service:
+    name: firewalld
+    state: started
+    enabled: yes
+  register: result
 
 - name: need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail
   pause: seconds=10
+  when: result | changed

+ 1 - 1
roles/docker/tasks/main.yml

@@ -1,7 +1,7 @@
 ---
 # tasks file for docker
 - name: Install docker
-  yum: pkg=docker
+  yum: pkg=docker-io
 
 - name: enable docker service
   command: /usr/bin/systemctl enable docker.service

+ 0 - 38
roles/kubernetes_apiserver/README.md

@@ -1,38 +0,0 @@
-Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
-    - hosts: servers
-      roles:
-         - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).

+ 0 - 2
roles/kubernetes_apiserver/defaults/main.yml

@@ -1,2 +0,0 @@
----
-# defaults file for kubernetes_apiserver

+ 0 - 4
roles/kubernetes_apiserver/handlers/main.yml

@@ -1,4 +0,0 @@
----
-# handlers file for kubernetes_apiserver
-- name: restart kubernetes-apiserver
-  service: name=kubernetes-apiserver state=restarted

+ 0 - 124
roles/kubernetes_apiserver/meta/main.yml

@@ -1,124 +0,0 @@
----
-galaxy_info:
-  author: your name
-  description: 
-  company: your company (optional)
-  # Some suggested licenses:
-  # - BSD (default)
-  # - MIT
-  # - GPLv2
-  # - GPLv3
-  # - Apache
-  # - CC-BY
-  license: license (GPLv2, CC-BY, etc)
-  min_ansible_version: 1.2
-  #
-  # Below are all platforms currently available. Just uncomment
-  # the ones that apply to your role. If you don't see your 
-  # platform on this list, let us know and we'll get it added!
-  #
-  #platforms:
-  #- name: EL
-  #  versions:
-  #  - all
-  #  - 5
-  #  - 6
-  #  - 7
-  #- name: GenericUNIX
-  #  versions:
-  #  - all
-  #  - any
-  #- name: Fedora
-  #  versions:
-  #  - all
-  #  - 16
-  #  - 17
-  #  - 18
-  #  - 19
-  #  - 20
-  #- name: opensuse
-  #  versions:
-  #  - all
-  #  - 12.1
-  #  - 12.2
-  #  - 12.3
-  #  - 13.1
-  #  - 13.2
-  #- name: Amazon
-  #  versions:
-  #  - all
-  #  - 2013.03
-  #  - 2013.09
-  #- name: GenericBSD
-  #  versions:
-  #  - all
-  #  - any
-  #- name: FreeBSD
-  #  versions:
-  #  - all
-  #  - 8.0
-  #  - 8.1
-  #  - 8.2
-  #  - 8.3
-  #  - 8.4
-  #  - 9.0
-  #  - 9.1
-  #  - 9.1
-  #  - 9.2
-  #- name: Ubuntu
-  #  versions:
-  #  - all
-  #  - lucid
-  #  - maverick
-  #  - natty
-  #  - oneiric
-  #  - precise
-  #  - quantal
-  #  - raring
-  #  - saucy
-  #  - trusty
-  #- name: SLES
-  #  versions:
-  #  - all
-  #  - 10SP3
-  #  - 10SP4
-  #  - 11
-  #  - 11SP1
-  #  - 11SP2
-  #  - 11SP3
-  #- name: GenericLinux
-  #  versions:
-  #  - all
-  #  - any
-  #- name: Debian
-  #  versions:
-  #  - all
-  #  - etch
-  #  - lenny
-  #  - squeeze
-  #  - wheezy
-  #
-  # Below are all categories currently available. Just as with
-  # the platforms above, uncomment those that apply to your role.
-  #
-  #categories:
-  #- cloud
-  #- cloud:ec2
-  #- cloud:gce
-  #- cloud:rax
-  #- clustering
-  #- database
-  #- database:nosql
-  #- database:sql
-  #- development
-  #- monitoring
-  #- networking
-  #- packaging
-  #- system
-  #- web
-dependencies: []
-  # List your role dependencies here, one per line. Only
-  # dependencies available via galaxy should be listed here.
-  # Be sure to remove the '[]' above if you add dependencies
-  # to this list.
-  

+ 0 - 25
roles/kubernetes_apiserver/tasks/main.yml

@@ -1,25 +0,0 @@
----
-# tasks file for kubernetes_apiserver
-- name: Install kubernetes
-  yum: pkg=kubernetes
-
-- name: Configure apiserver settings
-  lineinfile:
-    dest: /etc/sysconfig/kubernetes
-    regexp: "{{ item.regex }}"
-    line: "{{ item.line }}"
-  with_items:
-    - { regex: '^KUBE_API_MACHINES=', line: 'KUBE_API_MACHINES=\"{{ oo_minion_ips | join(",") }}\"' }
-    - { regex: '^KUBE_API_ADDRESS=',  line: 'KUBE_API_ADDRESS=\"0.0.0.0\"' }
-  notify:
-    - restart kubernetes-apiserver
-
-- name: Enable apiserver
-  service: name=kubernetes-apiserver enabled=yes state=started
-
-- name: Open firewalld port for apiserver
-  firewalld: port=8080/tcp permanent=false state=enabled
-
-- name: Save firewalld port for apiserver
-  firewalld: port=8080/tcp permanent=true state=enabled
-

+ 0 - 2
roles/kubernetes_apiserver/vars/main.yml

@@ -1,2 +0,0 @@
----
-# vars file for kubernetes_apiserver

+ 0 - 38
roles/kubernetes_controller_manager/README.md

@@ -1,38 +0,0 @@
-Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
-    - hosts: servers
-      roles:
-         - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).

+ 0 - 2
roles/kubernetes_controller_manager/defaults/main.yml

@@ -1,2 +0,0 @@
----
-# defaults file for kubernetes_controller_manager

+ 0 - 5
roles/kubernetes_controller_manager/handlers/main.yml

@@ -1,5 +0,0 @@
----
-# handlers file for kubernetes_controller_manager
-- name: restart kubernetes-controller-manager
-  service: name=kubernetes-controller-manager state=restarted
-

+ 0 - 124
roles/kubernetes_controller_manager/meta/main.yml

@@ -1,124 +0,0 @@
----
-galaxy_info:
-  author: your name
-  description: 
-  company: your company (optional)
-  # Some suggested licenses:
-  # - BSD (default)
-  # - MIT
-  # - GPLv2
-  # - GPLv3
-  # - Apache
-  # - CC-BY
-  license: license (GPLv2, CC-BY, etc)
-  min_ansible_version: 1.2
-  #
-  # Below are all platforms currently available. Just uncomment
-  # the ones that apply to your role. If you don't see your 
-  # platform on this list, let us know and we'll get it added!
-  #
-  #platforms:
-  #- name: EL
-  #  versions:
-  #  - all
-  #  - 5
-  #  - 6
-  #  - 7
-  #- name: GenericUNIX
-  #  versions:
-  #  - all
-  #  - any
-  #- name: Fedora
-  #  versions:
-  #  - all
-  #  - 16
-  #  - 17
-  #  - 18
-  #  - 19
-  #  - 20
-  #- name: opensuse
-  #  versions:
-  #  - all
-  #  - 12.1
-  #  - 12.2
-  #  - 12.3
-  #  - 13.1
-  #  - 13.2
-  #- name: Amazon
-  #  versions:
-  #  - all
-  #  - 2013.03
-  #  - 2013.09
-  #- name: GenericBSD
-  #  versions:
-  #  - all
-  #  - any
-  #- name: FreeBSD
-  #  versions:
-  #  - all
-  #  - 8.0
-  #  - 8.1
-  #  - 8.2
-  #  - 8.3
-  #  - 8.4
-  #  - 9.0
-  #  - 9.1
-  #  - 9.1
-  #  - 9.2
-  #- name: Ubuntu
-  #  versions:
-  #  - all
-  #  - lucid
-  #  - maverick
-  #  - natty
-  #  - oneiric
-  #  - precise
-  #  - quantal
-  #  - raring
-  #  - saucy
-  #  - trusty
-  #- name: SLES
-  #  versions:
-  #  - all
-  #  - 10SP3
-  #  - 10SP4
-  #  - 11
-  #  - 11SP1
-  #  - 11SP2
-  #  - 11SP3
-  #- name: GenericLinux
-  #  versions:
-  #  - all
-  #  - any
-  #- name: Debian
-  #  versions:
-  #  - all
-  #  - etch
-  #  - lenny
-  #  - squeeze
-  #  - wheezy
-  #
-  # Below are all categories currently available. Just as with
-  # the platforms above, uncomment those that apply to your role.
-  #
-  #categories:
-  #- cloud
-  #- cloud:ec2
-  #- cloud:gce
-  #- cloud:rax
-  #- clustering
-  #- database
-  #- database:nosql
-  #- database:sql
-  #- development
-  #- monitoring
-  #- networking
-  #- packaging
-  #- system
-  #- web
-dependencies: []
-  # List your role dependencies here, one per line. Only
-  # dependencies available via galaxy should be listed here.
-  # Be sure to remove the '[]' above if you add dependencies
-  # to this list.
-  

+ 0 - 7
roles/kubernetes_controller_manager/tasks/main.yml

@@ -1,7 +0,0 @@
----
-# tasks file for kubernetes_controller_manager
-- name: Install kubernetes
-  yum: pkg=kubernetes
-
-- name: Enable controller-manager
-  service: name=kubernetes-controller-manager enabled=yes state=started

+ 0 - 2
roles/kubernetes_controller_manager/vars/main.yml

@@ -1,2 +0,0 @@
----
-# vars file for kubernetes_controller_manager

+ 0 - 38
roles/kubernetes_kubelet/README.md

@@ -1,38 +0,0 @@
-Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
-    - hosts: servers
-      roles:
-         - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).

+ 0 - 2
roles/kubernetes_kubelet/defaults/main.yml

@@ -1,2 +0,0 @@
----
-# defaults file for kubernetes_kubelet

+ 0 - 10
roles/kubernetes_kubelet/files/kubelet.service

@@ -1,10 +0,0 @@
-[Unit]
-Description=Kubernetes Kubelet Server
-Documentation=https://github.com/GoogleCloudPlatform/kubernetes
-
-[Service]
-EnvironmentFile=/etc/sysconfig/kubelet
-ExecStart=/usr/local/bin/kubelet "$DAEMON_ARGS"
-
-[Install]
-WantedBy=multi-user.target

+ 0 - 4
roles/kubernetes_kubelet/handlers/main.yml

@@ -1,4 +0,0 @@
----
-# handlers file for kubernetes_kubelet
-- name: restart kubelet
-  service: name=kubernetes-kubelet state=restarted

+ 0 - 124
roles/kubernetes_kubelet/meta/main.yml

@@ -1,124 +0,0 @@
----
-galaxy_info:
-  author: your name
-  description: 
-  company: your company (optional)
-  # Some suggested licenses:
-  # - BSD (default)
-  # - MIT
-  # - GPLv2
-  # - GPLv3
-  # - Apache
-  # - CC-BY
-  license: license (GPLv2, CC-BY, etc)
-  min_ansible_version: 1.2
-  #
-  # Below are all platforms currently available. Just uncomment
-  # the ones that apply to your role. If you don't see your 
-  # platform on this list, let us know and we'll get it added!
-  #
-  #platforms:
-  #- name: EL
-  #  versions:
-  #  - all
-  #  - 5
-  #  - 6
-  #  - 7
-  #- name: GenericUNIX
-  #  versions:
-  #  - all
-  #  - any
-  #- name: Fedora
-  #  versions:
-  #  - all
-  #  - 16
-  #  - 17
-  #  - 18
-  #  - 19
-  #  - 20
-  #- name: opensuse
-  #  versions:
-  #  - all
-  #  - 12.1
-  #  - 12.2
-  #  - 12.3
-  #  - 13.1
-  #  - 13.2
-  #- name: Amazon
-  #  versions:
-  #  - all
-  #  - 2013.03
-  #  - 2013.09
-  #- name: GenericBSD
-  #  versions:
-  #  - all
-  #  - any
-  #- name: FreeBSD
-  #  versions:
-  #  - all
-  #  - 8.0
-  #  - 8.1
-  #  - 8.2
-  #  - 8.3
-  #  - 8.4
-  #  - 9.0
-  #  - 9.1
-  #  - 9.1
-  #  - 9.2
-  #- name: Ubuntu
-  #  versions:
-  #  - all
-  #  - lucid
-  #  - maverick
-  #  - natty
-  #  - oneiric
-  #  - precise
-  #  - quantal
-  #  - raring
-  #  - saucy
-  #  - trusty
-  #- name: SLES
-  #  versions:
-  #  - all
-  #  - 10SP3
-  #  - 10SP4
-  #  - 11
-  #  - 11SP1
-  #  - 11SP2
-  #  - 11SP3
-  #- name: GenericLinux
-  #  versions:
-  #  - all
-  #  - any
-  #- name: Debian
-  #  versions:
-  #  - all
-  #  - etch
-  #  - lenny
-  #  - squeeze
-  #  - wheezy
-  #
-  # Below are all categories currently available. Just as with
-  # the platforms above, uncomment those that apply to your role.
-  #
-  #categories:
-  #- cloud
-  #- cloud:ec2
-  #- cloud:gce
-  #- cloud:rax
-  #- clustering
-  #- database
-  #- database:nosql
-  #- database:sql
-  #- development
-  #- monitoring
-  #- networking
-  #- packaging
-  #- system
-  #- web
-dependencies: []
-  # List your role dependencies here, one per line. Only
-  # dependencies available via galaxy should be listed here.
-  # Be sure to remove the '[]' above if you add dependencies
-  # to this list.
-  

+ 0 - 31
roles/kubernetes_kubelet/tasks/main.yml

@@ -1,31 +0,0 @@
----
-# tasks file for kubernetes_kubelet
-- name: Install kubernetes
-  yum: pkg=kubernetes state=installed
-
-- name: Configure kubelet
-  lineinfile:
-    dest: /etc/sysconfig/kubernetes
-    regexp: "{{ item.regex }}"
-    line: "{{ item.line }}"
-  with_items:
-    - { regex: '^KUBE_ETCD_SERVERS=', line: 'KUBE_ETCD_SERVERS=\"http://{{ oo_master_ips[0] }}:4001\"' }
-    - { regex: '^KUBE_KUBELET_ADDRESS=', line: 'KUBE_KUBELET_ADDRESS=\"0.0.0.0\"' }
-    - { regex: '^KUBE_KUBELET_HOSTNAME_OVERRIDE=', line: 'KUBE_KUBELET_HOSTNAME_OVERRIDE=\"{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address }}\"' }
-  notify:
-    - restart kubelet
-
-
-#- name: write the cadvisor config
-#  template: src=cadvisor.manifest dest=/etc/kubernetes/manifests/cadvisor.manifest
-#  notify:
-#    - restart kubelet
-
-- name: Enable kubelet
-  service: name=kubernetes-kubelet enabled=yes state=started
-
-- name: Open firewalld port for the kubelet
-  firewalld: port=10250/tcp permanent=false state=enabled
-
-- name: Save firewalld port for the kubelet
-  firewalld: port=10250/tcp permanent=true state=enabled

+ 0 - 33
roles/kubernetes_kubelet/templates/cadvisor.manifest

@@ -1,33 +0,0 @@
-version: v1beta2
-id: cadvisor-agent
-containers:
-  - name: cadvisor
-    image: google/cadvisor:latest
-    ports:
-      - name: http
-        containerPort: 8080
-        hostPort: 4194
-    volumeMounts:
-      - name: varrun
-        mountPath: /var/run
-        readOnly: false
-      - name: varlibdocker
-        mountPath: /var/lib/docker
-        readOnly: true
-      - name: cgroups
-        mountPath: /sys/fs/cgroup
-        readOnly: true
-volumes:
-  - name: varrun
-    source:
-      hostDir:
-        path: /var/run
-  - name: varlibdocker
-    source:
-      hostDir:
-        path: /var/lib/docker
-  - name: cgroups
-    source:
-      hostDir:
-        path: /sys/fs/cgroup
-

+ 0 - 3
roles/kubernetes_kubelet/templates/kubelet

@@ -1,3 +0,0 @@
-
-DAEMON_ARGS=" -etcd_servers=http://10.245.1.2:4001  -hostname_override=10.245.2.2 -address=0.0.0.0 -config=/etc/kubernetes/manifests"
-

+ 0 - 2
roles/kubernetes_kubelet/vars/main.yml

@@ -1,2 +0,0 @@
----
-# vars file for kubernetes_kubelet

+ 0 - 38
roles/kubernetes_proxy/README.md

@@ -1,38 +0,0 @@
-Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
-    - hosts: servers
-      roles:
-         - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).

+ 0 - 2
roles/kubernetes_proxy/defaults/main.yml

@@ -1,2 +0,0 @@
----
-# defaults file for kubernetes_proxy

+ 0 - 4
roles/kubernetes_proxy/handlers/main.yml

@@ -1,4 +0,0 @@
----
-# handlers file for kubernetes_proxy
-- name: restart kubernetes-proxy
-  service: name=kubernetes-proxy state=restarted

+ 0 - 124
roles/kubernetes_proxy/meta/main.yml

@@ -1,124 +0,0 @@
----
-galaxy_info:
-  author: your name
-  description: 
-  company: your company (optional)
-  # Some suggested licenses:
-  # - BSD (default)
-  # - MIT
-  # - GPLv2
-  # - GPLv3
-  # - Apache
-  # - CC-BY
-  license: license (GPLv2, CC-BY, etc)
-  min_ansible_version: 1.2
-  #
-  # Below are all platforms currently available. Just uncomment
-  # the ones that apply to your role. If you don't see your 
-  # platform on this list, let us know and we'll get it added!
-  #
-  #platforms:
-  #- name: EL
-  #  versions:
-  #  - all
-  #  - 5
-  #  - 6
-  #  - 7
-  #- name: GenericUNIX
-  #  versions:
-  #  - all
-  #  - any
-  #- name: Fedora
-  #  versions:
-  #  - all
-  #  - 16
-  #  - 17
-  #  - 18
-  #  - 19
-  #  - 20
-  #- name: opensuse
-  #  versions:
-  #  - all
-  #  - 12.1
-  #  - 12.2
-  #  - 12.3
-  #  - 13.1
-  #  - 13.2
-  #- name: Amazon
-  #  versions:
-  #  - all
-  #  - 2013.03
-  #  - 2013.09
-  #- name: GenericBSD
-  #  versions:
-  #  - all
-  #  - any
-  #- name: FreeBSD
-  #  versions:
-  #  - all
-  #  - 8.0
-  #  - 8.1
-  #  - 8.2
-  #  - 8.3
-  #  - 8.4
-  #  - 9.0
-  #  - 9.1
-  #  - 9.1
-  #  - 9.2
-  #- name: Ubuntu
-  #  versions:
-  #  - all
-  #  - lucid
-  #  - maverick
-  #  - natty
-  #  - oneiric
-  #  - precise
-  #  - quantal
-  #  - raring
-  #  - saucy
-  #  - trusty
-  #- name: SLES
-  #  versions:
-  #  - all
-  #  - 10SP3
-  #  - 10SP4
-  #  - 11
-  #  - 11SP1
-  #  - 11SP2
-  #  - 11SP3
-  #- name: GenericLinux
-  #  versions:
-  #  - all
-  #  - any
-  #- name: Debian
-  #  versions:
-  #  - all
-  #  - etch
-  #  - lenny
-  #  - squeeze
-  #  - wheezy
-  #
-  # Below are all categories currently available. Just as with
-  # the platforms above, uncomment those that apply to your role.
-  #
-  #categories:
-  #- cloud
-  #- cloud:ec2
-  #- cloud:gce
-  #- cloud:rax
-  #- clustering
-  #- database
-  #- database:nosql
-  #- database:sql
-  #- development
-  #- monitoring
-  #- networking
-  #- packaging
-  #- system
-  #- web
-dependencies: []
-  # List your role dependencies here, one per line. Only
-  # dependencies available via galaxy should be listed here.
-  # Be sure to remove the '[]' above if you add dependencies
-  # to this list.
-  

+ 0 - 17
roles/kubernetes_proxy/tasks/main.yml

@@ -1,17 +0,0 @@
----
-# tasks file for kubernetes_proxy
-- name: Install kubernetes
-  yum: pkg=kubernetes state=installed
-
-- name: Configure kubernetes-proxy etcd servers
-  lineinfile:
-    dest: /etc/sysconfig/kubernetes
-    regexp: "{{ item.regex }}"
-    line: "{{ item.line }}"
-  with_items:
-    - { regex: '^KUBE_ETCD_SERVERS=', line: 'KUBE_ETCD_SERVERS=\"http://{{ oo_master_ips[0] }}:4001\"' }
-  notify:
-    - restart kubernetes-proxy
-
-- name: Enable proxy
-  service: name=kubernetes-proxy enabled=yes state=started

+ 0 - 2
roles/kubernetes_proxy/vars/main.yml

@@ -1,2 +0,0 @@
----
-# vars file for kubernetes_proxy

+ 1 - 1
roles/openshift_master/handlers/main.yml

@@ -1,4 +1,4 @@
 ---
 # handlers file for openshift_master
 - name: restart openshift-master
-  service: name=openshift state=restarted
+  service: name=openshift-master state=restarted

+ 19 - 16
roles/openshift_master/tasks/main.yml

@@ -1,34 +1,37 @@
 ---
 # tasks file for openshift_master
 - name: Install Origin
-  yum: pkg=origin state=installed
+  yum: pkg=openshift-master state=installed
 
-  # fixme: Once openshift stops resolving hostnames for minion queries remove this...
+  # fixme: Once openshift stops resolving hostnames for node queries remove this...
 - name: Set hostname to IP Addr (WORKAROUND)
   command: /usr/bin/hostname {{ oo_bind_ip }}
 
 - name: Configure OpenShift Master settings
   lineinfile:
-    dest: /etc/sysconfig/openshift
+    dest: /etc/sysconfig/openshift-master
     regexp: "{{ item.regex }}"
     line: "{{ item.line }}"
   with_items:
-    - { regex: '^ROLE=', line: 'ROLE=\"master\"' }
-    - { regex: '^OPTIONS=', line: 'OPTIONS=\"--nodes={{ oo_minion_ips | join(",") }}  --loglevel=5\"' }
+    - regex: '^OPTIONS='
+      line: "OPTIONS=\"--public-master={{ oo_public_ip }} --nodes={{ oo_node_ips | join(',') }}  --loglevel=5\""
   notify:
     - restart openshift-master
 
-- name: Open firewalld port for etcd embedded in OpenShift
-  firewalld: port=4001/tcp permanent=false state=enabled
+# Open etcd embedded, etcd embedded peer, openshift api, and
+# openshift client ports
+- name: Open firewalld ports for openshift-master
+  firewalld: port={{ item[0] }} permanent={{ item[1] }} state=enabled
+  with_nested:
+  - [ 4001/tcp, 7001/tcp, 8443/tcp, 8444/tcp ]
+  - [ true, false ]
 
-- name: Save firewalld port for etcd embedded in
-  firewalld: port=4001/tcp permanent=true state=enabled
-
-- name: Open firewalld port for OpenShift
-  firewalld: port=8080/tcp permanent=false state=enabled
-
-- name: Save firewalld port for OpenShift
-  firewalld: port=8080/tcp permanent=true state=enabled
+# Disable previously exposed ports that are no longer needed
+- name: Close firewalld ports for openshift-master that are no longer needed
+  firewalld: port={{ item[0] }} permanent={{ item[1] }} state=enabled
+  with_nested:
+  - [ 8080/tcp ]
+  - [ true, false ]
 
 - name: Enable OpenShift
-  service: name=openshift enabled=yes state=started
+  service: name=openshift-master enabled=yes state=started

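For clarity, `with_nested` iterates over the Cartesian product of the two lists, so each of the four master ports is enabled both permanently and in the runtime firewalld configuration. An illustrative expansion of the first loop (not part of the commit):

```
- [ 4001/tcp, true ]    # firewalld: port=4001/tcp permanent=true state=enabled
- [ 4001/tcp, false ]   # firewalld: port=4001/tcp permanent=false state=enabled
- [ 7001/tcp, true ]
- [ 7001/tcp, false ]
# ...and the same pairs for 8443/tcp and 8444/tcp
```
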
+ 1 - 1
roles/openshift_master/vars/main.yml

@@ -1,2 +1,2 @@
 ---
-# vars file for kubernetes_apiserver
+# vars file for openshift_master

+ 0 - 2
roles/openshift_minion/defaults/main.yml

@@ -1,2 +0,0 @@
----
-# defaults file for openshift_minion

+ 0 - 4
roles/openshift_minion/handlers/main.yml

@@ -1,4 +0,0 @@
----
-# handlers file for openshift_minion
-- name: restart openshift-minion
-  service: name=openshift state=restarted

+ 0 - 29
roles/openshift_minion/tasks/main.yml

@@ -1,29 +0,0 @@
----
-# tasks file for openshift_minion
-- name: Install OpenShift
-  yum: pkg=origin state=installed
-
-  # fixme: Once openshift stops resolving hostnames for minion queries remove this...
-- name: Set hostname to IP Addr (WORKAROUND)
-  command: /usr/bin/hostname {{ oo_bind_ip }}
-
-- name: Configure OpenShift Minion settings
-  lineinfile:
-    dest: /etc/sysconfig/openshift
-    regexp: "{{ item.regex }}"
-    line: "{{ item.line }}"
-  with_items:
-    - { regex: '^ROLE=', line: 'ROLE=\"node\"' }
-    - { regex: '^OPTIONS=', line: 'OPTIONS=\"--master=http://{{ oo_master_ips[0] }}:8080  --loglevel=5\"' }
-  notify:
-    - restart openshift-minion
-
-- name: Open firewalld port for OpenShift
-  firewalld: port=10250/tcp permanent=false state=enabled
-
-- name: Save firewalld port for OpenShift
-  firewalld: port=10250/tcp permanent=true state=enabled
-
-- name: Enable OpenShift
-  service: name=openshift enabled=yes state=started
-

+ 0 - 2
roles/openshift_minion/vars/main.yml

@@ -1,2 +0,0 @@
----
-# vars file for openshift_minion

roles/openshift_minion/README.md → roles/openshift_node/README.md


+ 2 - 0
roles/openshift_node/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+# defaults file for openshift_node

+ 4 - 0
roles/openshift_node/handlers/main.yml

@@ -0,0 +1,4 @@
+---
+# handlers file for openshift_node
+- name: restart openshift-node
+  service: name=openshift-node state=restarted

roles/openshift_minion/meta/main.yml → roles/openshift_node/meta/main.yml


+ 45 - 0
roles/openshift_node/tasks/main.yml

@@ -0,0 +1,45 @@
+---
+
+# tasks file for openshift_node
+- name: Install OpenShift
+  yum: pkg=openshift-node state=installed
+
+  # fixme: Once openshift stops resolving hostnames for node queries remove this...
+- name: Set hostname to IP Addr (WORKAROUND)
+  hostname: name={{ oo_bind_ip }}
+
+- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
+  register: mktemp
+
+- name: Retrieve OpenShift Master credentials
+  local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh  -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' root@{{ oo_master_public_ips[0] }}:/var/lib/openshift/openshift.local.certificates/admin/ {{ mktemp.stdout }}
+  ignore_errors: yes
+
+- file: path=/var/lib/openshift/openshift.local.certificates/admin state=directory
+
+- name: Store OpenShift Master credentials
+  local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh  -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' {{ mktemp.stdout }}/ root@{{ oo_public_ip }}:/var/lib/openshift/openshift.local.certificates/admin
+  ignore_errors: yes
+
+- name: Configure OpenShift Node settings
+  lineinfile:
+    dest: /etc/sysconfig/openshift-node
+    regexp: "{{ item.regex }}"
+    line: "{{ item.line }}"
+  with_items:
+    - { regex: '^OPTIONS=', line: 'OPTIONS=\"--master=https://{{ oo_master_ips[0] }}:8443  --loglevel=5\"' }
+  notify:
+    - restart openshift-node
+
+- name: Open firewalld port for OpenShift
+  firewalld: port=10250/tcp permanent=false state=enabled
+
+- name: Save firewalld port for OpenShift
+  firewalld: port=10250/tcp permanent=true state=enabled
+
+  # fixme: Once the openshift_cluster playbook is published state should be started
+  # Always bounce service to pick up new credentials
+- name: Enable OpenShift
+  service: name=openshift-node enabled=yes state=restarted
+
+- local_action: file name={{ mktemp.stdout }} state=absent

+ 2 - 0
roles/openshift_node/vars/main.yml

@@ -0,0 +1,2 @@
+---
+# vars file for openshift_node

+ 5 - 0
roles/repos/defaults/main.yaml

@@ -0,0 +1,5 @@
+---
+# TODO: once we are able to configure/deploy origin using the openshift roles,
+# then we should default to origin
+openshift_deployment_type: online
+openshift_additional_repos: {}

+ 0 - 5
roles/repos/files/docker.repo

@@ -1,5 +0,0 @@
-[docker]
-name= Temporary Docker rpm
-baseurl=http://10.240.169.148/mirror/docker
-gpgcheck=0
-enabled=0

+ 61 - 0
roles/repos/files/online/RPM-GPG-KEY-redhat-beta

@@ -0,0 +1,61 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1.2.6 (GNU/Linux)
+
+mQINBEmkAzABEAC2/c7bP1lHQ3XScxbIk0LQWe1YOiibQBRLwf8Si5PktgtuPibT
+kKpZjw8p4D+fM7jD1WUzUE0X7tXg2l/eUlMM4dw6XJAQ1AmEOtlwSg7rrMtTvM0A
+BEtI7Km6fC6sU6RtBMdcqD1cH/6dbsfh8muznVA7UlX+PRBHVzdWzj6y8h84dBjo
+gzcbYu9Hezqgj/lLzicqsSZPz9UdXiRTRAIhp8V30BD8uRaaa0KDDnD6IzJv3D9P
+xQWbFM4Z12GN9LyeZqmD7bpKzZmXG/3drvfXVisXaXp3M07t3NlBa3Dt8NFIKZ0D
+FRXBz5bvzxRVmdH6DtkDWXDPOt+Wdm1rZrCOrySFpBZQRpHw12eo1M1lirANIov7
+Z+V1Qh/aBxj5EUu32u9ZpjAPPNtQF6F/KjaoHHHmEQAuj4DLex4LY646Hv1rcv2i
+QFuCdvLKQGSiFBrfZH0j/IX3/0JXQlZzb3MuMFPxLXGAoAV9UP/Sw/WTmAuTzFVm
+G13UYFeMwrToOiqcX2VcK0aC1FCcTP2z4JW3PsWvU8rUDRUYfoXovc7eg4Vn5wHt
+0NBYsNhYiAAf320AUIHzQZYi38JgVwuJfFu43tJZE4Vig++RQq6tsEx9Ftz3EwRR
+fJ9z9mEvEiieZm+vbOvMvIuimFVPSCmLH+bI649K8eZlVRWsx3EXCVb0nQARAQAB
+tDBSZWQgSGF0LCBJbmMuIChiZXRhIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0LmNv
+bT6JAjYEEwECACAFAkpSM+cCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCT
+ioDK8hVB6/9tEAC0+KmzeKceXQ/GTUoU6jy9vtkFCFrmv+c7ol4XpdTt0QhqBOwy
+6m2mKWwmm8KfYfy0cADQ4y/EcoXl7FtFBwYmkCuEQGXhTDn9DvVjhooIq59LEMBQ
+OW879RwwzRIZ8ebbjMUjDPF5MfPQqP2LBu9N4KvXlZp4voykwuuaJ+cbsKZR6pZ6
+0RQKPHKP+NgUFC0fff7XY9cuOZZWFAeKRhLN2K7bnRHKxp+kELWb6R9ZfrYwZjWc
+MIPbTd1khE53L4NTfpWfAnJRtkPSDOKEGVlVLtLq4HEAxQt07kbslqISRWyXER3u
+QOJj64D1ZiIMz6t6uZ424VE4ry9rBR0Jz55cMMx5O/ni9x3xzFUgH8Su2yM0r3jE
+Rf24+tbOaPf7tebyx4OKe+JW95hNVstWUDyGbs6K9qGfI/pICuO1nMMFTo6GqzQ6
+DwLZvJ9QdXo7ujEtySZnfu42aycaQ9ZLC2DOCQCUBY350Hx6FLW3O546TAvpTfk0
+B6x+DV7mJQH7MGmRXQsE7TLBJKjq28Cn4tVp04PmybQyTxZdGA/8zY6pPl6xyVMH
+V68hSBKEVT/rlouOHuxfdmZva1DhVvUC6Xj7+iTMTVJUAq/4Uyn31P1OJmA2a0PT
+CAqWkbJSgKFccsjPoTbLyxhuMSNkEZFHvlZrSK9vnPzmfiRH0Orx3wYpMQ==
+=21pb
+-----END PGP PUBLIC KEY BLOCK-----
+The following public key can be used to verify RPM packages built and
+signed by Red Hat, Inc. for this beta using `rpm -K' using the GNU GPG
+package.  Questions about this key should be sent to security@redhat.com.
+
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1.0.6 (GNU/Linux)
+Comment: For info see http://www.gnupg.org
+
+mQGiBDySTqsRBACzc7xuCIp10oj5B2PAV4XzDeVxprv/WTMreSNSK+iC0bEz0IBp
+Vnn++qtyiXfH+bGIE9jqZgIEnpttWhUOaU5LhcLFzy+m8NWfngIFP9QfGmGAe9Gd
+LFeAdhj4RmSG/vgr7vDd83Hz22dv403Ar/sliWO4vDOrMmZBG57WGYTWtwCgkMsi
+UUQuJ6slbzKn82w+bYxOlL0EAIylWJGaTkKOTL5DqVR3ik9aT0Dt3FNVYiuhcKBe
+II4E3KOIVA9kO8in1IZjx2gs6K2UV+GsoAVANdfKL7l9O+k+J8OxhE74oycvYJxW
+QzCgXMZkNcvW5wyXwEMcr6TVd/5BGztcMw8oT3/l2MtAEG/vn1XaWToRSO1XDMDz
++AjUA/4m0mTkN8S4wjzJG8lqN7+quW3UOaiCe8J3SFrrrhE0XbY9cTJI/9nuXHU1
+VjqOSmXQYH2Db7UOroFTBiWhlAedA4O4yuK52AJnvSsHbnJSEmn9rpo5z1Q8F+qI
+mDlzriJdrIrVLeDiUeTlpH3kpG38D7007GhXBV72k1gpMoMcpbQ3UmVkIEhhdCwg
+SW5jLiAoQmV0YSBUZXN0IFNvZnR3YXJlKSA8cmF3aGlkZUByZWRoYXQuY29tPohX
+BBMRAgAXBQI8l5p/BQsHCgMEAxUDAgMWAgECF4AACgkQ/TcmiYl9oHqdeQCfZjw4
+F9sir3XfRAjVe9kYNcQ8hnIAn0WgyT7H5RriWYTOCfauOmd+cAW4iEYEEBECAAYF
+AjyXmqQACgkQIZGAzdtCpg5nDQCfepuRUyuVJvhuQkPWySETYvRw+WoAnjAWhx6q
+0npMx4OE1JGFi8ymKXktuQENBDySTq4QBADKL/mK7S8E3synxISlu7R6fUvu07Oc
+RoX96n0Di6T+BS99hC44XzHjMDhUX2ZzVvYS88EZXoUDDkB/8g7SwZrOJ/QE1zrI
+JmSVciNhSYWwqeT40Evs88ajZUfDiNbS/cSC6oui98iS4vxd7sE7IPY+FSx9vuAR
+xOa9vBnJY/dx0wADBQQAosm+Iltt2uigC6LJzxNOoIdB5r0GqTC1o5sHCeNqXJhU
+ExAG8m74uzMlYVLOpGZi4y4NwwAWvCWC0MWWnnu+LGFy1wKiJKRjhv5F+WkFutY5
+WHV5L44vp9jSIlBCRG+84jheTh8xqhndM9wOfPwWdYYu1vxrB8Tn6kA17PcYfHSI
+RgQYEQIABgUCPJJergAKCRD9NyaJiX2geiCPAJ4nEM4NtI9Uj8lONDk6FU86PmoL
+yACfb68fBd2pWEzLKsOk9imIobHHpzE=
+=gpIn
+-----END PGP PUBLIC KEY BLOCK-----

roles/repos/files/RPM-GPG-KEY-redhat-release → roles/repos/files/online/RPM-GPG-KEY-redhat-release


roles/repos/files/epel7-kubernetes.repo → roles/repos/files/online/epel7-kubernetes.repo


roles/repos/files/epel7-origin.repo → roles/repos/files/online/epel7-openshift.repo


+ 23 - 0
roles/repos/files/online/oso-rhui-rhel-7-extras.repo

@@ -0,0 +1,23 @@
+[oso-rhui-rhel-server-extras]
+name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras
+baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/
+        https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta
+failovermethod=priority
+sslverify=False
+sslclientcert=/var/lib/yum/client-cert.pem
+sslclientkey=/var/lib/yum/client-key.pem
+
+[oso-rhui-rhel-server-extras-htb]
+name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras HTB
+baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/
+        https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta
+failovermethod=priority
+sslverify=False
+sslclientcert=/var/lib/yum/client-cert.pem
+sslclientkey=/var/lib/yum/client-key.pem

+ 21 - 0
roles/repos/files/online/oso-rhui-rhel-7-server.repo

@@ -0,0 +1,21 @@
+[oso-rhui-rhel-server-releases]
+name=OpenShift Online RHUI Mirror RH Enterprise Linux 7
+baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/
+        https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+sslverify=False
+sslclientcert=/var/lib/yum/client-cert.pem
+sslclientkey=/var/lib/yum/client-key.pem
+
+[oso-rhui-rhel-server-releases-optional]
+name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 - Optional
+baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/
+        https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
+sslverify=False
+sslclientcert=/var/lib/yum/client-cert.pem
+sslclientkey=/var/lib/yum/client-key.pem

+ 11 - 0
roles/repos/files/online/rhel-7-libra-candidate.repo

@@ -0,0 +1,11 @@
+[rhel-7-libra-candidate]
+name=rhel-7-libra-candidate - \$basearch
+baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/
+        https://mirror.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/
+gpgkey=https://mirror.ops.rhcloud.com/libra/RPM-GPG-KEY-redhat-openshifthosted
+skip_if_unavailable=True
+gpgcheck=0
+enabled=1
+sslclientcert=/var/lib/yum/client-cert.pem
+sslclientkey=/var/lib/yum/client-key.pem
+sslverify=False

+ 0 - 13
roles/repos/files/oso-rhui-rhel-7-server.repo

@@ -1,13 +0,0 @@
-[oso-rhel-7-server]
-name=Red Hat Enterprise Linux 7 Server from RHUI (RPMs)
-baseurl=http://10.240.169.148/mirror/rhui-rhel-server-7-releases
-enabled=1
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-
-[oso-rhel-7-server-optional]
-name=Red Hat Enterprise Linux 7 Server - Optional from RHUI (RPMs)
-baseurl=http://10.240.169.148/mirror/rhui-rhel-server-7-releases-optional
-enabled=1
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release

+ 0 - 10
roles/repos/files/rhel-7-libra-candidate.repo

@@ -1,10 +0,0 @@
-[rhel-7-libra-candidate]
-name=rhel-7-libra-candidate - \$basearch
-baseurl=https://mirror1.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/
-gpgkey=https://mirror1.ops.rhcloud.com/libra/RPM-GPG-KEY-redhat-release https://mirror1.ops.rhcloud.com/libra/RPM-GPG-KEY-redhat-beta https://mirror1.ops.rhcloud.com/libra/RPM-GPG-KEY-redhat-openshifthosted
-skip_if_unavailable=True
-gpgcheck=0
-enabled=1
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
-sslverify=False

+ 37 - 9
roles/repos/tasks/main.yaml

@@ -1,13 +1,41 @@
 ---
-# The following role lays down the correct repository and gpg key for yum
-- name: Ensure rhel 7 libra candidate exists in yum.repos.d
-  copy: src=rhel-7-libra-candidate.repo dest=/etc/yum.repos.d/rhel-7-libra-candidate.repo
+# TODO: Add flag for enabling EPEL repo, default to false
 
-- name: Ensure a docker repo is laid down
-  copy: src=docker.repo dest=/etc/yum.repos.d/docker.repo
+- assert:
+    that: openshift_deployment_type in known_openshift_deployment_types
 
-- name: Ensure the kubernetes repo is available
-  copy: src=epel7-kubernetes.repo dest=/etc/yum.repos.d/epel7-kubernetes.repo
+# TODO: remove this when origin support actually works
+- fail: msg="OpenShift Origin support is not currently enabled"
+  when: openshift_deployment_type == 'origin'
 
-- name: Ensure the origin repo is available
-  copy: src=epel7-origin.repo dest=/etc/yum.repos.d/epel7-origin.repo
+- name: Create any additional repos that are defined
+  template:
+    src: yum_repo.j2
+    dest: /etc/yum.repos.d/openshift_additional.repo
+  when: openshift_additional_repos | length > 0
+
+- name: Remove the additional repos if no longer defined
+  file:
+    dest: /etc/yum.repos.d/openshift_additional.repo
+    state: absent
+  when: openshift_additional_repos | length == 0
+
+- name: Remove any yum repo files for other deployment types
+  file:
+    path: "/etc/yum.repos.d/{{ item | basename }}"
+    state: absent
+  with_fileglob:
+  - '*/*'
+  when: not (item | search("/files/" + openshift_deployment_type + "/")) and (item | search(".repo$"))
+
+- name: Configure gpg keys if needed
+  copy: src={{ item }} dest=/etc/pki/rpm-gpg/
+  with_fileglob:
+  - "{{ openshift_deployment_type }}/*"
+  when: item | basename | match("RPM-GPG-KEY-")
+
+- name: Configure yum repositories
+  copy: src={{ item }} dest=/etc/yum.repos.d/
+  with_fileglob:
+  - "{{ openshift_deployment_type }}/*"
+  when: item | basename | search(".*\.repo$")

+ 15 - 0
roles/repos/templates/yum_repo.j2

@@ -0,0 +1,15 @@
+# {{ ansible_managed }}
+{% for repo in openshift_additional_repos %}
+[{{ repo.id }}]
+name={{ repo.name | default(repo.id) }}
+baseurl={{ repo.baseurl }}
+{% set enable_repo = repo.enabled | default('1') %}
+enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }}
+{% set enable_gpg_check = repo.gpgcheck | default('1') %}
+gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }}
+{% for key, value in repo.iteritems() %}
+{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined %}
+{{ key }}={{ value }}
+{% endif %}
+{% endfor %}
+{% endfor %}

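As a hypothetical illustration (example values, not part of this commit), an `openshift_additional_repos` entry like the following in inventory or group_vars would be rendered by the template above into `/etc/yum.repos.d/openshift_additional.repo`; keys other than `id`, `name`, `baseurl`, `enabled`, and `gpgcheck` are passed through verbatim as extra repo options.

```
openshift_additional_repos:
- id: example-extras
  name: Example extras mirror
  baseurl: https://mirror.example.com/extras/x86_64/
  enabled: 1
  gpgcheck: 0
  sslverify: False
```

With these values the template emits an `[example-extras]` section containing `enabled=1`, `gpgcheck=0`, and `sslverify=False`.
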
+ 2 - 0
roles/repos/vars/main.yml

@@ -0,0 +1,2 @@
+---
+known_openshift_deployment_types: ['origin', 'online', 'enterprise']