
openshift_facts role/module refactor default settings

- Add openshift_facts role and module
  - Created new role openshift_facts that contains an openshift_facts module
  - Refactor openshift_* roles to use openshift_facts instead of relying on
    defaults (see the sketch below)
  - Refactor playbooks to use openshift_facts
  - Cleanup inventory group_vars
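
  For reference, the call pattern the refactored roles adopt looks like the
  following sketch, which mirrors (in trimmed form) the
  roles/openshift_common/tasks/main.yml hunk later in this diff:
```
# Roles pass only user-supplied overrides as local_facts; the module merges
# them with provider-detected values and defaults, and exposes the result as
# the `openshift` fact (e.g. openshift.common.hostname).
- name: Set common OpenShift facts
  openshift_facts:
    role: 'common'
    local_facts:
      cluster_id: "{{ openshift_cluster_id | default('default') }}"
      debug_level: "{{ openshift_debug_level | default(0) }}"
      hostname: "{{ openshift_hostname | default(None) }}"
      public_hostname: "{{ openshift_public_hostname | default(None) }}"
```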

- Update defaults
  - update openshift_master role firewall defaults
    - remove etcd peer port, since we will not be supporting clustered embedded
      etcd
    - remove 8444 since console now runs on the api port by default
    - add 8444 and 7001 to the disabled services list to ensure they are
      removed when updating (see the sketch below)
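
  A rough sketch of the resulting defaults, assuming the role drives os_firewall
  through os_firewall_allow/os_firewall_deny lists; the real entries live in
  roles/openshift_master/defaults/main.yml, which this commit changes but is not
  shown below, and the service names here are illustrative:
```
# Hypothetical shape of the updated master firewall defaults; only the port
# numbers come from the commit message (api on 8443, console 8444 and etcd
# peer 7001 moved to the deny list so they are removed on upgrade).
os_firewall_allow:
- service: OpenShift api https
  port: 8443/tcp
os_firewall_deny:
- service: former OpenShift web console port
  port: 8444/tcp
- service: former etcd peer port
  port: 7001/tcp
```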

- Add new role os_env_extras_node that is a subset of the docker role
  - Previously, we were starting/enabling docker, which was causing issues with
    some installations
  - Does not install or start docker, since the openshift-node role will
    handle that for us
  - Only adds root to the dockerroot group (see the sketch below)
  - Update playbooks to use the os_env_extras_node role instead of the docker role
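
  The new role's tasks file is not shown in the diff below; a minimal sketch of
  what its single task presumably looks like, assuming Ansible's user module:
```
# roles/os_env_extras_node/tasks/main.yml (sketch): only group membership is
# touched; installing and starting docker is left to the openshift-node role.
- name: Add root to the dockerroot group
  user:
    name: root
    groups: dockerroot
    append: yes
```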

- os_firewall bug fixes
  - ignore ip6tables for now, since we are not configuring any ipv6 rules
  - if installing a package, do a daemon-reload before starting/enabling the
    service (see the sketch below)
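
  The iptables task file is changed but not shown below; a hedged sketch of the
  ordering fix, with hypothetical task names and register variable:
```
# Sketch: reload systemd after installing iptables-services so the freshly
# installed unit file is visible before the service is enabled/started.
- name: Install iptables packages
  yum: pkg={{ item }} state=present
  with_items:
  - iptables
  - iptables-services
  register: install_result

- name: Reload systemd units
  command: systemctl daemon-reload
  when: install_result | changed

- name: Start and enable iptables service
  service: name=iptables state=started enabled=yes
```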

- Add aws support to bin/cluster

- Add list action to bin/cluster

- Add update action to bin/cluster

- Clean up some stray debug statements

- some variable renaming for clarity
Jason DeTiberus 10 years ago
parent
commit
4712e72c91
77 changed files with 1290 additions and 626 deletions
  1. +24 -2    README_AWS.md
  2. +21 -4    README_GCE.md
  3. +12 -5    bin/cluster
  4. +0 -113   cluster.sh
  5. +2 -0     inventory/aws/group_vars/all
  6. +0 -5     inventory/gce/group_vars/all
  7. +0 -5     inventory/gce/group_vars/tag_host-type-master
  8. +0 -6     inventory/gce/group_vars/tag_host-type-node
  9. +0 -1     inventory/gce/group_vars/tag_host-type-openshift-master
  10. +0 -1    inventory/gce/group_vars/tag_host-type-openshift-node
  11. +1 -0    playbooks/aws/openshift-cluster/filter_plugins
  12. +62 -0   playbooks/aws/openshift-cluster/launch.yml
  13. +62 -0   playbooks/aws/openshift-cluster/launch_instances.yml
  14. +17 -0   playbooks/aws/openshift-cluster/list.yml
  15. +1 -0    playbooks/aws/openshift-cluster/roles
  16. +14 -0   playbooks/aws/openshift-cluster/terminate.yml
  17. +13 -0   playbooks/aws/openshift-cluster/update.yml
  18. +1 -0    playbooks/aws/openshift-cluster/vars.yml
  19. +10 -27  playbooks/aws/openshift-master/config.yml
  20. +6 -3    playbooks/aws/openshift-master/launch.yml
  21. +52 -0   playbooks/aws/openshift-master/terminate.yml
  22. +1 -0    playbooks/aws/openshift-master/vars.yml
  23. +91 -32  playbooks/aws/openshift-node/config.yml
  24. +9 -4    playbooks/aws/openshift-node/launch.yml
  25. +52 -0   playbooks/aws/openshift-node/terminate.yml
  26. +1 -0    playbooks/aws/openshift-node/vars.yml
  27. +4 -5    playbooks/gce/openshift-cluster/launch.yml
  28. +6 -1    playbooks/gce/openshift-cluster/launch_instances.yml
  29. +17 -0   playbooks/gce/openshift-cluster/list.yml
  30. +13 -0   playbooks/gce/openshift-cluster/update.yml
  31. +3 -3    playbooks/gce/openshift-master/config.yml
  32. +9 -3    playbooks/gce/openshift-master/launch.yml
  33. +5 -11   playbooks/gce/openshift-master/terminate.yml
  34. +1 -0    playbooks/gce/openshift-master/vars.yml
  35. +36 -58  playbooks/gce/openshift-node/config.yml
  36. +9 -13   playbooks/gce/openshift-node/launch.yml
  37. +5 -11   playbooks/gce/openshift-node/terminate.yml
  38. +1 -0    playbooks/gce/openshift-node/vars.yml
  39. +10 -7   roles/openshift_common/README.md
  40. +1 -0    roles/openshift_common/defaults/main.yml
  41. +1 -0    roles/openshift_common/meta/main.yml
  42. +13 -16  roles/openshift_common/tasks/main.yml
  43. +0 -9    roles/openshift_common/tasks/set_facts.yml
  44. +3 -2    roles/openshift_common/vars/main.yml
  45. +34 -0   roles/openshift_facts/README.md
  46. +482 -0  roles/openshift_facts/library/openshift_facts.py
  47. +15 -0   roles/openshift_facts/meta/main.yml
  48. +3 -0    roles/openshift_facts/tasks/main.yml
  49. +16 -12  roles/openshift_master/README.md
  50. +7 -6    roles/openshift_master/defaults/main.yml
  51. +0 -1    roles/openshift_master/handlers/main.yml
  52. +26 -24  roles/openshift_master/tasks/main.yml
  53. +0 -2    roles/openshift_master/vars/main.yml
  54. +0 -3    roles/openshift_node/README.md
  55. +0 -2    roles/openshift_node/defaults/main.yml
  56. +1 -1    roles/openshift_node/handlers/main.yml
  57. +11 -16  roles/openshift_node/tasks/main.yml
  58. +0 -2    roles/openshift_node/vars/main.yml
  59. +9 -13   roles/openshift_register_nodes/README.md
  60. +2 -1    roles/openshift_register_nodes/library/kubernetes_register_node.py
  61. +15 -126 roles/openshift_register_nodes/meta/main.yml
  62. +27 -31  roles/openshift_register_nodes/tasks/main.yml
  63. +2 -0    roles/openshift_repos/defaults/main.yaml
  64. +2 -1    roles/openshift_repos/meta/main.yml
  65. +6 -0    roles/openshift_repos/tasks/main.yaml
  66. +0 -2    roles/openshift_sdn_master/defaults/main.yml
  67. +2 -1    roles/openshift_sdn_master/meta/main.yml
  68. +10 -8   roles/openshift_sdn_master/tasks/main.yml
  69. +0 -6    roles/openshift_sdn_node/README.md
  70. +0 -2    roles/openshift_sdn_node/defaults/main.yml
  71. +2 -1    roles/openshift_sdn_node/meta/main.yml
  72. +10 -13  roles/openshift_sdn_node/tasks/main.yml
  73. +5 -0    roles/os_env_extras_node/tasks/main.yml
  74. +1 -0    roles/os_firewall/library/os_firewall_manage_iptables.py
  75. +1 -0    roles/os_firewall/meta/main.yml
  76. +5 -0    roles/os_firewall/tasks/firewall/firewalld.yml
  77. +7 -5    roles/os_firewall/tasks/firewall/iptables.yml

+ 24 - 2
README_AWS.md

@@ -51,7 +51,29 @@ OSX:
 Test The Setup
 --------------
 1. cd openshift-ansible
-1. Try to list all instances:
+1. Try to list all instances (Passing an empty string as the cluster_id
+argument will result in all ec2 instances being listed)
 ```
-  ./cloud.rb aws list
+  bin/cluster list aws ''
+```
+
+Creating a cluster
+------------------
+1. To create a cluster with one master and two nodes
+```
+  bin/cluster create aws <cluster-id>
+```
+
+Updating a cluster
+---------------------
+1. To update the cluster
+```
+  bin/cluster update aws <cluster-id>
+```
+
+Terminating a cluster
+---------------------
+1. To terminate the cluster
+```
+  bin/cluster terminate aws <cluster-id>
 ```

+ 21 - 4
README_GCE.md

@@ -65,12 +65,29 @@ Install Dependencies
 Test The Setup
 --------------
 1. cd openshift-ansible/
-2. Try to list all instances:
+1. Try to list all instances (Passing an empty string as the cluster_id
+argument will result in all gce instances being listed)
 ```
-  ./cloud.rb gce list
+  bin/cluster list gce ''
 ```
 
-3. Try to create an instance:
+Creating a cluster
+------------------
+1. To create a cluster with one master and two nodes
 ```
-  ./cloud.rb gce launch -e int --type openshift-node
+  bin/cluster create gce <cluster-id>
+```
+
+Updating a cluster
+---------------------
+1. To update the cluster
+```
+  bin/cluster update gce <cluster-id>
+```
+
+Terminating a cluster
+---------------------
+1. To terminate the cluster
+```
+  bin/cluster terminate gce <cluster-id>
 ```

+ 12 - 5
bin/cluster

@@ -32,8 +32,8 @@ class Cluster(object):
         playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        env['masters'] = args.masters
-        env['nodes'] = args.nodes
+        env['num_masters'] = args.masters
+        env['num_nodes'] = args.nodes
 
         return self.action(args, inventory, env, playbook)
 
@@ -55,16 +55,23 @@ class Cluster(object):
         :param args: command line arguments provided by user
         :return: exit status from run command
         """
-        raise NotImplementedError("ACTION [{}] not implemented".format(sys._getframe().f_code.co_name))
+        env = {'cluster_id': args.cluster_id}
+        playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
+        inventory = self.setup_provider(args.provider)
+
+        return self.action(args, inventory, env, playbook)
 
     def update(self, args):
         """
-        Update OpenShift across clustered VMs
+        Update to latest OpenShift across clustered VMs
         :param args: command line arguments provided by user
         :return: exit status from run command
         """
-        raise NotImplementedError("ACTION [{}] not implemented".format(sys._getframe().f_code.co_name))
+        env = {'cluster_id': args.cluster_id}
+        playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
+        inventory = self.setup_provider(args.provider)
 
+        return self.action(args, inventory, env, playbook)
 
     def setup_provider(self, provider):
         """

+ 0 - 113
cluster.sh

@@ -1,113 +0,0 @@
-#!/bin/bash -eu
-
-NODES=2
-MASTERS=1
-
-# If the environment variable OO_PROVDER is defined, it used for the provider
-PROVIDER=${OO_PROVIDER:-''}
-# Otherwise, default is gce (Google Compute Engine)
-if [ "x$PROVIDER" == "x" ];then
-   PROVIDER=gce
-fi
-
-UPPER_CASE_PROVIDER=$(echo $PROVIDER | tr '[:lower:]' '[:upper:]')
-
-
-# Use OO_MASTER_PLAYBOOK/OO_NODE_PLAYBOOK environment variables for playbooks if defined,
-# otherwise use openshift default values.
-MASTER_PLAYBOOK=${OO_MASTER_PLAYBOOK:-'openshift-master'}
-NODE_PLAYBOOK=${OO_NODE_PLAYBOOK:-'openshift-node'}
-
-
-# @formatter:off
-function usage {
-    cat 1>&2 <<-EOT
-        ${0} : [create|terminate|update|list] { ${UPPER_CASE_PROVIDER} environment tag}
-
-        Supported environment tags:
-        $(grep --no-messages 'SUPPORTED_ENVS.*=' ./lib/${PROVIDER}_command.rb)
-        $([ $? -ne 0 ] && echo "No supported environment tags found for ${PROVIDER}")
-
-        Optional arguments for create:
-        [-p|--provider, -m|--masters, -n|--nodes, --master-playbook, --node-playbook]
-
-        Optional arguments for terminate|update:
-        [-p|--provider, --master-playbook, --node-playbook]
-EOT
-}
-# @formatter:on
-
-function create_cluster {
-    ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$MASTER_PLAYBOOK -c $MASTERS
-
-    ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$NODE_PLAYBOOK -c $NODES
-
-    update_cluster
-
-    echo -e "\nCreated ${MASTERS}/${MASTER_PLAYBOOK} masters and ${NODES}/${NODE_PLAYBOOK} nodes using ${PROVIDER} provider\n"
-}
-
-function update_cluster {
-    ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$MASTER_PLAYBOOK
-    ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$NODE_PLAYBOOK
-}
-
-function terminate_cluster {
-    ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$MASTER_PLAYBOOK
-    ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$NODE_PLAYBOOK
-}
-
-[ -f ./cloud.rb ] || (echo 1>&2 'Cannot find ./cloud.rb' && exit 1)
-
-function check_argval {
-    if [[ $1 == -* ]]; then
-        echo "Invalid value: '$1'"
-        usage
-        exit 1
-    fi
-}
-
-# Using GNU getopt to support both small and long formats
-OPTIONS=`getopt -o p:m:n:h --long provider:,masters:,nodes:,master-playbook:,node-playbook:,help \
-	        -n "$0" -- "$@"`
-eval set -- "$OPTIONS"
-
-while true; do
-    case "$1" in
-        -h|--help) (usage; exit 1) ; shift ;;
-        -p|--provider) PROVIDER="$2" ; check_argval $2 ; shift 2 ;;
-        -m|--masters) MASTERS="$2" ; check_argval $2 ; shift 2 ;;
-        -n|--nodes) NODES="$2" ; check_argval $2 ; shift 2 ;;
-        --master-playbook) MASTER_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;;
-        --node-playbook) NODE_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;;
-        --) shift ; break ;;
-        *) break ;;
-    esac
-done
-
-shift $((OPTIND-1))
-
-[ -z "${1:-}" ] && (usage; exit 1)
-
-case "${1}" in
-    'create')
-        [ -z "${2:-}" ] && (usage; exit 1)
-        ENV="${2}"
-        create_cluster ;;
-    'update')
-        [ -z "${2:-}" ] && (usage; exit 1)
-        ENV="${2}"
-        update_cluster ;;
-    'terminate')
-        [ -z "${2:-}" ] && (usage; exit 1)
-        ENV="${2}"
-        terminate_cluster ;;
-    'list')   ./cloud.rb "${PROVIDER}" list ;;
-    'help')   usage; exit 0 ;;
-    *)
-        echo -n 1>&2 "${1} is not a supported operation";
-        usage;
-        exit 1 ;;
-esac
-
-exit 0

+ 2 - 0
inventory/aws/group_vars/all

@@ -0,0 +1,2 @@
+---
+ansible_ssh_user: root

+ 0 - 5
inventory/gce/group_vars/all

@@ -1,7 +1,2 @@
 ---
 ansible_ssh_user: root
-openshift_hostname: "{{ ansible_default_ipv4.address }}"
-openshift_public_hostname: "{{ ansible_default_ipv4.address }}"
-openshift_ip: "{{ ansible_default_ipv4.address }}"
-openshift_public_ip: "{{ gce_public_ip }}"
-openshift_env: "{{ oo_env }}"

+ 0 - 5
inventory/gce/group_vars/tag_host-type-master

@@ -1,5 +0,0 @@
----
-openshift_api_url: https://{{ openshift_hostname }}:8443
-openshift_api_public_url: https://{{ openshift_public_hostname }}:8443
-openshift_webui_url: https://{{ openshift_hostname }}:8444
-openshift_webui_public_url: https://{{ openshift_public_hostname }}:8444

+ 0 - 6
inventory/gce/group_vars/tag_host-type-node

@@ -1,6 +0,0 @@
----
-openshift_node_cpu:
-openshift_node_memory:
-openshift_node_pod_cidr:
-openshift_node_labels: {}
-openshift_node_annotations: {}

+ 0 - 1
inventory/gce/group_vars/tag_host-type-openshift-master

@@ -1 +0,0 @@
-tag_host-type-master

+ 0 - 1
inventory/gce/group_vars/tag_host-type-openshift-node

@@ -1 +0,0 @@
-tag_host-type-node

+ 1 - 0
playbooks/aws/openshift-cluster/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 62 - 0
playbooks/aws/openshift-cluster/launch.yml

@@ -0,0 +1,62 @@
+---
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars_files:
+      - vars.yml
+  tasks:
+    - set_fact: k8s_type="master"
+
+    - name: Generate master instance names(s)
+      set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
+      register: master_names_output
+      with_sequence: start=1 end={{ num_masters }}
+
+    # These set_fact's cannot be combined
+    - set_fact:
+        master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
+
+    - set_fact:
+        master_names: "{{ master_names_string.strip().split(' ') }}"
+
+    - include: launch_instances.yml
+      vars:
+        instances: "{{ master_names }}"
+        cluster: "{{ cluster_id }}"
+        type: "{{ k8s_type }}"
+
+    - set_fact: k8s_type="node"
+
+    - name: Generate node instance names(s)
+      set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
+      register: node_names_output
+      with_sequence: start=1 end={{ num_nodes }}
+
+    # These set_fact's cannot be combined
+    - set_fact:
+        node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
+
+    - set_fact:
+        node_names: "{{ node_names_string.strip().split(' ') }}"
+
+    - include: launch_instances.yml
+      vars:
+        instances: "{{ node_names }}"
+        cluster: "{{ cluster_id }}"
+        type: "{{ k8s_type }}"
+
+- hosts: "tag_env_{{ cluster_id }}"
+  roles:
+  - openshift_repos
+  - os_update_latest
+
+- include: ../openshift-master/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
+
+- include: ../openshift-node/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"
+
+- include: list.yml

+ 62 - 0
playbooks/aws/openshift-cluster/launch_instances.yml

@@ -0,0 +1,62 @@
+---
+- set_fact:
+    machine_type: "{{ lookup('env', 'ec2_instance_type')|default('m3.large', true) }}"
+    machine_image: "{{ lookup('env', 'ec2_ami')|default('ami-307b3658', true) }}"
+    machine_region: "{{ lookup('env', 'ec2_region')|default('us-east-1', true) }}"
+    machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}"
+    created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
+    env: "{{ cluster }}"
+    host_type: "{{ type }}"
+    env_host_type: "{{ cluster }}-openshift-{{ type }}"
+
+- name: Launch instance(s)
+  ec2:
+    state: present
+    region: "{{ machine_region }}"
+    keypair: "{{ machine_keypair }}"
+    group: ['public']
+    instance_type: "{{ machine_type }}"
+    image: "{{ machine_image }}"
+    count: "{{ instances | oo_len }}"
+    wait: yes
+    instance_tags:
+      created-by: "{{ created_by }}"
+      env: "{{ env }}"
+      host-type: "{{ host_type }}"
+      env-host-type: "{{ env_host_type }}"
+  register: ec2
+
+- name: Add Name tag to instances
+  ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present
+  with_together:
+  - instances
+  - ec2.instances
+  args:
+    tags:
+      Name: "{{ item.0 }}"
+
+- set_fact:
+    instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
+
+- name: Add new instances groups and variables
+  add_host:
+    hostname: "{{ item.0 }}"
+    ansible_ssh_host: "{{ item.1.dns_name }}"
+    groups: "{{ instance_groups }}"
+    ec2_private_ip_address: "{{ item.1.private_ip }}"
+    ec2_ip_address: "{{ item.1.public_ip }}"
+  with_together:
+  - instances
+  - ec2.instances
+
+- name: Wait for ssh
+  wait_for: "port=22 host={{ item.dns_name }}"
+  with_items: ec2.instances
+
+- name: Wait for root user setup
+  command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
+  register: result
+  until: result.rc == 0
+  retries: 20
+  delay: 10
+  with_items: ec2.instances

+ 17 - 0
playbooks/aws/openshift-cluster/list.yml

@@ -0,0 +1,17 @@
+---
+- name: Generate oo_list_hosts group
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - set_fact: scratch_group=tag_env_{{ cluster_id }}
+    when: cluster_id != ''
+  - set_fact: scratch_group=all
+    when: scratch_group is not defined
+  - add_host: name={{ item }} groups=oo_list_hosts
+    with_items: groups[scratch_group] | difference(['localhost'])
+
+- name: List Hosts
+  hosts: oo_list_hosts
+  gather_facts: no
+  tasks:
+  - debug: msg="public:{{hostvars[inventory_hostname].ec2_ip_address}} private:{{hostvars[inventory_hostname].ec2_private_ip_address}}"

+ 1 - 0
playbooks/aws/openshift-cluster/roles

@@ -0,0 +1 @@
+../../../roles

+ 14 - 0
playbooks/aws/openshift-cluster/terminate.yml

@@ -0,0 +1,14 @@
+---
+- name: Terminate instance(s)
+  hosts: localhost
+
+  vars_files:
+    - vars.yml
+
+- include: ../openshift-node/terminate.yml
+  vars:
+    oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]'
+
+- include: ../openshift-master/terminate.yml
+  vars:
+    oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-master"]'

+ 13 - 0
playbooks/aws/openshift-cluster/update.yml

@@ -0,0 +1,13 @@
+---
+- hosts: "tag_env_{{ cluster_id }}"
+  roles:
+  - openshift_repos
+  - os_update_latest
+
+- include: ../openshift-master/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
+
+- include: ../openshift-node/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"

+ 1 - 0
playbooks/aws/openshift-cluster/vars.yml

@@ -0,0 +1 @@
+---

+ 10 - 27
playbooks/aws/openshift-master/config.yml

@@ -1,5 +1,5 @@
 ---
-- name: "populate oo_masters_to_config host group if needed"
+- name: Populate oo_masters_to_config host group if needed
   hosts: localhost
   gather_facts: no
   tasks:
@@ -8,34 +8,17 @@
     with_items: "{{ oo_host_group_exp | default('') }}"
     when: oo_host_group_exp is defined
 
-- name: "Gather facts for nodes in {{ oo_env }}"
-  hosts: "tag_env-host-type_{{ oo_env }}-openshift-node"
-  connection: ssh
-  user: root
-
-- name: "Set Origin specific facts on localhost (for later use)"
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: Setting openshift_node_ips fact on localhost
-      set_fact:
-        openshift_node_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-node'])
-            | oo_collect(attribute='ansible_default_ipv4.address') }}"
-      when: groups['tag_env-host-type_' + oo_env + '-openshift-node'] is defined
-
-- name: "Configure instances"
+- name: Configure instances
   hosts: oo_masters_to_config
-  connection: ssh
-  user: root
+  vars:
+    openshift_hostname: "{{ ec2_private_ip_address }}"
+    openshift_public_hostname: "{{ ec2_ip_address }}"
+    # TODO: this should be removed once openshift-sdn packages are available
+    openshift_use_openshift_sdn: False
   vars_files:
-    - vars.yml
+  - vars.yml
   roles:
-    - {
-        role: openshift_master,
-        openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}",
-        openshift_env: "{{ oo_env }}",
-        openshift_public_ip: "{{ ec2_ip_address }}"
-      }
+    - openshift_master
+    #- openshift_sdn_master
     - pods
     - os_env_extras

+ 6 - 3
playbooks/aws/openshift-master/launch.yml

@@ -46,13 +46,16 @@
         tags: "{{ oo_new_inst_tags }}"
 
     - name: Add new instances public IPs to oo_masters_to_config
-      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_masters_to_config"
+      add_host:
+        hostname: "{{ item.0 }}"
+        ansible_ssh_host: "{{ item.1.dns_name }}"
+        groupname: oo_masters_to_config
+        ec2_private_ip_address: "{{ item.1.private_ip }}"
+        ec2_ip_address: "{{ item.1.public_ip }}"
       with_together:
         - oo_new_inst_names
         - ec2.instances
 
-    - debug: var=ec2
-
     - name: Wait for ssh
       wait_for: "port=22 host={{ item.dns_name }}"
       with_items: ec2.instances

+ 52 - 0
playbooks/aws/openshift-master/terminate.yml

@@ -0,0 +1,52 @@
+---
+- name: Populate oo_masters_to_terminate host group if needed
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - name: Evaluate oo_host_group_exp if it's set
+      add_host: "name={{ item }} groups=oo_masters_to_terminate"
+      with_items: "{{ oo_host_group_exp | default('') }}"
+      when: oo_host_group_exp is defined
+
+- name: Gather facts for instances to terminate
+  hosts: oo_masters_to_terminate
+
+- name: Terminate instances
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars:
+    host_vars: "{{ hostvars
+        | oo_select_keys(groups['oo_masters_to_terminate']) }}"
+  tasks:
+    - name: Terminate instances
+      ec2:
+        state: absent
+        instance_ids: ["{{ item.ec2_id }}"]
+        region: "{{ item.ec2_region }}"
+      ignore_errors: yes
+      register: ec2_term
+      with_items: host_vars
+
+    # Fail if any of the instances failed to terminate with an error other
+    # than 403 Forbidden
+    - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
+      when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+      with_items: ec2_term.results
+
+    - name: Stop instance if termination failed
+      ec2:
+        state: stopped
+        instance_ids: ["{{ item.item.ec2_id }}"]
+        region: "{{ item.item.ec2_region }}"
+      register: ec2_stop
+      when: item.failed
+      with_items: ec2_term.results
+
+    - name: Rename stopped instances
+      ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+      args:
+        tags:
+          Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+      with_items: ec2_stop.results
+

+ 1 - 0
playbooks/aws/openshift-master/vars.yml

@@ -1,2 +1,3 @@
 ---
 openshift_debug_level: 4
+openshift_cluster_id: "{{ cluster_id }}"

+ 91 - 32
playbooks/aws/openshift-node/config.yml

@@ -1,5 +1,5 @@
 ---
-- name: "populate oo_nodes_to_config host group if needed"
+- name: Populate oo_nodes_to_config host group if needed
   hosts: localhost
   gather_facts: no
   tasks:
@@ -7,42 +7,101 @@
     add_host: "name={{ item }} groups=oo_nodes_to_config"
     with_items: "{{ oo_host_group_exp | default('') }}"
     when: oo_host_group_exp is defined
+  - add_host:
+      name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
+      groups: oo_first_master
+    when: oo_host_group_exp is defined
 
-- name: "Gather facts for masters in {{ oo_env }}"
-  hosts: "tag_env-host-type_{{ oo_env }}-openshift-master"
-  connection: ssh
-  user: root
 
-- name: "Set OO sepcific facts on localhost (for later use)"
-  hosts: localhost
-  gather_facts: no
+- name: Gather and set facts for hosts to configure
+  hosts: oo_nodes_to_config
+  roles:
+  - openshift_facts
   tasks:
-    - name: Setting openshift_master_ips fact on localhost
-      set_fact:
-        openshift_master_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-master'])
-            | oo_collect(attribute='ansible_default_ipv4.address') }}"
-      when: groups['tag_env-host-type_' + oo_env + '-openshift-master'] is defined
-    - name: Setting openshift_master_public_ips fact on localhost
-      set_fact:
-        openshift_master_public_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
-            | oo_collect(attribute='ec2_ip_address') }}"
-      when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
-
-- name: "Configure instances"
+  # Since the master is registering the nodes before they are configured, we
+  # need to make sure to set the node properties beforehand if we do not want
+  # the defaults
+  - openshift_facts:
+      role: "{{ item.role }}"
+      local_facts: "{{ item.local_facts }}"
+    with_items:
+    - role: common
+      local_facts:
+        hostname: "{{ ec2_private_ip_address }}"
+        public_hostname: "{{ ec2_ip_address }}"
+        # TODO: this should be removed once openshift-sdn packages are available
+        use_openshift_sdn: False
+    - role: node
+      local_facts:
+        external_id: "{{ openshift_node_external_id | default(None) }}"
+        resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}"
+        resources_memory: "{{ openshift_node_resources_memory | default(None) }}"
+        pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
+        labels: "{{ openshift_node_labels | default(None) }}"
+        annotations: "{{ openshift_node_annotations | default(None) }}"
+
+
+- name: Register nodes
+  hosts: oo_first_master
+  vars:
+    openshift_nodes: "{{ hostvars
+          | oo_select_keys(groups['oo_nodes_to_config']) }}"
+  roles:
+  - openshift_register_nodes
+  tasks:
+  - name: Create local temp directory for syncing certs
+    local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
+    register: mktemp
+
+  - name: Sync master certs to localhost
+    synchronize:
+      mode: pull
+      checksum: yes
+      src: /var/lib/openshift/openshift.local.certificates
+      dest: "{{ mktemp.stdout }}"
+
+
+- name: Configure instances
   hosts: oo_nodes_to_config
-  connection: ssh
-  user: root
   vars_files:
-    - vars.yml
+  - vars.yml
+  vars:
+    openshift_hostname: "{{ ec2_private_ip_address }}"
+    openshift_public_hostname: "{{ ec2_ip_address }}"
+    sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
+    cert_parent_rel_path: openshift.local.certificates
+    cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
+    cert_base_path: /var/lib/openshift
+    cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
+    cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
+  pre_tasks:
+  - name: Ensure certificate directories exists
+    file:
+      path: "{{ item }}"
+      state: directory
+    with_items:
+    - "{{ cert_path }}"
+    - "{{ cert_parent_path }}/ca"
+
+  # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+  # possibly test service started time against certificate/config file
+  # timestamps in openshift-node or openshift-sdn-node to trigger notify
+  - name: Sync certs to nodes
+    synchronize:
+      checksum: yes
+      src: "{{ item.src }}"
+      dest: "{{ item.dest }}"
+      owner: no
+      group: no
+    with_items:
+    - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
+      dest: "{{ cert_parent_path }}"
+    - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
+      dest: "{{ cert_parent_path }}/ca/cert.crt"
+  - local_action: file name={{ sync_tmpdir }} state=absent
+    run_once: true
   roles:
-    - {
-        role: openshift_node,
-        openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}",
-        openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}",
-        openshift_env: "{{ oo_env }}",
-        openshift_public_ip: "{{ ec2_ip_address }}"
-      }
+    - openshift_node
+    #- openshift_sdn_node
     - os_env_extras
     - os_env_extras_node

+ 9 - 4
playbooks/aws/openshift-node/launch.yml

@@ -27,7 +27,9 @@
       register: ec2
 
     - name: Add new instances public IPs to the atomic proxy host group
-      add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
+      add_host:
+        hostname: "{{ item.public_ip }}"
+        groupname: new_ec2_instances
       with_items: ec2.instances
 
     - name: Add Name and environment tags to instances
@@ -46,13 +48,16 @@
         tags: "{{ oo_new_inst_tags }}"
 
     - name: Add new instances public IPs to oo_nodes_to_config
-      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_nodes_to_config"
+      add_host:
+        hostname: "{{ item.0 }}"
+        ansible_ssh_host: "{{ item.1.dns_name }}"
+        groupname: oo_nodes_to_config
+        ec2_private_ip_address: "{{ item.1.private_ip }}"
+        ec2_ip_address: "{{ item.1.public_ip }}"
       with_together:
         - oo_new_inst_names
         - ec2.instances
 
-    - debug: var=ec2
-
     - name: Wait for ssh
       wait_for: "port=22 host={{ item.dns_name }}"
       with_items: ec2.instances

+ 52 - 0
playbooks/aws/openshift-node/terminate.yml

@@ -0,0 +1,52 @@
+---
+- name: Populate oo_nodes_to_terminate host group if needed
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - name: Evaluate oo_host_group_exp if it's set
+      add_host: "name={{ item }} groups=oo_nodes_to_terminate"
+      with_items: "{{ oo_host_group_exp | default('') }}"
+      when: oo_host_group_exp is defined
+
+- name: Gather facts for instances to terminate
+  hosts: oo_nodes_to_terminate
+
+- name: Terminate instances
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars:
+    host_vars: "{{ hostvars
+        | oo_select_keys(groups['oo_nodes_to_terminate']) }}"
+  tasks:
+    - name: Terminate instances
+      ec2:
+        state: absent
+        instance_ids: ["{{ item.ec2_id }}"]
+        region: "{{ item.ec2_region }}"
+      ignore_errors: yes
+      register: ec2_term
+      with_items: host_vars
+
+    # Fail if any of the instances failed to terminate with an error other
+    # than 403 Forbidden
+    - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
+      when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+      with_items: ec2_term.results
+
+    - name: Stop instance if termination failed
+      ec2:
+        state: stopped
+        instance_ids: ["{{ item.item.ec2_id }}"]
+        region: "{{ item.item.ec2_region }}"
+      register: ec2_stop
+      when: item.failed
+      with_items: ec2_term.results
+
+    - name: Rename stopped instances
+      ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+      args:
+        tags:
+          Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+      with_items: ec2_stop.results
+

+ 1 - 0
playbooks/aws/openshift-node/vars.yml

@@ -1,2 +1,3 @@
 ---
 openshift_debug_level: 4
+openshift_cluster_id: "{{ cluster_id }}"

+ 4 - 5
playbooks/gce/openshift-cluster/launch.yml

@@ -11,7 +11,7 @@
     - name: Generate master instance names(s)
       set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
       register: master_names_output
-      with_sequence: start=1 end={{ masters }}
+      with_sequence: start=1 end={{ num_masters }}
 
     # These set_fact's cannot be combined
     - set_fact:
@@ -25,14 +25,13 @@
         instances: "{{ master_names }}"
         cluster: "{{ cluster_id }}"
         type: "{{ k8s_type }}"
-        group_name: "tag_env-host-type-{{ cluster_id }}-openshift-master"
 
     - set_fact: k8s_type="node"
 
     - name: Generate node instance names(s)
       set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
       register: node_names_output
-      with_sequence: start=1 end={{ nodes }}
+      with_sequence: start=1 end={{ num_nodes }}
 
     # These set_fact's cannot be combined
     - set_fact:
@@ -55,9 +54,9 @@
 - include: ../openshift-master/config.yml
   vars:
     oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
-    oo_env: "{{ cluster_id }}"
 
 - include: ../openshift-node/config.yml
   vars:
     oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"
-    oo_env: "{{ cluster_id }}"
+
+- include: list.yml

+ 6 - 1
playbooks/gce/openshift-cluster/launch_instances.yml

@@ -1,3 +1,7 @@
+---
+# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
+# the gce task to use the disk_auto_delete parameter to avoid having to delete
+# the disk as a separate step on termination
 
 - set_fact:
     machine_type: "{{ lookup('env', 'gce_machine_type') |default('n1-standard-1', true) }}"
@@ -18,12 +22,13 @@
       - "env-host-type-{{ cluster }}-openshift-{{ type }}"
   register: gce
 
-- name: Add new instances public IPs
+- name: Add new instances to groups and set variables needed
   add_host:
     hostname: "{{ item.name }}"
     ansible_ssh_host: "{{ item.public_ip }}"
     groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
     gce_public_ip: "{{ item.public_ip }}"
+    gce_private_ip: "{{ item.private_ip }}"
   with_items: gce.instance_data
 
 - name: Wait for ssh

+ 17 - 0
playbooks/gce/openshift-cluster/list.yml

@@ -0,0 +1,17 @@
+---
+- name: Generate oo_list_hosts group
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - set_fact: scratch_group=tag_env-{{ cluster_id }}
+    when: cluster_id != ''
+  - set_fact: scratch_group=all
+    when: scratch_group is not defined
+  - add_host: name={{ item }} groups=oo_list_hosts
+    with_items: groups[scratch_group] | difference(['localhost']) | difference(groups.status_terminated)
+
+- name: List Hosts
+  hosts: oo_list_hosts
+  gather_facts: no
+  tasks:
+  - debug: msg="public:{{hostvars[inventory_hostname].gce_public_ip}} private:{{hostvars[inventory_hostname].gce_private_ip}}"

+ 13 - 0
playbooks/gce/openshift-cluster/update.yml

@@ -0,0 +1,13 @@
+---
+- hosts: "tag_env-{{ cluster_id }}"
+  roles:
+  - openshift_repos
+  - os_update_latest
+
+- include: ../openshift-master/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
+
+- include: ../openshift-node/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"

+ 3 - 3
playbooks/gce/openshift-master/config.yml

@@ -1,3 +1,4 @@
+---
 - name: master/config.yml, populate oo_masters_to_config host group if needed
   hosts: localhost
   gather_facts: no
@@ -7,11 +8,10 @@
     with_items: "{{ oo_host_group_exp | default('') }}"
     when: oo_host_group_exp is defined
 
-- name: Gather facts for nodes in {{ oo_env }}
-  hosts: "tag_env-host-type-{{ oo_env }}-openshift-node"
-
 - name: "Configure instances"
   hosts: oo_masters_to_config
+  vars:
+    openshift_hostname: "{{ gce_private_ip }}"
   vars_files:
   - vars.yml
   roles:

+ 9 - 3
playbooks/gce/openshift-master/launch.yml

@@ -1,4 +1,8 @@
 ---
+# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
+# the gce task to use the disk_auto_delete parameter to avoid having to delete
+# the disk as a separate step on termination
+
 - name: Launch instance(s)
   hosts: localhost
   connection: local
@@ -25,15 +29,17 @@
       register: gce
 
     - name: Add new instances public IPs to oo_masters_to_config
-      add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_masters_to_config"
+      add_host:
+        hostname: "{{ item.name }}"
+        ansible_ssh_host: "{{ item.public_ip }}"
+        groupname: oo_masters_to_config
+        gce_private_ip: "{{ item.private_ip }}"
       with_items: gce.instance_data
 
     - name: Wait for ssh
       wait_for: "port=22 host={{ item.public_ip }}"
       with_items: gce.instance_data
 
-    - debug: var=gce
-
     - name: Wait for root user setup
       command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
       register: result

+ 5 - 11
playbooks/gce/openshift-master/terminate.yml

@@ -1,17 +1,13 @@
-- name: "populate oo_hosts_to_terminate host group if needed"
+---
+- name: Populate oo_masters_to_terminate host group if needed
   hosts: localhost
   gather_facts: no
   tasks:
-    - debug: var=oo_host_group_exp
-
     - name: Evaluate oo_host_group_exp if it's set
-      add_host: "name={{ item }} groups=oo_hosts_to_terminate"
+      add_host: "name={{ item }} groups=oo_masters_to_terminate"
       with_items: "{{ oo_host_group_exp | default('') }}"
       when: oo_host_group_exp is defined
 
-    - debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
-
-
 - name: Terminate master instances
   hosts: localhost
   connection: local
@@ -23,12 +19,10 @@
         pem_file: "{{ gce_pem_file }}"
         project_id: "{{ gce_project_id }}"
         state: 'absent'
-        instance_names: "{{ groups['oo_hosts_to_terminate'] }}"
-        disks: "{{ groups['oo_hosts_to_terminate'] }}"
+        instance_names: "{{ groups['oo_masters_to_terminate'] }}"
+        disks: "{{ groups['oo_masters_to_terminate'] }}"
       register: gce
 
-    - debug: var=gce
-
     - name: Remove disks of instances
       gce_pd:
         service_account_email: "{{ gce_service_account_email }}"

+ 1 - 0
playbooks/gce/openshift-master/vars.yml

@@ -1,2 +1,3 @@
 ---
 openshift_debug_level: 4
+openshift_cluster_id: "{{ cluster_id }}"

+ 36 - 58
playbooks/gce/openshift-node/config.yml

@@ -1,3 +1,4 @@
+---
 - name: node/config.yml, populate oo_nodes_to_config host group if needed
   hosts: localhost
   gather_facts: no
@@ -6,50 +7,42 @@
     add_host: "name={{ item }} groups=oo_nodes_to_config"
     with_items: "{{ oo_host_group_exp | default('') }}"
     when: oo_host_group_exp is defined
-  - name: Find masters for env
-    add_host: "name={{ item }} groups=oo_masters_for_node_config"
-    with_items: groups['tag_env-host-type-' + oo_env + '-openshift-master']
+  - add_host:
+      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+      groups: oo_first_master
+    when: oo_host_group_exp is defined
 
-- name: Gather facts for masters in {{ oo_env }}
-  hosts: tag_env-host-type-{{ oo_env }}-openshift-master
-  tasks:
-  - set_fact:
-      openshift_master_ip: "{{ openshift_ip }}"
-      openshift_master_api_url: "{{ openshift_api_url }}"
-      openshift_master_webui_url: "{{ openshift_webui_url }}"
-      openshift_master_hostname: "{{ openshift_hostname }}"
-      openshift_master_public_ip: "{{ openshift_public_ip }}"
-      openshift_master_api_public_url: "{{ openshift_api_public_url }}"
-      openshift_master_webui_public_url: "{{ openshift_webui_public_url }}"
-      openshift_master_public_hostnames: "{{ openshift_public_hostname }}"
 
-- name: Gather facts for hosts to configure
-  hosts: tag_env-host-type-{{ oo_env }}-openshift-node
+- name: Gather and set facts for hosts to configure
+  hosts: oo_nodes_to_config
+  roles:
+  - openshift_facts
   tasks:
-  - set_fact:
-      openshift_node_hostname: "{{ openshift_hostname }}"
-      openshift_node_name: "{{ openshift_hostname }}"
-      openshift_node_cpu: "{{ openshift_node_cpu if openshift_node_cpu else ansible_processor_cores }}"
-      openshift_node_memory: "{{ openshift_node_memory if openshift_node_memory else (ansible_memtotal_mb|int * 1024 * 1024 * 0.75)|int }}"
-      openshift_node_pod_cidr: "{{ openshift_node_pod_cidr if openshift_node_pod_cidr else None }}"
-      openshift_node_host_ip: "{{ openshift_ip }}"
-      openshift_node_labels: "{{ openshift_node_labels if openshift_node_labels else {} }}"
-      openshift_node_annotations: "{{ openshift_node_annotations if openshift_node_annotations else {} }}"
+  # Since the master is registering the nodes before they are configured, we
+  # need to make sure to set the node properties beforehand if we do not want
+  # the defaults
+  - openshift_facts:
+      role: "{{ item.role }}"
+      local_facts: "{{ item.local_facts }}"
+    with_items:
+    - role: common
+      local_facts:
+        hostname: "{{ gce_private_ip }}"
+    - role: node
+      local_facts:
+        external_id: "{{ openshift_node_external_id | default(None) }}"
+        resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}"
+        resources_memory: "{{ openshift_node_resources_memory | default(None) }}"
+        pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
+        labels: "{{ openshift_node_labels | default(None) }}"
+        annotations: "{{ openshift_node_annotations | default(None) }}"
+
 
 - name: Register nodes
-  hosts: tag_env-host-type-{{ oo_env }}-openshift-master[0]
+  hosts: oo_first_master
   vars:
-    openshift_node_group: tag_env-host-type-{{ oo_env }}-openshift-node
     openshift_nodes: "{{ hostvars
-          | oo_select_keys(groups[openshift_node_group]) }}"
-    openshift_master_group: tag_env-host-type-{{ oo_env }}-openshift-master
-    openshift_master_urls: "{{ hostvars
-          | oo_select_keys(groups[openshift_master_group])
-          | oo_collect(attribute='openshift_master_api_url') }}"
-    openshift_master_public_urls: "{{ hostvars
-          | oo_select_keys(groups[openshift_master_group])
-          | oo_collect(attribute='openshift_master_api_public_url') }}"
-  pre_tasks:
+          | oo_select_keys(groups['oo_nodes_to_config']) }}"
   roles:
   - openshift_register_nodes
   tasks:
@@ -64,28 +57,14 @@
       src: /var/lib/openshift/openshift.local.certificates
       dest: "{{ mktemp.stdout }}"
 
-# TODO: sync generated certs between masters
-#
 - name: Configure instances
   hosts: oo_nodes_to_config
   vars_files:
   - vars.yml
   vars:
-    openshift_master_group: tag_env-host-type-{{ oo_env }}-openshift-master
-    openshift_master_ips: "{{ hostvars
-          | oo_select_keys(groups[openshift_master_group])
-          | oo_collect(attribute='openshift_master_ip') }}"
-    openshift_master_hostnames: "{{ hostvars
-          | oo_select_keys(groups[openshift_master_group])
-          | oo_collect(attribute='openshift_master_hostname') }}"
-    openshift_master_public_ips: "{{ hostvars
-          | oo_select_keys(groups[openshift_master_group])
-          | oo_collect(attribute='openshift_master_public_ip') }}"
-    openshift_master_public_hostnames: "{{ hostvars
-          | oo_select_keys(groups[openshift_master_group])
-          | oo_collect(attribute='openshift_master_public_hostname') }}"
+    sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
     cert_parent_rel_path: openshift.local.certificates
-    cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift_node_name }}"
+    cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
     cert_base_path: /var/lib/openshift
     cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
     cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
@@ -98,11 +77,9 @@
     - "{{ cert_path }}"
     - "{{ cert_parent_path }}/ca"
 
-  # TODO: only sync to a node if it's certs have been updated
   # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
   # possibly test service started time against certificate/config file
   # timestamps in openshift-node or openshift-sdn-node to trigger notify
-  # TODO: also copy ca cert: /var/lib/openshift/openshift.local.certificates/ca/cert.crt
   - name: Sync certs to nodes
     synchronize:
       checksum: yes
@@ -111,12 +88,13 @@
       owner: no
       group: no
     with_items:
-    - src: "{{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }}/{{ cert_rel_path }}"
+    - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
       dest: "{{ cert_parent_path }}"
-    - src: "{{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }}/{{ cert_parent_rel_path }}/ca/cert.crt"
+    - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
       dest: "{{ cert_parent_path }}/ca/cert.crt"
-  - local_action: file name={{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }} state=absent
+  - local_action: file name={{ sync_tmpdir }} state=absent
     run_once: true
   roles:
     - openshift_node
     - os_env_extras
+    - os_env_extras_node

+ 9 - 13
playbooks/gce/openshift-node/launch.yml

@@ -1,4 +1,8 @@
 ---
+# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
+# the gce task to use the disk_auto_delete parameter to avoid having to delete
+# the disk as a separate step on termination
+
 - name: Launch instance(s)
   hosts: localhost
   connection: local
@@ -25,15 +29,17 @@
       register: gce
 
     - name: Add new instances public IPs to oo_nodes_to_config
-      add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_nodes_to_config"
+      add_host:
+        hostname: "{{ item.name }}"
+        ansible_ssh_host: "{{ item.public_ip }}"
+        groupname: oo_nodes_to_config
+        gce_private_ip: "{{ item.private_ip }}"
       with_items: gce.instance_data
 
     - name: Wait for ssh
       wait_for: "port=22 host={{ item.public_ip }}"
       with_items: gce.instance_data
 
-    - debug: var=gce
-
     - name: Wait for root user setup
       command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
       register: result
@@ -45,13 +51,3 @@
 
 # Apply the configs, separate so that just the configs can be run by themselves
 - include: config.yml
-
-# Always bounce service to pick up new credentials
-#- name: "Restart instances"
-#  hosts: oo_nodes_to_config
-#  connection: ssh
-#  user: root
-#  tasks:
-#    - debug: var=groups.oo_nodes_to_config
-#    - name: Restart OpenShift
-#      service: name=openshift-node enabled=yes state=restarted

+ 5 - 11
playbooks/gce/openshift-node/terminate.yml

@@ -1,17 +1,13 @@
-- name: "populate oo_hosts_to_terminate host group if needed"
+---
+- name: Populate oo_nodes_to_terminate host group if needed
   hosts: localhost
   gather_facts: no
   tasks:
-    - debug: var=oo_host_group_exp
-
     - name: Evaluate oo_host_group_exp if it's set
-      add_host: "name={{ item }} groups=oo_hosts_to_terminate"
+      add_host: "name={{ item }} groups=oo_nodes_to_terminate"
       with_items: "{{ oo_host_group_exp | default('') }}"
       when: oo_host_group_exp is defined
 
-    - debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
-
-
 - name: Terminate node instances
   hosts: localhost
   connection: local
@@ -23,12 +19,10 @@
         pem_file: "{{ gce_pem_file }}"
         project_id: "{{ gce_project_id }}"
         state: 'absent'
-        instance_names: "{{ groups['oo_hosts_to_terminate'] }}"
-        disks: "{{ groups['oo_hosts_to_terminate'] }}"
+        instance_names: "{{ groups['oo_nodes_to_terminate'] }}"
+        disks: "{{ groups['oo_nodes_to_terminate'] }}"
       register: gce
 
-    - debug: var=gce
-
     - name: Remove disks of instances
       gce_pd:
         service_account_email: "{{ gce_service_account_email }}"

+ 1 - 0
playbooks/gce/openshift-node/vars.yml

@@ -1,2 +1,3 @@
 ---
 openshift_debug_level: 4
+openshift_cluster_id: "{{ cluster_id }}"

+ 10 - 7
roles/openshift_common/README.md

@@ -12,17 +12,20 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-beta-rpms repos.
 Role Variables
 --------------
 
-| Name                          | Default value                |                                        |
-|-------------------------------|------------------------------|----------------------------------------|
-| openshift_debug_level         | 0                            | Global openshift debug log verbosity   |
-| openshift_hostname            | UNDEF (Required)             | hostname to use for this instance |
-| openshift_public_ip           | UNDEF (Required)             | Public IP address to use for this host |
-| openshift_env                 | default                      | Envrionment name if multiple OpenShift instances |
+| Name                      | Default value     |                                             |
+|---------------------------|-------------------|---------------------------------------------|
+| openshift_cluster_id      | default           | Cluster name if multiple OpenShift clusters |
+| openshift_debug_level     | 0                 | Global openshift debug log verbosity        |
+| openshift_hostname        | UNDEF             | Internal hostname to use for this host (this value will set the hostname on the system) |
+| openshift_ip              | UNDEF             | Internal IP address to use for this host    |
+| openshift_public_hostname | UNDEF             | Public hostname to use for this host        |
+| openshift_public_ip       | UNDEF             | Public IP address to use for this host      |
 
 Dependencies
 ------------
 
 os_firewall
+openshift_facts
 openshift_repos
 
 Example Playbook
@@ -38,4 +41,4 @@ Apache License, Version 2.0
 Author Information
 ------------------
 
-TODO
+Jason DeTiberus (jdetiber@redhat.com)

+ 1 - 0
roles/openshift_common/defaults/main.yml

@@ -1,2 +1,3 @@
 ---
+openshift_cluster_id: 'default'
 openshift_debug_level: 0

+ 1 - 0
roles/openshift_common/meta/main.yml

@@ -13,4 +13,5 @@ galaxy_info:
   - cloud
 dependencies:
 - { role: os_firewall }
+- { role: openshift_facts }
 - { role: openshift_repos }

+ 13 - 16
roles/openshift_common/tasks/main.yml

@@ -1,19 +1,16 @@
 ---
-- name: Set hostname
-  hostname: name={{ openshift_hostname }}
+- name: Set common OpenShift facts
+  openshift_facts:
+    role: 'common'
+    local_facts:
+      cluster_id: "{{ openshift_cluster_id | default('default') }}"
+      debug_level: "{{ openshift_debug_level | default(0) }}"
+      hostname: "{{ openshift_hostname | default(None) }}"
+      ip: "{{ openshift_ip | default(None) }}"
+      public_hostname: "{{ openshift_public_hostname | default(None) }}"
+      public_ip: "{{ openshift_public_ip | default(None) }}"
+      use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
 
-- name: Configure local facts file
-  file: path=/etc/ansible/facts.d/ state=directory mode=0750
+- name: Set hostname
+  hostname: name={{ openshift.common.hostname }}
 
-- name: Set common OpenShift facts
-  include: set_facts.yml
-  facts:
-  - section: common
-    option: env
-    value: "{{ openshift_env | default('default') }}"
-  - section: common
-    option: host_type
-    value: "{{ openshift_host_type }}"
-  - section: common
-    option: debug_level
-    value: "{{ openshift_debug_level }}"

+ 0 - 9
roles/openshift_common/tasks/set_facts.yml

@@ -1,9 +0,0 @@
----
-- name: "Setting local_facts"
-  ini_file:
-    dest: /etc/ansible/facts.d/openshift.fact
-    mode: 0640
-    section: "{{ item.section }}"
-    option: "{{ item.option }}"
-    value: "{{ item.value }}"
-  with_items: facts

+ 3 - 2
roles/openshift_common/vars/main.yml

@@ -1,6 +1,7 @@
 ---
-openshift_master_credentials_dir: /var/lib/openshift/openshift.local.certificates/admin/
-
 # TODO: Upstream kubernetes only supports iptables currently, if this changes,
 # then these variable should be moved to defaults
+# TODO: it might be possible to still use firewalld if we wire up the created
+# chains with the public zone (or the zone associated with the correct
+# interfaces)
 os_firewall_use_firewalld: False

+ 34 - 0
roles/openshift_facts/README.md

@@ -0,0 +1,34 @@
+OpenShift Facts
+===============
+
+Provides the openshift_facts module
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)

+ 482 - 0
roles/openshift_facts/library/openshift_facts.py

@@ -0,0 +1,482 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+DOCUMENTATION = '''
+---
+module: openshift_facts
+short_description: OpenShift Facts
+author: Jason DeTiberus
+requirements: [ ]
+'''
+EXAMPLES = '''
+'''
+
+import ConfigParser
+import copy
+
+class OpenShiftFactsUnsupportedRoleError(Exception):
+    pass
+
+class OpenShiftFactsFileWriteError(Exception):
+    pass
+
+class OpenShiftFacts():
+    known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn']
+
+    def __init__(self, role, filename, local_facts):
+        self.changed = False
+        self.filename = filename
+        if role not in self.known_roles:
+            raise OpenShiftFactsUnsupportedRoleError("Role %s is not supported by this module" % role)
+        self.role = role
+        self.facts = self.generate_facts(local_facts)
+
+    def generate_facts(self, local_facts):
+        local_facts = self.init_local_facts(local_facts)
+        roles = local_facts.keys()
+
+        defaults = self.get_defaults(roles)
+        provider_facts = self.init_provider_facts()
+        facts = self.apply_provider_facts(defaults, provider_facts, roles)
+
+        facts = self.merge_facts(facts, local_facts)
+        facts['current_config'] = self.current_config(facts)
+        self.set_url_facts_if_unset(facts)
+        return dict(openshift=facts)
+
+
+    def set_url_facts_if_unset(self, facts):
+        if 'master' in facts:
+            for (url_var, use_ssl, port, default) in [
+                    ('api_url',
+                        facts['master']['api_use_ssl'],
+                        facts['master']['api_port'],
+                        facts['common']['hostname']),
+                    ('public_api_url',
+                        facts['master']['api_use_ssl'],
+                        facts['master']['api_port'],
+                        facts['common']['public_hostname']),
+                    ('console_url',
+                        facts['master']['console_use_ssl'],
+                        facts['master']['console_port'],
+                        facts['common']['hostname']),
+                    ('public_console_url',
+                        facts['master']['console_use_ssl'],
+                        facts['master']['console_port'],
+                        facts['common']['public_hostname'])]:
+                if url_var not in facts['master']:
+                    scheme = 'https' if use_ssl else 'http'
+                    netloc = default
+                    if (scheme == 'https' and port != '443') or (scheme == 'http' and port != '80'):
+                        netloc = "%s:%s" % (netloc, port)
+                    facts['master'][url_var] = urlparse.urlunparse((scheme, netloc, '', '', '', ''))
+
+
+    # Query current OpenShift config and return a dictionary containing
+    # settings that may be valuable for determining actions that need to be
+    # taken in the playbooks/roles
+    def current_config(self, facts):
+        current_config=dict()
+        roles = [ role for role in facts if role not in ['common','provider'] ]
+        for role in roles:
+            if 'roles' in current_config:
+                current_config['roles'].append(role)
+            else:
+                current_config['roles'] = [role]
+
+            # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
+            # determine the location of files.
+
+            # Query kubeconfig settings
+            kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates'
+            if role == 'node':
+                kubeconfig_dir = os.path.join(kubeconfig_dir, "node-%s" % facts['common']['hostname'])
+
+            kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
+            if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
+                try:
+                    _, output, error = module.run_command(["/usr/bin/openshift", "ex",
+                                                           "config", "view", "-o",
+                                                           "json",
+                                                           "--kubeconfig=%s" % kubeconfig_path],
+                                                           check_rc=False)
+                    config = json.loads(output)
+
+                    try:
+                        for cluster in config['clusters']:
+                            config['clusters'][cluster]['certificate-authority-data'] = 'masked'
+                    except KeyError:
+                        pass
+                    try:
+                        for user in config['users']:
+                            config['users'][user]['client-certificate-data'] = 'masked'
+                            config['users'][user]['client-key-data'] = 'masked'
+                    except KeyError:
+                        pass
+
+                    current_config['kubeconfig'] = config
+                except Exception:
+                    pass
+
+        return current_config
+
+
+    def apply_provider_facts(self, facts, provider_facts, roles):
+        if not provider_facts:
+            return facts
+
+        use_openshift_sdn = provider_facts.get('use_openshift_sdn')
+        if isinstance(use_openshift_sdn, bool):
+            facts['common']['use_openshift_sdn'] = use_openshift_sdn
+
+        common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
+        for h_var, ip_var in common_vars:
+            ip_value = provider_facts['network'].get(ip_var)
+            if ip_value:
+                facts['common'][ip_var] = ip_value
+
+            facts['common'][h_var] = self.choose_hostname([provider_facts['network'].get(h_var)], facts['common'][ip_var])
+
+        if 'node' in roles:
+            ext_id = provider_facts.get('external_id')
+            if ext_id:
+                facts['node']['external_id'] = ext_id
+
+        facts['provider'] = provider_facts
+        return facts
+
+    def hostname_valid(self, hostname):
+        if (not hostname or
+                hostname.startswith('localhost') or
+                hostname.endswith('localdomain') or
+                len(hostname.split('.')) < 2):
+            return False
+
+        return True
+
+    def choose_hostname(self, hostnames=[], fallback=''):
+        hostname = fallback
+
+        ips = [ i for i in hostnames if i is not None and re.match(r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z', i) ]
+        hosts = [ i for i in hostnames if i is not None and i not in set(ips) ]
+
+        for host_list in (hosts, ips):
+            for h in host_list:
+                if self.hostname_valid(h):
+                    return h
+
+        return hostname
+
+    def get_defaults(self, roles):
+        hardware_facts = self.get_hardware_facts()
+        net_facts = self.get_net_facts()
+        base_facts = self.get_base_facts()
+
+        defaults = dict()
+
+        common = dict(use_openshift_sdn=True)
+        ip = net_facts['default_ipv4']['address']
+        common['ip'] = ip
+        common['public_ip'] = ip
+
+        rc, output, error = module.run_command(['hostname', '-f'])
+        hostname_f = output.strip() if rc == 0 else ''
+        hostname_values = [hostname_f, base_facts['nodename'], base_facts['fqdn']]
+        hostname = self.choose_hostname(hostname_values)
+
+        common['hostname'] = hostname
+        common['public_hostname'] = hostname
+        defaults['common'] = common
+
+        if 'master' in roles:
+            # TODO: provide for a better way to override just the port, or just
+            # the urls, instead of forcing both, also to override the hostname
+            # without having to re-generate these urls later
+            master = dict(api_use_ssl=True, api_port='8443',
+                    console_use_ssl=True, console_path='/console',
+                    console_port='8443', etcd_use_ssl=False,
+                    etcd_port='4001')
+            defaults['master'] = master
+
+        if 'node' in roles:
+            node = dict(external_id=common['hostname'], pod_cidr='',
+                        labels={}, annotations={})
+            node['resources_cpu'] = hardware_facts['processor_cores']
+            node['resources_memory'] = int(int(hardware_facts['memtotal_mb']) * 1024 * 1024 * 0.75)
+            defaults['node'] = node
+
+        return defaults
+
+    def merge_facts(self, orig, new):
+        facts = dict()
+        for key, value in orig.iteritems():
+            if key in new:
+                if isinstance(value, dict):
+                    facts[key] = self.merge_facts(value, new[key])
+                else:
+                    facts[key] = copy.copy(new[key])
+            else:
+                facts[key] = copy.deepcopy(value)
+        new_keys = set(new.keys()) - set(orig.keys())
+        for key in new_keys:
+            facts[key] = copy.deepcopy(new[key])
+        return facts
+
+    def query_metadata(self, metadata_url, headers=None, expect_json=False):
+        r, info = fetch_url(module, metadata_url, headers=headers)
+        if info['status'] != 200:
+            module.fail_json(msg='Failed to query metadata', result=r,
+                             info=info)
+        if expect_json:
+            return module.from_json(r.read())
+        else:
+            return [line.strip() for line in r.readlines()]
+
+    def walk_metadata(self, metadata_url, headers=None, expect_json=False):
+        metadata = dict()
+
+        for line in self.query_metadata(metadata_url, headers, expect_json):
+            if line.endswith('/') and not line == 'public-keys/':
+                key = line[:-1]
+                metadata[key]=self.walk_metadata(metadata_url + line, headers,
+                                                 expect_json)
+            else:
+                results = self.query_metadata(metadata_url + line, headers,
+                                              expect_json)
+                if len(results) == 1:
+                    metadata[line] = results.pop()
+                else:
+                    metadata[line] = results
+        return metadata
+
+    def get_provider_metadata(self, metadata_url, supports_recursive=False,
+                          headers=None, expect_json=False):
+        if supports_recursive:
+            metadata = self.query_metadata(metadata_url, headers, expect_json)
+        else:
+            metadata = self.walk_metadata(metadata_url, headers, expect_json)
+        return metadata
+
+    def get_hardware_facts(self):
+        if not hasattr(self, 'hardware_facts'):
+            self.hardware_facts = Hardware().populate()
+        return self.hardware_facts
+
+    def get_base_facts(self):
+        if not hasattr(self, 'base_facts'):
+            self.base_facts = Facts().populate()
+        return self.base_facts
+
+    def get_virt_facts(self):
+        if not hasattr(self, 'virt_facts'):
+            self.virt_facts = Virtual().populate()
+        return self.virt_facts
+
+    def get_net_facts(self):
+        if not hasattr(self, 'net_facts'):
+            self.net_facts = Network(module).populate()
+        return self.net_facts
+
+    def guess_host_provider(self):
+        # TODO: cloud provider facts should probably be submitted upstream
+        virt_facts = self.get_virt_facts()
+        hardware_facts = self.get_hardware_facts()
+        product_name = hardware_facts['product_name']
+        product_version = hardware_facts['product_version']
+        virt_type = virt_facts['virtualization_type']
+        virt_role = virt_facts['virtualization_role']
+        provider = None
+        metadata = None
+
+        # TODO: this is not exposed through module_utils/facts.py in ansible,
+        # need to create PR for ansible to expose it
+        bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
+        if bios_vendor == 'Google':
+            provider = 'gce'
+            metadata_url = 'http://metadata.google.internal/computeMetadata/v1/?recursive=true'
+            headers = {'Metadata-Flavor': 'Google'}
+            metadata = self.get_provider_metadata(metadata_url, True, headers,
+                                                  True)
+
+            # Filter sshKeys and serviceAccounts from gce metadata
+            metadata['project']['attributes'].pop('sshKeys', None)
+            metadata['instance'].pop('serviceAccounts', None)
+        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
+            provider = 'ec2'
+            metadata_url = 'http://169.254.169.254/latest/meta-data/'
+            metadata = self.get_provider_metadata(metadata_url)
+        elif re.search(r'OpenStack', product_name):
+            provider = 'openstack'
+            metadata_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
+            metadata = self.get_provider_metadata(metadata_url, True, None, True)
+            ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
+            metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url)
+
+            # Filter public_keys  and random_seed from openstack metadata
+            metadata.pop('public_keys', None)
+            metadata.pop('random_seed', None)
+        return dict(name=provider, metadata=metadata)
+
+    def normalize_provider_facts(self, provider, metadata):
+        if provider is None or metadata is None:
+            return {}
+
+        # TODO: test for ipv6_enabled where possible (gce, aws do not support)
+        # and configure ipv6 facts if available
+
+        # TODO: add support for setting user_data if available
+
+        facts = dict(name=provider, metadata=metadata)
+        network = dict(interfaces=[], ipv6_enabled=False)
+        if provider == 'gce':
+            for interface in metadata['instance']['networkInterfaces']:
+                int_info = dict(ips=[interface['ip']], network_type=provider)
+                int_info['public_ips'] = [ ac['externalIp'] for ac in interface['accessConfigs'] ]
+                int_info['public_ips'].extend(interface['forwardedIps'])
+                _, _, network_id = interface['network'].rpartition('/')
+                int_info['network_id'] = network_id
+                network['interfaces'].append(int_info)
+            _, _, zone = metadata['instance']['zone'].rpartition('/')
+            facts['zone'] = zone
+            facts['external_id'] = metadata['instance']['id']
+
+            # Default to no sdn for GCE deployments
+            facts['use_openshift_sdn'] = False
+
+            # GCE currently only supports a single interface
+            network['ip'] = network['interfaces'][0]['ips'][0]
+            network['public_ip'] = network['interfaces'][0]['public_ips'][0]
+            network['hostname'] = metadata['instance']['hostname']
+
+            # TODO: attempt to resolve public_hostname
+            network['public_hostname'] = network['public_ip']
+        elif provider == 'ec2':
+            for interface in sorted(metadata['network']['interfaces']['macs'].values(),
+                                    key=lambda x: x['device-number']):
+                int_info = dict()
+                var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
+                for ips_var, int_var in var_map.iteritems():
+                    ips = interface[int_var]
+                    int_info[ips_var] = [ips] if isinstance(ips, basestring) else ips
+                int_info['network_type'] = 'vpc' if 'vpc-id' in interface else 'classic'
+                int_info['network_id'] = interface['subnet-id'] if int_info['network_type'] == 'vpc' else None
+                network['interfaces'].append(int_info)
+            facts['zone'] = metadata['placement']['availability-zone']
+            facts['external_id'] = metadata['instance-id']
+
+            # TODO: actually attempt to determine default local and public ips
+            # by using the ansible default ip fact and the ipv4-associations
+            # from the ec2 metadata
+            network['ip'] = metadata['local-ipv4']
+            network['public_ip'] = metadata['public-ipv4']
+
+            # TODO: verify that local hostname makes sense and is resolvable
+            network['hostname'] = metadata['local-hostname']
+
+            # TODO: verify that public hostname makes sense and is resolvable
+            network['public_hostname'] = metadata['public-hostname']
+        elif provider == 'openstack':
+            # The openstack ec2 compat api does not expose network interfaces,
+            # and the version tested did not include this info in the openstack
+            # metadata api; this should be updated if neutron exposes it.
+
+            facts['zone'] = metadata['availability_zone']
+            facts['external_id'] = metadata['uuid']
+            network['ip'] = metadata['ec2_compat']['local-ipv4']
+            network['public_ip'] = metadata['ec2_compat']['public-ipv4']
+
+            # TODO: verify local hostname makes sense and is resolvable
+            network['hostname'] = metadata['hostname']
+
+            # TODO: verify that public hostname makes sense and is resolvable
+            network['public_hostname'] = metadata['ec2_compat']['public-hostname']
+
+        facts['network'] = network
+        return facts
+
+    def init_provider_facts(self):
+        provider_info = self.guess_host_provider()
+        provider_facts = self.normalize_provider_facts(
+                provider_info.get('name'),
+                provider_info.get('metadata')
+        )
+        return provider_facts
+
+    def get_facts(self):
+        # TODO: transform facts into cleaner format (openshift_<blah> instead
+        # of openshift.<blah>)
+        return self.facts
+
+    def init_local_facts(self, facts={}):
+        changed = False
+
+        local_facts = ConfigParser.SafeConfigParser()
+        local_facts.read(self.filename)
+
+        section = self.role
+        if not local_facts.has_section(section):
+            local_facts.add_section(section)
+            changed = True
+
+        for key, value in facts.iteritems():
+            if isinstance(value, bool):
+                value = str(value)
+            if not value:
+                continue
+            if not local_facts.has_option(section, key) or local_facts.get(section, key) != value:
+                local_facts.set(section, key, value)
+                changed = True
+
+        if changed and not module.check_mode:
+            try:
+                fact_dir = os.path.dirname(self.filename)
+                if not os.path.exists(fact_dir):
+                    os.makedirs(fact_dir)
+                with open(self.filename, 'w') as fact_file:
+                    local_facts.write(fact_file)
+            except (IOError, OSError) as e:
+                raise OpenShiftFactsFileWriteError("Could not create fact file: %s, error: %s" % (self.filename, e))
+        self.changed = changed
+
+        role_facts = dict()
+        for section in local_facts.sections():
+            role_facts[section] = dict()
+            for opt, val in local_facts.items(section):
+                role_facts[section][opt] = val
+        return role_facts
+
+
+def main():
+    global module
+    module = AnsibleModule(
+            argument_spec = dict(
+                    role=dict(default='common',
+                              choices=OpenShiftFacts.known_roles,
+                              required=False),
+                    local_facts=dict(default={}, type='dict', required=False),
+            ),
+            supports_check_mode=True,
+            add_file_common_args=True,
+    )
+
+    role = module.params['role']
+    local_facts = module.params['local_facts']
+    fact_file = '/etc/ansible/facts.d/openshift.fact'
+
+    openshift_facts = OpenShiftFacts(role, fact_file, local_facts)
+
+    file_params = module.params.copy()
+    file_params['path'] = fact_file
+    file_args = module.load_file_common_arguments(file_params)
+    changed = module.set_fs_attributes_if_different(file_args,
+            openshift_facts.changed)
+
+    return module.exit_json(changed=changed,
+            ansible_facts=openshift_facts.get_facts())
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.facts import *
+from ansible.module_utils.urls import *
+main()
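
For reference, a minimal standalone sketch of the URL defaulting done by set_url_facts_if_unset above: the port is appended to the hostname only when it is not the default for the scheme. The hostname and port values below are illustrative, not taken from the commit.

    def default_url(use_ssl, port, hostname):
        # Mirrors set_url_facts_if_unset: drop the port when it matches the
        # scheme default (443 for https, 80 for http).
        scheme = 'https' if use_ssl else 'http'
        netloc = hostname
        if (scheme == 'https' and port != '443') or (scheme == 'http' and port != '80'):
            netloc = "%s:%s" % (netloc, port)
        return "%s://%s" % (scheme, netloc)

    # With the master defaults (api_use_ssl=True, api_port='8443'):
    print(default_url(True, '8443', 'master.example.com'))  # https://master.example.com:8443
    print(default_url(True, '443', 'master.example.com'))   # https://master.example.com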

+ 15 - 0
roles/openshift_facts/meta/main.yml

@@ -0,0 +1,15 @@
+---
+galaxy_info:
+  author: Jason DeTiberus
+  description:
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.8
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+  - system
+dependencies: []

+ 3 - 0
roles/openshift_facts/tasks/main.yml

@@ -0,0 +1,3 @@
+---
+- name: Gather OpenShift facts
+  openshift_facts:
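
The bare openshift_facts call above computes the facts and caches the role-local ones on the host. A small sketch of the ini-style cache the module keeps, written to a temporary file here instead of /etc/ansible/facts.d/openshift.fact so it runs unprivileged; the section name and values are illustrative, and RawConfigParser stands in for the SafeConfigParser used by the module only so the sketch runs on both Python 2 and 3.

    try:
        from ConfigParser import RawConfigParser  # Python 2, as in the module
    except ImportError:
        from configparser import RawConfigParser  # Python 3
    import tempfile

    cache = RawConfigParser()
    cache.add_section('master')              # one section per role
    cache.set('master', 'api_port', '8443')
    cache.set('master', 'debug_level', '2')

    with tempfile.NamedTemporaryFile(mode='w', suffix='.fact', delete=False) as fact_file:
        cache.write(fact_file)
        path = fact_file.name

    with open(path) as cached:
        print(cached.read())
    # Prints something like:
    # [master]
    # api_port = 8443
    # debug_level = 2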

+ 16 - 12
roles/openshift_master/README.md

@@ -13,20 +13,24 @@ Role Variables
 --------------
 
 From this role:
-| Name                                     | Default value         |
-|
-|------------------------------------------|-----------------------|----------------------------------------|
-| openshift_master_manage_service_externally | False                 | Should the openshift-master role manage the openshift-master service? |
-| openshift_master_debug_level               | openshift_debug_level | Verbosity of the debug logs for openshift-master |
-| openshift_node_ips                         | []                    | List of the openshift node ip addresses, that we want to pre-register to the system when openshift-master starts up |
-| openshift_registry_url                     | UNDEF (Optional)      | Default docker registry to use |
+| Name                                | Default value         |                                                  |
+|-------------------------------------|-----------------------|--------------------------------------------------|
+| openshift_master_debug_level        | openshift_debug_level | Verbosity of the debug logs for openshift-master |
+| openshift_node_ips                  | []                    | List of the openshift node ip addresses to pre-register when openshift-master starts up |
+| openshift_registry_url              | UNDEF                 | Default docker registry to use |
+| openshift_master_api_port           | UNDEF                 | Port to serve the OpenShift API on |
+| openshift_master_console_port       | UNDEF                 | Port to serve the web console on |
+| openshift_master_api_url            | UNDEF                 | Internal URL for the OpenShift API |
+| openshift_master_console_url        | UNDEF                 | Internal URL for the web console |
+| openshift_master_public_api_url     | UNDEF                 | Public URL for the OpenShift API |
+| openshift_master_public_console_url | UNDEF                 | Public URL for the web console |
 
 From openshift_common:
-| Name                          |  Default Value      |                     |
-|-------------------------------|---------------------|---------------------|
-| openshift_debug_level         | 0                   | Global openshift debug log verbosity |
-| openshift_public_ip           | UNDEF (Required)    | Public IP address to use for this host |
-| openshift_hostname            | UNDEF (Required)    | hostname to use for this instance |
+| Name                          | Default Value  |                                        |
+|-------------------------------|----------------|----------------------------------------|
+| openshift_debug_level         | 0              | Global openshift debug log verbosity   |
+| openshift_public_ip           | UNDEF          | Public IP address to use for this host |
+| openshift_hostname            | UNDEF          | hostname to use for this instance      |
 
 Dependencies
 ------------

+ 7 - 6
roles/openshift_master/defaults/main.yml

@@ -1,16 +1,17 @@
 ---
-openshift_master_manage_service_externally: false
-openshift_master_debug_level: "{{ openshift_debug_level | default(0) }}"
 openshift_node_ips: []
+
+# TODO: update setting these values based on the facts
+# TODO: update for console port change
 os_firewall_allow:
 - service: etcd embedded
   port: 4001/tcp
-- service: etcd peer
-  port: 7001/tcp
 - service: OpenShift api https
   port: 8443/tcp
-- service: OpenShift web console https
-  port: 8444/tcp
 os_firewall_deny:
 - service: OpenShift api http
   port: 8080/tcp
+- service: former OpenShift web console port
+  port: 8444/tcp
+- service: former etcd peer port
+  port: 7001/tcp

+ 0 - 1
roles/openshift_master/handlers/main.yml

@@ -1,4 +1,3 @@
 ---
 - name: restart openshift-master
   service: name=openshift-master state=restarted
-  when: not openshift_master_manage_service_externally

+ 26 - 24
roles/openshift_master/tasks/main.yml

@@ -1,19 +1,37 @@
 ---
-# TODO: allow for overriding default ports where possible
-# TODO: if setting up multiple masters, will need to predistribute the certs
-# to the additional masters before starting openshift-master
+# TODO: have api_port, api_use_ssl, console_port, console_use_ssl, and
+# etcd_use_ssl actually change the master config.
+
+- name: Set master OpenShift facts
+  openshift_facts:
+    role: 'master'
+    local_facts:
+      debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
+      api_port: "{{ openshift_master_api_port | default(None) }}"
+      api_url: "{{ openshift_master_api_url | default(None) }}"
+      api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
+      public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+      console_port: "{{ openshift_master_console_port | default(None) }}"
+      console_url: "{{ openshift_master_console_url | default(None) }}"
+      console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
+      public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+      etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
 
 - name: Install OpenShift Master package
   yum: pkg=openshift-master state=installed
 
+# TODO: We should pre-generate the master config and point to the generated
+# config rather than setting command line flags here
 - name: Configure OpenShift settings
   lineinfile:
     dest: /etc/sysconfig/openshift-master
     regexp: '^OPTIONS='
-    line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\""
+    line: "OPTIONS=\"--master={{ openshift.common.hostname }} --public-master={{ openshift.common.public_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift.master.debug_level }}\""
   notify:
   - restart openshift-master
 
+# TODO: should this be populated by a fact based on the deployment type
+# (origin, online, enterprise)?
 - name: Set default registry url
   lineinfile:
     dest: /etc/sysconfig/openshift-master
@@ -23,34 +41,18 @@
   notify:
   - restart openshift-master
 
-- name: Set master OpenShift facts
-  include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
-  facts:
-  - section: master
-    option: debug_level
-    value: "{{ openshift_master_debug_level }}"
-  - section: master
-    option: public_ip
-    value: "{{ openshift_public_ip }}"
-  - section: master
-    option: externally_managed
-    value: "{{ openshift_master_manage_service_externally }}"
-
 - name: Start and enable openshift-master
   service: name=openshift-master enabled=yes state=started
-  when: not openshift_master_manage_service_externally
-  register: result
-
-- name: Disable openshift-master if openshift-master is managed externally
-  service: name=openshift-master enabled=false
-  when: openshift_master_manage_service_externally
 
 - name: Create .kube directory
   file:
     path: /root/.kube
     state: directory
     mode: 0700
+
+# TODO: Update this file if the contents of the source file are not present in
+# the dest file; we will need to ignore entries that may have been added.
 - name: Configure root user kubeconfig
-  command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig
+  command: cp /var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig /root/.kube/.kubeconfig
   args:
     creates: /root/.kube/.kubeconfig
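
The local_facts set above (fed by the openshift_master_* inventory variables) are layered over the defaults computed by openshift_facts via merge_facts, with provider facts sitting in between. A minimal sketch of that precedence with illustrative values:

    import copy

    def merge_facts(orig, new):
        # Same recursion as merge_facts in openshift_facts.py: values in
        # `new` (local facts) win over values in `orig` (defaults).
        facts = dict()
        for key, value in orig.items():
            if key in new:
                if isinstance(value, dict):
                    facts[key] = merge_facts(value, new[key])
                else:
                    facts[key] = copy.copy(new[key])
            else:
                facts[key] = copy.deepcopy(value)
        for key in set(new) - set(orig):
            facts[key] = copy.deepcopy(new[key])
        return facts

    defaults = {'master': {'api_port': '8443', 'console_port': '8443'}}
    local_facts = {'master': {'api_port': '443'}}  # e.g. openshift_master_api_port=443

    merged = merge_facts(defaults, local_facts)
    print(merged['master']['api_port'])      # 443  -- the local fact wins
    print(merged['master']['console_port'])  # 8443 -- the default is kept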

+ 0 - 2
roles/openshift_master/vars/main.yml

@@ -1,2 +0,0 @@
----
-openshift_host_type: master

+ 0 - 3
roles/openshift_node/README.md

@@ -16,10 +16,7 @@ Role Variables
 From this role:
 | Name                                     | Default value         |                                        |
 |------------------------------------------|-----------------------|----------------------------------------|
-| openshift_node_manage_service_externally | False                 | Should the openshift-node role manage the openshift-node service? |
 | openshift_node_debug_level               | openshift_debug_level | Verbosity of the debug logs for openshift-node |
-| openshift_master_public_ips              | UNDEF (Required)      | List of the public IPs for the openhift-master hosts |
-| openshift_master_ips                     | UNDEF (Required)      | List of IP addresses for the openshift-master hosts to be used for node -> master communication |
 | openshift_registry_url                   | UNDEF (Optional)      | Default docker registry to use |
 
 From openshift_common:

+ 0 - 2
roles/openshift_node/defaults/main.yml

@@ -1,6 +1,4 @@
 ---
-openshift_node_manage_service_externally: false
-openshift_node_debug_level: "{{ openshift_debug_level | default(0) }}"
 os_firewall_allow:
 - service: OpenShift kubelet
   port: 10250/tcp

+ 1 - 1
roles/openshift_node/handlers/main.yml

@@ -1,4 +1,4 @@
 ---
 - name: restart openshift-node
   service: name=openshift-node state=restarted
-  when: not openshift_node_manage_service_externally
+  when: not openshift.common.use_openshift_sdn|bool

+ 11 - 16
roles/openshift_node/tasks/main.yml

@@ -1,4 +1,12 @@
 ---
+# TODO: allow for overriding default ports where possible
+# TODO: trigger the external service when restart is needed
+- name: Set node OpenShift facts
+  openshift_facts:
+    role: 'node'
+    local_facts:
+      debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
+
 - name: Test if node certs and config exist
   stat: path={{ item }}
   failed_when: not result.stat.exists
@@ -23,7 +31,7 @@
   lineinfile:
     dest: /etc/sysconfig/openshift-node
     regexp: '^OPTIONS='
-    line: "OPTIONS=\"--hostname={{ openshift_hostname }} --loglevel={{ openshift_node_debug_level }} --create-certs=false\""
+    line: "OPTIONS=\"--hostname={{ openshift.common.hostname }} --loglevel={{ openshift.node.debug_level }} --create-certs=false\""
   notify:
   - restart openshift-node
 
@@ -36,23 +44,10 @@
   notify:
   - restart openshift-node
 
-- name: Set OpenShift node facts
-  include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
-  facts:
-  - section: node
-    option: debug_level
-    value: "{{ openshift_node_debug_level }}"
-  - section: node
-    option: public_ip
-    value: "{{ openshift_public_ip }}"
-  - section: node
-    option: externally_managed
-    value: "{{ openshift_node_manage_service_externally }}"
-
 - name: Start and enable openshift-node
   service: name=openshift-node enabled=yes state=started
-  when: not openshift_node_manage_service_externally
+  when: not openshift.common.use_openshift_sdn|bool
 
 - name: Disable openshift-node if openshift-node is managed externally
   service: name=openshift-node enabled=false
-  when: openshift_node_manage_service_externally
+  when: openshift.common.use_openshift_sdn|bool

+ 0 - 2
roles/openshift_node/vars/main.yml

@@ -1,2 +0,0 @@
----
-openshift_host_type: node

+ 9 - 13
roles/openshift_register_nodes/README.md

@@ -1,38 +1,34 @@
-Role Name
-=========
+OpenShift Register Nodes
+========================
 
-A brief description of the role goes here.
+TODO
 
 Requirements
 ------------
 
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+TODO
 
 Role Variables
 --------------
 
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+TODO
 
 Dependencies
 ------------
 
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+TODO
 
 Example Playbook
 ----------------
 
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
-    - hosts: servers
-      roles:
-         - { role: username.rolename, x: 42 }
+TODO
 
 License
 -------
 
-BSD
+Apache License Version 2.0
 
 Author Information
 ------------------
 
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
+Jason DeTiberus (jdetiber@redhat.com)

+ 2 - 1
roles/openshift_register_nodes/library/kubernetes_register_node.py

@@ -214,7 +214,8 @@ class Node:
                              resources = NodeResources(version, cpu, memory),
                              cidr = podCIDR,
                              labels = labels,
-                             annotations = annotations
+                             annotations = annotations,
+                             externalID = externalID
                         )
         elif version == 'v1beta3':
             metadata = dict(name = name,

+ 15 - 126
roles/openshift_register_nodes/meta/main.yml

@@ -1,128 +1,17 @@
 ---
 galaxy_info:
-  author: your name
-  description: 
-  company: your company (optional)
-  # Some suggested licenses:
-  # - BSD (default)
-  # - MIT
-  # - GPLv2
-  # - GPLv3
-  # - Apache
-  # - CC-BY
-  license: license (GPLv2, CC-BY, etc)
-  min_ansible_version: 1.2
-  #
-  # Below are all platforms currently available. Just uncomment
-  # the ones that apply to your role. If you don't see your 
-  # platform on this list, let us know and we'll get it added!
-  #
-  #platforms:
-  #- name: EL
-  #  versions:
-  #  - all
-  #  - 5
-  #  - 6
-  #  - 7
-  #- name: GenericUNIX
-  #  versions:
-  #  - all
-  #  - any
-  #- name: Fedora
-  #  versions:
-  #  - all
-  #  - 16
-  #  - 17
-  #  - 18
-  #  - 19
-  #  - 20
-  #- name: SmartOS
-  #  versions:
-  #  - all
-  #  - any
-  #- name: opensuse
-  #  versions:
-  #  - all
-  #  - 12.1
-  #  - 12.2
-  #  - 12.3
-  #  - 13.1
-  #  - 13.2
-  #- name: Amazon
-  #  versions:
-  #  - all
-  #  - 2013.03
-  #  - 2013.09
-  #- name: GenericBSD
-  #  versions:
-  #  - all
-  #  - any
-  #- name: FreeBSD
-  #  versions:
-  #  - all
-  #  - 8.0
-  #  - 8.1
-  #  - 8.2
-  #  - 8.3
-  #  - 8.4
-  #  - 9.0
-  #  - 9.1
-  #  - 9.1
-  #  - 9.2
-  #- name: Ubuntu
-  #  versions:
-  #  - all
-  #  - lucid
-  #  - maverick
-  #  - natty
-  #  - oneiric
-  #  - precise
-  #  - quantal
-  #  - raring
-  #  - saucy
-  #  - trusty
-  #- name: SLES
-  #  versions:
-  #  - all
-  #  - 10SP3
-  #  - 10SP4
-  #  - 11
-  #  - 11SP1
-  #  - 11SP2
-  #  - 11SP3
-  #- name: GenericLinux
-  #  versions:
-  #  - all
-  #  - any
-  #- name: Debian
-  #  versions:
-  #  - all
-  #  - etch
-  #  - lenny
-  #  - squeeze
-  #  - wheezy
-  #
-  # Below are all categories currently available. Just as with
-  # the platforms above, uncomment those that apply to your role.
-  #
-  #categories:
-  #- cloud
-  #- cloud:ec2
-  #- cloud:gce
-  #- cloud:rax
-  #- clustering
-  #- database
-  #- database:nosql
-  #- database:sql
-  #- development
-  #- monitoring
-  #- networking
-  #- packaging
-  #- system
-  #- web
-dependencies: []
-  # List your role dependencies here, one per line. Only
-  # dependencies available via galaxy should be listed here.
-  # Be sure to remove the '[]' above if you add dependencies
-  # to this list.
-  
+  author: Jason DeTiberus
+  description:
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.8
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+  - system
+dependencies:
+- { role: openshift_facts }
+

+ 27 - 31
roles/openshift_register_nodes/tasks/main.yml

@@ -1,18 +1,20 @@
 ---
-# TODO: support configuration for multiple masters, currently hardcoding
-# the info from the first master
+# TODO: support new create-config command to generate node certs and config
+# TODO: recreate master/node configs if settings that affect the configs
+# change (hostname, public_hostname, ip, public_ip, etc)
 
 # TODO: create a failed_when condition
 - name: Create node server certificates
   command: >
     /usr/bin/openshift admin create-server-cert
     --overwrite=false
-    --cert={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/server.crt
-    --key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/server.key
-    --hostnames={{ [openshift_hostname, openshift_public_hostname, openshift_ip, openshift_public_ip]|join(",") }}
+    --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.crt
+    --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.key
+    --hostnames={{ [item.openshift.common.hostname,
+                    item.openshift.common.public_hostname]|unique|join(",") }}
   args:
     chdir: "{{ openshift_cert_dir_parent }}"
-    creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/server.crt"
+    creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/server.crt"
   with_items: openshift_nodes
   register: server_cert_result
 
@@ -21,48 +23,42 @@
   command: >
     /usr/bin/openshift admin create-node-cert
     --overwrite=false
-    --cert={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/cert.crt
-    --key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/key.key
-    --node-name={{ item.openshift_node_hostname }}
+    --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt
+    --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key
+    --node-name={{ item.openshift.common.hostname }}
   args:
     chdir: "{{ openshift_cert_dir_parent }}"
-    creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/cert.crt"
+    creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/cert.crt"
   with_items: openshift_nodes
   register: node_cert_result
 
-# TODO: re-create kubeconfig if certs were regenerated, not just if
-# .kubeconfig doesn't exist
 # TODO: create a failed_when condition
 - name: Create kubeconfigs for nodes
   command: >
     /usr/bin/openshift admin create-kubeconfig
-    --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/cert.crt
-    --client-key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/key.key
-    --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/.kubeconfig
-    --master={{ openshift_master_urls[0] }}
-    --public-master={{ openshift_master_public_urls[0] }}
+    --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt
+    --client-key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key
+    --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/.kubeconfig
+    --master={{ openshift.master.api_url }}
+    --public-master={{ openshift.master.public_api_url }}
   args:
     chdir: "{{ openshift_cert_dir_parent }}"
-    creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/.kubeconfig"
+    creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/.kubeconfig"
   with_items: openshift_nodes
   register: kubeconfig_result
 
-# TODO: generate the node configs (openshift start node --write-config
-# --config='{{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/node.yaml'
-# --kubeconfig='{{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/.kubeconfig'
-# will need to modify the generated node config as needed
-# (servingInfo.{certFile,clientCA,keyFile})
-
 - name: Register unregistered nodes
   kubernetes_register_node:
-    name: "{{ item.openshift_node_name }}"
+    client_user: openshift-client
+    name: "{{ item.openshift.common.hostname }}"
     api_version: "{{ openshift_kube_api_version }}"
-    cpu: "{{ item.openshift_node_cpu if item.openshift_node_cpu else None }}"
-    memory: "{{ item.openshift_node_memory if item.openshift_node_memory else None }}"
-    pod_cidr: "{{ item.openshift_node_pod_cidr if item.openshift_node_pod_cidr else None }}"
-    host_ip: "{{ item.openshift_node_host_ip }}"
-    labels: "{{ item.openshift_node_labels if item.openshift_node_labels else {} }}"
-    annotations: "{{ item.openshift_node_annotations if item.openshift_node_annotations else {} }}"
+    cpu: "{{ item.openshift.node.resources_cpu | default(None) }}"
+    memory: "{{ item.openshift.node.resources_memory | default(None) }}"
+    pod_cidr: "{{ item.openshift.node.pod_cidr | default(None) }}"
+    host_ip: "{{ item.openshift.common.ip }}"
+    labels: "{{ item.openshift.node.labels | default({}) }}"
+    annotations: "{{ item.openshift.node.annotations | default({}) }}"
+    external_id: "{{ item.openshift.node.external_id }}"
     # TODO: support customizing other attributes such as: client_config,
     # client_cluster, client_context, client_user
     # TODO: update for v1beta3 changes after rebase: hostnames, external_ips,
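
These tasks key certificates, kubeconfigs and node registration off item.openshift.common.hostname, which openshift_facts selects with choose_hostname: FQDN-looking names are preferred over bare IPs, and localhost/localdomain or single-label names are rejected. A standalone sketch with made-up candidate names:

    import re

    def hostname_valid(hostname):
        # Mirrors hostname_valid in openshift_facts.py
        if (not hostname or
                hostname.startswith('localhost') or
                hostname.endswith('localdomain') or
                len(hostname.split('.')) < 2):
            return False
        return True

    def choose_hostname(hostnames, fallback=''):
        ips = [h for h in hostnames
               if h and re.match(r'\A\d{1,3}(\.\d{1,3}){3}\Z', h)]
        hosts = [h for h in hostnames if h and h not in set(ips)]
        for host_list in (hosts, ips):  # prefer names over IPs
            for h in host_list:
                if hostname_valid(h):
                    return h
        return fallback

    print(choose_hostname(['localhost.localdomain', 'node1.example.com', '10.0.0.10']))
    # node1.example.com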

+ 2 - 0
roles/openshift_repos/defaults/main.yaml

@@ -1,5 +1,7 @@
 ---
 # TODO: once we are able to configure/deploy origin using the openshift roles,
 # then we should default to origin
+
+# TODO: push the defaulting of these values to the openshift_facts module
 openshift_deployment_type: online
 openshift_additional_repos: {}

+ 2 - 1
roles/openshift_repos/meta/main.yml

@@ -11,4 +11,5 @@ galaxy_info:
     - 7
   categories:
   - cloud
-dependencies: []
+dependencies:
+- { role: openshift_facts }

+ 6 - 0
roles/openshift_repos/tasks/main.yaml

@@ -1,6 +1,12 @@
 ---
 # TODO: Add flag for enabling EPEL repo, default to false
 
+# TODO: Add subscription-management config, with parameters
+#       for username, password, poolid(name), and official repos to
+#       enable/disable. Might need to make a module that extends the
+#       subscription management module to take a poolid and enable/disable the
+#       proper repos correctly.
+
 - assert:
     that: openshift_deployment_type in known_openshift_deployment_types
 

+ 0 - 2
roles/openshift_sdn_master/defaults/main.yml

@@ -1,2 +0,0 @@
----
-openshift_sdn_master_debug_level: "{{ openshift_debug_level | default(0) }}"

+ 2 - 1
roles/openshift_sdn_master/meta/main.yml

@@ -11,4 +11,5 @@ galaxy_info:
     - 7
   categories:
   - cloud
-dependencies: []
+dependencies:
+- { role: openshift_common }

+ 10 - 8
roles/openshift_sdn_master/tasks/main.yml

@@ -1,4 +1,13 @@
 ---
+# TODO: add task to set the sdn subnet if openshift-sdn-master hasn't been
+# started yet
+
+- name: Set master sdn OpenShift facts
+  openshift_facts:
+    role: 'master_sdn'
+    local_facts:
+      debug_level: "{{ openshift_master_sdn_debug_level | default(openshift.common.debug_level) }}"
+
 - name: Install openshift-sdn-master
   yum:
     pkg: openshift-sdn-master
@@ -8,17 +17,10 @@
   lineinfile:
     dest: /etc/sysconfig/openshift-sdn-master
     regexp: '^OPTIONS='
-    line: "OPTIONS=\"-v={{ openshift_sdn_master_debug_level }}\""
+    line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }}\""
   notify:
   - restart openshift-sdn-master
 
-- name: Set openshift-sdn-master facts
-  include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
-  facts:
-  - section: sdn-master
-    option: debug_level
-    value: "{{ openshift_sdn_master_debug_level }}"
-
 - name: Enable openshift-sdn-master
   service:
     name: openshift-sdn-master

+ 0 - 6
roles/openshift_sdn_node/README.md

@@ -17,12 +17,6 @@ From this role:
 | openshift_sdn_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
 
 
-From openshift_node:
-| Name                  | Default value    |                                      |
-|-----------------------|------------------|--------------------------------------|
-| openshift_master_ips  | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication |
-
-
 From openshift_common:
 | Name                          | Default value       |                                        |
 |-------------------------------|---------------------|----------------------------------------|

+ 0 - 2
roles/openshift_sdn_node/defaults/main.yml

@@ -1,2 +0,0 @@
----
-openshift_sdn_node_debug_level: "{{ openshift_debug_level | default(0) }}"

+ 2 - 1
roles/openshift_sdn_node/meta/main.yml

@@ -11,4 +11,5 @@ galaxy_info:
     - 7
   categories:
   - cloud
-dependencies: []
+dependencies:
+- { role: openshift_common }

+ 10 - 13
roles/openshift_sdn_node/tasks/main.yml

@@ -1,4 +1,10 @@
 ---
+- name: Set node sdn OpenShift facts
+  openshift_facts:
+    role: 'node_sdn'
+    local_facts:
+      debug_level: "{{ openshift_node_sdn_debug_level | default(openshift.common.debug_level) }}"
+
 - name: Install openshift-sdn-node
   yum:
     pkg: openshift-sdn-node
@@ -14,28 +20,19 @@
     backrefs: yes
   with_items:
     - regex: '^(OPTIONS=)'
-      line: '\1"-v={{ openshift_sdn_node_debug_level }} -hostname={{ openshift_hostname }}"'
+      line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}"'
     - regex: '^(MASTER_URL=)'
-      line: '\1"http://{{ openshift_master_ips | first }}:4001"'
+      line: '\1"{{ openshift_sdn_master_url }}"'
     - regex: '^(MINION_IP=)'
-      line: '\1"{{ openshift_public_ip }}"'
+      line: '\1"{{ openshift.common.ip }}"'
     # TODO lock down the insecure-registry config to a more sane value than
     # 0.0.0.0/0
     - regex: '^(DOCKER_OPTIONS=)'
       line: '\1"--insecure-registry=0.0.0.0/0 -b=lbr0 --mtu=1450 --selinux-enabled"'
   notify: restart openshift-sdn-node
 
-- name: Set openshift-sdn-node facts
-  include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
-  facts:
-  - section: sdn-node
-    option: debug_level
-    value: "{{ openshift_sdn_node_debug_level }}"
-
-# fixme: Once the openshift_cluster playbook is published state should be started
-# Always bounce service to pick up new credentials
 - name: Start and enable openshift-sdn-node
   service:
     name: openshift-sdn-node
     enabled: yes
-    state: restarted
+    state: started
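
The lineinfile edits above rely on backrefs: when the regexp matches, the whole line in /etc/sysconfig/openshift-sdn-node is replaced by the line parameter with \1 expanded back to the captured key. A rough Python equivalent with made-up option values:

    import re

    regexp = r'^(OPTIONS=)'
    template = r'\1"-v=2 -hostname=node1.example.com"'  # illustrative values

    line = 'OPTIONS="-v=0"'
    match = re.search(regexp, line)
    if match:
        # lineinfile with backrefs=yes swaps the matching line for the
        # template, expanding \1 to the captured 'OPTIONS=' prefix.
        line = match.expand(template)
    print(line)
    # OPTIONS="-v=2 -hostname=node1.example.com"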

+ 5 - 0
roles/os_env_extras_node/tasks/main.yml

@@ -0,0 +1,5 @@
+---
+# The origin rpm ships instructions on how to set up origin properly.
+# The following steps come from there.
+- name: Change root to be in the Docker group
+  user: name=root groups=dockerroot append=yes

+ 1 - 0
roles/os_firewall/library/os_firewall_manage_iptables.py

@@ -1,5 +1,6 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
 
 from subprocess import call, check_output
 

+ 1 - 0
roles/os_firewall/meta/main.yml

@@ -1,3 +1,4 @@
+---
 galaxy_info:
   author: Jason DeTiberus
   description: os_firewall

+ 5 - 0
roles/os_firewall/tasks/firewall/firewalld.yml

@@ -3,6 +3,7 @@
   yum:
     name: firewalld
     state: present
+  register: install_result
 
 - name: Check if iptables-services is installed
   command: rpm -q iptables-services
@@ -20,6 +21,10 @@
   - ip6tables
   when: pkg_check.rc == 0
 
+- name: Reload systemd units
+  command: systemctl daemon-reload
+  when: install_result | changed
+
 - name: Start and enable firewalld service
   service:
     name: firewalld

+ 7 - 5
roles/os_firewall/tasks/firewall/iptables.yml

@@ -6,6 +6,7 @@
   with_items:
   - iptables
   - iptables-services
+  register: install_result
 
 - name: Check if firewalld is installed
   command: rpm -q firewalld
@@ -20,14 +21,15 @@
     enabled: no
   when: pkg_check.rc == 0
 
-- name: Start and enable iptables services
+- name: Reload systemd units
+  command: systemctl daemon-reload
+  when: install_result | changed
+
+- name: Start and enable iptables service
   service:
-    name: "{{ item }}"
+    name: iptables
     state: started
     enabled: yes
-  with_items:
-  - iptables
-  - ip6tables
   register: result
 
 - name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail