
Merge pull request #97 from jwhonce/wip/cluster

Use ansible playbook to initialize openshift cluster
Jhon Honce · 10 years ago
commit f2f0167b60
40 changed files with 780 additions and 279 deletions
  1. README.md (+1, -1)
  2. bin/cluster (+111, -0)
  3. filter_plugins/oo_filters.py (+62, -44)
  4. inventory/gce/group_vars/all (+7, -0)
  5. inventory/gce/group_vars/tag_host-type-master (+5, -0)
  6. inventory/gce/group_vars/tag_host-type-node (+6, -0)
  7. inventory/gce/group_vars/tag_host-type-openshift-master (+1, -0)
  8. inventory/gce/group_vars/tag_host-type-openshift-node (+1, -0)
  9. playbooks/aws/openshift-master/config.yml (+3, -3)
  10. playbooks/aws/openshift-master/launch.yml (+2, -2)
  11. playbooks/aws/openshift-node/config.yml (+4, -4)
  12. playbooks/aws/openshift-node/launch.yml (+2, -2)
  13. playbooks/gce/openshift-cluster/filter_plugins (+1, -0)
  14. playbooks/gce/openshift-cluster/launch.yml (+63, -0)
  15. playbooks/gce/openshift-cluster/launch_instances.yml (+39, -0)
  16. playbooks/gce/openshift-cluster/roles (+1, -0)
  17. playbooks/gce/openshift-cluster/terminate.yml (+20, -0)
  18. playbooks/gce/openshift-cluster/vars.yml (+1, -0)
  19. playbooks/gce/openshift-master/config.yml (+6, -27)
  20. playbooks/gce/openshift-master/launch.yml (+2, -2)
  21. playbooks/gce/openshift-master/terminate.yml (+2, -1)
  22. playbooks/gce/openshift-node/config.yml (+109, -35)
  23. playbooks/gce/openshift-node/launch.yml (+4, -4)
  24. playbooks/gce/openshift-node/terminate.yml (+2, -1)
  25. roles/docker/tasks/main.yml (+1, -1)
  26. roles/openshift_common/README.md (+1, -2)
  27. roles/openshift_common/defaults/main.yml (+0, -5)
  28. roles/openshift_master/README.md (+1, -2)
  29. roles/openshift_master/tasks/main.yml (+5, -30)
  30. roles/openshift_node/README.md (+1, -3)
  31. roles/openshift_node/defaults/main.yml (+0, -6)
  32. roles/openshift_node/tasks/main.yml (+19, -49)
  33. roles/openshift_register_nodes/README.md (+38, -0)
  34. roles/openshift_register_nodes/defaults/main.yml (+5, -0)
  35. roles/openshift_node/library/openshift_register_node.py (+11, -31)
  36. roles/openshift_register_nodes/meta/main.yml (+128, -0)
  37. roles/openshift_register_nodes/tasks/main.yml (+71, -0)
  38. roles/openshift_sdn_node/README.md (+1, -2)
  39. roles/os_firewall/library/os_firewall_manage_iptables.py (+40, -22)
  40. roles/os_update_latest/tasks/main.yml (+3, -0)

+ 1 - 1
README.md

@@ -26,7 +26,7 @@ Setup
 
 - Directory Structure:
   - [cloud.rb](cloud.rb) - light wrapper around Ansible
-  - [cluster.sh](cluster.sh) - easily create OpenShift 3 clusters
+  - [bin/cluster](bin/cluster) - python script to easily create OpenShift 3 clusters
   - [filter_plugins/](filter_plugins) - custom filters used to manipulate data in Ansible
   - [inventory/](inventory) - houses Ansible dynamic inventory scripts
   - [lib/](lib) - library components of cloud.rb

+ 111 - 0
bin/cluster

@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+import argparse
+import ConfigParser
+import sys
+import os
+
+
+class Cluster(object):
+    """Python wrapper to ensure environment is correct for running ansible playbooks
+    """
+
+    def __init__(self, args):
+        self.args = args
+
+        # setup ansible ssh environment
+        if 'ANSIBLE_SSH_ARGS' not in os.environ:
+            os.environ['ANSIBLE_SSH_ARGS'] = (
+                '-o ForwardAgent=yes'
+                ' -o StrictHostKeyChecking=no'
+                ' -o UserKnownHostsFile=/dev/null'
+                ' -o ControlMaster=auto'
+                ' -o ControlPersist=600s'
+            )
+
+    def apply(self):
+        # setup ansible playbook environment
+        config = ConfigParser.ConfigParser()
+        if 'gce' == self.args.provider:
+            config.readfp(open('inventory/gce/gce.ini'))
+
+            for key in config.options('gce'):
+                os.environ[key] = config.get('gce', key)
+
+            inventory = '-i inventory/gce/gce.py'
+        elif 'aws' == self.args.provider:
+            config.readfp(open('inventory/aws/ec2.ini'))
+
+            for key in config.options('ec2'):
+                os.environ[key] = config.get('ec2', key)
+
+            inventory = '-i inventory/aws/ec2.py'
+        else:
+            # this code should never be reached
+            raise argparse.ArgumentError("invalid PROVIDER {}".format(self.args.provider))
+
+        env = {'cluster_id': self.args.cluster_id}
+
+        if 'create' == self.args.action:
+            playbook = "playbooks/{}/openshift-cluster/launch.yml".format(self.args.provider)
+            env['masters'] = self.args.masters
+            env['nodes'] = self.args.nodes
+
+        elif 'terminate' == self.args.action:
+            playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(self.args.provider)
+        elif 'list' == self.args.action:
+            # todo: implement cluster list
+            raise argparse.ArgumentError("ACTION {} not implemented".format(self.args.action))
+        elif 'update' == self.args.action:
+            # todo: implement cluster update
+            raise argparse.ArgumentError("ACTION {} not implemented".format(self.args.action))
+        else:
+            # this code should never be reached
+            raise argparse.ArgumentError("invalid ACTION {}".format(self.args.action))
+
+        verbose = ''
+        if self.args.verbose > 0:
+            verbose = '-{}'.format('v' * self.args.verbose)
+
+        ansible_env = '-e \'{}\''.format(
+            ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()])
+        )
+
+        command = 'ansible-playbook {} {} {} {}'.format(
+            verbose, inventory, ansible_env, playbook
+        )
+
+        if self.args.verbose > 1:
+            command = 'time {}'.format(command)
+
+        if self.args.verbose > 0:
+            sys.stderr.write('RUN [{}]\n'.format(command))
+            sys.stderr.flush()
+
+        status = os.system(command)
+        if status != 0:
+            sys.stderr.write("RUN [{}] failed with exit status %d".format(command, status))
+            exit(status)
+
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Manage OpenShift Cluster')
+    parser.add_argument('-m', '--masters', default=1, type=int, help='number of masters to create in cluster')
+    parser.add_argument('-n', '--nodes', default=2, type=int, help='number of nodes to create in cluster')
+    parser.add_argument('-v', '--verbose', action='count', help='Multiple -v options increase the verbosity')
+    parser.add_argument('--version', action='version', version='%(prog)s 0.1')
+    parser.add_argument('action', choices=['create', 'terminate', 'update', 'list'])
+    parser.add_argument('provider', choices=['gce', 'aws'])
+    parser.add_argument('cluster_id', help='prefix for cluster VM names')
+    args = parser.parse_args()
+
+    if 'terminate' == args.action:
+        sys.stderr.write("This will terminate the ENTIRE {} environment. Are you sure? [y/N] ".format(args.cluster_id))
+        sys.stderr.flush()
+        answer = sys.stdin.read(1)
+        if answer not in ['y', 'Y']:
+            exit(0)
+
+    Cluster(args).apply()
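
Note: given the argument parser and apply() logic above, a typical run of the new wrapper would look roughly like the following (cluster name chosen purely for illustration):

    ./bin/cluster -v create gce mycluster -m 1 -n 2
    ./bin/cluster terminate gce mycluster

For the GCE create case this expands to approximately:

    ansible-playbook -v -i inventory/gce/gce.py -e 'cluster_id=mycluster masters=1 nodes=2' playbooks/gce/openshift-cluster/launch.yml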

+ 62 - 44
filter_plugins/oo_filters.py

@@ -1,39 +1,42 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
 from ansible import errors, runner
 import json
 import pdb
 
 def oo_pdb(arg):
-  ''' This pops you into a pdb instance where arg is the data passed in from the filter.
+    ''' This pops you into a pdb instance where arg is the data passed in from the filter.
         Ex: "{{ hostvars | oo_pdb }}"
-  '''
-  pdb.set_trace()
-  return arg
+    '''
+    pdb.set_trace()
+    return arg
 
 def oo_len(arg):
-  ''' This returns the length of the argument
+    ''' This returns the length of the argument
         Ex: "{{ hostvars | oo_len }}"
-  '''
-  return len(arg)
+    '''
+    return len(arg)
 
 def get_attr(data, attribute=None):
-  ''' This looks up dictionary attributes of the form a.b.c and returns the value.
+    ''' This looks up dictionary attributes of the form a.b.c and returns the value.
         Ex: data = {'a': {'b': {'c': 5}}}
             attribute = "a.b.c"
             returns 5
-  '''
-
-  if not attribute:
-    raise errors.AnsibleFilterError("|failed expects attribute to be set")
+    '''
+    if not attribute:
+        raise errors.AnsibleFilterError("|failed expects attribute to be set")
 
-  ptr = data
-  for attr in attribute.split('.'):
-    ptr = ptr[attr]
+    ptr = data
+    for attr in attribute.split('.'):
+        ptr = ptr[attr]
 
-  return ptr
+    return ptr
 
 def oo_collect(data, attribute=None, filters={}):
-  ''' This takes a list of dict and collects all attributes specified into a list
-      If filter is specified then we will include all items that match _ALL_ of filters.
+    ''' This takes a list of dict and collects all attributes specified into a list
+        If filter is specified then we will include all items that match _ALL_ of filters.
         Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
                      {'a':2, 'z': 'z'},        # True, return
                      {'a':3, 'z': 'z'},        # True, return
@@ -42,44 +45,59 @@ def oo_collect(data, attribute=None, filters={}):
             attribute = 'a'
             filters   = {'z': 'z'}
             returns [1, 2, 3]
-  '''
+    '''
 
-  if not issubclass(type(data), list):
-    raise errors.AnsibleFilterError("|failed expects to filter on a List")
+    if not issubclass(type(data), list):
+        raise errors.AnsibleFilterError("|failed expects to filter on a List")
 
-  if not attribute:
-    raise errors.AnsibleFilterError("|failed expects attribute to be set")
+    if not attribute:
+        raise errors.AnsibleFilterError("|failed expects attribute to be set")
 
-  if filters:
-    retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ]
-  else:
-    retval = [get_attr(d, attribute) for d in data]
+    if filters:
+        retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ]
+    else:
+        retval = [get_attr(d, attribute) for d in data]
 
-  return retval
+    return retval
 
 def oo_select_keys(data, keys):
-  ''' This returns a list, which contains the value portions for the keys
+    ''' This returns a list, which contains the value portions for the keys
         Ex: data = { 'a':1, 'b':2, 'c':3 }
             keys = ['a', 'c']
             returns [1, 3]
-  '''
+    '''
+
+    if not issubclass(type(data), dict):
+        raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary")
 
-  if not issubclass(type(data), dict):
-    raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary")
+    if not issubclass(type(keys), list):
+        raise errors.AnsibleFilterError("|failed expects first param is a list")
 
-  if not issubclass(type(keys), list):
-    raise errors.AnsibleFilterError("|failed expects first param is a list")
+    # Gather up the values for the list of keys passed in
+    retval = [data[key] for key in keys]
 
-  # Gather up the values for the list of keys passed in
-  retval = [data[key] for key in keys]
+    return retval
 
-  return retval
+def oo_prepend_strings_in_list(data, prepend):
+    ''' This takes a list of strings and prepends a string to each item in the
+        list
+        Ex: data = ['cart', 'tree']
+            prepend = 'apple-'
+            returns ['apple-cart', 'apple-tree']
+    '''
+    if not issubclass(type(data), list):
+        raise errors.AnsibleFilterError("|failed expects first param is a list")
+    if not all(isinstance(x, basestring) for x in data):
+        raise errors.AnsibleFilterError("|failed expects first param is a list of strings")
+    retval = [prepend + s for s in data]
+    return retval
 
 class FilterModule (object):
-  def filters(self):
-    return {
-      "oo_select_keys": oo_select_keys,
-      "oo_collect": oo_collect,
-      "oo_len": oo_len,
-      "oo_pdb": oo_pdb
-    }
+    def filters(self):
+        return {
+                "oo_select_keys": oo_select_keys,
+                "oo_collect": oo_collect,
+                "oo_len": oo_len,
+                "oo_pdb": oo_pdb,
+                "oo_prepend_strings_in_list": oo_prepend_strings_in_list
+                }

+ 7 - 0
inventory/gce/group_vars/all

@@ -0,0 +1,7 @@
+---
+ansible_ssh_user: root
+openshift_hostname: "{{ ansible_default_ipv4.address }}"
+openshift_public_hostname: "{{ ansible_default_ipv4.address }}"
+openshift_ip: "{{ ansible_default_ipv4.address }}"
+openshift_public_ip: "{{ gce_public_ip }}"
+openshift_env: "{{ oo_env }}"

+ 5 - 0
inventory/gce/group_vars/tag_host-type-master

@@ -0,0 +1,5 @@
+---
+openshift_api_url: https://{{ openshift_hostname }}:8443
+openshift_api_public_url: https://{{ openshift_public_hostname }}:8443
+openshift_webui_url: https://{{ openshift_hostname }}:8444
+openshift_webui_public_url: https://{{ openshift_public_hostname }}:8444

+ 6 - 0
inventory/gce/group_vars/tag_host-type-node

@@ -0,0 +1,6 @@
+---
+openshift_node_cpu:
+openshift_node_memory:
+openshift_node_pod_cidr:
+openshift_node_labels: {}
+openshift_node_annotations: {}

+ 1 - 0
inventory/gce/group_vars/tag_host-type-openshift-master

@@ -0,0 +1 @@
+tag_host-type-master

+ 1 - 0
inventory/gce/group_vars/tag_host-type-openshift-node

@@ -0,0 +1 @@
+tag_host-type-node

+ 3 - 3
playbooks/aws/openshift-master/config.yml

@@ -1,10 +1,10 @@
 ---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: "populate oo_masters_to_config host group if needed"
   hosts: localhost
   gather_facts: no
   tasks:
   - name: "Evaluate oo_host_group_exp if it's set"
-    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    add_host: "name={{ item }} groups=oo_masters_to_config"
     with_items: "{{ oo_host_group_exp | default('') }}"
     when: oo_host_group_exp is defined
 
@@ -25,7 +25,7 @@
       when: groups['tag_env-host-type_' + oo_env + '-openshift-node'] is defined
 
 - name: "Configure instances"
-  hosts: oo_hosts_to_config
+  hosts: oo_masters_to_config
   connection: ssh
   user: root
   vars_files:

+ 2 - 2
playbooks/aws/openshift-master/launch.yml

@@ -45,8 +45,8 @@
       args:
         tags: "{{ oo_new_inst_tags }}"
 
-    - name: Add new instances public IPs to oo_hosts_to_config
-      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+    - name: Add new instances public IPs to oo_masters_to_config
+      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_masters_to_config"
       with_together:
         - oo_new_inst_names
         - ec2.instances

+ 4 - 4
playbooks/aws/openshift-node/config.yml

@@ -1,10 +1,10 @@
 ---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: "populate oo_nodes_to_config host group if needed"
   hosts: localhost
   gather_facts: no
   tasks:
   - name: Evaluate oo_host_group_exp
-    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    add_host: "name={{ item }} groups=oo_nodes_to_config"
     with_items: "{{ oo_host_group_exp | default('') }}"
     when: oo_host_group_exp is defined
 
@@ -31,7 +31,7 @@
       when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
 
 - name: "Configure instances"
-  hosts: oo_hosts_to_config
+  hosts: oo_nodes_to_config
   connection: ssh
   user: root
   vars_files:
@@ -44,5 +44,5 @@
         openshift_env: "{{ oo_env }}",
         openshift_public_ip: "{{ ec2_ip_address }}"
       }
-    - docker
     - os_env_extras
+    - os_env_extras_node

+ 2 - 2
playbooks/aws/openshift-node/launch.yml

@@ -45,8 +45,8 @@
       args:
         tags: "{{ oo_new_inst_tags }}"
 
-    - name: Add new instances public IPs to oo_hosts_to_config
-      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+    - name: Add new instances public IPs to oo_nodes_to_config
+      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_nodes_to_config"
       with_together:
         - oo_new_inst_names
         - ec2.instances

+ 1 - 0
playbooks/gce/openshift-cluster/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 63 - 0
playbooks/gce/openshift-cluster/launch.yml

@@ -0,0 +1,63 @@
+---
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars_files:
+      - vars.yml
+  tasks:
+    - set_fact: k8s_type="master"
+
+    - name: Generate master instance names(s)
+      set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
+      register: master_names_output
+      with_sequence: start=1 end={{ masters }}
+
+    # These set_fact's cannot be combined
+    - set_fact:
+        master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
+
+    - set_fact:
+        master_names: "{{ master_names_string.strip().split(' ') }}"
+
+    - include: launch_instances.yml
+      vars:
+        instances: "{{ master_names }}"
+        cluster: "{{ cluster_id }}"
+        type: "{{ k8s_type }}"
+        group_name: "tag_env-host-type-{{ cluster_id }}-openshift-master"
+
+    - set_fact: k8s_type="node"
+
+    - name: Generate node instance names(s)
+      set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
+      register: node_names_output
+      with_sequence: start=1 end={{ nodes }}
+
+    # These set_fact's cannot be combined
+    - set_fact:
+        node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
+
+    - set_fact:
+        node_names: "{{ node_names_string.strip().split(' ') }}"
+
+    - include: launch_instances.yml
+      vars:
+        instances: "{{ node_names }}"
+        cluster: "{{ cluster_id }}"
+        type: "{{ k8s_type }}"
+
+- hosts: "tag_env-{{ cluster_id }}"
+  roles:
+  - openshift_repos
+  - os_update_latest
+
+- include: ../openshift-master/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
+    oo_env: "{{ cluster_id }}"
+
+- include: ../openshift-node/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"
+    oo_env: "{{ cluster_id }}"

+ 39 - 0
playbooks/gce/openshift-cluster/launch_instances.yml

@@ -0,0 +1,39 @@
+
+- set_fact:
+    machine_type: "{{ lookup('env', 'gce_machine_type') |default('n1-standard-1', true) }}"
+    machine_image: "{{ lookup('env', 'gce_machine_image') |default('libra-rhel7', true) }}"
+
+- name: Launch instance(s)
+  gce:
+    instance_names: "{{ instances }}"
+    machine_type: "{{ machine_type }}"
+    image: "{{ machine_image }}"
+    service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+    pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+    project_id: "{{ lookup('env', 'gce_project_id') }}"
+    tags:
+      - "created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}"
+      - "env-{{ cluster }}"
+      - "host-type-{{ type }}"
+      - "env-host-type-{{ cluster }}-openshift-{{ type }}"
+  register: gce
+
+- name: Add new instances public IPs
+  add_host:
+    hostname: "{{ item.name }}"
+    ansible_ssh_host: "{{ item.public_ip }}"
+    groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
+    gce_public_ip: "{{ item.public_ip }}"
+  with_items: gce.instance_data
+
+- name: Wait for ssh
+  wait_for: "port=22 host={{ item.public_ip }}"
+  with_items: gce.instance_data
+
+- name: Wait for root user setup
+  command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
+  register: result
+  until: result.rc == 0
+  retries: 20
+  delay: 10
+  with_items: gce.instance_data

+ 1 - 0
playbooks/gce/openshift-cluster/roles

@@ -0,0 +1 @@
+../../../roles

+ 20 - 0
playbooks/gce/openshift-cluster/terminate.yml

@@ -0,0 +1,20 @@
+---
+- name: Terminate instance(s)
+  hosts: localhost
+
+  vars_files:
+    - vars.yml
+
+- include: ../openshift-node/terminate.yml
+  vars:
+    oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
+    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+
+- include: ../openshift-master/terminate.yml
+  vars:
+    oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
+    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"

+ 1 - 0
playbooks/gce/openshift-cluster/vars.yml

@@ -0,0 +1 @@
+---

+ 6 - 27
playbooks/gce/openshift-master/config.yml

@@ -1,41 +1,20 @@
----
-- name: "populate oo_hosts_to_config host group if needed"
+- name: master/config.yml, populate oo_masters_to_config host group if needed
   hosts: localhost
   gather_facts: no
   tasks:
   - name: "Evaluate oo_host_group_exp if it's set"
-    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    add_host: "name={{ item }} groups=oo_masters_to_config"
     with_items: "{{ oo_host_group_exp | default('') }}"
     when: oo_host_group_exp is defined
 
-- name: "Gather facts for nodes in {{ oo_env }}"
+- name: Gather facts for nodes in {{ oo_env }}
   hosts: "tag_env-host-type-{{ oo_env }}-openshift-node"
-  connection: ssh
-  user: root
-
-- name: "Set Origin specific facts on localhost (for later use)"
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: Setting openshift_node_ips fact on localhost
-      set_fact:
-        openshift_node_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-node'])
-            | oo_collect(attribute='ansible_default_ipv4.address') }}"
-      when: groups['tag_env-host-type-' + oo_env + '-openshift-node'] is defined
 
 - name: "Configure instances"
-  hosts: oo_hosts_to_config
-  connection: ssh
-  user: root
+  hosts: oo_masters_to_config
   vars_files:
-    - vars.yml
+  - vars.yml
   roles:
-    - {
-        role: openshift_master,
-        openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}",
-        openshift_public_ip: "{{ gce_public_ip }}",
-        openshift_env: "{{ oo_env }}",
-      }
+    - openshift_master
     - pods
     - os_env_extras

+ 2 - 2
playbooks/gce/openshift-master/launch.yml

@@ -24,8 +24,8 @@
         tags: "{{ oo_new_inst_tags }}"
       register: gce
 
-    - name: Add new instances public IPs to oo_hosts_to_config
-      add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config"
+    - name: Add new instances public IPs to oo_masters_to_config
+      add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_masters_to_config"
       with_items: gce.instance_data
 
     - name: Wait for ssh

+ 2 - 1
playbooks/gce/openshift-master/terminate.yml

@@ -12,9 +12,10 @@
     - debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
 
 
-- name: Terminate instances
+- name: Terminate master instances
   hosts: localhost
   connection: local
+  gather_facts: no
   tasks:
     - name: Terminate master instances
       gce:

+ 109 - 35
playbooks/gce/openshift-node/config.yml

@@ -1,48 +1,122 @@
----
-- name: "populate oo_hosts_to_config host group if needed"
+- name: node/config.yml, populate oo_nodes_to_config host group if needed
   hosts: localhost
   gather_facts: no
   tasks:
   - name: Evaluate oo_host_group_exp
-    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    add_host: "name={{ item }} groups=oo_nodes_to_config"
     with_items: "{{ oo_host_group_exp | default('') }}"
     when: oo_host_group_exp is defined
+  - name: Find masters for env
+    add_host: "name={{ item }} groups=oo_masters_for_node_config"
+    with_items: groups['tag_env-host-type-' + oo_env + '-openshift-master']
 
-- name: "Gather facts for masters in {{ oo_env }}"
-  hosts: "tag_env-host-type-{{ oo_env }}-openshift-master"
-  connection: ssh
-  user: root
+- name: Gather facts for masters in {{ oo_env }}
+  hosts: tag_env-host-type-{{ oo_env }}-openshift-master
+  tasks:
+  - set_fact:
+      openshift_master_ip: "{{ openshift_ip }}"
+      openshift_master_api_url: "{{ openshift_api_url }}"
+      openshift_master_webui_url: "{{ openshift_webui_url }}"
+      openshift_master_hostname: "{{ openshift_hostname }}"
+      openshift_master_public_ip: "{{ openshift_public_ip }}"
+      openshift_master_api_public_url: "{{ openshift_api_public_url }}"
+      openshift_master_webui_public_url: "{{ openshift_webui_public_url }}"
+      openshift_master_public_hostnames: "{{ openshift_public_hostname }}"
 
-- name: "Set OO sepcific facts on localhost (for later use)"
-  hosts: localhost
-  gather_facts: no
+- name: Gather facts for hosts to configure
+  hosts: tag_env-host-type-{{ oo_env }}-openshift-node
+  tasks:
+  - set_fact:
+      openshift_node_hostname: "{{ openshift_hostname }}"
+      openshift_node_name: "{{ openshift_hostname }}"
+      openshift_node_cpu: "{{ openshift_node_cpu if openshift_node_cpu else ansible_processor_cores }}"
+      openshift_node_memory: "{{ openshift_node_memory if openshift_node_memory else (ansible_memtotal_mb|int * 1024 * 1024 * 0.75)|int }}"
+      openshift_node_pod_cidr: "{{ openshift_node_pod_cidr if openshift_node_pod_cidr else None }}"
+      openshift_node_host_ip: "{{ openshift_ip }}"
+      openshift_node_labels: "{{ openshift_node_labels if openshift_node_labels else {} }}"
+      openshift_node_annotations: "{{ openshift_node_annotations if openshift_node_annotations else {} }}"
+
+- name: Register nodes
+  hosts: tag_env-host-type-{{ oo_env }}-openshift-master[0]
+  vars:
+    openshift_node_group: tag_env-host-type-{{ oo_env }}-openshift-node
+    openshift_nodes: "{{ hostvars
+          | oo_select_keys(groups[openshift_node_group]) }}"
+    openshift_master_group: tag_env-host-type-{{ oo_env }}-openshift-master
+    openshift_master_urls: "{{ hostvars
+          | oo_select_keys(groups[openshift_master_group])
+          | oo_collect(attribute='openshift_master_api_url') }}"
+    openshift_master_public_urls: "{{ hostvars
+          | oo_select_keys(groups[openshift_master_group])
+          | oo_collect(attribute='openshift_master_api_public_url') }}"
+  pre_tasks:
+  roles:
+  - openshift_register_nodes
   tasks:
-    - name: Setting openshift_master_ips fact on localhost
-      set_fact:
-        openshift_master_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
-            | oo_collect(attribute='ansible_default_ipv4.address') }}"
-      when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
-    - name: Setting openshift_master_public_ips fact on localhost
-      set_fact:
-        openshift_master_public_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
-            | oo_collect(attribute='gce_public_ip') }}"
-      when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
+  - name: Create local temp directory for syncing certs
+    local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
+    register: mktemp
 
-- name: "Configure instances"
-  hosts: oo_hosts_to_config
-  connection: ssh
-  user: root
+  - name: Sync master certs to localhost
+    synchronize:
+      mode: pull
+      checksum: yes
+      src: /var/lib/openshift/openshift.local.certificates
+      dest: "{{ mktemp.stdout }}"
+
+# TODO: sync generated certs between masters
+#
+- name: Configure instances
+  hosts: oo_nodes_to_config
   vars_files:
-    - vars.yml
+  - vars.yml
+  vars:
+    openshift_master_group: tag_env-host-type-{{ oo_env }}-openshift-master
+    openshift_master_ips: "{{ hostvars
+          | oo_select_keys(groups[openshift_master_group])
+          | oo_collect(attribute='openshift_master_ip') }}"
+    openshift_master_hostnames: "{{ hostvars
+          | oo_select_keys(groups[openshift_master_group])
+          | oo_collect(attribute='openshift_master_hostname') }}"
+    openshift_master_public_ips: "{{ hostvars
+          | oo_select_keys(groups[openshift_master_group])
+          | oo_collect(attribute='openshift_master_public_ip') }}"
+    openshift_master_public_hostnames: "{{ hostvars
+          | oo_select_keys(groups[openshift_master_group])
+          | oo_collect(attribute='openshift_master_public_hostname') }}"
+    cert_parent_rel_path: openshift.local.certificates
+    cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift_node_name }}"
+    cert_base_path: /var/lib/openshift
+    cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
+    cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
+  pre_tasks:
+  - name: Ensure certificate directories exists
+    file:
+      path: "{{ item }}"
+      state: directory
+    with_items:
+    - "{{ cert_path }}"
+    - "{{ cert_parent_path }}/ca"
+
+  # TODO: only sync to a node if it's certs have been updated
+  # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+  # possibly test service started time against certificate/config file
+  # timestamps in openshift-node or openshift-sdn-node to trigger notify
+  # TODO: also copy ca cert: /var/lib/openshift/openshift.local.certificates/ca/cert.crt
+  - name: Sync certs to nodes
+    synchronize:
+      checksum: yes
+      src: "{{ item.src }}"
+      dest: "{{ item.dest }}"
+      owner: no
+      group: no
+    with_items:
+    - src: "{{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }}/{{ cert_rel_path }}"
+      dest: "{{ cert_parent_path }}"
+    - src: "{{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }}/{{ cert_parent_rel_path }}/ca/cert.crt"
+      dest: "{{ cert_parent_path }}/ca/cert.crt"
+  - local_action: file name={{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }} state=absent
+    run_once: true
   roles:
-    - {
-        role: openshift_node,
-        openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}",
-        openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}",
-        openshift_public_ip: "{{ gce_public_ip }}",
-        openshift_env: "{{ oo_env }}",
-      }
-    - docker
+    - openshift_node
     - os_env_extras

+ 4 - 4
playbooks/gce/openshift-node/launch.yml

@@ -24,8 +24,8 @@
         tags: "{{ oo_new_inst_tags }}"
       register: gce
 
-    - name: Add new instances public IPs to oo_hosts_to_config
-      add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config"
+    - name: Add new instances public IPs to oo_nodes_to_config
+      add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_nodes_to_config"
       with_items: gce.instance_data
 
     - name: Wait for ssh
@@ -48,10 +48,10 @@
 
 # Always bounce service to pick up new credentials
 #- name: "Restart instances"
-#  hosts: oo_hosts_to_config
+#  hosts: oo_nodes_to_config
 #  connection: ssh
 #  user: root
 #  tasks:
-#    - debug: var=groups.oo_hosts_to_config
+#    - debug: var=groups.oo_nodes_to_config
 #    - name: Restart OpenShift
 #      service: name=openshift-node enabled=yes state=restarted

+ 2 - 1
playbooks/gce/openshift-node/terminate.yml

@@ -12,9 +12,10 @@
     - debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
 
 
-- name: Terminate instances
+- name: Terminate node instances
   hosts: localhost
   connection: local
+  gather_facts: no
   tasks:
     - name: Terminate node instances
       gce:

+ 1 - 1
roles/docker/tasks/main.yml

@@ -11,5 +11,5 @@
 # From the origin rpm there exists instructions on how to
 # setup origin properly.  The following steps come from there
 - name: Change root to be in the Docker group
-  user: name=root groups=docker append=yes
+  user: name=root groups=dockerroot append=yes
 

+ 1 - 2
roles/openshift_common/README.md

@@ -15,8 +15,7 @@ Role Variables
 | Name                          | Default value                |                                        |
 |-------------------------------|------------------------------|----------------------------------------|
 | openshift_debug_level         | 0                            | Global openshift debug log verbosity   |
-| openshift_hostname_workaround | True                         | Workaround needed to set hostname to IP address |
-| openshift_hostname            | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_hostname            | UNDEF (Required)             | hostname to use for this instance |
 | openshift_public_ip           | UNDEF (Required)             | Public IP address to use for this host |
 | openshift_env                 | default                      | Envrionment name if multiple OpenShift instances |
 

+ 0 - 5
roles/openshift_common/defaults/main.yml

@@ -1,7 +1,2 @@
 ---
 openshift_debug_level: 0
-
-# TODO: Once openshift stops resolving hostnames for node queries remove
-# this...
-openshift_hostname_workaround: true
-openshift_hostname: "{{ ansible_default_ipv4.address if openshift_hostname_workaround else ansible_fqdn }}"

+ 1 - 2
roles/openshift_master/README.md

@@ -25,9 +25,8 @@ From openshift_common:
 | Name                          |  Default Value      |                     |
 |-------------------------------|---------------------|---------------------|
 | openshift_debug_level         | 0                   | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True                |                     |
 | openshift_public_ip           | UNDEF (Required)    | Public IP address to use for this host |
-| openshift_hostname            | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_hostname            | UNDEF (Required)    | hostname to use for this instance |
 
 Dependencies
 ------------

+ 5 - 30
roles/openshift_master/tasks/main.yml

@@ -1,4 +1,8 @@
 ---
+# TODO: allow for overriding default ports where possible
+# TODO: if setting up multiple masters, will need to predistribute the certs
+# to the additional masters before starting openshift-master
+
 - name: Install OpenShift Master package
   yum: pkg=openshift-master state=installed
 
@@ -6,9 +10,7 @@
   lineinfile:
     dest: /etc/sysconfig/openshift-master
     regexp: '^OPTIONS='
-    line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if
-    openshift_node_ips %} --nodes={{ openshift_node_ips
-              | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\""
+    line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\""
   notify:
   - restart openshift-master
 
@@ -34,42 +36,15 @@
     option: externally_managed
     value: "{{ openshift_master_manage_service_externally }}"
 
-# TODO: remove this when origin PR #1298 has landed in OSE
-- name: Workaround for openshift-master taking longer than 90 seconds to issue sdNotify signal
-  command: cp /usr/lib/systemd/system/openshift-master.service /etc/systemd/system/
-  args:
-    creates: /etc/systemd/system/openshift-master.service
-- ini_file:
-    dest: /etc/systemd/system/openshift-master.service
-    option: TimeoutStartSec
-    section: Service
-    value: 300
-    state: present
-  register: result
-- command: systemctl daemon-reload
-  when: result | changed
-# End of workaround pending PR #1298
-
 - name: Start and enable openshift-master
   service: name=openshift-master enabled=yes state=started
   when: not openshift_master_manage_service_externally
   register: result
 
-#TODO: remove this when origin PR #1204 has landed in OSE
-- name: need to pause here, otherwise we attempt to copy certificates generated by the master before they are generated
-  pause: seconds=30
-  when: result | changed
-# End of workaround pending PR #1204
-
 - name: Disable openshift-master if openshift-master is managed externally
   service: name=openshift-master enabled=false
   when: openshift_master_manage_service_externally
 
-# TODO: create an os_vars role that has generic env related config and move
-# the root kubeconfig setting there, cannot use dependencies to force ordering
-# with openshift_node and openshift_master because the way conditional
-# dependencies work with current ansible would also exclude the
-# openshift_common dependency.
 - name: Create .kube directory
   file:
     path: /root/.kube

+ 1 - 3
roles/openshift_node/README.md

@@ -21,15 +21,13 @@ From this role:
 | openshift_master_public_ips              | UNDEF (Required)      | List of the public IPs for the openhift-master hosts |
 | openshift_master_ips                     | UNDEF (Required)      | List of IP addresses for the openshift-master hosts to be used for node -> master communication |
 | openshift_registry_url                   | UNDEF (Optional)      | Default docker registry to use |
-| openshift_node_resources                 | { capacity: { cpu: , memory: } } | Resource specification for this node, cpu is the number of CPUs to advertise and memory is the amount of memory in bytes to advertise. Default values chosen when not set are the number of logical CPUs for the host and 75% of total system memory |
 
 From openshift_common:
 | Name                          |  Default Value      |                     | 
 |-------------------------------|---------------------|---------------------|
 | openshift_debug_level         | 0                   | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True                |                     |
 | openshift_public_ip           | UNDEF (Required)    | Public IP address to use for this host |
-| openshift_hostname            | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_hostname            | UNDEF (Required)    | hostname to use for this instance |
 
 Dependencies
 ------------

+ 0 - 6
roles/openshift_node/defaults/main.yml

@@ -4,9 +4,3 @@ openshift_node_debug_level: "{{ openshift_debug_level | default(0) }}"
 os_firewall_allow:
 - service: OpenShift kubelet
   port: 10250/tcp
-openshift_node_resources:
-  cpu:
-  memory:
-  cidr:
-openshift_node_labels: {}
-openshift_node_annotations: {}

+ 19 - 49
roles/openshift_node/tasks/main.yml

@@ -1,27 +1,29 @@
 ---
+- name: Test if node certs and config exist
+  stat: path={{ item }}
+  failed_when: not result.stat.exists
+  register: result
+  with_items:
+  - "{{ cert_path }}"
+  - "{{ cert_path }}/cert.crt"
+  - "{{ cert_path }}/key.key"
+  - "{{ cert_path }}/.kubeconfig"
+  - "{{ cert_path }}/server.crt"
+  - "{{ cert_path }}/server.key"
+  - "{{ cert_parent_path }}/ca/cert.crt"
+  #- "{{ cert_path }}/node.yaml"
+
 - name: Install OpenShift Node package
   yum: pkg=openshift-node state=installed
 
-- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
-  register: mktemp
-
-- name: Retrieve OpenShift Master credentials
-  local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' root@{{ openshift_master_public_ips[0] }}:/var/lib/openshift/openshift.local.certificates/admin/ {{ mktemp.stdout }}
-  ignore_errors: yes
-
-- file: path=/var/lib/openshift/openshift.local.certificates/admin state=directory
-
-- name: Store OpenShift Master credentials
-  local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' {{ mktemp.stdout }}/ root@{{ openshift_public_ip }}:/var/lib/openshift/openshift.local.certificates/admin
-  ignore_errors: yes
-
-- local_action: file name={{ mktemp.stdout }} state=absent
-
+# --create-certs=false is a temporary workaround until
+# https://github.com/openshift/origin/pull/1361 is merged upstream and it is
+# the default for nodes
 - name: Configure OpenShift Node settings
   lineinfile:
     dest: /etc/sysconfig/openshift-node
     regexp: '^OPTIONS='
-    line: "OPTIONS=\"--master=https://{{ openshift_master_ips[0] }}:8443 --hostname={{ openshift_hostname }} --loglevel={{ openshift_node_debug_level }}\""
+    line: "OPTIONS=\"--hostname={{ openshift_hostname }} --loglevel={{ openshift_node_debug_level }} --create-certs=false\""
   notify:
   - restart openshift-node
 
@@ -47,42 +49,10 @@
     option: externally_managed
     value: "{{ openshift_node_manage_service_externally }}"
 
-# fixme: Once the openshift_cluster playbook is published state should be started
-# Always bounce service to pick up new credentials
 - name: Start and enable openshift-node
-  service: name=openshift-node enabled=yes state=restarted
+  service: name=openshift-node enabled=yes state=started
   when: not openshift_node_manage_service_externally
 
 - name: Disable openshift-node if openshift-node is managed externally
   service: name=openshift-node enabled=false
   when: openshift_node_manage_service_externally
-
-# TODO: create an os_vars role that has generic env related config and move
-# the root kubeconfig setting there, cannot use dependencies to force ordering
-# with openshift_node and openshift_master because the way conditional
-# dependencies work with current ansible would also exclude the
-# openshift_common dependency.
-- name: Create .kube directory
-  file:
-    path: /root/.kube
-    state: directory
-    mode: 0700
-- name: Configure root user kubeconfig
-  command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig
-  args:
-    creates: /root/.kube/.kubeconfig
-
-- name: Register node (if not already registered)
-  openshift_register_node:
-    name: "{{ openshift_hostname }}"
-    api_version: v1beta1
-    cpu: "{{ openshift_node_resources.cpu }}"
-    memory: "{{ openshift_node_resources.memory }}"
-    pod_cidr: "{{ openshift_node_resources.cidr }}"
-    host_ip: "{{ ansible_default_ipv4.address }}"
-    labels: "{{ openshift_node_labels }}"
-    annotations: "{{ openshift_node_annotations }}"
-    # TODO: support customizing other attributes such as: client_config,
-    # client_cluster, client_context, client_user
-    # TODO: updated for v1beta3 changes after rebase: hostnames, external_ips,
-    # internal_ips, external_id

+ 38 - 0
roles/openshift_register_nodes/README.md

@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+    - hosts: servers
+      roles:
+         - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).

+ 5 - 0
roles/openshift_register_nodes/defaults/main.yml

@@ -0,0 +1,5 @@
+---
+openshift_kube_api_version: v1beta1
+openshift_cert_dir: openshift.local.certificates
+openshift_cert_dir_parent: /var/lib/openshift
+openshift_cert_dir_abs: "{{ openshift_cert_dir_parent ~ '/' ~ openshift_cert_dir }}"

+ 11 - 31
roles/openshift_node/library/openshift_register_node.py

@@ -53,21 +53,17 @@ options:
     cpu:
         default: null
         description:
-            - Number of CPUs to allocate for this node. If not provided, then
-              the node will be registered to advertise the number of logical
-              CPUs available. When using the v1beta1 API, you must specify the
-              CPU count as a floating point number with no more than 3 decimal
-              places. API version v1beta3 and newer accepts arbitrary float
-              values.
+            - Number of CPUs to allocate for this node. When using the v1beta1
+              API, you must specify the CPU count as a floating point number
+              with no more than 3 decimal places. API version v1beta3 and newer
+              accepts arbitrary float values.
         required: false
     memory:
         default: null
         description:
-            - Memory available for this node. If not provided, then the node
-              will be registered to advertise 80% of MemTotal as available
-              memory. When using the v1beta1 API, you must specify the memory
-              size in bytes. API version v1beta3 and newer accepts binary SI
-              and decimal SI values.
+            - Memory available for this node. When using the v1beta1 API, you
+              must specify the memory size in bytes. API version v1beta3 and
+              newer accepts binary SI and decimal SI values.
         required: false
 '''
 EXAMPLES = '''
@@ -152,22 +148,6 @@ class ClientConfig:
 
 class Util:
     @staticmethod
-    def getLogicalCores():
-        return multiprocessing.cpu_count()
-
-    @staticmethod
-    def getMemoryPct(pct):
-        with open('/proc/meminfo', 'r') as mem:
-            for line in mem:
-                entries = line.split()
-                if str(entries.pop(0)) == 'MemTotal:':
-                    mem_total_kb = Decimal(entries.pop(0))
-                    mem_capacity_kb = mem_total_kb * Decimal(pct)
-                    return str(mem_capacity_kb.to_integral_value() * 1024)
-
-        return ""
-
-    @staticmethod
     def remove_empty_elements(mapping):
         if isinstance(mapping, dict):
             m = mapping.copy()
@@ -182,8 +162,8 @@ class NodeResources:
     def __init__(self, version, cpu=None, memory=None):
         if version == 'v1beta1':
             self.resources = dict(capacity=dict())
-            self.resources['capacity']['cpu'] = cpu if cpu else Util.getLogicalCores()
-            self.resources['capacity']['memory'] = memory if cpu else Util.getMemoryPct(.75)
+            self.resources['capacity']['cpu'] = cpu
+            self.resources['capacity']['memory'] = memory
 
     def get_resources(self):
         return Util.remove_empty_elements(self.resources)
@@ -193,8 +173,8 @@ class NodeSpec:
         if version == 'v1beta3':
             self.spec = dict(podCIDR=cidr, externalID=externalID,
                              capacity=dict())
-            self.spec['capacity']['cpu'] = cpu if cpu else Util.getLogicalCores()
-            self.spec['capacity']['memory'] = memory if memory else Util.getMemoryPct(.75)
+            self.spec['capacity']['cpu'] = cpu
+            self.spec['capacity']['memory'] = memory
 
     def get_spec(self):
         return Util.remove_empty_elements(self.spec)

+ 128 - 0
roles/openshift_register_nodes/meta/main.yml

@@ -0,0 +1,128 @@
+---
+galaxy_info:
+  author: your name
+  description: 
+  company: your company (optional)
+  # Some suggested licenses:
+  # - BSD (default)
+  # - MIT
+  # - GPLv2
+  # - GPLv3
+  # - Apache
+  # - CC-BY
+  license: license (GPLv2, CC-BY, etc)
+  min_ansible_version: 1.2
+  #
+  # Below are all platforms currently available. Just uncomment
+  # the ones that apply to your role. If you don't see your 
+  # platform on this list, let us know and we'll get it added!
+  #
+  #platforms:
+  #- name: EL
+  #  versions:
+  #  - all
+  #  - 5
+  #  - 6
+  #  - 7
+  #- name: GenericUNIX
+  #  versions:
+  #  - all
+  #  - any
+  #- name: Fedora
+  #  versions:
+  #  - all
+  #  - 16
+  #  - 17
+  #  - 18
+  #  - 19
+  #  - 20
+  #- name: SmartOS
+  #  versions:
+  #  - all
+  #  - any
+  #- name: opensuse
+  #  versions:
+  #  - all
+  #  - 12.1
+  #  - 12.2
+  #  - 12.3
+  #  - 13.1
+  #  - 13.2
+  #- name: Amazon
+  #  versions:
+  #  - all
+  #  - 2013.03
+  #  - 2013.09
+  #- name: GenericBSD
+  #  versions:
+  #  - all
+  #  - any
+  #- name: FreeBSD
+  #  versions:
+  #  - all
+  #  - 8.0
+  #  - 8.1
+  #  - 8.2
+  #  - 8.3
+  #  - 8.4
+  #  - 9.0
+  #  - 9.1
+  #  - 9.1
+  #  - 9.2
+  #- name: Ubuntu
+  #  versions:
+  #  - all
+  #  - lucid
+  #  - maverick
+  #  - natty
+  #  - oneiric
+  #  - precise
+  #  - quantal
+  #  - raring
+  #  - saucy
+  #  - trusty
+  #- name: SLES
+  #  versions:
+  #  - all
+  #  - 10SP3
+  #  - 10SP4
+  #  - 11
+  #  - 11SP1
+  #  - 11SP2
+  #  - 11SP3
+  #- name: GenericLinux
+  #  versions:
+  #  - all
+  #  - any
+  #- name: Debian
+  #  versions:
+  #  - all
+  #  - etch
+  #  - lenny
+  #  - squeeze
+  #  - wheezy
+  #
+  # Below are all categories currently available. Just as with
+  # the platforms above, uncomment those that apply to your role.
+  #
+  #categories:
+  #- cloud
+  #- cloud:ec2
+  #- cloud:gce
+  #- cloud:rax
+  #- clustering
+  #- database
+  #- database:nosql
+  #- database:sql
+  #- development
+  #- monitoring
+  #- networking
+  #- packaging
+  #- system
+  #- web
+dependencies: []
+  # List your role dependencies here, one per line. Only
+  # dependencies available via galaxy should be listed here.
+  # Be sure to remove the '[]' above if you add dependencies
+  # to this list.
+  

+ 71 - 0
roles/openshift_register_nodes/tasks/main.yml

@@ -0,0 +1,71 @@
+---
+# TODO: support configuration for multiple masters, currently hardcoding
+# the info from the first master
+
+# TODO: create a failed_when condition
+- name: Create node server certificates
+  command: >
+    /usr/bin/openshift admin create-server-cert
+    --overwrite=false
+    --cert={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/server.crt
+    --key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/server.key
+    --hostnames={{ [openshift_hostname, openshift_public_hostname, openshift_ip, openshift_public_ip]|join(",") }}
+  args:
+    chdir: "{{ openshift_cert_dir_parent }}"
+    creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/server.crt"
+  with_items: openshift_nodes
+  register: server_cert_result
+
+# TODO: create a failed_when condition
+- name: Create node client certificates
+  command: >
+    /usr/bin/openshift admin create-node-cert
+    --overwrite=false
+    --cert={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/cert.crt
+    --key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/key.key
+    --node-name={{ item.openshift_node_hostname }}
+  args:
+    chdir: "{{ openshift_cert_dir_parent }}"
+    creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/cert.crt"
+  with_items: openshift_nodes
+  register: node_cert_result
+
+# TODO: re-create kubeconfig if certs were regenerated, not just if
+# .kubeconfig doesn't exist
+# TODO: create a failed_when condition
+- name: Create kubeconfigs for nodes
+  command: >
+    /usr/bin/openshift admin create-kubeconfig
+    --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/cert.crt
+    --client-key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/key.key
+    --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/.kubeconfig
+    --master={{ openshift_master_urls[0] }}
+    --public-master={{ openshift_master_public_urls[0] }}
+  args:
+    chdir: "{{ openshift_cert_dir_parent }}"
+    creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/.kubeconfig"
+  with_items: openshift_nodes
+  register: kubeconfig_result
+
+# TODO: generate the node configs (openshift start node --write-config
+# --config='{{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/node.yaml'
+# --kubeconfig='{{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/.kubeconfig'
+# will need to modify the generated node config as needed
+# (servingInfo.{certFile,clientCA,keyFile})
+
+- name: Register unregistered nodes
+  kubernetes_register_node:
+    name: "{{ item.openshift_node_name }}"
+    api_version: "{{ openshift_kube_api_version }}"
+    cpu: "{{ item.openshift_node_cpu if item.openshift_node_cpu else None }}"
+    memory: "{{ item.openshift_node_memory if item.openshift_node_memory else None }}"
+    pod_cidr: "{{ item.openshift_node_pod_cidr if item.openshift_node_pod_cidr else None }}"
+    host_ip: "{{ item.openshift_node_host_ip }}"
+    labels: "{{ item.openshift_node_labels if item.openshift_node_labels else {} }}"
+    annotations: "{{ item.openshift_node_annotations if item.openshift_node_annotations else {} }}"
+    # TODO: support customizing other attributes such as: client_config,
+    # client_cluster, client_context, client_user
+    # TODO: update for v1beta3 changes after rebase: hostnames, external_ips,
+    # internal_ips, external_id
+  with_items: openshift_nodes
+  register: register_result

+ 1 - 2
roles/openshift_sdn_node/README.md

@@ -27,9 +27,8 @@ From openshift_common:
 | Name                          | Default value       |                                        |
 |-------------------------------|---------------------|----------------------------------------|
 | openshift_debug_level         | 0                   | Global openshift debug log verbosity   |
-| openshift_hostname_workaround | True                |                                        |
 | openshift_public_ip           | UNDEF (Required)    | Public IP address to use for this host |
-| openshift_hostname            | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_hostname            | UNDEF (Required)    | hostname to use for this instance |
 
 Dependencies
 ------------

+ 40 - 22
roles/os_firewall/library/os_firewall_manage_iptables.py

@@ -51,11 +51,13 @@ class IpTablesCreateJumpRuleError(IpTablesError):
 # exception was thrown later. for example, when the chain is created
 # successfully, but the add/remove rule fails.
 class IpTablesManager:
-    def __init__(self, module, ip_version, check_mode, chain):
+    def __init__(self, module):
         self.module = module
-        self.ip_version = ip_version
-        self.check_mode = check_mode
-        self.chain = chain
+        self.ip_version = module.params['ip_version']
+        self.check_mode = module.check_mode
+        self.chain = module.params['chain']
+        self.create_jump_rule = module.params['create_jump_rule']
+        self.jump_rule_chain = module.params['jump_rule_chain']
         self.cmd = self.gen_cmd()
         self.save_cmd = self.gen_save_cmd()
         self.output = []
@@ -70,13 +72,16 @@ class IpTablesManager:
                 msg="Failed to save iptables rules",
                 cmd=e.cmd, exit_code=e.returncode, output=e.output)
 
+    def verify_chain(self):
+        if not self.chain_exists():
+            self.create_chain()
+        if self.create_jump_rule and not self.jump_rule_exists():
+            self.create_jump()
+
     def add_rule(self, port, proto):
         rule = self.gen_rule(port, proto)
         if not self.rule_exists(rule):
-            if not self.chain_exists():
-                self.create_chain()
-            if not self.jump_rule_exists():
-                self.create_jump_rule()
+            self.verify_chain()
 
             if self.check_mode:
                 self.changed = True
@@ -121,13 +126,13 @@ class IpTablesManager:
         return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
                 '-m', proto, '--dport', str(port), '-j', 'ACCEPT']
 
-    def create_jump_rule(self):
+    def create_jump(self):
         if self.check_mode:
             self.changed = True
             self.output.append("Create jump rule for chain %s" % self.chain)
         else:
             try:
-                cmd = self.cmd + ['-L', 'INPUT', '--line-numbers']
+                cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']
                 output = check_output(cmd, stderr=subprocess.STDOUT)
 
                 # break the input rules into rows and columns
@@ -144,11 +149,11 @@ class IpTablesManager:
                             continue
                         last_rule_target = rule[1]
 
-                # Raise an exception if we do not find a valid INPUT rule
+                # Raise an exception if we do not find a valid rule
                 if not last_rule_num or not last_rule_target:
                    raise IpTablesCreateJumpRuleError(
                         chain=self.chain,
-                        msg="Failed to find existing INPUT rules",
+                        msg="Failed to find existing %s rules" % self.jump_rule_chain,
                         cmd=None, exit_code=None, output=None)
 
                 # Naively assume that if the last row is a REJECT rule, then
@@ -156,19 +161,20 @@ class IpTablesManager:
                 # assume that we can just append the rule.
                 if last_rule_target == 'REJECT':
                     # insert rule
-                    cmd = self.cmd + ['-I', 'INPUT', str(last_rule_num)]
+                    cmd = self.cmd + ['-I', self.jump_rule_chain, str(last_rule_num)]
                 else:
                     # append rule
-                    cmd = self.cmd + ['-A', 'INPUT']
+                    cmd = self.cmd + ['-A', self.jump_rule_chain]
                 cmd += ['-j', self.chain]
                 output = check_output(cmd, stderr=subprocess.STDOUT)
                 changed = True
                 self.output.append(output)
+                self.save()
             except subprocess.CalledProcessError as e:
                 if '--line-numbers' in e.cmd:
                     raise IpTablesCreateJumpRuleError(
                         chain=self.chain,
-                        msg="Failed to query existing INPUT rules to "
+                        msg="Failed to query existing %s rules to " % self.jump_rule_chain +
                             "determine jump rule location",
                         cmd=e.cmd, exit_code=e.returncode,
                         output=e.output)
@@ -192,6 +198,7 @@ class IpTablesManager:
                 self.changed = True
                 self.output.append("Successfully created chain %s" %
                                    self.chain)
+                self.save()
             except subprocess.CalledProcessError as e:
                 raise IpTablesCreateChainError(
                     chain=self.chain,
@@ -200,7 +207,7 @@ class IpTablesManager:
                     )
 
     def jump_rule_exists(self):
-        cmd = self.cmd + ['-C', 'INPUT', '-j', self.chain]
+        cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
         return True if subprocess.call(cmd) == 0 else False
 
     def chain_exists(self):
@@ -220,9 +227,12 @@ def main():
     module = AnsibleModule(
         argument_spec=dict(
             name=dict(required=True),
-            action=dict(required=True, choices=['add', 'remove']),
-            protocol=dict(required=True, choices=['tcp', 'udp']),
-            port=dict(required=True, type='int'),
+            action=dict(required=True, choices=['add', 'remove', 'verify_chain']),
+            chain=dict(required=False, default='OS_FIREWALL_ALLOW'),
+            create_jump_rule=dict(required=False, type='bool', default=True),
+            jump_rule_chain=dict(required=False, default='INPUT'),
+            protocol=dict(required=False, choices=['tcp', 'udp']),
+            port=dict(required=False, type='int'),
             ip_version=dict(required=False, default='ipv4',
                             choices=['ipv4', 'ipv6']),
         ),
@@ -232,16 +242,24 @@ def main():
     action = module.params['action']
     protocol = module.params['protocol']
     port = module.params['port']
-    ip_version = module.params['ip_version']
-    chain = 'OS_FIREWALL_ALLOW'
 
-    iptables_manager = IpTablesManager(module, ip_version, module.check_mode, chain)
+    if action in ['add', 'remove']:
+        if not protocol:
+            error = "protocol is required when action is %s" % action
+            module.fail_json(msg=error)
+        if not port:
+            error = "port is required when action is %s" % action
+            module.fail_json(msg=error)
+
+    iptables_manager = IpTablesManager(module)
 
     try:
         if action == 'add':
             iptables_manager.add_rule(port, protocol)
         elif action == 'remove':
             iptables_manager.remove_rule(port, protocol)
+        elif action == 'verify_chain':
+            iptables_manager.verify_chain()
     except IpTablesError as e:
         module.fail_json(msg=e.msg)
 

+ 3 - 0
roles/os_update_latest/tasks/main.yml

@@ -0,0 +1,3 @@
+---
+- name: Update all packages
+  yum: name=* state=latest