
Merge pull request #341 from detiber/sdodson-etcd-playbook

External clustered etcd support
Thomas Wiest committed 9 years ago
commit 68d6fdf1c1
99 changed files with 1557 additions and 1295 deletions (each entry below lists additions and deletions per file):
  1. 18 2
      README_AWS.md
  2. 11 2
      bin/cluster
  3. 10 3
      filter_plugins/oo_filters.py
  4. 35 5
      git/pylint.sh
  5. 5 1
      inventory/byo/hosts
  6. 215 0
      lookup_plugins/sequence.py
  7. 9 24
      playbooks/aws/openshift-cluster/config.yml
  8. 11 3
      playbooks/aws/openshift-cluster/launch.yml
  9. 1 0
      playbooks/aws/openshift-cluster/lookup_plugins
  10. 9 0
      playbooks/aws/openshift-cluster/tasks/launch_instances.yml
  11. 20 0
      playbooks/aws/openshift-cluster/templates/user_data.j2
  12. 50 1
      playbooks/aws/openshift-cluster/terminate.yml
  13. 3 1
      playbooks/aws/openshift-cluster/update.yml
  14. 0 19
      playbooks/aws/openshift-master/config.yml
  15. 0 1
      playbooks/aws/openshift-master/filter_plugins
  16. 0 70
      playbooks/aws/openshift-master/launch.yml
  17. 0 1
      playbooks/aws/openshift-master/roles
  18. 0 2
      playbooks/aws/openshift-master/terminate.yml
  19. 0 26
      playbooks/aws/openshift-node/config.yml
  20. 0 1
      playbooks/aws/openshift-node/filter_plugins
  21. 0 72
      playbooks/aws/openshift-node/launch.yml
  22. 0 1
      playbooks/aws/openshift-node/roles
  23. 0 2
      playbooks/aws/openshift-node/terminate.yml
  24. 0 64
      playbooks/aws/terminate.yml
  25. 1 7
      playbooks/byo/config.yml
  26. 1 0
      playbooks/byo/lookup_plugins
  27. 9 0
      playbooks/byo/openshift-cluster/config.yml
  28. 0 0
      playbooks/byo/openshift-cluster/filter_plugins
  29. 1 0
      playbooks/byo/openshift-cluster/lookup_plugins
  30. 0 0
      playbooks/byo/openshift-cluster/roles
  31. 0 15
      playbooks/byo/openshift-master/config.yml
  32. 0 1
      playbooks/byo/openshift-master/filter_plugins
  33. 0 1
      playbooks/byo/openshift-master/roles
  34. 0 23
      playbooks/byo/openshift-node/config.yml
  35. 0 1
      playbooks/byo/openshift-node/filter_plugins
  36. 0 1
      playbooks/byo/openshift-node/roles
  37. 61 0
      playbooks/common/openshift-cluster/config.yml
  38. 1 0
      playbooks/common/openshift-cluster/lookup_plugins
  39. 13 0
      playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml
  40. 4 2
      playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
  41. 4 2
      playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
  42. 96 0
      playbooks/common/openshift-etcd/config.yml
  43. 0 0
      playbooks/common/openshift-etcd/filter_plugins
  44. 1 0
      playbooks/common/openshift-etcd/lookup_plugins
  45. 1 0
      playbooks/common/openshift-etcd/roles
  46. 18 0
      playbooks/common/openshift-etcd/service.yml
  47. 197 2
      playbooks/common/openshift-master/config.yml
  48. 1 0
      playbooks/common/openshift-master/lookup_plugins
  49. 14 25
      playbooks/common/openshift-node/config.yml
  50. 1 0
      playbooks/common/openshift-node/lookup_plugins
  51. 10 24
      playbooks/gce/openshift-cluster/config.yml
  52. 1 0
      playbooks/gce/openshift-cluster/lookup_plugins
  53. 3 1
      playbooks/gce/openshift-cluster/update.yml
  54. 0 18
      playbooks/gce/openshift-master/config.yml
  55. 0 51
      playbooks/gce/openshift-master/launch.yml
  56. 0 1
      playbooks/gce/openshift-master/roles
  57. 0 35
      playbooks/gce/openshift-master/terminate.yml
  58. 0 25
      playbooks/gce/openshift-node/config.yml
  59. 0 51
      playbooks/gce/openshift-node/launch.yml
  60. 0 35
      playbooks/gce/openshift-node/terminate.yml
  61. 9 24
      playbooks/libvirt/openshift-cluster/config.yml
  62. 3 1
      playbooks/libvirt/openshift-cluster/update.yml
  63. 9 24
      playbooks/openstack/openshift-cluster/config.yml
  64. 3 1
      playbooks/openstack/openshift-cluster/update.yml
  65. 39 0
      roles/etcd/README.md
  66. 31 0
      roles/etcd/defaults/main.yaml
  67. 3 0
      roles/etcd/handlers/main.yml
  68. 19 0
      roles/etcd/meta/main.yml
  69. 52 0
      roles/etcd/tasks/main.yml
  70. 52 0
      roles/etcd/templates/etcd.conf.j2
  71. 34 0
      roles/etcd_ca/README.md
  72. 16 0
      roles/etcd_ca/meta/main.yml
  73. 44 0
      roles/etcd_ca/tasks/main.yml
  74. 51 0
      roles/etcd_ca/templates/openssl_append.j2
  75. 3 0
      roles/etcd_ca/vars/main.yml
  76. 34 0
      roles/etcd_certificates/README.md
  77. 16 0
      roles/etcd_certificates/meta/main.yml
  78. 42 0
      roles/etcd_certificates/tasks/client.yml
  79. 9 0
      roles/etcd_certificates/tasks/main.yml
  80. 73 0
      roles/etcd_certificates/tasks/server.yml
  81. 11 0
      roles/etcd_certificates/vars/main.yml
  82. 14 8
      roles/openshift_facts/library/openshift_facts.py
  83. 6 15
      roles/openshift_master/tasks/main.yml
  84. 4 4
      roles/openshift_master/templates/master.yaml.v1.j2
  85. 34 0
      roles/openshift_master_ca/README.md
  86. 0 1
      roles/openshift_register_nodes/meta/main.yml
  87. 22 0
      roles/openshift_master_ca/tasks/main.yml
  88. 5 0
      roles/openshift_master_ca/vars/main.yml
  89. 34 0
      roles/openshift_master_certificates/README.md
  90. 16 0
      roles/openshift_master_certificates/meta/main.yml
  91. 24 0
      roles/openshift_master_certificates/tasks/main.yml
  92. 1 3
      roles/openshift_register_nodes/vars/main.yml
  93. 6 8
      roles/openshift_node/tasks/main.yml
  94. 2 2
      roles/openshift_node/templates/node.yaml.v1.j2
  95. 1 0
      roles/openshift_node_certificates/tasks/main.yml
  96. 0 1
      roles/openshift_node_certificates/vars/main.yml
  97. 0 15
      roles/openshift_register_nodes/README.md
  98. 0 513
      roles/openshift_register_nodes/library/kubernetes_register_node.py
  99. 0 53
      roles/openshift_register_nodes/tasks/main.yml

+ 18 - 2
README_AWS.md

@@ -20,10 +20,11 @@ Create a credentials file
 ```
 Note: You must source this file before running any Ansible commands.
 
+Alternatively, you could configure credentials in either ~/.boto or ~/.aws/credentials; see the [boto docs](http://docs.pythonboto.org/en/latest/boto_config_tut.html) for the format.
 
 (Optional) Setup your $HOME/.ssh/config file
 -------------------------------------------
-In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use '.ssh/config'
+In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use `.ssh/config`
 to setup a private key file to allow ansible to connect to the created hosts.
 
 To do so, add the following entry to your $HOME/.ssh/config file and make it point to the private key file which allows you to log in on AWS.
@@ -62,10 +63,16 @@ Node specific defaults:
 If needed, these values can be changed by setting environment variables on your system.
 
 - export ec2_instance_type='m3.large'
-- export ec2_ami='ami-307b3658'
+- export ec2_image='ami-307b3658'
 - export ec2_region='us-east-1'
 - export ec2_keypair='libra'
 - export ec2_security_groups="['public']"
+- export ec2_vpc_subnet='my_vpc_subnet'
+- export ec2_assign_public_ip='true'
+- export os_etcd_root_vol_size='20'
+- export os_etcd_root_vol_type='standard'
+- export os_etcd_vol_size='20'
+- export os_etcd_vol_type='standard'
 - export os_master_root_vol_size='20'
 - export os_master_root_vol_type='standard'
 - export os_node_root_vol_size='15'
@@ -114,3 +121,12 @@ Terminating a cluster
 ```
   bin/cluster terminate aws <cluster-id>
 ```
+
+Specifying a deployment type
+---------------------------
+The --deployment-type flag can be passed to bin/cluster to specify the deployment type
+1. To launch an online cluster (requires access to private repositories and amis):
+```
+  bin/cluster create aws --deployment-type=online <cluster-id>
+```
+Note: If no deployment type is specified, then the default is origin.

+ 11 - 2
bin/cluster

@@ -51,6 +51,7 @@ class Cluster(object):
 
         env['num_masters'] = args.masters
         env['num_nodes'] = args.nodes
+        env['num_etcd'] = args.etcd
 
         return self.action(args, inventory, env, playbook)
 
@@ -143,8 +144,14 @@ class Cluster(object):
 
             inventory = '-i inventory/aws/hosts'
 
-            missing = [key for key in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'] if key not in os.environ]
-            if len(missing) > 0:
+            key_vars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
+            key_missing = [key for key in key_vars if key not in os.environ]
+
+            boto_conf_files = ['~/.aws/credentials', '~/.boto']
+            conf_exists = lambda conf: os.path.isfile(os.path.expanduser(conf))
+            boto_configs = [ conf for conf in boto_conf_files if conf_exists(conf)]
+
+            if len(key_missing) > 0 and len(boto_configs) == 0:
                 raise ValueError("PROVIDER aws requires {} environment variable(s). See README_AWS.md".format(missing))
 
         elif 'libvirt' == provider:
@@ -255,6 +262,8 @@ if __name__ == '__main__':
                                help='number of masters to create in cluster')
     create_parser.add_argument('-n', '--nodes', default=2, type=int,
                                help='number of nodes to create in cluster')
+    create_parser.add_argument('-e', '--etcd', default=0, type=int,
+                               help='number of external etcd hosts to create in cluster')
     create_parser.set_defaults(func=cluster.create)
 
     config_parser = action_parser.add_parser('config',
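
A standalone Python sketch of the credential check this change introduces in bin/cluster: the AWS provider is considered usable when both key environment variables are set, or when a boto configuration file exists. The helper name `aws_credentials_available` is illustrative, not part of the PR.

```python
import os

def aws_credentials_available():
    """Illustrative restatement of the check added to bin/cluster."""
    key_vars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
    key_missing = [key for key in key_vars if key not in os.environ]

    boto_conf_files = ['~/.aws/credentials', '~/.boto']
    boto_configs = [conf for conf in boto_conf_files
                    if os.path.isfile(os.path.expanduser(conf))]

    # Usable when no env var is missing, or at least one boto config exists.
    return len(key_missing) == 0 or len(boto_configs) > 0

if not aws_credentials_available():
    raise ValueError("PROVIDER aws requires AWS_ACCESS_KEY_ID and "
                     "AWS_SECRET_ACCESS_KEY. See README_AWS.md")
```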

+ 10 - 3
filter_plugins/oo_filters.py

@@ -175,9 +175,9 @@ class FilterModule(object):
         '''
         if not issubclass(type(data), dict):
             raise errors.AnsibleFilterError("|failed expects first param is a dict")
-        if host_type not in ['master', 'node']:
-            raise errors.AnsibleFilterError("|failed expects either master or node"
-                                            " host type")
+        if host_type not in ['master', 'node', 'etcd']:
+            raise errors.AnsibleFilterError("|failed expects etcd, master or node"
+                                            " as the host type")
 
         root_vol = data[host_type]['root']
         root_vol['device_name'] = '/dev/sda1'
@@ -195,6 +195,13 @@ class FilterModule(object):
                 docker_vol.pop('delete_on_termination', None)
                 docker_vol['ephemeral'] = 'ephemeral0'
             return [root_vol, docker_vol]
+        elif host_type == 'etcd':
+            etcd_vol = data[host_type]['etcd']
+            etcd_vol['device_name'] = '/dev/xvdb'
+            etcd_vol['delete_on_termination'] = True
+            if etcd_vol['device_type'] != 'io1':
+                etcd_vol.pop('iops', None)
+            return [root_vol, etcd_vol]
         return [root_vol]
 
     @staticmethod
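
For etcd hosts the filter now emits two block device mappings: the root volume on /dev/sda1 and a dedicated etcd data volume on /dev/xvdb. A simplified standalone sketch of that logic follows; the function name is illustrative, and root-volume details not shown in the hunk are kept minimal.

```python
def ec2_volume_mappings(volume_defs, host_type):
    """Simplified sketch of the filter's volume handling for etcd hosts."""
    if host_type not in ['master', 'node', 'etcd']:
        raise ValueError("expects etcd, master or node as the host type")

    root_vol = dict(volume_defs[host_type]['root'])
    root_vol['device_name'] = '/dev/sda1'   # as in the filter above

    if host_type == 'etcd':
        etcd_vol = dict(volume_defs[host_type]['etcd'])
        etcd_vol['device_name'] = '/dev/xvdb'
        etcd_vol['delete_on_termination'] = True
        if etcd_vol.get('device_type') != 'io1':
            etcd_vol.pop('iops', None)       # iops only applies to io1 volumes
        return [root_vol, etcd_vol]
    return [root_vol]

# Defaults mirroring playbooks/aws/openshift-cluster/tasks/launch_instances.yml:
defs = {'etcd': {'root': {'volume_size': 25, 'device_type': 'gp2', 'iops': 500},
                 'etcd': {'volume_size': 32, 'device_type': 'gp2', 'iops': 500}}}
print(ec2_volume_mappings(defs, 'etcd'))
```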

+ 35 - 5
git/pylint.sh

@@ -1,14 +1,44 @@
 #!/usr/bin/env bash
+set -eu
 
+ANSIBLE_UPSTREAM_FILES=(
+    'inventory/aws/hosts/ec2.py'
+    'inventory/gce/hosts/gce.py'
+    'inventory/libvirt/hosts/libvirt_generic.py'
+    'inventory/openstack/hosts/nova.py'
+    'lookup_plugins/sequence.py'
+  )
 
 OLDREV=$1
 NEWREV=$2
-TRG_BRANCH=$3
+#TRG_BRANCH=$3
 
 PYTHON=/var/lib/jenkins/python27/bin/python
 
-/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | \
- grep ".py$" | \
- xargs -r -I{} ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc  {}
+PY_DIFF=$(/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | grep ".py$")
 
-exit $?
+FILES_TO_TEST=""
+
+for PY_FILE in $PY_DIFF; do
+  IGNORE_FILE=false
+  for UPSTREAM_FILE in "${ANSIBLE_UPSTREAM_FILES[@]}"; do
+    if [ "${PY_FILE}" == "${UPSTREAM_FILE}" ]; then
+      IGNORE_FILE=true
+      break
+    fi
+  done
+
+  if [ "${IGNORE_FILE}" == true ]; then
+    echo "Skipping file ${PY_FILE} as an upstream Ansible file..."
+    continue
+  fi
+
+  if [ -e "${PY_FILE}" ]; then
+    FILES_TO_TEST="${FILES_TO_TEST} ${PY_FILE}"
+  fi
+done
+
+if [ "${FILES_TO_TEST}" != "" ]; then
+  echo "Testing files: ${FILES_TO_TEST}"
+  ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc ${FILES_TO_TEST}
+fi
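
The hook now lints only changed .py files that still exist and are not vendored upstream Ansible files. The same filtering, expressed as a small Python sketch for reference (the bash above remains the actual implementation):

```python
import os

# Vendored upstream Ansible files that should not be linted.
ANSIBLE_UPSTREAM_FILES = {
    'inventory/aws/hosts/ec2.py',
    'inventory/gce/hosts/gce.py',
    'inventory/libvirt/hosts/libvirt_generic.py',
    'inventory/openstack/hosts/nova.py',
    'lookup_plugins/sequence.py',
}

def files_to_lint(changed_files):
    """Keep only changed .py files that still exist and are not upstream."""
    return [f for f in changed_files
            if f.endswith('.py')
            and f not in ANSIBLE_UPSTREAM_FILES
            and os.path.exists(f)]
```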

+ 5 - 1
inventory/byo/hosts

@@ -4,6 +4,7 @@
 [OSEv3:children]
 masters
 nodes
+etcd
 
 # Set variables common for all OSEv3 hosts
 [OSEv3:vars]
@@ -33,7 +34,10 @@ openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl':
 [masters]
 ose3-master-ansible.test.example.com
 
+[etcd]
+#ose3-master-ansible.test.example.com
+
 # host group for nodes
 [nodes]
-#ose3-master-ansible.test.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
+ose3-master-ansible.test.example.com openshift_scheduleable=False
 ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"

+ 215 - 0
lookup_plugins/sequence.py

@@ -0,0 +1,215 @@
+# (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+from ansible.errors import AnsibleError
+import ansible.utils as utils
+from re import compile as re_compile, IGNORECASE
+
+# shortcut format
+NUM = "(0?x?[0-9a-f]+)"
+SHORTCUT = re_compile(
+    "^(" +        # Group 0
+    NUM +         # Group 1: Start
+    "-)?" +
+    NUM +         # Group 2: End
+    "(/" +        # Group 3
+    NUM +         # Group 4: Stride
+    ")?" +
+    "(:(.+))?$",  # Group 5, Group 6: Format String
+    IGNORECASE
+)
+
+
+class LookupModule(object):
+    """
+    sequence lookup module
+
+    Used to generate some sequence of items. Takes arguments in two forms.
+
+    The simple / shortcut form is:
+
+      [start-]end[/stride][:format]
+
+    As indicated by the brackets: start, stride, and format string are all
+    optional.  The format string is in the style of printf.  This can be used
+    to pad with zeros, format in hexadecimal, etc.  All of the numerical values
+    can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
+    Negative numbers are not supported.
+
+    Some examples:
+
+      5 -> ["1","2","3","4","5"]
+      5-8 -> ["5", "6", "7", "8"]
+      2-10/2 -> ["2", "4", "6", "8", "10"]
+      4:host%02d -> ["host01","host02","host03","host04"]
+
+    The standard Ansible key-value form is accepted as well.  For example:
+
+      start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0a"]
+
+    This format takes an alternate form of "end" called "count", which counts
+    some number from the starting value.  For example:
+
+      count=5 -> ["1", "2", "3", "4", "5"]
+      start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
+      start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
+      start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]
+
+    The count option is mostly useful for avoiding off-by-one errors and errors
+    calculating the number of entries in a sequence when a stride is specified.
+    """
+
+    def __init__(self, basedir, **kwargs):
+        """absorb any keyword args"""
+        self.basedir = basedir
+
+    def reset(self):
+        """set sensible defaults"""
+        self.start = 1
+        self.count = None
+        self.end = None
+        self.stride = 1
+        self.format = "%d"
+
+    def parse_kv_args(self, args):
+        """parse key-value style arguments"""
+        for arg in ["start", "end", "count", "stride"]:
+            try:
+                arg_raw = args.pop(arg, None)
+                if arg_raw is None:
+                    continue
+                arg_cooked = int(arg_raw, 0)
+                setattr(self, arg, arg_cooked)
+            except ValueError:
+                raise AnsibleError(
+                    "can't parse arg %s=%r as integer"
+                        % (arg, arg_raw)
+                )
+            if 'format' in args:
+                self.format = args.pop("format")
+        if args:
+            raise AnsibleError(
+                "unrecognized arguments to with_sequence: %r"
+                % args.keys()
+            )
+
+    def parse_simple_args(self, term):
+        """parse the shortcut forms, return True/False"""
+        match = SHORTCUT.match(term)
+        if not match:
+            return False
+
+        _, start, end, _, stride, _, format = match.groups()
+
+        if start is not None:
+            try:
+                start = int(start, 0)
+            except ValueError:
+                raise AnsibleError("can't parse start=%s as integer" % start)
+        if end is not None:
+            try:
+                end = int(end, 0)
+            except ValueError:
+                raise AnsibleError("can't parse end=%s as integer" % end)
+        if stride is not None:
+            try:
+                stride = int(stride, 0)
+            except ValueError:
+                raise AnsibleError("can't parse stride=%s as integer" % stride)
+
+        if start is not None:
+            self.start = start
+        if end is not None:
+            self.end = end
+        if stride is not None:
+            self.stride = stride
+        if format is not None:
+            self.format = format
+
+    def sanity_check(self):
+        if self.count is None and self.end is None:
+            raise AnsibleError(
+                "must specify count or end in with_sequence"
+            )
+        elif self.count is not None and self.end is not None:
+            raise AnsibleError(
+                "can't specify both count and end in with_sequence"
+            )
+        elif self.count is not None:
+            # convert count to end
+            if self.count != 0:
+                self.end = self.start + self.count * self.stride - 1
+            else:
+                self.start = 0
+                self.end = 0
+                self.stride = 0
+            del self.count
+        if self.stride > 0 and self.end < self.start:
+            raise AnsibleError("to count backwards make stride negative")
+        if self.stride < 0 and self.end > self.start:
+            raise AnsibleError("to count forward don't make stride negative")
+        if self.format.count('%') != 1:
+            raise AnsibleError("bad formatting string: %s" % self.format)
+
+    def generate_sequence(self):
+        if self.stride > 0:
+            adjust = 1
+        else:
+            adjust = -1
+        numbers = xrange(self.start, self.end + adjust, self.stride)
+
+        for i in numbers:
+            try:
+                formatted = self.format % i
+                yield formatted
+            except (ValueError, TypeError):
+                raise AnsibleError(
+                    "problem formatting %r with %r" % self.format
+                )
+
+    def run(self, terms, inject=None, **kwargs):
+        results = []
+
+        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+
+        if isinstance(terms, basestring):
+            terms = [ terms ]
+
+        for term in terms:
+            try:
+                self.reset()  # clear out things for this iteration
+
+                try:
+                    if not self.parse_simple_args(term):
+                        self.parse_kv_args(utils.parse_kv(term))
+                except Exception:
+                    raise AnsibleError(
+                        "unknown error parsing with_sequence arguments: %r"
+                        % term
+                    )
+
+                self.sanity_check()
+                if self.stride != 0:
+                    results.extend(self.generate_sequence())
+            except AnsibleError:
+                raise
+            except Exception, e:
+                raise AnsibleError(
+                    "unknown error generating sequence: %s" % str(e)
+                )
+
+        return results
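
To make the shortcut grammar concrete, here is a small standalone demo that mirrors the plugin's SHORTCUT regex and generation loop for positive strides (a sketch only, not the plugin itself):

```python
import re

NUM = "(0?x?[0-9a-f]+)"
SHORTCUT = re.compile("^(" + NUM + "-)?" + NUM + "(/" + NUM + ")?(:(.+))?$",
                      re.IGNORECASE)

def expand(term):
    """Expand the shortcut form [start-]end[/stride][:format]."""
    _, start, end, _, stride, _, fmt = SHORTCUT.match(term).groups()
    start = int(start, 0) if start is not None else 1
    end = int(end, 0)
    stride = int(stride, 0) if stride is not None else 1
    fmt = fmt if fmt is not None else "%d"
    return [fmt % i for i in range(start, end + 1, stride)]

print(expand("4:host%02d"))  # ['host01', 'host02', 'host03', 'host04']
print(expand("2-10/2"))      # ['2', '4', '6', '8', '10']
```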

+ 9 - 24
playbooks/aws/openshift-cluster/config.yml

@@ -1,37 +1,22 @@
 ---
-- name: Populate oo_masters_to_config host group
-  hosts: localhost
+- hosts: localhost
   gather_facts: no
   vars_files:
   - vars.yml
   tasks:
-  - name: Evaluate oo_masters_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_masters_to_config
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_nodes_to_config
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
-  - name: Evaluate oo_first_master
-    add_host:
-      name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
-      groups: oo_first_master
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups"
+  - set_fact:
+      g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
+      g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
 
 - include: ../../common/openshift-cluster/config.yml
   vars:
+    g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+    g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
+    g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
+    g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
+    g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     openshift_cluster_id: "{{ cluster_id }}"
     openshift_debug_level: 4
     openshift_deployment_type: "{{ deployment_type }}"
-    openshift_first_master: "{{ groups.oo_first_master.0 }}"
     openshift_hostname: "{{ ec2_private_ip_address }}"
     openshift_public_hostname: "{{ ec2_ip_address }}"

+ 11 - 3
playbooks/aws/openshift-cluster/launch.yml

@@ -11,6 +11,13 @@
       msg: Deployment type not supported for aws provider yet
     when: deployment_type == 'enterprise'
 
+  - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ etcd_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+
   - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
   - include: tasks/launch_instances.yml
     vars:
@@ -25,9 +32,10 @@
       cluster: "{{ cluster_id }}"
       type: "{{ k8s_type }}"
 
-  - set_fact:
-      a_master: "{{ master_names[0] }}"
-  - add_host: name={{ a_master }} groups=service_master
+  - add_host:
+      name: "{{ master_names.0 }}"
+      groups: service_master
+    when: master_names is defined and master_names.0 is defined
 
 - include: update.yml
 

+ 1 - 0
playbooks/aws/openshift-cluster/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

+ 9 - 0
playbooks/aws/openshift-cluster/tasks/launch_instances.yml

@@ -53,6 +53,15 @@
     latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
     user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
     volume_defs:
+      etcd:
+        root:
+          volume_size: "{{ lookup('env', 'os_etcd_root_vol_size') | default(25, true) }}"
+          device_type: "{{ lookup('env', 'os_etcd_root_vol_type') | default('gp2', true) }}"
+          iops: "{{ lookup('env', 'os_etcd_root_vol_iops') | default(500, true) }}"
+        etcd:
+          volume_size: "{{ lookup('env', 'os_etcd_vol_size') | default(32, true) }}"
+          device_type: "{{ lookup('env', 'os_etcd_vol_type') | default('gp2', true) }}"
+          iops: "{{ lookup('env', 'os_etcd_vol_iops') | default(500, true) }}"
       master:
         root:
           volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"

+ 20 - 0
playbooks/aws/openshift-cluster/templates/user_data.j2

@@ -1,4 +1,24 @@
 #cloud-config
+{% if type =='etcd' %}
+cloud_config_modules:
+- disk_setup
+- mounts
+
+mounts:
+- [ xvdb, /var/lib/etcd, xfs, "defaults" ]
+
+disk_setup:
+  xvdb:
+    table_type: mbr
+    layout: True
+
+fs_setup:
+- label: etcd_storage
+  filesystem: xfs
+  device: /dev/xvdb
+  partition: auto
+{% endif %}
+
 {% if type == 'node' %}
 mounts:
 - [ xvdb ]
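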

+ 50 - 1
playbooks/aws/openshift-cluster/terminate.yml

@@ -13,4 +13,53 @@
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
     with_items: groups[scratch_group] | default([]) | difference(['localhost'])
 
-- include: ../terminate.yml
+- name: Terminate instances
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars:
+    host_vars: "{{ hostvars
+                   | oo_select_keys(groups['oo_hosts_to_terminate']) }}"
+  tasks:
+    - name: Remove tags from instances
+      ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent
+      args:
+        tags:
+          env: "{{ item['ec2_tag_env'] }}"
+          host-type: "{{ item['ec2_tag_host-type'] }}"
+          env-host-type: "{{ item['ec2_tag_env-host-type'] }}"
+      with_items: host_vars
+      when: "'oo_hosts_to_terminate' in groups"
+
+    - name: Terminate instances
+      ec2:
+        state: absent
+        instance_ids: ["{{ item.ec2_id }}"]
+        region: "{{ item.ec2_region }}"
+      ignore_errors: yes
+      register: ec2_term
+      with_items: host_vars
+      when: "'oo_hosts_to_terminate' in groups"
+
+    # Fail if any of the instances failed to terminate with an error other
+    # than 403 Forbidden
+    - fail: msg=Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}
+      when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+      with_items: ec2_term.results
+
+    - name: Stop instance if termination failed
+      ec2:
+        state: stopped
+        instance_ids: ["{{ item.item.ec2_id }}"]
+        region: "{{ item.item.ec2_region }}"
+      register: ec2_stop
+      when: "'oo_hosts_to_terminate' in groups and item.failed"
+      with_items: ec2_term.results
+
+    - name: Rename stopped instances
+      ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+      args:
+        tags:
+          Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+      with_items: ec2_stop.results
+      when: "'oo_hosts_to_terminate' in groups"

+ 3 - 1
playbooks/aws/openshift-cluster/update.yml

@@ -11,7 +11,9 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([])
+    with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]))
+                | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]))
+                | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([]))
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 

+ 0 - 19
playbooks/aws/openshift-master/config.yml

@@ -1,19 +0,0 @@
----
-- name: Populate oo_masters_to_config host group
-  hosts: localhost
-  gather_facts: no
-  tasks:
-  - name: Evaluate oo_masters_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_masters_to_config
-      ansible_ssh_user: root
-    with_items: oo_host_group_exp | default([])
-
-- include: ../../common/openshift-master/config.yml
-  vars:
-    openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 4
-    openshift_deployment_type: "{{ deployment_type }}"
-    openshift_hostname: "{{ ec2_private_ip_address }}"
-    openshift_public_hostname: "{{ ec2_ip_address }}"

+ 0 - 1
playbooks/aws/openshift-master/filter_plugins

@@ -1 +0,0 @@
-../../../filter_plugins

+ 0 - 70
playbooks/aws/openshift-master/launch.yml

@@ -1,70 +0,0 @@
----
-- name: Launch instance(s)
-  hosts: localhost
-  connection: local
-  gather_facts: no
-
-# TODO: modify g_ami based on deployment_type
-  vars:
-    inst_region: us-east-1
-    g_ami: ami-86781fee
-    user_data_file: user_data.txt
-
-  tasks:
-    - name: Launch instances
-      ec2:
-        state: present
-        region: "{{ inst_region }}"
-        keypair: libra
-        group: ['public']
-        instance_type: m3.large
-        image: "{{ g_ami }}"
-        count: "{{ oo_new_inst_names | length }}"
-        user_data: "{{ lookup('file', user_data_file) }}"
-        wait: yes
-      register: ec2
-
-    - name: Add new instances public IPs to the host group
-      add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
-      with_items: ec2.instances
-
-    - name: Add Name and environment tags to instances
-      ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
-      with_together:
-        - oo_new_inst_names
-        - ec2.instances
-      args:
-        tags:
-          Name: "{{ item.0 }}"
-
-    - name: Add other tags to instances
-      ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
-      with_items: ec2.instances
-      args:
-        tags: "{{ oo_new_inst_tags }}"
-
-    - name: Add new instances public IPs to oo_masters_to_config
-      add_host:
-        hostname: "{{ item.0 }}"
-        ansible_ssh_host: "{{ item.1.dns_name }}"
-        groupname: oo_masters_to_config
-        ec2_private_ip_address: "{{ item.1.private_ip }}"
-        ec2_ip_address: "{{ item.1.public_ip }}"
-      with_together:
-        - oo_new_inst_names
-        - ec2.instances
-
-    - name: Wait for ssh
-      wait_for: port=22 host={{ item.dns_name }}
-      with_items: ec2.instances
-
-    - name: Wait for root user setup
-      command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
-      register: result
-      until: result.rc == 0
-      retries: 20
-      delay: 10
-      with_items: ec2.instances
-
-# Apply the configs, seprate so that just the configs can be run by themselves
-- include: config.yml

+ 0 - 1
playbooks/aws/openshift-master/roles

@@ -1 +0,0 @@
-../../../roles

+ 0 - 2
playbooks/aws/openshift-master/terminate.yml

@@ -1,2 +0,0 @@
----
-- include: ../terminate.yml

+ 0 - 26
playbooks/aws/openshift-node/config.yml

@@ -1,26 +0,0 @@
----
-- name: Populate oo_nodes_to_config and oo_first_master host groups
-  hosts: localhost
-  gather_facts: no
-  tasks:
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_nodes_to_config
-      ansible_ssh_user: root
-    with_items: oo_host_group_exp | default([])
-  - name: Evaluate oo_first_master
-    add_host:
-      name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
-      groups: oo_first_master
-      ansible_ssh_user: root
-
-
-- include: ../../common/openshift-node/config.yml
-  vars:
-    openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 4
-    openshift_deployment_type: "{{ deployment_type }}"
-    openshift_first_master: "{{ groups.oo_first_master.0 }}"
-    openshift_hostname: "{{ ec2_private_ip_address }}"
-    openshift_public_hostname: "{{ ec2_ip_address }}"

+ 0 - 1
playbooks/aws/openshift-node/filter_plugins

@@ -1 +0,0 @@
-../../../filter_plugins

+ 0 - 72
playbooks/aws/openshift-node/launch.yml

@@ -1,72 +0,0 @@
----
-- name: Launch instance(s)
-  hosts: localhost
-  connection: local
-  gather_facts: no
-
-# TODO: modify g_ami based on deployment_type
-  vars:
-    inst_region: us-east-1
-    g_ami: ami-86781fee
-    user_data_file: user_data.txt
-
-  tasks:
-    - name: Launch instances
-      ec2:
-        state: present
-        region: "{{ inst_region }}"
-        keypair: libra
-        group: ['public']
-        instance_type: m3.large
-        image: "{{ g_ami }}"
-        count: "{{ oo_new_inst_names | length }}"
-        user_data: "{{ lookup('file', user_data_file) }}"
-        wait: yes
-      register: ec2
-
-    - name: Add new instances public IPs to the host group
-      add_host:
-        hostname: "{{ item.public_ip }}"
-        groupname: new_ec2_instances"
-      with_items: ec2.instances
-
-    - name: Add Name and environment tags to instances
-      ec2_tag: resource={{ item.1.id }} region={{ inst_region }} state=present
-      with_together:
-        - oo_new_inst_names
-        - ec2.instances
-      args:
-        tags:
-          Name: "{{ item.0 }}"
-
-    - name: Add other tags to instances
-      ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
-      with_items: ec2.instances
-      args:
-        tags: "{{ oo_new_inst_tags }}"
-
-    - name: Add new instances public IPs to oo_nodes_to_config
-      add_host:
-        hostname: "{{ item.0 }}"
-        ansible_ssh_host: "{{ item.1.dns_name }}"
-        groupname: oo_nodes_to_config
-        ec2_private_ip_address: "{{ item.1.private_ip }}"
-        ec2_ip_address: "{{ item.1.public_ip }}"
-      with_together:
-        - oo_new_inst_names
-        - ec2.instances
-
-    - name: Wait for ssh
-      wait_for: port=22 host={{ item.dns_name }}
-      with_items: ec2.instances
-
-    - name: Wait for root user setup
-      command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
-      register: result
-      until: result.rc == 0
-      retries: 20
-      delay: 10
-      with_items: ec2.instances
-
-# Apply the configs, seprate so that just the configs can be run by themselves
-- include: config.yml

+ 0 - 1
playbooks/aws/openshift-node/roles

@@ -1 +0,0 @@
-../../../roles

+ 0 - 2
playbooks/aws/openshift-node/terminate.yml

@@ -1,2 +0,0 @@
----
-- include: ../terminate.yml

+ 0 - 64
playbooks/aws/terminate.yml

@@ -1,64 +0,0 @@
----
-- name: Populate oo_hosts_to_terminate host group
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: Evaluate oo_hosts_to_terminate
-      add_host: name={{ item }} groups=oo_hosts_to_terminate
-      with_items: oo_host_group_exp | default([])
-
-- name: Gather dynamic inventory variables for hosts to terminate
-  hosts: oo_hosts_to_terminate
-  gather_facts: no
-
-- name: Terminate instances
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  vars:
-    host_vars: "{{ hostvars
-        | oo_select_keys(groups['oo_hosts_to_terminate']) }}"
-  tasks:
-    - name: Remove tags from instances
-      ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent
-      args:
-        tags:
-          env: "{{ item['ec2_tag_env'] }}"
-          host-type: "{{ item['ec2_tag_host-type'] }}"
-          env-host-type: "{{ item['ec2_tag_env-host-type'] }}"
-      with_items: host_vars
-      when: "'oo_hosts_to_terminate' in groups"
-
-    - name: Terminate instances
-      ec2:
-        state: absent
-        instance_ids: ["{{ item.ec2_id }}"]
-        region: "{{ item.ec2_region }}"
-      ignore_errors: yes
-      register: ec2_term
-      with_items: host_vars
-      when: "'oo_hosts_to_terminate' in groups"
-
-    # Fail if any of the instances failed to terminate with an error other
-    # than 403 Forbidden
-    - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
-      when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
-      with_items: ec2_term.results
-
-    - name: Stop instance if termination failed
-      ec2:
-        state: stopped
-        instance_ids: ["{{ item.item.ec2_id }}"]
-        region: "{{ item.item.ec2_region }}"
-      register: ec2_stop
-      when: item.failed
-      with_items: ec2_term.results
-      when: "'oo_hosts_to_terminate' in groups"
-
-    - name: Rename stopped instances
-      ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
-      args:
-        tags:
-          Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
-      with_items: ec2_stop.results
-      when: "'oo_hosts_to_terminate' in groups"

+ 1 - 7
playbooks/byo/config.yml

@@ -1,8 +1,2 @@
 ---
-- name: Run the openshift-master config playbook
-  include: openshift-master/config.yml
-  when: groups.masters is defined and groups.masters
-
-- name: Run the openshift-node config playbook
-  include: openshift-node/config.yml
-  when: groups.nodes is defined and groups.nodes and groups.masters is defined and groups.masters
+- include: openshift-cluster/config.yml

+ 1 - 0
playbooks/byo/lookup_plugins

@@ -0,0 +1 @@
+../../lookup_plugins

+ 9 - 0
playbooks/byo/openshift-cluster/config.yml

@@ -0,0 +1,9 @@
+---
+- include: ../../common/openshift-cluster/config.yml
+  vars:
+    g_etcd_group: "{{ 'etcd' }}"
+    g_masters_group: "{{ 'masters' }}"
+    g_nodes_group: "{{ 'nodes' }}"
+    openshift_cluster_id: "{{ cluster_id | default('default') }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"

playbooks/gce/openshift-node/filter_plugins → playbooks/byo/openshift-cluster/filter_plugins


+ 1 - 0
playbooks/byo/openshift-cluster/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

playbooks/gce/openshift-node/roles → playbooks/byo/openshift-cluster/roles


+ 0 - 15
playbooks/byo/openshift-master/config.yml

@@ -1,15 +0,0 @@
----
-- name: Populate oo_masters_to_config host group
-  hosts: localhost
-  gather_facts: no
-  tasks:
-  - add_host:
-      name: "{{ item }}"
-      groups: oo_masters_to_config
-    with_items: groups['masters']
-
-- include: ../../common/openshift-master/config.yml
-  vars:
-    openshift_cluster_id: "{{ cluster_id | default('default') }}"
-    openshift_debug_level: 4
-    openshift_deployment_type: "{{ deployment_type }}"

+ 0 - 1
playbooks/byo/openshift-master/filter_plugins

@@ -1 +0,0 @@
-../../../filter_plugins

+ 0 - 1
playbooks/byo/openshift-master/roles

@@ -1 +0,0 @@
-../../../roles

+ 0 - 23
playbooks/byo/openshift-node/config.yml

@@ -1,23 +0,0 @@
----
-- name: Populate oo_nodes_to_config and oo_first_master host groups
-  hosts: localhost
-  gather_facts: no
-  tasks:
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_nodes_to_config
-    with_items: groups.nodes
-  - name: Evaluate oo_first_master
-    add_host:
-      name: "{{ item }}"
-      groups: oo_first_master
-    with_items: groups.masters.0
-
-
-- include: ../../common/openshift-node/config.yml
-  vars:
-    openshift_first_master: "{{ groups.masters.0 }}"
-    openshift_cluster_id: "{{ cluster_id | default('default') }}"
-    openshift_debug_level: 4
-    openshift_deployment_type: "{{ deployment_type }}"

+ 0 - 1
playbooks/byo/openshift-node/filter_plugins

@@ -1 +0,0 @@
-../../../filter_plugins

+ 0 - 1
playbooks/byo/openshift-node/roles

@@ -1 +0,0 @@
-../../../roles

+ 61 - 0
playbooks/common/openshift-cluster/config.yml

@@ -1,4 +1,65 @@
 ---
+- name: Populate config host groups
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - fail:
+      msg: This playbook requires g_etcd_group to be set
+    when: g_etcd_group is not defined
+
+  - fail:
+      msg: This playbook requires g_masters_group to be set
+    when: g_masters_group is not defined
+
+  - fail:
+      msg: This playbook requires g_nodes_group to be set
+    when: g_nodes_group is not defined
+
+  - name: Evaluate oo_etcd_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_etcd_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_etcd_group] | default([])
+
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_masters_group] | default([])
+
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_nodes_group] | default([])
+
+  - name: Evaluate oo_first_etcd
+    add_host:
+      name: "{{ groups[g_etcd_group][0] }}"
+      groups: oo_first_etcd
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
+
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups[g_masters_group][0] }}"
+      groups: oo_first_master
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+
+- include: ../openshift-etcd/config.yml
+
 - include: ../openshift-master/config.yml
 
 - include: ../openshift-node/config.yml
+  vars:
+    osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"

+ 1 - 0
playbooks/common/openshift-cluster/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

+ 13 - 0
playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml

@@ -0,0 +1,13 @@
+---
+- set_fact: k8s_type="etcd"
+
+- name: Generate etcd instance name(s)
+  set_fact:
+    scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+  register: etcd_names_output
+  with_sequence: count={{ num_etcd }}
+
+- set_fact:
+    etcd_names: "{{ etcd_names_output.results | default([])
+                    | oo_collect('ansible_facts')
+                    | oo_collect('scratch_name') }}"
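
The generated names follow a `<cluster_id>-etcd-<5 hex digits>` pattern. A rough Python approximation of what the set_fact expression produces, assuming Jinja's `random` filter yields an integer below 1048576:

```python
import random

def scratch_names(cluster_id, k8s_type, count):
    """Approximates:
    "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"."""
    return ['%s-%s-%05x' % (cluster_id, k8s_type, random.randrange(1048576))
            for _ in range(count)]

print(scratch_names('mycluster', 'etcd', 3))
# e.g. ['mycluster-etcd-0f3a1', 'mycluster-etcd-8c2d4', 'mycluster-etcd-1b9e0']
```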

+ 4 - 2
playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml

@@ -5,7 +5,9 @@
   set_fact:
     scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
   register: master_names_output
-  with_sequence: start=1 end={{ num_masters }}
+  with_sequence: count={{ num_masters }}
 
 - set_fact:
-    master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+    master_names: "{{ master_names_output.results | default([])
+                      | oo_collect('ansible_facts')
+                      | oo_collect('scratch_name') }}"

+ 4 - 2
playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml

@@ -5,7 +5,9 @@
   set_fact:
     scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
   register: node_names_output
-  with_sequence: start=1 end={{ num_nodes }}
+  with_sequence: count={{ num_nodes }}
 
 - set_fact:
-    node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+    node_names: "{{ node_names_output.results | default([])
+                    | oo_collect('ansible_facts')
+                    | oo_collect('scratch_name') }}"

+ 96 - 0
playbooks/common/openshift-etcd/config.yml

@@ -0,0 +1,96 @@
+---
+- name: Set etcd facts needed for generating certs
+  hosts: oo_etcd_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  - openshift_facts:
+      role: "{{ item.role }}"
+      local_facts: "{{ item.local_facts }}"
+    with_items:
+      - role: common
+        local_facts:
+          hostname: "{{ openshift_hostname | default(None) }}"
+          public_hostname: "{{ openshift_public_hostname | default(None) }}"
+          deployment_type: "{{ openshift_deployment_type }}"
+  - name: Check status of etcd certificates
+    stat:
+      path: "{{ item }}"
+    with_items:
+    - /etc/etcd/server.crt
+    - /etc/etcd/peer.crt
+    - /etc/etcd/ca.crt
+    register: g_etcd_server_cert_stat_result
+  - set_fact:
+      etcd_server_certs_missing: "{{ g_etcd_server_cert_stat_result.results | map(attribute='stat.exists')
+                                    | list | intersect([false])}}"
+      etcd_cert_subdir: etcd-{{ openshift.common.hostname }}
+      etcd_cert_config_dir: /etc/etcd
+      etcd_cert_prefix:
+
+- name: Create temp directory for syncing certs
+  hosts: localhost
+  connection: local
+  sudo: false
+  gather_facts: no
+  tasks:
+  - name: Create local temp directory for syncing certs
+    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+    register: g_etcd_mktemp
+    changed_when: False
+
+- name: Configure etcd certificates
+  hosts: oo_first_etcd
+  vars:
+    etcd_generated_certs_dir: /etc/etcd/generated_certs
+    etcd_needing_server_certs: "{{ hostvars
+                                  | oo_select_keys(groups['oo_etcd_to_config'])
+                                  | oo_filter_list(filter_attr='etcd_server_certs_missing') }}"
+    sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
+  roles:
+  - etcd_certificates
+  post_tasks:
+  - name: Create a tarball of the etcd certs
+    command: >
+      tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
+        -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
+    args:
+      creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+    with_items: etcd_needing_server_certs
+  - name: Retrieve the etcd cert tarballs
+    fetch:
+      src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+      dest: "{{ sync_tmpdir }}/"
+      flat: yes
+      fail_on_missing: yes
+      validate_checksum: yes
+    with_items: etcd_needing_server_certs
+
+- name: Configure etcd hosts
+  hosts: oo_etcd_to_config
+  vars:
+    sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
+    etcd_url_scheme: https
+    etcd_peer_url_scheme: https
+    etcd_peers_group: oo_etcd_to_config
+  pre_tasks:
+  - name: Ensure certificate directory exists
+    file:
+      path: "{{ etcd_cert_config_dir }}"
+      state: directory
+  - name: Unarchive the tarball on the etcd host
+    unarchive:
+      src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
+      dest: "{{ etcd_cert_config_dir }}"
+    when: etcd_server_certs_missing
+  roles:
+  - etcd
+
+- name: Delete temporary directory on localhost
+  hosts: localhost
+  connection: local
+  sudo: false
+  gather_facts: no
+  tasks:
+  - file: name={{ g_etcd_mktemp.stdout }} state=absent
+    changed_when: False
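
The `map(attribute='stat.exists') | list | intersect([false])` expression is a compact way of asking whether any listed certificate is missing: the result is a non-empty (truthy) list exactly when at least one stat returned `exists: false`. A Python sketch of the same idea:

```python
def certs_missing(stat_results):
    """Truthy when at least one checked file does not exist, mirroring
    results | map(attribute='stat.exists') | list | intersect([false])."""
    exists_flags = [r['stat']['exists'] for r in stat_results]
    return list(set(exists_flags) & {False})

# server.crt present, peer.crt and ca.crt missing:
results = [{'stat': {'exists': True}},
           {'stat': {'exists': False}},
           {'stat': {'exists': False}}]
print(bool(certs_missing(results)))  # True -> certificates need to be generated
```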

playbooks/gce/openshift-master/filter_plugins → playbooks/common/openshift-etcd/filter_plugins


+ 1 - 0
playbooks/common/openshift-etcd/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

+ 1 - 0
playbooks/common/openshift-etcd/roles

@@ -0,0 +1 @@
+../../../roles/

+ 18 - 0
playbooks/common/openshift-etcd/service.yml

@@ -0,0 +1,18 @@
+---
+- name: Populate g_service_masters host group if needed
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - fail: msg="new_cluster_state is required to be injected in this playbook"
+    when: new_cluster_state is not defined
+
+  - name: Evaluate g_service_etcd
+    add_host: name={{ item }} groups=g_service_etcd
+    with_items: oo_host_group_exp | default([])
+
+- name: Change etcd state on etcd instance(s)
+  hosts: g_service_etcd
+  connection: ssh
+  gather_facts: no
+  tasks:
+    - service: name=etcd state="{{ new_cluster_state }}"

+ 197 - 2
playbooks/common/openshift-master/config.yml

@@ -1,19 +1,214 @@
 ---
+- name: Set master facts and determine if external etcd certs need to be generated
+  hosts: oo_masters_to_config
+  pre_tasks:
+  - set_fact:
+      openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
+      openshift_master_etcd_hosts: "{{ hostvars
+                                       | oo_select_keys(groups['oo_etcd_to_config']
+                                                        | default([]))
+                                       | oo_collect('openshift.common.hostname')
+                                       | default(none, true) }}"
+  roles:
+  - openshift_facts
+  post_tasks:
+  - openshift_facts:
+      role: "{{ item.role }}"
+      local_facts: "{{ item.local_facts }}"
+    with_items:
+      - role: common
+        local_facts:
+          hostname: "{{ openshift_hostname | default(None) }}"
+          public_hostname: "{{ openshift_public_hostname | default(None) }}"
+          deployment_type: "{{ openshift_deployment_type }}"
+      - role: master
+        local_facts:
+          api_port: "{{ openshift_master_api_port | default(None) }}"
+          api_url: "{{ openshift_master_api_url | default(None) }}"
+          api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
+          public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+          console_path: "{{ openshift_master_console_path | default(None) }}"
+          console_port: "{{ openshift_master_console_port | default(None) }}"
+          console_url: "{{ openshift_master_console_url | default(None) }}"
+          console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
+          public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+  - name: Check status of external etcd certificates
+    stat:
+      path: "/etc/openshift/master/{{ item }}"
+    with_items:
+    - master.etcd-client.crt
+    - master.etcd-ca.crt
+    register: g_external_etcd_cert_stat_result
+  - set_fact:
+      etcd_client_certs_missing: "{{ g_external_etcd_cert_stat_result.results
+                                    | map(attribute='stat.exists')
+                                    | list | intersect([false])}}"
+      etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }}
+      etcd_cert_config_dir: /etc/openshift/master
+      etcd_cert_prefix: master.etcd-
+    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+
+- name: Create temp directory for syncing certs
+  hosts: localhost
+  connection: local
+  sudo: false
+  gather_facts: no
+  tasks:
+  - name: Create local temp directory for syncing certs
+    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+    register: g_master_mktemp
+    changed_when: False
+
+- name: Configure etcd certificates
+  hosts: oo_first_etcd
+  vars:
+    etcd_generated_certs_dir: /etc/etcd/generated_certs
+    etcd_needing_client_certs: "{{ hostvars
+                                   | oo_select_keys(groups['oo_masters_to_config'])
+                                   | oo_filter_list(filter_attr='etcd_client_certs_missing') }}"
+    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+  roles:
+  - etcd_certificates
+  post_tasks:
+  - name: Create a tarball of the etcd certs
+    command: >
+      tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
+        -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
+    args:
+      creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+    with_items: etcd_needing_client_certs
+  - name: Retrieve the etcd cert tarballs
+    fetch:
+      src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+      dest: "{{ sync_tmpdir }}/"
+      flat: yes
+      fail_on_missing: yes
+      validate_checksum: yes
+    with_items: etcd_needing_client_certs
+
+- name: Copy the external etcd certs to the masters
+  hosts: oo_masters_to_config
+  vars:
+    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+  tasks:
+  - name: Ensure certificate directory exists
+    file:
+      path: /etc/openshift/master
+      state: directory
+    when: etcd_client_certs_missing is defined and etcd_client_certs_missing
+  - name: Unarchive the tarball on the master
+    unarchive:
+      src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
+      dest: "{{ etcd_cert_config_dir }}"
+    when: etcd_client_certs_missing is defined and etcd_client_certs_missing
+  - file:
+      path: "{{ etcd_cert_config_dir }}/{{ item }}"
+      owner: root
+      group: root
+      mode: 0600
+    with_items:
+    - master.etcd-client.crt
+    - master.etcd-client.key
+    - master.etcd-ca.crt
+    when: etcd_client_certs_missing is defined and etcd_client_certs_missing
+
+- name: Determine if master certificates need to be generated
+  hosts: oo_masters_to_config
+  tasks:
+  - set_fact:
+      openshift_master_certs_no_etcd:
+      - admin.crt
+      - master.kubelet-client.crt
+      - master.server.crt
+      - openshift-master.crt
+      - openshift-registry.crt
+      - openshift-router.crt
+      - etcd.server.crt
+      openshift_master_certs_etcd:
+      - master.etcd-client.crt
+  - set_fact:
+      openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
+
+  - name: Check status of master certificates
+    stat:
+      path: "/etc/openshift/master/{{ item }}"
+    with_items: openshift_master_certs
+    register: g_master_cert_stat_result
+  - set_fact:
+      master_certs_missing: "{{ g_master_cert_stat_result.results
+                                | map(attribute='stat.exists')
+                                | list | intersect([false])}}"
+      master_cert_subdir: master-{{ openshift.common.hostname }}
+      master_cert_config_dir: /etc/openshift/master
+
+- name: Configure master certificates
+  hosts: oo_first_master
+  vars:
+    master_generated_certs_dir: /etc/openshift/generated-configs
+    masters_needing_certs: "{{ hostvars
+                               | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
+                               | oo_filter_list(filter_attr='master_certs_missing') }}"
+    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+  roles:
+  - openshift_master_certificates
+  post_tasks:
+  - name: Create a tarball of the master certs
+    command: >
+      tar -czvf {{ master_generated_certs_dir }}/{{ item.master.cert_subdir }}.tgz
+        -C {{ master_generated_certs_dir }}/{{ item.master.cert_subdir }} .
+    args:
+      creates: "{{ master_generated_certs_dir }}/{{ item.master.cert_subdir }}.tgz"
+    with_items: masters_needing_certs
+  - name: Retrieve the master cert tarball from the master
+    fetch:
+      src: "{{ master_generated_certs_dir }}/{{ item.master.cert_subdir }}.tgz"
+      dest: "{{ sync_tmpdir }}/"
+      flat: yes
+      fail_on_missing: yes
+      validate_checksum: yes
+    with_items: masters_needing_certs
+
 - name: Configure master instances
   hosts: oo_masters_to_config
+  vars:
+    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+  pre_tasks:
+  - name: Ensure certificate directory exists
+    file:
+      path: /etc/openshift/master
+      state: directory
+    when: master_certs_missing and 'oo_first_master' not in group_names
+  - name: Unarchive the tarball on the master
+    unarchive:
+      src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
+      dest: "{{ master_cert_config_dir }}"
+    when: master_certs_missing and 'oo_first_master' not in group_names
   roles:
   - openshift_master
-  - openshift_examples
   - role: fluentd_master
     when: openshift.common.use_fluentd | bool
-  tasks:
+  post_tasks:
   - name: Create group for deployment type
     group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
     changed_when: False
 
+- name: Deploy OpenShift examples
+  hosts: oo_first_master
+  roles:
+  - openshift_examples
+
 # Additional instance config for online deployments
 - name: Additional instance config
   hosts: oo_masters_deployment_type_online
   roles:
   - pods
   - os_env_extras
+
+- name: Delete temporary directory on localhost
+  hosts: localhost
+  connection: local
+  sudo: false
+  gather_facts: no
+  tasks:
+  - file: name={{ g_master_mktemp.stdout }} state=absent
+    changed_when: False
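The net effect of the certificate plays above, sketched with a hypothetical hostname: for any master other than the first that is missing certificates, the first master generates its certs under /etc/openshift/generated-configs/master-<hostname>/, tars each set into master-<hostname>.tgz, the tarball is fetched into a temporary directory on localhost, and the target master unpacks it into /etc/openshift/master. For a host named master2.example.com the tarball would be /etc/openshift/generated-configs/master-master2.example.com.tgz.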

+ 1 - 0
playbooks/common/openshift-master/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

+ 14 - 25
playbooks/common/openshift-node/config.yml

@@ -18,21 +18,18 @@
           deployment_type: "{{ openshift_deployment_type }}"
       - role: node
         local_facts:
-          resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}"
-          resources_memory: "{{ openshift_node_resources_memory | default(None) }}"
-          pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
           labels: "{{ openshift_node_labels | default(None) }}"
           annotations: "{{ openshift_node_annotations | default(None) }}"
   - name: Check status of node certificates
     stat:
-      path: "{{ item }}"
+      path: "/etc/openshift/node/{{ item }}"
     with_items:
-    - "/etc/openshift/node/system:node:{{ openshift.common.hostname }}.crt"
-    - "/etc/openshift/node/system:node:{{ openshift.common.hostname }}.key"
-    - "/etc/openshift/node/system:node:{{ openshift.common.hostname }}.kubeconfig"
-    - "/etc/openshift/node/ca.crt"
-    - "/etc/openshift/node/server.key"
-    - "/etc/openshift/node/server.crt"
+    - "system:node:{{ openshift.common.hostname }}.crt"
+    - "system:node:{{ openshift.common.hostname }}.key"
+    - "system:node:{{ openshift.common.hostname }}.kubeconfig"
+    - ca.crt
+    - server.key
+    - server.crt
     register: stat_result
   - set_fact:
       certs_missing: "{{ stat_result.results | map(attribute='stat.exists')
@@ -56,10 +53,9 @@
   hosts: oo_first_master
   vars:
     nodes_needing_certs: "{{ hostvars
-                             | oo_select_keys(groups['oo_nodes_to_config'])
+                             | oo_select_keys(groups['oo_nodes_to_config']
+                                              | default([]))
                              | oo_filter_list(filter_attr='certs_missing') }}"
-    openshift_nodes: "{{ hostvars
-                         | oo_select_keys(groups['oo_nodes_to_config']) }}"
     sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
   roles:
   - openshift_node_certificates
@@ -86,7 +82,7 @@
   hosts: oo_nodes_to_config
   vars:
     sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
-    openshift_node_master_api_url: "{{ hostvars[openshift_first_master].openshift.master.api_url }}"
+    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
   pre_tasks:
   - name: Ensure certificate directory exists
     file:
@@ -110,15 +106,6 @@
     group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
     changed_when: False
 
-- name: Delete the temporary directory on the master
-  hosts: oo_first_master
-  gather_facts: no
-  vars:
-    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
-  tasks:
-  - file: name={{ sync_tmpdir }} state=absent
-    changed_when: False
-
 - name: Delete temporary directory on localhost
   hosts: localhost
   connection: local
@@ -143,12 +130,14 @@
                          | oo_select_keys(groups['oo_nodes_to_config'])
                          | oo_collect('openshift.common.hostname') }}"
     openshift_unscheduleable_nodes: "{{ hostvars
-                                        | oo_select_keys(groups['oo_nodes_to_config']) 
+                                        | oo_select_keys(groups['oo_nodes_to_config']
+                                                         | default([]))
                                         | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}"
   pre_tasks:
   - set_fact:
       openshift_scheduleable_nodes: "{{ hostvars
-                                      | oo_select_keys(groups['oo_nodes_to_config'])
+                                      | oo_select_keys(groups['oo_nodes_to_config']
+                                                       | default([]))
                                       | oo_collect('openshift.common.hostname')
                                       | difference(openshift_unscheduleable_nodes) }}"
   roles:

+ 1 - 0
playbooks/common/openshift-node/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

+ 10 - 24
playbooks/gce/openshift-cluster/config.yml

@@ -1,38 +1,24 @@
 ---
 # TODO: fix firewall related bug with GCE and origin, since GCE is overriding
 # /etc/sysconfig/iptables
-- name: Populate oo_masters_to_config host group
-  hosts: localhost
+
+- hosts: localhost
   gather_facts: no
   vars_files:
   - vars.yml
   tasks:
-  - name: Evaluate oo_masters_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_masters_to_config
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_nodes_to_config
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
-  - name: Evaluate oo_first_master
-    add_host:
-      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
-      groups: oo_first_master
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+  - set_fact:
+      g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
+      g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
 
 - include: ../../common/openshift-cluster/config.yml
   vars:
+    g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
+    g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
+    g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
+    g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
+    g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     openshift_cluster_id: "{{ cluster_id }}"
     openshift_debug_level: 4
     openshift_deployment_type: "{{ deployment_type }}"
-    openshift_first_master: "{{ groups.oo_first_master.0 }}"
     openshift_hostname: "{{ gce_private_ip }}"
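With a hypothetical cluster_id of demo, the group names passed into the common playbook above resolve to the following (sketch only):

    g_etcd_group: tag_env-host-type-demo-openshift-etcd
    g_masters_group: tag_env-host-type-demo-openshift-master
    g_nodes_group: tag_env-host-type-demo-openshift-node

The common openshift-cluster/config.yml added by this PR is then responsible for evaluating the oo_etcd_to_config, oo_masters_to_config and oo_nodes_to_config host groups from those tag groups, using g_ssh_user and g_sudo for the connection settings, which is why the per-cloud "Evaluate" tasks above could be dropped.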

+ 1 - 0
playbooks/gce/openshift-cluster/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

+ 3 - 1
playbooks/gce/openshift-cluster/update.yml

@@ -11,7 +11,9 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
+    with_items: (groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]))
+                | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([]))
+                | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-etcd"] | default([]))
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 

+ 0 - 18
playbooks/gce/openshift-master/config.yml

@@ -1,18 +0,0 @@
----
-- name: Populate oo_masters_to_config host group
-  hosts: localhost
-  gather_facts: no
-  tasks:
-  - name: Evaluate oo_masters_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_masters_to_config
-      ansible_ssh_user: root
-    with_items: oo_host_group_exp | default([])
-
-- include: ../../common/openshift-master/config.yml
-  vars:
-    openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 4
-    openshift_deployment_type: "{{ deployment_type }}"
-    openshift_hostname: "{{ gce_private_ip }}"

+ 0 - 51
playbooks/gce/openshift-master/launch.yml

@@ -1,51 +0,0 @@
----
-# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
-# the gce task to use the disk_auto_delete parameter to avoid having to delete
-# the disk as a separate step on termination
-
-- name: Launch instance(s)
-  hosts: localhost
-  connection: local
-  gather_facts: no
-
-# TODO: modify image based on deployment_type
-  vars:
-    inst_names: "{{ oo_new_inst_names }}"
-    machine_type: n1-standard-1
-    image: libra-rhel7
-
-  tasks:
-    - name: Launch instances
-      gce:
-        instance_names: "{{ inst_names }}"
-        machine_type: "{{ machine_type }}"
-        image: "{{ image }}"
-        service_account_email: "{{ gce_service_account_email }}"
-        pem_file: "{{ gce_pem_file }}"
-        project_id: "{{ gce_project_id }}"
-        tags: "{{ oo_new_inst_tags }}"
-      register: gce
-
-    - name: Add new instances public IPs to oo_masters_to_config
-      add_host:
-        hostname: "{{ item.name }}"
-        ansible_ssh_host: "{{ item.public_ip }}"
-        groupname: oo_masters_to_config
-        gce_private_ip: "{{ item.private_ip }}"
-      with_items: gce.instance_data
-
-    - name: Wait for ssh
-      wait_for: port=22 host={{ item.public_ip }}
-      with_items: gce.instance_data
-
-    - name: Wait for root user setup
-      command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
-      register: result
-      until: result.rc == 0
-      retries: 20
-      delay: 10
-      with_items: gce.instance_data
-
-
-# Apply the configs, separate so that just the configs can be run by themselves
-- include: config.yml

+ 0 - 1
playbooks/gce/openshift-master/roles

@@ -1 +0,0 @@
-../../../roles

+ 0 - 35
playbooks/gce/openshift-master/terminate.yml

@@ -1,35 +0,0 @@
----
-- name: Populate oo_masters_to_terminate host group if needed
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: Evaluate oo_masters_to_terminate
-      add_host: name={{ item }} groups=oo_masters_to_terminate
-      with_items: oo_host_group_exp | default([])
-
-- name: Terminate master instances
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  tasks:
-    - name: Terminate master instances
-      gce:
-        service_account_email: "{{ gce_service_account_email }}"
-        pem_file: "{{ gce_pem_file }}"
-        project_id: "{{ gce_project_id }}"
-        state: 'absent'
-        instance_names: "{{ groups['oo_masters_to_terminate'] }}"
-        disks: "{{ groups['oo_masters_to_terminate'] }}"
-      register: gce
-      when: "'oo_masters_to_terminate' in groups"
-
-    - name: Remove disks of instances
-      gce_pd:
-        service_account_email: "{{ gce_service_account_email }}"
-        pem_file: "{{ gce_pem_file }}"
-        project_id: "{{ gce_project_id }}"
-        name: "{{ item }}"
-        zone: "{{ gce.zone }}"
-        state: absent
-      with_items: gce.instance_names
-      when: "'oo_masters_to_terminate' in groups"

+ 0 - 25
playbooks/gce/openshift-node/config.yml

@@ -1,25 +0,0 @@
----
-- name: Populate oo_nodes_to_config and oo_first_master host groups
-  hosts: localhost
-  gather_facts: no
-  tasks:
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_nodes_to_config
-      ansible_ssh_user: root
-    with_items: oo_host_group_exp | default([])
-  - name: Evaluate oo_first_master
-    add_host:
-      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
-      groups: oo_first_master
-      ansible_ssh_user: root
-
-
-- include: ../../common/openshift-node/config.yml
-  vars:
-    openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 4
-    openshift_deployment_type: "{{ deployment_type }}"
-    openshift_first_master: "{{ groups.oo_first_master.0 }}"
-    openshift_hostname: "{{ gce_private_ip }}"

+ 0 - 51
playbooks/gce/openshift-node/launch.yml

@@ -1,51 +0,0 @@
----
-# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
-# the gce task to use the disk_auto_delete parameter to avoid having to delete
-# the disk as a separate step on termination
-
-- name: Launch instance(s)
-  hosts: localhost
-  connection: local
-  gather_facts: no
-
-# TODO: modify image based on deployment_type
-  vars:
-    inst_names: "{{ oo_new_inst_names }}"
-    machine_type: n1-standard-1
-    image: libra-rhel7
-
-  tasks:
-    - name: Launch instances
-      gce:
-        instance_names: "{{ inst_names }}"
-        machine_type: "{{ machine_type }}"
-        image: "{{ image }}"
-        service_account_email: "{{ gce_service_account_email }}"
-        pem_file: "{{ gce_pem_file }}"
-        project_id: "{{ gce_project_id }}"
-        tags: "{{ oo_new_inst_tags }}"
-      register: gce
-
-    - name: Add new instances public IPs to oo_nodes_to_config
-      add_host:
-        hostname: "{{ item.name }}"
-        ansible_ssh_host: "{{ item.public_ip }}"
-        groupname: oo_nodes_to_config
-        gce_private_ip: "{{ item.private_ip }}"
-      with_items: gce.instance_data
-
-    - name: Wait for ssh
-      wait_for: port=22 host={{ item.public_ip }}
-      with_items: gce.instance_data
-
-    - name: Wait for root user setup
-      command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
-      register: result
-      until: result.rc == 0
-      retries: 20
-      delay: 10
-      with_items: gce.instance_data
-
-
-# Apply the configs, separate so that just the configs can be run by themselves
-- include: config.yml

+ 0 - 35
playbooks/gce/openshift-node/terminate.yml

@@ -1,35 +0,0 @@
----
-- name: Populate oo_nodes_to_terminate host group if needed
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: Evaluate oo_nodes_to_terminate
-      add_host: name={{ item }} groups=oo_nodes_to_terminate
-      with_items: oo_host_group_exp | default([])
-
-- name: Terminate node instances
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  tasks:
-    - name: Terminate node instances
-      gce:
-        service_account_email: "{{ gce_service_account_email }}"
-        pem_file: "{{ gce_pem_file }}"
-        project_id: "{{ gce_project_id }}"
-        state: 'absent'
-        instance_names: "{{ groups['oo_nodes_to_terminate'] }}"
-        disks: "{{ groups['oo_nodes_to_terminate'] }}"
-      register: gce
-      when: "'oo_nodes_to_terminate' in groups"
-
-    - name: Remove disks of instances
-      gce_pd:
-        service_account_email: "{{ gce_service_account_email }}"
-        pem_file: "{{ gce_pem_file }}"
-        project_id: "{{ gce_project_id }}"
-        name: "{{ item }}"
-        zone: "{{ gce.zone }}"
-        state: absent
-      with_items: gce.instance_names
-      when: "'oo_nodes_to_terminate' in groups"

+ 9 - 24
playbooks/libvirt/openshift-cluster/config.yml

@@ -3,37 +3,22 @@
 # is localhost, so no hostname value (or public_hostname) value is getting
 # assigned
 
-- name: Populate oo_masters_to_config host group
-  hosts: localhost
+- hosts: localhost
   gather_facts: no
   vars_files:
   - vars.yml
   tasks:
-  - name: Evaluate oo_masters_to_config
-    add_host:
-      name: "{{ item }}"
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: oo_masters_to_config
-    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: oo_nodes_to_config
-    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
-  - name: Evaluate oo_first_master
-    add_host:
-      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: oo_first_master
-    when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+  - set_fact:
+      g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
+      g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
 
 - include: ../../common/openshift-cluster/config.yml
   vars:
+    g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
+    g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
+    g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
+    g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
+    g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     openshift_cluster_id: "{{ cluster_id }}"
     openshift_debug_level: 4
     openshift_deployment_type: "{{ deployment_type }}"
-    openshift_first_master: "{{ groups.oo_first_master.0 }}"

+ 3 - 1
playbooks/libvirt/openshift-cluster/update.yml

@@ -11,7 +11,9 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
+    with_items: (groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]))
+                | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([]))
+                | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-etcd"] | default([]))
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 

+ 9 - 24
playbooks/openstack/openshift-cluster/config.yml

@@ -1,35 +1,20 @@
-- name: Populate oo_masters_to_config host group
-  hosts: localhost
+- hosts: localhost
   gather_facts: no
   vars_files:
   - vars.yml
   tasks:
-  - name: Evaluate oo_masters_to_config
-    add_host:
-      name: "{{ item }}"
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: oo_masters_to_config
-    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: oo_nodes_to_config
-    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
-  - name: Evaluate oo_first_master
-    add_host:
-      name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: oo_first_master
-    when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups"
+  - set_fact:
+      g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
+      g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
 
 - include: ../../common/openshift-cluster/config.yml
   vars:
+    g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+    g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
+    g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
+    g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
+    g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     openshift_cluster_id: "{{ cluster_id }}"
     openshift_debug_level: 4
     openshift_deployment_type: "{{ deployment_type }}"
-    openshift_first_master: "{{ groups.oo_first_master.0 }}"
     openshift_hostname: "{{ ansible_default_ipv4.address }}"

+ 3 - 1
playbooks/openstack/openshift-cluster/update.yml

@@ -11,7 +11,9 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([])
+    with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]))
+                | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]))
+                | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([]))
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 

+ 39 - 0
roles/etcd/README.md

@@ -0,0 +1,39 @@
+etcd
+=========
+
+Configures an etcd cluster for an arbitrary number of hosts
+
+Requirements
+------------
+
+This role assumes it's being deployed on a RHEL/Fedora based host with the
+'etcd' package available via yum.
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+    - hosts: etcd
+      roles:
+         - { role: etcd }
+
+License
+-------
+
+MIT
+
+Author Information
+------------------
+
+Scott Dodson <sdodson@redhat.com>
+Adapted from https://github.com/retr0h/ansible-etcd for use on RHEL/Fedora. We
+should at some point submit a PR to merge this with that module.

+ 31 - 0
roles/etcd/defaults/main.yaml

@@ -0,0 +1,31 @@
+---
+etcd_interface: eth0
+etcd_client_port: 2379
+etcd_peer_port: 2380
+etcd_peers_group: etcd
+etcd_url_scheme: http
+etcd_peer_url_scheme: http
+etcd_conf_dir: /etc/etcd
+etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
+etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
+etcd_key_file: "{{ etcd_conf_dir }}/server.key"
+etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt"
+etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt"
+etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key"
+
+etcd_initial_cluster_state: new
+etcd_initial_cluster_token: etcd-cluster-1
+
+etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}"
+etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}"
+etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_client_port }}"
+etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_client_port }}"
+
+etcd_data_dir: /var/lib/etcd/
+
+os_firewall_use_firewalld: False
+os_firewall_allow:
+- service: etcd
+  port: "{{etcd_client_port}}/tcp"
+- service: etcd peering
+  port: "{{ etcd_peer_port }}/tcp"
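A minimal sketch of overriding these defaults when applying the role directly (the interface and group values are hypothetical):

    - hosts: etcd
      roles:
      - role: etcd
        etcd_interface: eth1            # default is eth0
        etcd_peers_group: oo_etcd_to_config

With that override, a host whose ansible_eth1 address is 192.168.10.11 would advertise http://192.168.10.11:2379 to clients and http://192.168.10.11:2380 to peers, per the URL defaults above.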

+ 3 - 0
roles/etcd/handlers/main.yml

@@ -0,0 +1,3 @@
+---
+- name: restart etcd
+  service: name=etcd state=restarted

+ 19 - 0
roles/etcd/meta/main.yml

@@ -0,0 +1,19 @@
+---
+# This module is based on https://github.com/retr0h/ansible-etcd with most
+# changes centered around installing from a pre-existing rpm
+# TODO: Extend https://github.com/retr0h/ansible-etcd rather than forking
+galaxy_info:
+  author: Scott Dodson
+  description: etcd management
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+  - system
+dependencies:
+- { role: os_firewall }

+ 52 - 0
roles/etcd/tasks/main.yml

@@ -0,0 +1,52 @@
+---
+- name: Install etcd
+  yum: pkg=etcd state=present
+
+- name: Validate permissions on the config dir
+  file:
+    path: "{{ etcd_conf_dir }}"
+    state: directory
+    owner: etcd
+    group: etcd
+    mode: 0700
+
+- name: Validate permissions on certificate files
+  file:
+    path: "{{ item }}"
+    mode: 0600
+    group: etcd
+    owner: etcd
+  when: etcd_url_scheme == 'https'
+  with_items:
+  - "{{ etcd_ca_file }}"
+  - "{{ etcd_cert_file }}"
+  - "{{ etcd_key_file }}"
+
+- name: Validate permissions on peer certificate files
+  file:
+    path: "{{ item }}"
+    mode: 0600
+    group: etcd
+    owner: etcd
+  when: etcd_peer_url_scheme == 'https'
+  with_items:
+  - "{{ etcd_peer_ca_file }}"
+  - "{{ etcd_peer_cert_file }}"
+  - "{{ etcd_peer_key_file }}"
+
+- name: Write etcd global config file
+  template:
+    src: etcd.conf.j2
+    dest: /etc/etcd/etcd.conf
+  notify:
+    - restart etcd
+
+- name: Enable etcd
+  service:
+    name: etcd
+    state: started
+    enabled: yes
+  register: start_result
+
+- pause: seconds=30
+  when: start_result | changed

+ 52 - 0
roles/etcd/templates/etcd.conf.j2

@@ -0,0 +1,52 @@
+{% macro initial_cluster() -%}
+{% for host in groups[etcd_peers_group] -%}
+{% if loop.last -%}
+{{ host }}={{ etcd_peer_url_scheme }}://{{ hostvars[host]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}
+{%- else -%}
+{{ host }}={{ etcd_peer_url_scheme }}://{{ hostvars[host]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }},
+{%- endif -%}
+{% endfor -%}
+{% endmacro -%}
+
+{% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %}
+ETCD_NAME={{ inventory_hostname }}
+ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
+{% else %}
+ETCD_NAME=default
+{% endif %}
+ETCD_DATA_DIR={{ etcd_data_dir }}
+#ETCD_SNAPSHOT_COUNTER="10000"
+#ETCD_HEARTBEAT_INTERVAL="100"
+#ETCD_ELECTION_TIMEOUT="1000"
+ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
+#ETCD_MAX_SNAPSHOTS="5"
+#ETCD_MAX_WALS="5"
+#ETCD_CORS=""
+
+{% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %}
+#[cluster]
+ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
+ETCD_INITIAL_CLUSTER={{ initial_cluster() }}
+ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
+ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
+#ETCD_DISCOVERY=""
+#ETCD_DISCOVERY_SRV=""
+#ETCD_DISCOVERY_FALLBACK="proxy"
+#ETCD_DISCOVERY_PROXY=""
+{% endif %}
+ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
+
+#[proxy]
+#ETCD_PROXY="off"
+
+#[security]
+{% if etcd_url_scheme == 'https' -%}
+ETCD_CA_FILE={{ etcd_ca_file }}
+ETCD_CERT_FILE={{ etcd_cert_file }}
+ETCD_KEY_FILE={{ etcd_key_file }}
+{% endif -%}
+{% if etcd_peer_url_scheme == 'https' -%}
+ETCD_PEER_CA_FILE={{ etcd_peer_ca_file }}
+ETCD_PEER_CERT_FILE={{ etcd_peer_cert_file }}
+ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }}
+{% endif -%}
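For a hypothetical three-member peers group (etcd1 at 10.0.0.1, etcd2 at 10.0.0.2, etcd3 at 10.0.0.3) the initial_cluster() macro renders ETCD_INITIAL_CLUSTER=etcd1=http://10.0.0.1:2380,etcd2=http://10.0.0.2:2380,etcd3=http://10.0.0.3:2380; with a single member the template falls back to ETCD_NAME=default and omits the [cluster] section entirely.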

+ 34 - 0
roles/etcd_ca/README.md

@@ -0,0 +1,34 @@
+etcd_ca
+========================
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+TODO
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Scott Dodson (sdodson@redhat.com)

+ 16 - 0
roles/etcd_ca/meta/main.yml

@@ -0,0 +1,16 @@
+---
+galaxy_info:
+  author: Jason DeTiberus
+  description:
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.9
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+  - system
+dependencies:
+- { role: openshift_facts }

+ 44 - 0
roles/etcd_ca/tasks/main.yml

@@ -0,0 +1,44 @@
+---
+- file:
+    path: "{{ etcd_ca_dir }}/{{ item }}"
+    state: directory
+    mode: 0700
+    owner: root
+    group: root
+  with_items:
+  - certs
+  - crl
+  - fragments
+
+- command: cp /etc/pki/tls/openssl.cnf ./
+  args:
+    chdir: "{{ etcd_ca_dir }}/fragments"
+    creates: "{{ etcd_ca_dir }}/fragments/openssl.cnf"
+
+- template:
+    dest: "{{ etcd_ca_dir }}/fragments/openssl_append.cnf"
+    src: openssl_append.j2
+
+- assemble:
+    src: "{{ etcd_ca_dir }}/fragments"
+    dest: "{{ etcd_ca_dir }}/openssl.cnf"
+
+- command: touch index.txt
+  args:
+    chdir: "{{ etcd_ca_dir }}"
+    creates: "{{ etcd_ca_dir }}/index.txt"
+
+- copy:
+    dest: "{{ etcd_ca_dir }}/serial"
+    content: "01"
+    force: no
+
+- command: >
+    openssl req -config openssl.cnf -newkey rsa:4096
+    -keyout ca.key -new -out ca.crt -x509 -extensions etcd_v3_ca_self
+    -batch -nodes -subj /CN=etcd-signer@{{ ansible_date_time.epoch }}
+  args:
+    chdir: "{{ etcd_ca_dir }}"
+    creates: "{{ etcd_ca_dir }}/ca.crt"
+  environment:
+    SAN: ''
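Once this role has run, the CA directory (/etc/etcd/ca per the role vars below) contains the certs/, crl/ and fragments/ subdirectories, the assembled openssl.cnf, the index.txt and serial files used by openssl ca, and the self-signed ca.crt/ca.key pair that the etcd_certificates role later signs against.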

+ 51 - 0
roles/etcd_ca/templates/openssl_append.j2

@@ -0,0 +1,51 @@
+
+[ etcd_v3_req ]
+basicConstraints = critical,CA:FALSE
+keyUsage         = digitalSignature,keyEncipherment
+subjectAltName   = ${ENV::SAN}
+
+[ etcd_ca ]
+dir             = {{ etcd_ca_dir }}
+crl_dir         = $dir/crl
+database        = $dir/index.txt
+new_certs_dir   = $dir/certs
+certificate     = $dir/ca.crt
+serial          = $dir/serial
+private_key     = $dir/ca.key
+crl_number      = $dir/crlnumber
+x509_extensions = etcd_v3_ca_client
+default_days    = 365
+default_md      = sha256
+preserve        = no
+name_opt        = ca_default
+cert_opt        = ca_default
+policy          = policy_anything
+unique_subject  = no
+copy_extensions = copy
+
+[ etcd_v3_ca_self ]
+authorityKeyIdentifier = keyid,issuer
+basicConstraints       = critical,CA:TRUE,pathlen:0
+keyUsage               = critical,digitalSignature,keyEncipherment,keyCertSign
+subjectKeyIdentifier   = hash
+
+[ etcd_v3_ca_peer ]
+authorityKeyIdentifier = keyid,issuer:always
+basicConstraints       = critical,CA:FALSE
+extendedKeyUsage       = clientAuth,serverAuth
+keyUsage               = digitalSignature,keyEncipherment
+subjectKeyIdentifier   = hash
+
+[ etcd_v3_ca_server ]
+authorityKeyIdentifier = keyid,issuer:always
+basicConstraints       = critical,CA:FALSE
+extendedKeyUsage       = serverAuth
+keyUsage               = digitalSignature,keyEncipherment
+subjectKeyIdentifier   = hash
+
+[ etcd_v3_ca_client ]
+authorityKeyIdentifier = keyid,issuer:always
+basicConstraints       = critical,CA:FALSE
+extendedKeyUsage       = clientAuth
+keyUsage               = digitalSignature,keyEncipherment
+subjectKeyIdentifier   = hash

+ 3 - 0
roles/etcd_ca/vars/main.yml

@@ -0,0 +1,3 @@
+---
+etcd_conf_dir: /etc/etcd
+etcd_ca_dir: /etc/etcd/ca

+ 34 - 0
roles/etcd_certificates/README.md

@@ -0,0 +1,34 @@
+OpenShift etcd certificates
+========================
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+TODO
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Scott Dodson (sdodson@redhat.com)

+ 16 - 0
roles/etcd_certificates/meta/main.yml

@@ -0,0 +1,16 @@
+---
+galaxy_info:
+  author: Jason DeTiberus
+  description:
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.8
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+  - system
+dependencies:
+- { role: etcd_ca }

+ 42 - 0
roles/etcd_certificates/tasks/client.yml

@@ -0,0 +1,42 @@
+---
+- name: Ensure generated_certs directory present
+  file:
+    path: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
+    state: directory
+    mode: 0700
+  with_items: etcd_needing_client_certs
+
+- name: Create the client csr
+  command: >
+    openssl req -new -keyout {{ item.etcd_cert_prefix }}client.key
+    -config {{ etcd_openssl_conf }}
+    -out {{ item.etcd_cert_prefix }}client.csr
+    -reqexts {{ etcd_req_ext }} -batch -nodes
+    -subj /CN={{ item.openshift.common.hostname }}
+  args:
+    chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
+    creates: "{{ etcd_generated_certs_dir ~ '/' ~  item.etcd_cert_subdir ~ '/'
+                 ~ item.etcd_cert_prefix ~ 'client.csr' }}"
+  environment:
+    SAN: "IP:{{ item.openshift.common.ip }}"
+  with_items: etcd_needing_client_certs
+
+- name: Sign and create the client crt
+  command: >
+    openssl ca -name {{ etcd_ca_name }} -config {{ etcd_openssl_conf }}
+    -out {{ item.etcd_cert_prefix }}client.crt
+    -in {{ item.etcd_cert_prefix }}client.csr
+    -batch
+  args:
+    chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
+    creates: "{{ etcd_generated_certs_dir ~ '/' ~  item.etcd_cert_subdir ~ '/'
+                 ~ item.etcd_cert_prefix ~ 'client.crt' }}"
+  environment:
+    SAN: ''
+  with_items: etcd_needing_client_certs
+
+- file:
+    src: "{{ etcd_ca_cert }}"
+    dest: "{{ etcd_generated_certs_dir}}/{{ item.etcd_cert_subdir }}/{{ item.etcd_cert_prefix }}ca.crt"
+    state: hard
+  with_items: etcd_needing_client_certs
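For each host in etcd_needing_client_certs this produces <etcd_cert_prefix>client.key, .csr and .crt plus a hard-linked copy of the CA certificate under that host's etcd_cert_subdir inside /etc/etcd/generated_certs. For masters the prefix appears to be master.etcd-, since the master config template further down references master.etcd-client.crt, master.etcd-client.key and master.etcd-ca.crt.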

+ 9 - 0
roles/etcd_certificates/tasks/main.yml

@@ -0,0 +1,9 @@
+---
+- include: client.yml
+  when: etcd_needing_client_certs is defined and etcd_needing_client_certs
+
+- include: server.yml
+  when: etcd_needing_server_certs is defined and etcd_needing_server_certs
+
+
+

+ 73 - 0
roles/etcd_certificates/tasks/server.yml

@@ -0,0 +1,73 @@
+---
+- name: Ensure generated_certs directory present
+  file:
+    path: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
+    state: directory
+    mode: 0700
+  with_items: etcd_needing_server_certs
+
+- name: Create the server csr
+  command: >
+    openssl req -new -keyout {{ item.etcd_cert_prefix }}server.key
+    -config {{ etcd_openssl_conf }}
+    -out {{ item.etcd_cert_prefix }}server.csr
+    -reqexts {{ etcd_req_ext }} -batch -nodes
+    -subj /CN={{ item.openshift.common.hostname }}
+  args:
+    chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
+    creates: "{{ etcd_generated_certs_dir ~ '/' ~  item.etcd_cert_subdir ~ '/'
+                 ~ item.etcd_cert_prefix ~ 'server.csr' }}"
+  environment:
+    SAN: "IP:{{ item.openshift.common.ip }}"
+  with_items: etcd_needing_server_certs
+
+- name: Sign and create the server crt
+  command: >
+    openssl ca -name {{ etcd_ca_name }} -config {{ etcd_openssl_conf }}
+    -out {{ item.etcd_cert_prefix }}server.crt
+    -in {{ item.etcd_cert_prefix }}server.csr
+    -extensions {{ etcd_ca_exts_server }} -batch
+  args:
+    chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
+    creates: "{{ etcd_generated_certs_dir ~ '/' ~  item.etcd_cert_subdir ~ '/'
+                 ~ item.etcd_cert_prefix ~ 'server.crt' }}"
+  environment:
+    SAN: ''
+  with_items: etcd_needing_server_certs
+
+- name: Create the peer csr
+  command: >
+    openssl req -new -keyout {{ item.etcd_cert_prefix }}peer.key
+    -config {{ etcd_openssl_conf }}
+    -out {{ item.etcd_cert_prefix }}peer.csr
+    -reqexts {{ etcd_req_ext }} -batch -nodes
+    -subj /CN={{ item.openshift.common.hostname }}
+  args:
+    chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
+    creates: "{{ etcd_generated_certs_dir ~ '/' ~  item.etcd_cert_subdir ~ '/'
+                 ~ item.etcd_cert_prefix ~ 'peer.csr' }}"
+  environment:
+    SAN: "IP:{{ item.openshift.common.ip }}"
+  with_items: etcd_needing_server_certs
+
+- name: Sign and create the peer crt
+  command: >
+    openssl ca -name {{ etcd_ca_name }} -config {{ etcd_openssl_conf }}
+    -out {{ item.etcd_cert_prefix }}peer.crt
+    -in {{ item.etcd_cert_prefix }}peer.csr
+    -extensions {{ etcd_ca_exts_peer }} -batch
+  args:
+    chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
+    creates: "{{ etcd_generated_certs_dir ~ '/' ~  item.etcd_cert_subdir ~ '/'
+                 ~ item.etcd_cert_prefix ~ 'peer.crt' }}"
+  environment:
+    SAN: ''
+  with_items: etcd_needing_server_certs
+
+- file:
+    src: "{{ etcd_ca_cert }}"
+    dest: "{{ etcd_generated_certs_dir}}/{{ item.etcd_cert_subdir }}/{{ item.etcd_cert_prefix }}ca.crt"
+    state: hard
+  with_items: etcd_needing_server_certs
+
+

+ 11 - 0
roles/etcd_certificates/vars/main.yml

@@ -0,0 +1,11 @@
+---
+etcd_conf_dir: /etc/etcd
+etcd_ca_dir: /etc/etcd/ca
+etcd_generated_certs_dir: /etc/etcd/generated_certs
+etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt"
+etcd_ca_key: "{{ etcd_ca_dir }}/ca.key"
+etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf"
+etcd_ca_name: etcd_ca
+etcd_req_ext: etcd_v3_req
+etcd_ca_exts_peer: etcd_v3_ca_peer
+etcd_ca_exts_server: etcd_v3_ca_server

+ 14 - 8
roles/openshift_facts/library/openshift_facts.py

@@ -366,13 +366,23 @@ def set_url_facts_if_unset(facts):
         console_port = facts['master']['console_port']
         console_path = facts['master']['console_path']
         etcd_use_ssl = facts['master']['etcd_use_ssl']
+        etcd_hosts = facts['master']['etcd_hosts']
         etcd_port = facts['master']['etcd_port'],
         hostname = facts['common']['hostname']
         public_hostname = facts['common']['public_hostname']
 
         if 'etcd_urls' not in facts['master']:
-            facts['master']['etcd_urls'] = [format_url(etcd_use_ssl, hostname,
-                                                       etcd_port)]
+            etcd_urls = []
+            if etcd_hosts != '':
+                facts['master']['etcd_port'] = etcd_port
+                facts['master']['embedded_etcd'] = False
+                for host in etcd_hosts:
+                    etcd_urls.append(format_url(etcd_use_ssl, host,
+                                                etcd_port))
+            else:
+                etcd_urls = [format_url(etcd_use_ssl, hostname,
+                                        etcd_port)]
+            facts['master']['etcd_urls'] = etcd_urls
         if 'api_url' not in facts['master']:
             facts['master']['api_url'] = format_url(api_use_ssl, hostname,
                                                     api_port)
@@ -695,7 +705,7 @@ class OpenShiftFacts(object):
         if 'master' in roles:
             master = dict(api_use_ssl=True, api_port='8443',
                           console_use_ssl=True, console_path='/console',
-                          console_port='8443', etcd_use_ssl=True,
+                          console_port='8443', etcd_use_ssl=True, etcd_hosts='',
                           etcd_port='4001', portal_net='172.30.0.0/16',
                           embedded_etcd=True, embedded_kube=True,
                           embedded_dns=True, dns_port='53',
@@ -707,11 +717,7 @@ class OpenShiftFacts(object):
             defaults['master'] = master
 
         if 'node' in roles:
-            node = dict(pod_cidr='', labels={}, annotations={}, portal_net='172.30.0.0/16')
-            node['resources_cpu'] = self.system_facts['processor_cores']
-            node['resources_memory'] = int(
-                int(self.system_facts['memtotal_mb']) * 1024 * 1024 * 0.75
-            )
+            node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16')
             defaults['node'] = node
 
         return defaults
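A sketch of what the new etcd_hosts handling yields, using hypothetical hostnames and port:

    # inventory / playbook vars (hypothetical values)
    openshift_master_etcd_hosts: [etcd1.example.com, etcd2.example.com]
    openshift_master_etcd_use_ssl: true
    openshift_master_etcd_port: 2379
    # resulting facts:
    #   openshift.master.etcd_urls:
    #   - https://etcd1.example.com:2379
    #   - https://etcd2.example.com:2379
    #   openshift.master.embedded_etcd: false

When etcd_hosts is left at its default empty string the previous behaviour is preserved: a single URL built from the master's own hostname, with embedded etcd still enabled.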

+ 6 - 15
roles/openshift_master/tasks/main.yml

@@ -12,11 +12,6 @@
   yum: pkg=openshift-master state=present
   register: install_result
 
-# TODO: Is this necessary or was this a workaround for an old bug in packaging?
-- name: Reload systemd units
-  command: systemctl daemon-reload
-  when: install_result | changed
-
 - name: Set master OpenShift facts
   openshift_facts:
     role: master
@@ -31,6 +26,7 @@
       console_url: "{{ openshift_master_console_url | default(None) }}"
       console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
       public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+      etcd_hosts: "{{ openshift_master_etcd_hosts | default(None)}}"
       etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
       etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
       etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}"
@@ -65,16 +61,6 @@
     path: "{{ openshift_master_config_dir }}"
     state: directory
 
-- name: Create the master certificates if they do not already exist
-  command: >
-    {{ openshift.common.admin_binary }} create-master-certs
-      --hostnames={{ openshift.common.hostname }},{{ openshift.common.public_hostname }}
-      --master={{ openshift.master.api_url }}
-      --public-master={{ openshift.master.public_api_url }}
-      --cert-dir={{ openshift_master_config_dir }} --overwrite=false
-  args:
-    creates: "{{ openshift_master_config_dir }}/master.server.key"
-
 - name: Create the policy file if it does not already exist
   command: >
     {{ openshift.common.admin_binary }} create-bootstrap-policy-file
@@ -128,6 +114,11 @@
 
 - name: Start and enable openshift-master
   service: name=openshift-master enabled=yes state=started
+  register: start_result
+
+- name: pause to prevent service restart from interfering with bootstrapping
+  pause: seconds=30
+  when: start_result | changed
 
 - name: Create the OpenShift client config dir(s)
   file:

+ 4 - 4
roles/openshift_master/templates/master.yaml.v1.j2

@@ -18,19 +18,19 @@ corsAllowedOrigins:
 {% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %}
   - {{ origin }}
 {% endfor %}
-{% if openshift.master.embedded_dns %}
+{% if openshift.master.embedded_dns | bool %}
 dnsConfig:
   bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}
 {% endif %}
 etcdClientInfo:
-  ca: ca.crt
+  ca: {{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
   certFile: master.etcd-client.crt
   keyFile: master.etcd-client.key
   urls:
 {% for etcd_url in openshift.master.etcd_urls %}
     - {{ etcd_url }}
 {% endfor %}
-{% if openshift.master.embedded_etcd %}
+{% if openshift.master.embedded_etcd | bool %}
 etcdConfig:
   address: {{ openshift.common.hostname }}:{{ openshift.master.etcd_port }}
   peerAddress: {{ openshift.common.hostname }}:7001
@@ -61,7 +61,7 @@ kubeletClientInfo:
   certFile: master.kubelet-client.crt
   keyFile: master.kubelet-client.key
   port: 10250
-{% if openshift.master.embedded_kube %}
+{% if openshift.master.embedded_kube | bool %}
 kubernetesMasterConfig:
   apiLevels:
   - v1beta3
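With external etcd in play, the etcdClientInfo block of the rendered master config comes out along these lines (URL hypothetical):

    etcdClientInfo:
      ca: master.etcd-ca.crt
      certFile: master.etcd-client.crt
      keyFile: master.etcd-client.key
      urls:
        - https://etcd1.example.com:2379

and the embedded etcdConfig stanza is skipped, since embedded_etcd is false in that case.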

+ 34 - 0
roles/openshift_master_ca/README.md

@@ -0,0 +1,34 @@
+OpenShift Master CA
+========================
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+TODO
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)

+ 0 - 1
roles/openshift_register_nodes/meta/main.yml

@@ -14,4 +14,3 @@ galaxy_info:
   - system
 dependencies:
 - { role: openshift_facts }
-

+ 22 - 0
roles/openshift_master_ca/tasks/main.yml

@@ -0,0 +1,22 @@
+---
+- name: Install the OpenShift package for admin tooling
+  yum: pkg=openshift state=present
+  register: install_result
+
+- name: Reload generated facts
+  openshift_facts:
+
+- name: Create openshift_master_config_dir if it doesn't exist
+  file:
+    path: "{{ openshift_master_config_dir }}"
+    state: directory
+
+- name: Create the master certificates if they do not already exist
+  command: >
+    {{ openshift.common.admin_binary }} create-master-certs
+      --hostnames={{ openshift.common.hostname }},{{ openshift.common.public_hostname }}
+      --master={{ openshift.master.api_url }}
+      --public-master={{ openshift.master.public_api_url }}
+      --cert-dir={{ openshift_master_config_dir }} --overwrite=false
+  args:
+    creates: "{{ openshift_master_config_dir }}/master.server.key"

+ 5 - 0
roles/openshift_master_ca/vars/main.yml

@@ -0,0 +1,5 @@
+---
+openshift_master_config_dir: /etc/openshift/master
+openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
+openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
+openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"

+ 34 - 0
roles/openshift_master_certificates/README.md

@@ -0,0 +1,34 @@
+OpenShift Master Certificates
+========================
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+TODO
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)

+ 16 - 0
roles/openshift_master_certificates/meta/main.yml

@@ -0,0 +1,16 @@
+---
+galaxy_info:
+  author: Jason DeTiberus
+  description:
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.8
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+  - system
+dependencies:
+- { role: openshift_master_ca }

+ 24 - 0
roles/openshift_master_certificates/tasks/main.yml

@@ -0,0 +1,24 @@
+---
+- name: Ensure the generated_configs directory present
+  file:
+    path: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}"
+    state: directory
+    mode: 0700
+  with_items: masters_needing_certs
+
+- file:
+    src: "{{ openshift_master_ca_cert }}"
+    dest: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}/ca.crt"
+  with_items: masters_needing_certs
+
+- name: Create the master certificates if they do not already exist
+  command: >
+    {{ openshift.common.admin_binary }} create-master-certs
+      --hostnames={{ item.openshift.common.hostname }},{{ item.openshift.common.public_hostname }}
+      --master={{ item.openshift.master.api_url }}
+      --public-master={{ item.openshift.master.public_api_url }}
+      --cert-dir={{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}
+      --overwrite=false
+  args:
+    creates: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}/master.server.crt"
+  with_items: masters_needing_certs

+ 1 - 3
roles/openshift_register_nodes/vars/main.yml

@@ -1,8 +1,6 @@
 ---
-openshift_node_config_dir: /etc/openshift/node
-openshift_master_config_dir: /etc/openshift/master
 openshift_generated_configs_dir: /etc/openshift/generated-configs
+openshift_master_config_dir: /etc/openshift/master
 openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
 openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
 openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
-openshift_kube_api_version: v1beta3

+ 6 - 8
roles/openshift_node/tasks/main.yml

@@ -1,5 +1,11 @@
 ---
 # TODO: allow for overriding default ports where possible
+- fail:
+    msg: This role requires that osn_cluster_dns_domain is set
+  when: osn_cluster_dns_domain is not defined or not osn_cluster_dns_domain
+- fail:
+    msg: This role requires that osn_cluster_dns_ip is set
+  when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip
 
 - name: Install OpenShift Node package
   yum: pkg=openshift-node state=present
@@ -10,11 +16,6 @@
   register: sdn_install_result
   when: openshift.common.use_openshift_sdn
 
-- name: Reload systemd units
-  command: systemctl daemon-reload
-  when: (node_install_result | changed or (openshift.common.use_openshift_sdn
-          and sdn_install_result | changed))
-
 - name: Set node OpenShift facts
   openshift_facts:
     role: "{{ item.role }}"
@@ -27,9 +28,6 @@
       deployment_type: "{{ openshift_deployment_type }}"
   - role: node
     local_facts:
-      resources_cpu: "{{ openshift_node_resources_cpu | default(none) }}"
-      resources_memory: "{{ openshift_node_resources_memory | default(none) }}"
-      pod_cidr: "{{ openshift_node_pod_cidr | default(none) }}"
       labels: "{{ openshift_node_labels | default(none) }}"
       annotations: "{{ openshift_node_annotations | default(none) }}"
       registry_url: "{{ oreg_url | default(none) }}"

+ 2 - 2
roles/openshift_node/templates/node.yaml.v1.j2

@@ -1,7 +1,7 @@
 allowDisabledDocker: false
 apiVersion: v1
-dnsDomain: {{ hostvars[openshift_first_master].openshift.dns.domain }}
-dnsIP: {{ hostvars[openshift_first_master].openshift.dns.ip }}
+dnsDomain: {{ osn_cluster_dns_domain }}
+dnsIP: {{ osn_cluster_dns_ip }}
 dockerConfig:
   execHandlerName: ""
 imageConfig:

+ 1 - 0
roles/openshift_node_certificates/tasks/main.yml

@@ -3,6 +3,7 @@
   file:
     path: "{{ openshift_generated_configs_dir }}"
     state: directory
+  when: nodes_needing_certs | length > 0
 
 - name: Generate the node client config
   command: >

+ 0 - 1
roles/openshift_node_certificates/vars/main.yml

@@ -5,4 +5,3 @@ openshift_generated_configs_dir: /etc/openshift/generated-configs
 openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
 openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
 openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
-openshift_kube_api_version: v1beta3

+ 0 - 15
roles/openshift_register_nodes/README.md

@@ -1,15 +0,0 @@
-OpenShift Register Nodes
-========================
-
-DEPRECATED!!!
-Nodes should now auto register themselves. Use openshift_node_certificates role instead.
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Jason DeTiberus (jdetiber@redhat.com)

+ 0 - 513
roles/openshift_register_nodes/library/kubernetes_register_node.py

@@ -1,513 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# disable pylint checks
-# permanently disabled unless someone wants to refactor the object model:
-#   too-few-public-methods
-#   no-self-use
-#   too-many-arguments
-#   too-many-locals
-#   too-many-branches
-# pylint:disable=too-many-arguments, no-self-use
-# pylint:disable=too-many-locals, too-many-branches, too-few-public-methods
-"""Ansible module to register a kubernetes node to the cluster"""
-
-import os
-
-DOCUMENTATION = '''
----
-module: kubernetes_register_node
-short_description: Registers a kubernetes node with a master
-description:
-    - Registers a kubernetes node with a master
-options:
-    name:
-        default: null
-        description:
-            - Identifier for this node (usually the node fqdn).
-        required: true
-    api_verison:
-        choices: ['v1beta1', 'v1beta3']
-        default: 'v1beta1'
-        description:
-            - Kubernetes API version to use
-        required: true
-    host_ip:
-        default: null
-        description:
-            - IP Address to associate with the node when registering.
-              Available in the following API versions: v1beta1.
-        required: false
-    cpu:
-        default: null
-        description:
-            - Number of CPUs to allocate for this node. When using the v1beta1
-              API, you must specify the CPU count as a floating point number
-              with no more than 3 decimal places. API version v1beta3 and newer
-              accepts arbitrary float values.
-        required: false
-    memory:
-        default: null
-        description:
-            - Memory available for this node. When using the v1beta1 API, you
-              must specify the memory size in bytes. API version v1beta3 and
-              newer accepts binary SI and decimal SI values.
-        required: false
-'''
-EXAMPLES = '''
-# Minimal node registration
-- openshift_register_node: name=ose3.node.example.com
-
-# Node registration using the v1beta1 API and assigning 1 CPU core and 10 GB of
-# Memory
-- openshift_register_node:
-    name: ose3.node.example.com
-    api_version: v1beta1
-    hostIP: 192.168.1.1
-    cpu: 1
-    memory: 500000000
-'''
-
-
-class ClientConfigException(Exception):
-    """Client Configuration Exception"""
-    pass
-
-class ClientConfig(object):
-    """ Representation of a client config
-
-        Attributes:
-            config (dict): dictionary representing the client configuration
-
-        Args:
-            client_opts (list of str): client options to use
-            module (AnsibleModule):
-
-        Raises:
-            ClientConfigException:
-    """
-    def __init__(self, client_opts, module):
-        kubectl = module.params['kubectl_cmd']
-        _, output, _ = module.run_command((kubectl +
-                                           ["config", "view", "-o", "json"] +
-                                           client_opts), check_rc=True)
-        self.config = json.loads(output)
-
-        if not (bool(self.config['clusters']) or
-                bool(self.config['contexts']) or
-                bool(self.config['current-context']) or
-                bool(self.config['users'])):
-            raise ClientConfigException(
-                "Client config missing required values: %s" % output
-            )
-
-    def current_context(self):
-        """ Gets the current context for the client config
-
-            Returns:
-                str: The current context as set in the config
-        """
-        return self.config['current-context']
-
-    def section_has_value(self, section_name, value):
-        """ Test if specified section contains a value
-
-            Args:
-                section_name (str): config section to test
-                value (str): value to test if present
-            Returns:
-                bool: True if successful, false otherwise
-        """
-        section = self.config[section_name]
-        if isinstance(section, dict):
-            return value in section
-        else:
-            val = next((item for item in section
-                        if item['name'] == value), None)
-            return val is not None
-
-    def has_context(self, context):
-        """ Test if specified context exists in config
-
-            Args:
-                context (str): value to test if present
-            Returns:
-                bool: True if successful, false otherwise
-        """
-        return self.section_has_value('contexts', context)
-
-    def has_user(self, user):
-        """ Test if specified user exists in config
-
-            Args:
-                context (str): value to test if present
-            Returns:
-                bool: True if successful, false otherwise
-        """
-        return self.section_has_value('users', user)
-
-    def has_cluster(self, cluster):
-        """ Test if specified cluster exists in config
-
-            Args:
-                context (str): value to test if present
-            Returns:
-                bool: True if successful, false otherwise
-        """
-        return self.section_has_value('clusters', cluster)
-
-    def get_value_for_context(self, context, attribute):
-        """ Get the value of attribute in context
-
-            Args:
-                context (str): context to search
-                attribute (str): attribute wanted
-            Returns:
-                str: The value for attribute in context
-        """
-        contexts = self.config['contexts']
-        if isinstance(contexts, dict):
-            return contexts[context][attribute]
-        else:
-            return next((c['context'][attribute] for c in contexts
-                         if c['name'] == context), None)
-
-    def get_user_for_context(self, context):
-        """ Get the user attribute in context
-
-            Args:
-                context (str): context to search
-            Returns:
-                str: The value for the attribute in context
-        """
-        return self.get_value_for_context(context, 'user')
-
-    def get_cluster_for_context(self, context):
-        """ Get the cluster attribute in context
-
-            Args:
-                context (str): context to search
-            Returns:
-                str: The value for the attribute in context
-        """
-        return self.get_value_for_context(context, 'cluster')
-
-    def get_namespace_for_context(self, context):
-        """ Get the namespace attribute in context
-
-            Args:
-                context (str): context to search
-            Returns:
-                str: The value for the attribute in context
-        """
-        return self.get_value_for_context(context, 'namespace')
-
-class Util(object):
-    """Utility methods"""
-    @staticmethod
-    def remove_empty_elements(mapping):
-        """ Recursively removes empty elements from a dict
-
-            Args:
-                mapping (dict): dict to remove empty attributes from
-            Returns:
-                dict: A copy of the dict with empty elements removed
-        """
-        if isinstance(mapping, dict):
-            copy = mapping.copy()
-            for key, val in mapping.iteritems():
-                if not val:
-                    del copy[key]
-            return copy
-        else:
-            return mapping
-
-class NodeResources(object):
-    """ Kubernetes Node Resources
-
-        Attributes:
-            resources (dict): A dictionary representing the node resources
-
-        Args:
-            version (str): kubernetes api version
-            cpu (str): string representation of the cpu resources for the node
-            memory (str): string representation of the memory resources for the
-                node
-    """
-    def __init__(self, version, cpu=None, memory=None):
-        if version == 'v1beta1':
-            self.resources = dict(capacity=dict())
-            self.resources['capacity']['cpu'] = cpu
-            self.resources['capacity']['memory'] = memory
-
-    def get_resources(self):
-        """ Get the dict representing the node resources
-
-            Returns:
-                dict: representation of the node resources with any empty
-                    elements removed
-        """
-        return Util.remove_empty_elements(self.resources)
-
-class NodeSpec(object):
-    """ Kubernetes Node Spec
-
-        Attributes:
-            spec (dict): A dictionary representing the node spec
-
-        Args:
-            version (str): kubernetes api version
-            cpu (str): string representation of the cpu resources for the node
-            memory (str): string representation of the memory resources for the
-                node
-            cidr (str): string representation of the cidr block available for
-                the node
-            externalID (str): The external id of the node
-    """
-    def __init__(self, version, cpu=None, memory=None, cidr=None,
-                 externalID=None):
-        if version == 'v1beta3':
-            self.spec = dict(podCIDR=cidr, externalID=externalID,
-                             capacity=dict())
-            self.spec['capacity']['cpu'] = cpu
-            self.spec['capacity']['memory'] = memory
-
-    def get_spec(self):
-        """ Get the dict representing the node spec
-
-            Returns:
-                dict: representation of the node spec with any empty elements
-                    removed
-        """
-        return Util.remove_empty_elements(self.spec)
-
-class Node(object):
-    """ Kubernetes Node
-
-        Attributes:
-            node (dict): A dictionary representing the node
-
-        Args:
-            module (AnsibleModule): the Ansible module instance
-            client_opts (list): client connection options
-            version (str, optional): kubernetes api version
-            node_name (str, optional): name for node
-            hostIP (str, optional): node host ip
-            cpu (str, optional): cpu resources for the node
-            memory (str, optional): memory resources for the node
-            labels (dict, optional): labels for the node
-            annotations (dict, optional): annotations for the node
-            podCIDR (str, optional): CIDR block to use for pods
-            externalID (str, optional): external id of the node
-    """
-    def __init__(self, module, client_opts, version='v1beta1', node_name=None,
-                 hostIP=None, cpu=None, memory=None, labels=None,
-                 annotations=None, podCIDR=None, externalID=None):
-        self.module = module
-        self.client_opts = client_opts
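-        # v1beta1 builds a flat node dict keyed by 'id'; v1beta3 nests name,
-        # labels and annotations under 'metadata' and the NodeSpec under 'spec'.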
-        if version == 'v1beta1':
-            self.node = dict(id=node_name,
-                             kind='Node',
-                             apiVersion=version,
-                             hostIP=hostIP,
-                             resources=NodeResources(version, cpu, memory),
-                             cidr=podCIDR,
-                             labels=labels,
-                             annotations=annotations,
-                             externalID=externalID)
-        elif version == 'v1beta3':
-            metadata = dict(name=node_name,
-                            labels=labels,
-                            annotations=annotations)
-            self.node = dict(kind='Node',
-                             apiVersion=version,
-                             metadata=metadata,
-                             spec=NodeSpec(version, cpu, memory, podCIDR,
-                                           externalID))
-
-    def get_name(self):
-        """ Get the name for the node
-
-            Returns:
-                str: node name
-        """
-        if self.node['apiVersion'] == 'v1beta1':
-            return self.node['id']
-        elif self.node['apiVersion'] == 'v1beta3':
-            return self.node['metadata']['name']
-
-    def get_node(self):
-        """ Get the dict representing the node
-
-            Returns:
-                dict: representation of the node with any empty elements
-                    removed
-        """
-        node = self.node.copy()
-        if self.node['apiVersion'] == 'v1beta1':
-            node['resources'] = self.node['resources'].get_resources()
-        elif self.node['apiVersion'] == 'v1beta3':
-            node['spec'] = self.node['spec'].get_spec()
-        return Util.remove_empty_elements(node)
-
-    def exists(self):
-        """ Tests if the node already exists
-
-            Returns:
-                bool: True if node exists, otherwise False
-        """
-        kubectl = self.module.params['kubectl_cmd']
-        _, output, _ = self.module.run_command((kubectl + ["get", "nodes"] +
-                                                self.client_opts),
-                                               check_rc=True)
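-        # The node name is matched as a regular expression against the
-        # 'kubectl get nodes' output, so a substring match counts as existing.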
-        if re.search(self.module.params['name'], output, re.MULTILINE):
-            return True
-        return False
-
-    def create(self):
-        """ Creates the node
-
-            Returns:
-                bool: True if node creation successful
-        """
-        kubectl = self.module.params['kubectl_cmd']
-        cmd = kubectl + self.client_opts + ['create', '-f', '-']
-        exit_code, output, error = self.module.run_command(
-            cmd, data=self.module.jsonify(self.get_node())
-        )
-        if exit_code != 0:
-            if re.search("minion \"%s\" already exists" % self.get_name(),
-                         error):
-                self.module.exit_json(msg="node definition already exists",
-                                      changed=False, node=self.get_node())
-            else:
-                self.module.fail_json(msg="Node creation failed.",
-                                      exit_code=exit_code,
-                                      output=output, error=error,
-                                      node=self.get_node())
-        else:
-            return True
-
-def generate_client_opts(module):
-    """ Generates the client options
-
-        Args:
-            module (AnsibleModule): the Ansible module instance
-
-        Returns:
-            list: client options
-    """
-    client_config = '~/.kube/.kubeconfig'
-    if module.params['default_client_config']:
-        client_config = module.params['default_client_config']
-    user_has_client_config = os.path.exists(os.path.expanduser(client_config))
-    if not (user_has_client_config or module.params['client_config']):
-        module.fail_json(msg="Could not locate client configuration, "
-                         "client_config must be specified if "
-                         "~/.kube/.kubeconfig is not present")
-
-    client_opts = []
-    if module.params['client_config']:
-        kubeconfig_flag = '--kubeconfig'
-        if module.params['kubeconfig_flag']:
-            kubeconfig_flag = module.params['kubeconfig_flag']
-        client_opts.append(kubeconfig_flag + '=' + os.path.expanduser(module.params['client_config']))
-
-    try:
-        config = ClientConfig(client_opts, module)
-    except ClientConfigException as ex:
-        module.fail_json(msg="Failed to get client configuration",
-                         exception=str(ex))
-
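-    # Only append the --context/--user/--cluster/--namespace flags when the
-    # requested value differs from what the current context already provides.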
-    client_context = module.params['client_context']
-    if config.has_context(client_context):
-        if client_context != config.current_context():
-            client_opts.append("--context=%s" % client_context)
-    else:
-        module.fail_json(msg="Context %s not found in client config" % client_context)
-
-    client_user = module.params['client_user']
-    if config.has_user(client_user):
-        if client_user != config.get_user_for_context(client_context):
-            client_opts.append("--user=%s" % client_user)
-    else:
-        module.fail_json(msg="User %s not found in client config" % client_user)
-
-    client_cluster = module.params['client_cluster']
-    if config.has_cluster(client_cluster):
-        if client_cluster != config.get_cluster_for_context(client_context):
-            client_opts.append("--cluster=%s" % client_cluster)
-    else:
-        module.fail_json(msg="Cluster %s not found in client config" % client_cluster)
-
-    client_namespace = module.params['client_namespace']
-    if client_namespace != config.get_namespace_for_context(client_context):
-        client_opts.append("--namespace=%s" % client_namespace)
-
-    return client_opts
-
-
-def main():
-    """ main """
-    module = AnsibleModule(
-        argument_spec=dict(
-            name=dict(required=True, type='str'),
-            host_ip=dict(type='str'),
-            api_version=dict(type='str', default='v1beta1',
-                             choices=['v1beta1', 'v1beta3']),
-            cpu=dict(type='str'),
-            memory=dict(type='str'),
-            # TODO: needs to be documented
-            labels=dict(type='dict', default={}),
-            # TODO: needs to be documented
-            annotations=dict(type='dict', default={}),
-            # TODO: needs to be documented
-            pod_cidr=dict(type='str'),
-            # TODO: needs to be documented
-            client_config=dict(type='str'),
-            # TODO: needs to be documented
-            client_cluster=dict(type='str', default='master'),
-            # TODO: needs to be documented
-            client_context=dict(type='str', default='default'),
-            # TODO: needs to be documented
-            client_namespace=dict(type='str', default='default'),
-            # TODO: needs to be documented
-            client_user=dict(type='str', default='system:admin'),
-            # TODO: needs to be documented
-            kubectl_cmd=dict(type='list', default=['kubectl']),
-            # TODO: needs to be documented
-            kubeconfig_flag=dict(type='str'),
-            # TODO: needs to be documented
-            default_client_config=dict(type='str')
-        ),
-        supports_check_mode=True
-    )
-
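-    # Ensure the node carries the kubernetes.io/hostname label, defaulting it
-    # to the node name when the caller did not supply one.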
-    labels = module.params['labels']
-    kube_hostname_label = 'kubernetes.io/hostname'
-    if kube_hostname_label not in labels:
-        labels[kube_hostname_label] = module.params['name']
-
-    node = Node(module, generate_client_opts(module),
-                module.params['api_version'], module.params['name'],
-                module.params['host_ip'], module.params['cpu'],
-                module.params['memory'], labels, module.params['annotations'],
-                module.params['pod_cidr'])
-
-    if node.exists():
-        module.exit_json(changed=False, node=node.get_node())
-    elif module.check_mode:
-        module.exit_json(changed=True, node=node.get_node())
-    elif node.create():
-        module.exit_json(changed=True, msg="Node created successfully",
-                         node=node.get_node())
-    else:
-        module.fail_json(msg="Unknown error creating node", node=node.get_node())
-
-# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
-# import module snippets
-from ansible.module_utils.basic import *
-if __name__ == '__main__':
-    main()

+ 0 - 53
roles/openshift_register_nodes/tasks/main.yml

@@ -1,53 +0,0 @@
----
-- name: Create openshift_generated_configs_dir if it doesn't exist
-  file:
-    path: "{{ openshift_generated_configs_dir }}"
-    state: directory
-
-- name: Generate the node client config
-  command: >
-    {{ openshift.common.admin_binary }} create-api-client-config
-      --certificate-authority={{ openshift_master_ca_cert }}
-      --client-dir={{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}
-      --groups=system:nodes
-      --master={{ openshift.master.api_url }}
-      --signer-cert={{ openshift_master_ca_cert }}
-      --signer-key={{ openshift_master_ca_key }}
-      --signer-serial={{ openshift_master_ca_serial }}
-      --user=system:node:{{ item.openshift.common.hostname }}
-  args:
-    chdir: "{{ openshift_generated_configs_dir }}"
-    creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}"
-  with_items: nodes_needing_certs
-
-- name: Generate the node server certificate
-  delegate_to: "{{ openshift_first_master }}"
-  command: >
-    {{ openshift.common.admin_binary }} create-server-cert
-      --cert=server.crt --key=server.key --overwrite=true
-      --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname]|unique|join(",") }}
-      --signer-cert={{ openshift_master_ca_cert }}
-      --signer-key={{ openshift_master_ca_key }}
-      --signer-serial={{ openshift_master_ca_serial }}
-  args:
-    chdir: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}"
-    creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.crt"
-  with_items: nodes_needing_certs
-
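-# Register each node with the master API using the kubernetes_register_node
-# module; nodes that already exist are left unchanged.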
-- name: Register unregistered nodes
-  kubernetes_register_node:
-    kubectl_cmd: "{{ [openshift.common.client_binary] }}"
-    default_client_config: '~/.kube/config'
-    name: "{{ item.openshift.common.hostname }}"
-    api_version: "{{ openshift_kube_api_version }}"
-    cpu: "{{ item.openshift.node.resources_cpu | default(None) }}"
-    memory: "{{ item.openshift.node.resources_memory | default(None) }}"
-    pod_cidr: "{{ item.openshift.node.pod_cidr | default(None) }}"
-    host_ip: "{{ item.openshift.common.ip }}"
-    labels: "{{ item.openshift.node.labels | default({}) }}"
-    annotations: "{{ item.openshift.node.annotations | default({}) }}"
-    client_context: default/ose3-master-example-com:8443/system:openshift-master
-    client_user: system:openshift-master/ose3-master-example-com:8443
-    client_cluster: ose3-master-example-com:8443
-  with_items: openshift_nodes
-  register: register_result