@@ -6,78 +6,315 @@ import os
 import multiprocessing
 import socket
 from subprocess import check_output, Popen
+from decimal import Decimal

 DOCUMENTATION = '''
 ---
-module: openshift_register_node
-short_description: This module registers an openshift-node with an openshift-master
-author: Jason DeTiberus
-requirements: [ openshift-node ]
-notes: Node resources can be specified using either the resources option or the following options: cpu, memory
+module: kubernetes_register_node
+short_description: Registers a kubernetes node with a master
+description:
+    - Registers a kubernetes node with a master
 options:
     name:
+        default: null
         description:
-            - id for this node (usually the node fqdn)
+            - Identifier for this node (usually the node fqdn).
         required: true
-    hostIP:
+    api_version:
+        choices: ['v1beta1', 'v1beta3']
+        default: 'v1beta1'
         description:
-            - ip address for this node
+            - Kubernetes API version to use
+        required: false
+    host_ip:
+        default: null
+        description:
+            - IP Address to associate with the node when registering.
+              Available in the following API versions: v1beta1.
         required: false
-    cpu:
+    hostnames:
+        default: []
         description:
-            - number of CPUs for this node
+            - Valid hostnames for this node. Available in the following API
+              versions: v1beta3.
         required: false
-        default: number of logical CPUs detected
-    memory:
+    external_ips:
+        default: []
         description:
-            - Memory available for this node in bytes
+            - External IP Addresses for this node. Available in the following API
+              versions: v1beta3.
         required: false
-        default: 80% MemTotal
-    resources:
+    internal_ips:
+        default: []
         description:
-            - A json string representing Node resources
+            - Internal IP Addresses for this node. Available in the following API
+              versions: v1beta3.
+        required: false
+    cpu:
+        default: null
+        description:
+            - Number of CPUs to allocate for this node. If not provided, then
+              the node will be registered to advertise the number of logical
+              CPUs available. When using the v1beta1 API, you must specify the
+              CPU count as a floating point number with no more than 3 decimal
+              places. API version v1beta3 and newer accepts arbitrary float
+              values.
+        required: false
+    memory:
+        default: null
+        description:
+            - Memory available for this node. If not provided, then the node
+              will be registered to advertise 75% of MemTotal as available
+              memory. When using the v1beta1 API, you must specify the memory
+              size in bytes. API version v1beta3 and newer accepts binary SI
+              and decimal SI values.
         required: false
 '''
 EXAMPLES = '''
 # Minimal node registration
 - openshift_register_node: name=ose3.node.example.com

-# Node registration with all options (using cpu and memory options)
+# Node registration using the v1beta1 API and assigning 1 CPU core and 10 GB of
+# Memory
 - openshift_register_node:
     name: ose3.node.example.com
+    api_version: v1beta1
-    hostIP: 192.168.1.1
+    host_ip: 192.168.1.1
-    apiVersion: v1beta1
     cpu: 1
-    memory: 1073741824
+    memory: 10000000000

-# Node registration with all options (using resources option)
+# Node registration using the v1beta3 API, setting an alternate hostname,
+# internalIP, externalIP and assigning 3.5 CPU cores and 1 TiB of Memory
 - openshift_register_node:
     name: ose3.node.example.com
-    hostIP: 192.168.1.1
-    apiVersion: v1beta1
-    resources:
-        capacity:
-            cpu: 1
-            memory: 1073741824
+    api_version: v1beta3
+    external_ips: ['192.168.1.5']
+    internal_ips: ['10.0.0.5']
+    hostnames: ['ose2.node.internal.local']
+    cpu: 3.5
+    memory: 1Ti
 '''

+
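+# Raised when the openshift client config is missing required values.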
+class ClientConfigException(Exception):
+    pass
+
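+# Thin wrapper around '/usr/bin/openshift ex config view -o json' that exposes
+# lookups for the configured contexts, users and clusters.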
+class ClientConfig:
+    def __init__(self, client_opts, module):
+        _, output, error = module.run_command(["/usr/bin/openshift", "ex",
+                                               "config", "view", "-o",
+                                               "json"] + client_opts,
+                                              check_rc = True)
+        self.config = json.loads(output)
+
+        if not (bool(self.config['clusters']) or
+                bool(self.config['contexts']) or
+                bool(self.config['current-context']) or
+                bool(self.config['users'])):
+            raise ClientConfigException("Client config missing required "
+                                        "values: %s" % output)
+
+    def current_context(self):
+        return self.config['current-context']
+
+    def section_has_value(self, section_name, value):
+        section = self.config[section_name]
+        if isinstance(section, dict):
+            return value in section
+        else:
+            val = next((item for item in section
+                        if item['name'] == value), None)
+            return val is not None
+
+    def has_context(self, context):
+        return self.section_has_value('contexts', context)
+
+    def has_user(self, user):
+        return self.section_has_value('users', user)
+
+    def has_cluster(self, cluster):
+        return self.section_has_value('clusters', cluster)
+
+    def get_value_for_context(self, context, attribute):
+        contexts = self.config['contexts']
+        if isinstance(contexts, dict):
+            return contexts[context][attribute]
+        else:
+            return next((c['context'][attribute] for c in contexts
+                         if c['name'] == context), None)
+
+    def get_user_for_context(self, context):
+        return self.get_value_for_context(context, 'user')
+
+    def get_cluster_for_context(self, context):
+        return self.get_value_for_context(context, 'cluster')
+
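+# Helpers for detecting default node resources and pruning empty values.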
+class Util:
+    @staticmethod
+    def getLogicalCores():
+        return multiprocessing.cpu_count()
+
+    @staticmethod
+    def getMemoryPct(pct):
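+        # Return the given fraction of MemTotal (from /proc/meminfo) in bytes,
+        # as a string, or "" if MemTotal cannot be found.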
+        with open('/proc/meminfo', 'r') as mem:
+            for line in mem:
+                entries = line.split()
+                if str(entries.pop(0)) == 'MemTotal:':
+                    mem_total_kb = Decimal(entries.pop(0))
+                    mem_capacity_kb = mem_total_kb * Decimal(pct)
+                    return str(mem_capacity_kb.to_integral_value() * 1024)
+
+        return ""
+
+    @staticmethod
+    def remove_empty_elements(mapping):
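+        # Return a copy of a dict with falsey values removed; non-dict
+        # arguments are returned unchanged.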
+        if isinstance(mapping, dict):
+            m = mapping.copy()
+            for key, val in mapping.iteritems():
+                if not val:
+                    del m[key]
+            return m
+        else:
+            return mapping
+
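+# Capacity block for the v1beta1 API; defaults to the detected logical CPU
+# count and 75% of MemTotal when cpu/memory are not provided.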
+class NodeResources:
+    def __init__(self, version, cpu=None, memory=None):
+        if version == 'v1beta1':
+            self.resources = dict(capacity=dict())
+            self.resources['capacity']['cpu'] = cpu if cpu else Util.getLogicalCores()
+            self.resources['capacity']['memory'] = memory if memory else Util.getMemoryPct(.75)
+
+    def get_resources(self):
+        return Util.remove_empty_elements(self.resources)
+
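+# Node spec for the v1beta3 API: podCIDR, externalID and a capacity block
+# using the same CPU/memory defaults as NodeResources.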
+class NodeSpec:
+    def __init__(self, version, cpu=None, memory=None, cidr=None, externalID=None):
+        if version == 'v1beta3':
+            self.spec = dict(podCIDR=cidr, externalID=externalID,
+                             capacity=dict())
+            self.spec['capacity']['cpu'] = cpu if cpu else Util.getLogicalCores()
+            self.spec['capacity']['memory'] = memory if memory else Util.getMemoryPct(.75)
+
+    def get_spec(self):
+        return Util.remove_empty_elements(self.spec)
+
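+# Node status for the v1beta3 API: flattens external IPs, internal IPs and
+# hostnames into the addresses list.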
+class NodeStatus:
+    def addAddresses(self, addressType, addresses):
+        addressList = []
+        for address in addresses:
+            addressList.append(dict(type=addressType, address=address))
+        return addressList
+
+    def __init__(self, version, externalIPs = [], internalIPs = [],
+                 hostnames = []):
+        if version == 'v1beta3':
+            self.status = dict(addresses = self.addAddresses('ExternalIP',
+                                                             externalIPs) +
+                                           self.addAddresses('InternalIP',
+                                                             internalIPs) +
+                                           self.addAddresses('Hostname',
+                                                             hostnames))
+
+    def get_status(self):
+        return Util.remove_empty_elements(self.status)
+
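+# Builds the node definition for the requested API version and wraps the osc
+# calls used to check for and create the node.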
+class Node:
+    def __init__(self, module, client_opts, version='v1beta1', name=None,
+                 hostIP = None, hostnames=[], externalIPs=[], internalIPs=[],
+                 cpu=None, memory=None, labels=dict(), annotations=dict(),
+                 podCIDR=None, externalID=None):
+        self.module = module
+        self.client_opts = client_opts
+        if version == 'v1beta1':
+            self.node = dict(id = name,
+                             kind = 'Node',
+                             apiVersion = version,
+                             hostIP = hostIP,
+                             resources = NodeResources(version, cpu, memory),
+                             cidr = podCIDR,
+                             labels = labels,
+                             annotations = annotations
+                             )
+        elif version == 'v1beta3':
+            metadata = dict(name = name,
+                            labels = labels,
+                            annotations = annotations
+                            )
+            self.node = dict(kind = 'Node',
+                             apiVersion = version,
+                             metadata = metadata,
+                             spec = NodeSpec(version, cpu, memory, podCIDR,
+                                             externalID),
+                             status = NodeStatus(version, externalIPs,
+                                                 internalIPs, hostnames),
+                             )
+
+    def get_name(self):
+        if self.node['apiVersion'] == 'v1beta1':
+            return self.node['id']
+        elif self.node['apiVersion'] == 'v1beta3':
+            return self.node['metadata']['name']
+
+    def get_node(self):
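+        # Serialize the node, expanding the helper objects into plain dicts
+        # and dropping empty values.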
+        node = self.node.copy()
+        if self.node['apiVersion'] == 'v1beta1':
+            node['resources'] = self.node['resources'].get_resources()
+        elif self.node['apiVersion'] == 'v1beta3':
+            node['spec'] = self.node['spec'].get_spec()
+            node['status'] = self.node['status'].get_status()
+        return Util.remove_empty_elements(node)
+
+    def exists(self):
+        _, output, error = self.module.run_command(["/usr/bin/osc", "get",
+                                                     "nodes"] + self.client_opts,
+                                                    check_rc = True)
+        if re.search(self.module.params['name'], output, re.MULTILINE):
+            return True
+        return False
+
+    def create(self):
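+        # Feed the serialized node definition to 'osc create node -f -' on
+        # stdin; exit without changes if the node already exists.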
+        cmd = ['/usr/bin/osc'] + self.client_opts + ['create', 'node', '-f', '-']
+        rc, output, error = self.module.run_command(cmd,
+                                                    data=self.module.jsonify(self.get_node()))
+        if rc != 0:
+            if re.search("minion \"%s\" already exists" % self.get_name(),
+                         error):
+                self.module.exit_json(changed=False,
+                                      msg="node definition already exists",
+                                      node=self.get_node())
+            else:
+                self.module.fail_json(msg="Node creation failed.", rc=rc,
+                                      output=output, error=error,
+                                      node=self.get_node())
+        else:
+            return True
+
 def main():
     module = AnsibleModule(
         argument_spec = dict(
-            name = dict(required = True),
-            hostIP = dict(),
-            apiVersion = dict(),
-            cpu = dict(),
-            memory = dict(),
-            resources = dict(),
-            client_config = dict(),
-            client_cluster = dict(default = 'master'),
-            client_context = dict(default = 'master'),
-            client_user = dict(default = 'admin')
+            name = dict(required = True, type = 'str'),
+            host_ip = dict(type = 'str'),
+            hostnames = dict(type = 'list', default = []),
+            external_ips = dict(type = 'list', default = []),
+            internal_ips = dict(type = 'list', default = []),
+            api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3
+                               choices = ['v1beta1', 'v1beta3']),
+            cpu = dict(type = 'str'),
+            memory = dict(type = 'str'),
+            labels = dict(type = 'dict', default = {}), # TODO: needs documented
+            annotations = dict(type = 'dict', default = {}), # TODO: needs documented
+            pod_cidr = dict(type = 'str'), # TODO: needs documented
+            external_id = dict(type = 'str'), # TODO: needs documented
+            client_config = dict(type = 'str'), # TODO: needs documented
+            client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented
+            client_context = dict(type = 'str', default = 'master'), # TODO: needs documented
+            client_user = dict(type = 'str', default = 'admin') # TODO: needs documented
         ),
         mutually_exclusive = [
-            ['resources', 'cpu'],
-            ['resources', 'memory']
+            ['host_ip', 'external_ips'],
+            ['host_ip', 'internal_ips'],
+            ['host_ip', 'hostnames'],
         ],
         supports_check_mode=True
     )
@@ -93,119 +330,61 @@ def main():
         client_opts.append("--kubeconfig=%s" % module.params['client_config'])

     try:
-        output = check_output(["/usr/bin/openshift", "ex", "config", "view",
-                               "-o", "json"] + client_opts,
-                              stderr=subprocess.STDOUT)
-    except subprocess.CalledProcessError as e:
-        module.fail_json(msg="Failed to get client configuration",
-                command=e.cmd, returncode=e.returncode, output=e.output)
-
-    config = json.loads(output)
-    if not (bool(config['clusters']) or bool(config['contexts']) or
-            bool(config['current-context']) or bool(config['users'])):
-        module.fail_json(msg="Client config missing required values",
-                         output=output)
+        config = ClientConfig(client_opts, module)
+    except ClientConfigException as e:
+        module.fail_json(msg="Failed to get client configuration", exception=e)

     client_context = module.params['client_context']
-    if client_context:
-        config_context = next((context for context in config['contexts']
-                               if context['name'] == client_context), None)
-        if not config_context:
-            module.fail_json(msg="Context %s not found in client config" %
-                             client_context)
-        if not config['current-context'] or config['current-context'] != client_context:
+    if config.has_context(client_context):
+        if client_context != config.current_context():
             client_opts.append("--context=%s" % client_context)
+    else:
+        module.fail_json(msg="Context %s not found in client config" %
+                         client_context)

     client_user = module.params['client_user']
-    if client_user:
-        config_user = next((user for user in config['users']
-                            if user['name'] == client_user), None)
-        if not config_user:
-            module.fail_json(msg="User %s not found in client config" %
-                             client_user)
-        if client_user != config_context['context']['user']:
+    if config.has_user(client_user):
+        if client_user != config.get_user_for_context(client_context):
             client_opts.append("--user=%s" % client_user)
+    else:
+        module.fail_json(msg="User %s not found in client config" %
+                         client_user)

     client_cluster = module.params['client_cluster']
-    if client_cluster:
-        config_cluster = next((cluster for cluster in config['clusters']
-                               if cluster['name'] == client_cluster), None)
-        if not client_cluster:
-            module.fail_json(msg="Cluster %s not found in client config" %
-                             client_cluster)
-        if client_cluster != config_context['context']['cluster']:
+    if config.has_cluster(client_cluster):
+        if client_cluster != config.get_cluster_for_context(client_context):
             client_opts.append("--cluster=%s" % client_cluster)
+    else:
+        module.fail_json(msg="Cluster %s not found in client config" %
+                         client_cluster)

-    node_def = dict(
-        id = module.params['name'],
-        kind = 'Node',
-        apiVersion = 'v1beta1',
-        resources = dict(
-            capacity = dict()
-        )
-    )
-
-    for key, value in module.params.iteritems():
-        if key in ['cpu', 'memory']:
-            node_def['resources']['capacity'][key] = value
-        elif key == 'name':
-            node_def['id'] = value
-        elif key != 'client_config':
-            if value:
-                node_def[key] = value
+    # TODO: provide sane defaults for some (like hostname, externalIP,
+    # internalIP, etc)
+    node = Node(module, client_opts, module.params['api_version'],
+                module.params['name'], module.params['host_ip'],
+                module.params['hostnames'], module.params['external_ips'],
+                module.params['internal_ips'], module.params['cpu'],
+                module.params['memory'], module.params['labels'],
+                module.params['annotations'], module.params['pod_cidr'],
+                module.params['external_id'])

-    if not node_def['resources']['capacity']['cpu']:
-        node_def['resources']['capacity']['cpu'] = multiprocessing.cpu_count()
-
-    if not node_def['resources']['capacity']['memory']:
-        with open('/proc/meminfo', 'r') as mem:
-            for line in mem:
-                entries = line.split()
-                if str(entries.pop(0)) == 'MemTotal:':
-                    mem_total_kb = int(entries.pop(0))
-                    mem_capacity = int(mem_total_kb * 1024 * .75)
-                    node_def['resources']['capacity']['memory'] = mem_capacity
-                    break
-
-    try:
-        output = check_output(["/usr/bin/osc", "get", "nodes"] + client_opts,
-                              stderr=subprocess.STDOUT)
-    except subprocess.CalledProcessError as e:
-        module.fail_json(msg="Failed to get node list", command=e.cmd,
-                         returncode=e.returncode, output=e.output)
-
-    if re.search(module.params['name'], output, re.MULTILINE):
-        module.exit_json(changed=False, node_def=node_def)
+    # TODO: attempt to support changing node settings where possible and/or
+    # modifying node resources
+    if node.exists():
+        module.exit_json(changed=False, node=node.get_node())
     elif module.check_mode:
-        module.exit_json(changed=True, node_def=node_def)
-
-    config_def = dict(
-        metadata = dict(
-            name = "add-node-%s" % module.params['name']
-        ),
-        kind = 'Config',
-        apiVersion = 'v1beta1',
-        items = [node_def]
-    )
-
-    p = Popen(["/usr/bin/osc"] + client_opts + ["create", "node"] + ["-f", "-"],
-              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
-              stderr=subprocess.PIPE, close_fds=True)
-    (out, err) = p.communicate(module.jsonify(config_def))
-    ret = p.returncode
-
-    if ret != 0:
-        if re.search("minion \"%s\" already exists" % module.params['name'],
-                     err):
-            module.exit_json(changed=False,
-                             msg="node definition already exists", config_def=config_def)
+        module.exit_json(changed=True, node=node.get_node())
+    else:
+        if node.create():
+            module.exit_json(changed=True,
+                             msg="Node created successfully",
+                             node=node.get_node())
         else:
-            module.fail_json(msg="Node creation failed.", ret=ret, out=out,
-                err=err, config_def=config_def)
+            module.fail_json(msg="Unknown error creating node",
+                             node=node.get_node())

-    module.exit_json(changed=True, out=out, err=err, ret=ret,
-                     node_def=config_def)

 # import module snippets
 from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+    main()