Browse Source

Merge pull request #3197 from kwoodson/manage_node

Adding oadm_manage_node lib_openshift.
Kenny Woodson 8 years ago
parent
commit
8c7ccc601c

File diff suppressed because it is too large
+ 1477 - 0
roles/lib_openshift/library/oadm_manage_node.py


+ 1 - 1
roles/lib_openshift/library/oc_edit.py

@@ -947,7 +947,7 @@ class OpenShiftCLI(object):
 
         if self.all_namespaces:
             cmds.extend(['--all-namespaces'])
-        elif self.namespace:
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
             cmds.extend(['-n', self.namespace])
 
         cmds.extend(cmd)

+ 1 - 1
roles/lib_openshift/library/oc_obj.py

@@ -926,7 +926,7 @@ class OpenShiftCLI(object):
 
         if self.all_namespaces:
             cmds.extend(['--all-namespaces'])
-        elif self.namespace:
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
             cmds.extend(['-n', self.namespace])
 
         cmds.extend(cmd)

+ 1 - 1
roles/lib_openshift/library/oc_route.py

@@ -951,7 +951,7 @@ class OpenShiftCLI(object):
 
         if self.all_namespaces:
             cmds.extend(['--all-namespaces'])
-        elif self.namespace:
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
             cmds.extend(['-n', self.namespace])
 
         cmds.extend(cmd)

+ 1 - 1
roles/lib_openshift/library/oc_scale.py

@@ -901,7 +901,7 @@ class OpenShiftCLI(object):
 
         if self.all_namespaces:
             cmds.extend(['--all-namespaces'])
-        elif self.namespace:
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
             cmds.extend(['-n', self.namespace])
 
         cmds.extend(cmd)

+ 1 - 1
roles/lib_openshift/library/oc_secret.py

@@ -947,7 +947,7 @@ class OpenShiftCLI(object):
 
         if self.all_namespaces:
             cmds.extend(['--all-namespaces'])
-        elif self.namespace:
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
             cmds.extend(['-n', self.namespace])
 
         cmds.extend(cmd)

+ 1 - 1
roles/lib_openshift/library/oc_version.py

@@ -871,7 +871,7 @@ class OpenShiftCLI(object):
 
         if self.all_namespaces:
             cmds.extend(['--all-namespaces'])
-        elif self.namespace:
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
             cmds.extend(['-n', self.namespace])
 
         cmds.extend(cmd)

+ 38 - 0
roles/lib_openshift/src/ansible/oadm_manage_node.py

@@ -0,0 +1,38 @@
# pylint: skip-file
# flake8: noqa


def main():
    '''
    ansible oadm module for manage-node
    '''

    # build the argument spec separately for readability
    argument_spec = dict(
        debug=dict(default=False, type='bool'),
        kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
        node=dict(default=None, type='list'),
        selector=dict(default=None, type='str'),
        pod_selector=dict(default=None, type='str'),
        schedulable=dict(default=None, type='bool'),
        list_pods=dict(default=False, type='bool'),
        evacuate=dict(default=False, type='bool'),
        dry_run=dict(default=False, type='bool'),
        force=dict(default=False, type='bool'),
        grace_period=dict(default=None, type='int'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[["selector", "node"], ['evacuate', 'list_pods'], ['list_pods', 'schedulable']],
        required_one_of=[["node", "selector"]],

        supports_check_mode=True,
    )

    # delegate the real work to the idempotent class method
    results = ManageNode.run_ansible(module.params, module.check_mode)

    if 'failed' in results:
        module.fail_json(**results)

    module.exit_json(**results)


if __name__ == "__main__":
    main()

+ 209 - 0
roles/lib_openshift/src/class/oadm_manage_node.py

@@ -0,0 +1,209 @@
+# pylint: skip-file
+# flake8: noqa
+
+
class ManageNodeException(Exception):
    '''Exception type raised for manage-node specific failures.'''
    pass
+
+
class ManageNodeConfig(OpenShiftCLIConfig):
    ''' DTO carrying the options for the manage-node command. '''
    def __init__(self, kubeconfig, node_options):
        # manage-node takes no resource name and no namespace, hence the two Nones
        super(ManageNodeConfig, self).__init__(None, None, kubeconfig, node_options)
+
+
# pylint: disable=too-many-instance-attributes
class ManageNode(OpenShiftCLI):
    ''' Class to wrap the oadm manage-node command line tool.

        Wraps list-pods, evacuate, and schedulable operations against one or
        more nodes, selected either by name or by label selector.
    '''

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 config,
                 verbose=False):
        ''' Constructor for ManageNode

            config: a ManageNodeConfig DTO holding the command options
            verbose: currently unused; retained for interface compatibility
        '''
        super(ManageNode, self).__init__(None, config.kubeconfig)
        self.config = config

    def evacuate(self):
        ''' formulate the params and run oadm manage-node --evacuate '''
        return self._evacuate(node=self.config.config_options['node']['value'],
                              selector=self.config.config_options['selector']['value'],
                              pod_selector=self.config.config_options['pod_selector']['value'],
                              dry_run=self.config.config_options['dry_run']['value'],
                              grace_period=self.config.config_options['grace_period']['value'],
                              force=self.config.config_options['force']['value'],
                             )

    def get_nodes(self, node=None, selector=''):
        '''perform oc get node

           Returns a list of {'name': str, 'schedulable': bool} dicts on
           success, or the raw result dict (non-zero 'returncode') on failure.
        '''
        _node = None
        _sel = None
        if node:
            _node = node
        if selector:
            _sel = selector

        results = self._get('node', rname=_node, selector=_sel)
        if results['returncode'] != 0:
            return results

        nodes = []
        items = None
        if results['results'][0]['kind'] == 'List':
            items = results['results'][0]['items']
        else:
            items = results['results']

        # use a distinct loop variable; the original shadowed the `node` parameter
        for item in items:
            _node = {}
            _node['name'] = item['metadata']['name']
            # a node is schedulable unless spec.unschedulable is present
            _node['schedulable'] = True
            if 'unschedulable' in item['spec']:
                _node['schedulable'] = False
            nodes.append(_node)

        return nodes

    def get_pods_from_node(self, node, pod_selector=None):
        '''return pods for a node'''
        results = self._list_pods(node=[node], pod_selector=pod_selector)

        if results['returncode'] != 0:
            return results

        # When a selector or node is matched it is returned along with the json.
        # We are going to split the results based on the regexp and then
        # load the json for each matching node.
        # Before we return we are going to loop over the results and pull out the node names.
        # {'node': [pod, pod], 'node': [pod, pod]}
        # 3.2 includes the following lines in stdout: "Listing matched pods on node:"
        all_pods = []
        if "Listing matched" in results['results']:
            # raw string so the regex escapes are explicit
            listing_match = re.compile(r'\n^Listing matched.*$\n', flags=re.MULTILINE)
            pods = listing_match.split(results['results'])
            for pod in pods:
                if pod:
                    all_pods.extend(json.loads(pod)['items'])

        # 3.3 specific
        else:
            # this is gross but I filed a bug...
            # https://bugzilla.redhat.com/show_bug.cgi?id=1381621
            # build our own json from the output.
            all_pods = json.loads(results['results'])['items']

        return all_pods

    def list_pods(self):
        ''' run oadm manage-node --list-pods

            Returns {'nodes': {node_name: [pod, ...]}, 'returncode': 0} on
            success, or the failing result dict from a sub-call.
        '''
        _nodes = self.config.config_options['node']['value']
        _selector = self.config.config_options['selector']['value']
        _pod_selector = self.config.config_options['pod_selector']['value']

        if not _nodes:
            _nodes = self.get_nodes(selector=_selector)
        else:
            _nodes = [{'name': name} for name in _nodes]

        all_pods = {}
        for node in _nodes:
            results = self.get_pods_from_node(node['name'], pod_selector=_pod_selector)
            # a dict here is an error result from get_pods_from_node
            if isinstance(results, dict):
                return results
            all_pods[node['name']] = results

        results = {}
        results['nodes'] = all_pods
        results['returncode'] = 0
        return results

    def schedulable(self):
        '''oadm manage-node call for making nodes unschedulable'''
        nodes = self.config.config_options['node']['value']
        selector = self.config.config_options['selector']['value']

        if not nodes:
            nodes = self.get_nodes(selector=selector)
        else:
            tmp_nodes = []
            for name in nodes:
                tmp_result = self.get_nodes(name)
                if isinstance(tmp_result, dict):
                    tmp_nodes.append(tmp_result)
                    continue
                tmp_nodes.extend(tmp_result)
            nodes = tmp_nodes

        # This is a short circuit based on the way we fetch nodes.
        # If node is a dict/list then we've already fetched them.
        for node in nodes:
            if isinstance(node, dict) and 'returncode' in node:
                return {'results': nodes, 'returncode': node['returncode']}
            if isinstance(node, list) and 'returncode' in node[0]:
                return {'results': nodes, 'returncode': node[0]['returncode']}
        # check all the nodes that were returned and verify they are:
        # node['schedulable'] == self.config.config_options['schedulable']['value']
        if any([node['schedulable'] != self.config.config_options['schedulable']['value'] for node in nodes]):

            results = self._schedulable(node=self.config.config_options['node']['value'],
                                        selector=self.config.config_options['selector']['value'],
                                        schedulable=self.config.config_options['schedulable']['value'])

            # 'NAME                            STATUS    AGE\\nip-172-31-49-140.ec2.internal   Ready     4h\\n'  # E501
            # normalize formatting with previous return objects
            if results['results'].startswith('NAME'):
                nodes = []
                # removing header line and trailing new line character of node lines
                for node_results in results['results'].split('\n')[1:-1]:
                    parts = node_results.split()
                    nodes.append({'name': parts[0], 'schedulable': parts[1] == 'Ready'})
                results['nodes'] = nodes

            return results

        # nodes already match the desired schedulable state: nothing to do
        results = {}
        results['returncode'] = 0
        results['changed'] = False
        results['nodes'] = nodes

        return results

    @staticmethod
    def run_ansible(params, check_mode):
        '''run the idempotent ansible code

           params: the ansible module parameters
           check_mode: when True, report instead of mutating state
        '''
        nconfig = ManageNodeConfig(params['kubeconfig'],
                                   {'node': {'value': params['node'], 'include': True},
                                    'selector': {'value': params['selector'], 'include': True},
                                    'pod_selector': {'value': params['pod_selector'], 'include': True},
                                    'schedulable': {'value': params['schedulable'], 'include': True},
                                    'list_pods': {'value': params['list_pods'], 'include': True},
                                    'evacuate': {'value': params['evacuate'], 'include': True},
                                    'dry_run': {'value': params['dry_run'], 'include': True},
                                    'force': {'value': params['force'], 'include': True},
                                    'grace_period': {'value': params['grace_period'], 'include': True},
                                   })

        oadm_mn = ManageNode(nconfig)
        # Run the oadm manage-node commands
        results = None
        changed = False
        # PEP 8: compare against None with `is not`, never `!=`
        if params['schedulable'] is not None:
            if check_mode:
                # schedulable returns results after the fact.
                # We need to redo how this works to support check_mode completely.
                return {'changed': True, 'msg': 'CHECK_MODE: would have called schedulable.'}
            results = oadm_mn.schedulable()
            if 'changed' not in results:
                changed = True

        # NOTE: evacuate/list_pods overwrite any schedulable results above
        if params['evacuate']:
            results = oadm_mn.evacuate()
            changed = True
        elif params['list_pods']:
            results = oadm_mn.list_pods()

        if not results or results['returncode'] != 0:
            return {'failed': True, 'msg': results}

        return {'changed': changed, 'results': results, 'state': "present"}

+ 88 - 0
roles/lib_openshift/src/doc/manage_node

@@ -0,0 +1,88 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oadm_manage_node
+short_description: Module to manage openshift nodes
+description:
+  - Manage openshift nodes programmatically.
+options:
+  kubeconfig:
+    description:
+    - The path for the kubeconfig file to use for authentication
+    required: false
+    default: /etc/origin/master/admin.kubeconfig
+    aliases: []
+  debug:
+    description:
+    - Turn on debug output.
+    required: false
+    default: False
+    aliases: []
+  node:
+    description:
+    - A list of the nodes being managed
+    required: false
+    default: None
+    aliases: []
+  selector:
+    description:
+    - The selector when filtering on node labels
+    required: false
+    default: None
+    aliases: []
+  pod_selector:
+    description:
+    - A selector when filtering on pod labels.
+    required: false
+    default: None
+    aliases: []
+  evacuate:
+    description:
+    - Remove all pods from a node.
+    required: false
+    default: False
+    aliases: []
+  schedulable:
+    description:
+    - whether or not openshift can schedule pods on this node
+    required: False
+    default: None
+    aliases: []
+  dry_run:
+    description:
+    - This shows the pods that would be migrated if evacuate were called
+    required: False
+    default: False
+    aliases: []
+  grace_period:
+    description:
+    - Grace period (seconds) for pods being deleted.
+    required: false
+    default: None
+    aliases: []
+  force:
+    description:
+    - Whether or not to attempt to force this action in openshift
+    required: false
+    default: False
+    aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: oadm manage-node --schedulable=true --selector=ops_node=new
+  oadm_manage_node:
+    selector: ops_node=new
+    schedulable: True
+  register: schedout
+
+- name: oadm manage-node my-k8s-node-5 --evacuate
+  oadm_manage_node:
+    node:  my-k8s-node-5
+    evacuate: True
+    force: True
+'''

+ 1 - 1
roles/lib_openshift/src/lib/base.py

@@ -216,7 +216,7 @@ class OpenShiftCLI(object):
 
         if self.all_namespaces:
             cmds.extend(['--all-namespaces'])
-        elif self.namespace:
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
             cmds.extend(['-n', self.namespace])
 
         cmds.extend(cmd)

+ 9 - 0
roles/lib_openshift/src/sources.yml

@@ -1,4 +1,13 @@
 ---
+oadm_manage_node.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/manage_node
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oadm_manage_node.py
+- ansible/oadm_manage_node.py
 oc_edit.py:
 - doc/generated
 - doc/license

+ 58 - 0
roles/lib_openshift/src/test/integration/oadm_manage_node.yml

@@ -0,0 +1,58 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oadm_manage_node.yml -M ../../../library -e "cli_master_test=$OPENSHIFT_MASTER cli_node_test=$OPENSHIFT_NODE
+---
+- hosts: "{{ cli_master_test }}"
+  gather_facts: no
+  user: root
+  tasks:
+  - name: list pods from a node
+    oadm_manage_node:
+      list_pods: True
+      node:
+      - "{{ cli_node_test }}"
+    register: podout
+  - debug: var=podout
+
+  - assert:
+      that: "'{{ cli_node_test }}' in podout.results.nodes"
+      msg: Pod data was not returned
+
+  - name: set node to unschedulable
+    oadm_manage_node:
+      schedulable: False
+      node:
+      - "{{ cli_node_test }}"
+    register: nodeout
+  - debug: var=nodeout
+
+  - name: assert that schedulable=False
+    assert:
+      that: nodeout.results.nodes[0]['schedulable'] == False
+      msg: "{{ cli_node_test }} schedulable set to True"
+
+  - name: get node schedulable
+    oc_obj:
+      kind: node
+      state: list
+      name: "{{ cli_node_test }}"
+      namespace: None
+    register: nodeout
+
+  - debug: var=nodeout
+
+  - name: assert that schedulable=False
+    assert:
+      that: nodeout.results.results[0]['spec']['unschedulable']
+
+  - name: set node to schedulable
+    oadm_manage_node:
+      schedulable: True
+      node:
+      - "{{ cli_node_test }}"
+    register: nodeout
+  - debug: var=nodeout
+
+  - name: assert that schedulable=True
+    assert:
+      that: nodeout.results.nodes[0]['schedulable']
+      msg: "{{ cli_node_test }} schedulable set to False"

+ 177 - 0
roles/lib_openshift/src/test/unit/oadm_manage_node.py

@@ -0,0 +1,177 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for oadm_manage_node
+'''
+# To run
+# python -m unittest version
+#
+# .
+# Ran 2 tests in 0.001s
+#
+# OK
+
+import os
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library')  # noqa: E501
+sys.path.insert(0, module_path)
+from oadm_manage_node import ManageNode  # noqa: E402
+
+
class ManageNodeTest(unittest.TestCase):
    '''
     Test class for oadm_manage_node
    '''

    def setUp(self):
        ''' setup method will create a file and set to known configuration '''
        pass

    @mock.patch('oadm_manage_node.ManageNode.openshift_cmd')
    def test_list_pods(self, mock_openshift_cmd):
        ''' Testing list_pods: a single node should return its pods '''
        # grace_period is declared type='int' with default None in the module
        # argument spec, so use None rather than False here.
        params = {'node': ['ip-172-31-49-140.ec2.internal'],
                  'schedulable': None,
                  'selector': None,
                  'pod_selector': None,
                  'list_pods': True,
                  'kubeconfig': '/etc/origin/master/admin.kubeconfig',
                  'evacuate': False,
                  'grace_period': None,
                  'dry_run': False,
                  'force': False}

        # canned `oadm manage-node --list-pods` output (3.3-style plain json)
        pod_list = '''{
    "metadata": {},
    "items": [
        {
            "metadata": {
                "name": "docker-registry-1-xuhik",
                "generateName": "docker-registry-1-",
                "namespace": "default",
                "selfLink": "/api/v1/namespaces/default/pods/docker-registry-1-xuhik",
                "uid": "ae2a25a2-e316-11e6-80eb-0ecdc51fcfc4",
                "resourceVersion": "1501",
                "creationTimestamp": "2017-01-25T15:55:23Z",
                "labels": {
                    "deployment": "docker-registry-1",
                    "deploymentconfig": "docker-registry",
                    "docker-registry": "default"
                },
                "annotations": {
                    "openshift.io/deployment-config.latest-version": "1",
                    "openshift.io/deployment-config.name": "docker-registry",
                    "openshift.io/deployment.name": "docker-registry-1",
                    "openshift.io/scc": "restricted"
                }
            },
            "spec": {}
        },
        {
            "metadata": {
                "name": "router-1-kp3m3",
                "generateName": "router-1-",
                "namespace": "default",
                "selfLink": "/api/v1/namespaces/default/pods/router-1-kp3m3",
                "uid": "9e71f4a5-e316-11e6-80eb-0ecdc51fcfc4",
                "resourceVersion": "1456",
                "creationTimestamp": "2017-01-25T15:54:56Z",
                "labels": {
                    "deployment": "router-1",
                    "deploymentconfig": "router",
                    "router": "router"
                },
                "annotations": {
                    "openshift.io/deployment-config.latest-version": "1",
                    "openshift.io/deployment-config.name": "router",
                    "openshift.io/deployment.name": "router-1",
                    "openshift.io/scc": "hostnetwork"
                }
            },
            "spec": {}
        }]
}'''

        mock_openshift_cmd.side_effect = [
            {"cmd": "/usr/bin/oadm manage-node ip-172-31-49-140.ec2.internal --list-pods",
             "results": pod_list,
             "returncode": 0}
        ]

        results = ManageNode.run_ansible(params, False)

        # returned a single node
        self.assertTrue(len(results['results']['nodes']) == 1)
        # returned 2 pods
        self.assertTrue(len(results['results']['nodes']['ip-172-31-49-140.ec2.internal']) == 2)

    @mock.patch('oadm_manage_node.ManageNode.openshift_cmd')
    def test_schedulable_false(self, mock_openshift_cmd):
        ''' Testing schedulable=False: node should be marked unschedulable '''
        params = {'node': ['ip-172-31-49-140.ec2.internal'],
                  'schedulable': False,
                  'selector': None,
                  'pod_selector': None,
                  'list_pods': False,
                  'kubeconfig': '/etc/origin/master/admin.kubeconfig',
                  'evacuate': False,
                  'grace_period': None,
                  'dry_run': False,
                  'force': False}

        # canned `oc get node -o json` result: a currently-schedulable node
        node = [{
            "apiVersion": "v1",
            "kind": "Node",
            "metadata": {
                "creationTimestamp": "2017-01-26T14:34:43Z",
                "labels": {
                    "beta.kubernetes.io/arch": "amd64",
                    "beta.kubernetes.io/instance-type": "m4.large",
                    "beta.kubernetes.io/os": "linux",
                    "failure-domain.beta.kubernetes.io/region": "us-east-1",
                    "failure-domain.beta.kubernetes.io/zone": "us-east-1c",
                    "hostname": "opstest-node-compute-0daaf",
                    "kubernetes.io/hostname": "ip-172-31-51-111.ec2.internal",
                    "ops_node": "old",
                    "region": "us-east-1",
                    "type": "compute"
                },
                "name": "ip-172-31-51-111.ec2.internal",
                "resourceVersion": "6936",
                "selfLink": "/api/v1/nodes/ip-172-31-51-111.ec2.internal",
                "uid": "93d7fdfb-e3d4-11e6-a982-0e84250fc302"
            },
            "spec": {
                "externalID": "i-06bb330e55c699b0f",
                "providerID": "aws:///us-east-1c/i-06bb330e55c699b0f",
            }}]

        # first call fetches the node, second performs the schedulable change
        mock_openshift_cmd.side_effect = [
            {"cmd": "/usr/bin/oc get node -o json ip-172-31-49-140.ec2.internal",
             "results": node,
             "returncode": 0},
            {"cmd": "/usr/bin/oadm manage-node ip-172-31-49-140.ec2.internal --schedulable=False",
             "results": "NAME                            STATUS    AGE\n" +
                        "ip-172-31-49-140.ec2.internal   Ready,SchedulingDisabled     5h\n",
             "returncode": 0}]
        results = ManageNode.run_ansible(params, False)

        self.assertTrue(results['changed'])
        self.assertEqual(results['results']['nodes'][0]['name'], 'ip-172-31-49-140.ec2.internal')
        self.assertEqual(results['results']['nodes'][0]['schedulable'], False)

    def tearDown(self):
        '''TearDown method'''
        pass
+
+
+if __name__ == "__main__":
+    unittest.main()