
Merge pull request #12129 from mtnbikenc/fix-1815010

Bug 1815010: Use oc_csr_approve Ansible module for CSR approval
OpenShift Merge Robot 5 years ago
parent
commit
2cf2144dff

+ 1 - 0
openshift-ansible.spec

@@ -19,6 +19,7 @@ BuildArch:      noarch
 
 Requires:      ansible >= 2.9.5
 Requires:      openshift-clients
+Requires:      openssl
 
 %description
 OpenShift RHEL Worker Management Ansible Playbooks
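
Note: the new openssl requirement exists because the oc_csr_approve.py module below shells out to "openssl req -noout -subject" to read the Common Name of each pending CSR. A minimal sketch of that interaction, assuming a base64-decoded PEM request as input (hypothetical helper, not part of the module):

import subprocess

def csr_subject(request_pem: bytes) -> str:
    # Pipe the PEM-encoded CSR to openssl and return the subject line,
    # e.g. 'subject=C = US, CN = test.io, ...'.
    proc = subprocess.run(
        ["openssl", "req", "-noout", "-subject"],
        input=request_pem, capture_output=True, check=True,
    )
    return proc.stdout.decode()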

+ 8 - 0
pytest.ini

@@ -0,0 +1,8 @@
+[pytest]
+norecursedirs =
+    .*
+    __pycache__
+    docs
+    playbooks
+python_files =
+    test_*.py

+ 296 - 0
roles/openshift_node/library/oc_csr_approve.py

@@ -0,0 +1,296 @@
+#!/usr/bin/env python
+"""oc_csr_approve module"""
+# Copyright 2020 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import json
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+    # Python >= 3.5
+    from json.decoder import JSONDecodeError
+except ImportError:
+    # Python < 3.5
+    JSONDecodeError = ValueError
+
+DOCUMENTATION = '''
+---
+module: oc_csr_approve
+
+short_description: Retrieve and approve node client and server CSRs
+
+version_added: "2.9"
+
+description:
+    - Retrieve and approve node client and server CSRs
+
+author:
+    - "Michael Gugino <mgugino@redhat.com>"
+    - "Russell Teague <rteague@redhat.com>"
+'''
+
+EXAMPLES = '''
+- name: Approve node CSRs
+  oc_csr_approve:
+    kubeconfig: "{{ openshift_node_kubeconfig_path }}"
+    nodename: "{{ ansible_nodename | lower }}"
+  delegate_to: localhost
+'''
+
+CERT_MODE = {'client': 'client auth', 'server': 'server auth'}
+
+
+def parse_subject_cn(subject_str):
+    """parse output of openssl req -noout -subject to retrieve CN.
+       example input:
+         'subject=/C=US/CN=test.io/L=Raleigh/O=Red Hat/ST=North Carolina/OU=OpenShift\n'
+         or
+         'subject=C = US, CN = test.io, L = City, O = Company, ST = State, OU = Dept\n'
+       example output: 'test.io'
+    """
+    stripped_string = subject_str[len('subject='):].strip()
+    kv_strings = [x.strip() for x in stripped_string.split(',')]
+    if len(kv_strings) == 1:
+        kv_strings = [x.strip() for x in stripped_string.split('/')][1:]
+    for item in kv_strings:
+        item_parts = [x.strip() for x in item.split('=')]
+        if item_parts[0] == 'CN':
+            return item_parts[1]
+    return None
+
+
+def csr_present_check(nodename, csr_dict):
+    """Ensure node has a CSR
+    Returns True if CSR for node is present"""
+    for _, val in csr_dict.items():
+        if val == nodename:
+            # CSR for node is present
+            return True
+    # Didn't find a CSR for node
+    return False
+
+
+class CSRapprove(object):  # pylint: disable=useless-object-inheritance
+    """Approves node CSRs"""
+
+    def __init__(self, module, oc_bin, kubeconfig, nodename):
+        """init method"""
+        self.module = module
+        self.oc_bin = oc_bin
+        self.kubeconfig = kubeconfig
+        self.nodename = nodename
+        # Build a dictionary to hold all of our output information so nothing
+        # is lost when we fail.
+        self.result = {'changed': False,
+                       'rc': 0,
+                       'client_approve_results': [],
+                       'server_approve_results': [],
+                       }
+
+    def run_command(self, command, rc_opts=None):
+        """Run a command using AnsibleModule.run_command, or fail"""
+        if rc_opts is None:
+            rc_opts = {}
+        rtnc, stdout, err = self.module.run_command(command, **rc_opts)
+        if rtnc:
+            self.result['failed'] = True
+            self.result['rc'] = rtnc
+            self.result['msg'] = str(err)
+            self.result['state'] = 'unknown'
+            self.module.fail_json(**self.result)
+        return stdout
+
+    def get_nodes(self):
+        """Get all nodes via oc get nodes -ojson"""
+        # json output is necessary for consistency here.
+        command = "{} {} get nodes -ojson".format(self.oc_bin, self.kubeconfig)
+        stdout = self.run_command(command)
+        try:
+            data = json.loads(stdout)
+        except JSONDecodeError as err:
+            self.result['failed'] = True
+            self.result['rc'] = 1
+            self.result['msg'] = str(err)
+            self.result['state'] = 'unknown'
+            self.module.fail_json(**self.result)
+        return [node['metadata']['name'] for node in data['items']]
+
+    def get_csrs(self):
+        """Retrieve CSRs from cluster using oc get csr -ojson"""
+        command = "{} {} get csr -ojson".format(self.oc_bin, self.kubeconfig)
+        stdout = self.run_command(command)
+        try:
+            data = json.loads(stdout)
+        except JSONDecodeError as err:
+            self.result['failed'] = True
+            self.result['rc'] = 1
+            self.result['msg'] = str(err)
+            self.result['state'] = 'unknown'
+            self.module.fail_json(**self.result)
+        return data['items']
+
+    def process_csrs(self, csrs, mode):
+        """Return a dictionary of pending CSRs where the format of the dict is
+           k=csr name, v=Subject Common Name"""
+        csr_dict = {}
+        for item in csrs:
+            status = item['status'].get('conditions')
+            if status:
+                # If any conditions are present, the CSR has already been processed and is not pending.
+                continue
+            if CERT_MODE[mode] not in item['spec']['usages']:
+                continue
+
+            name = item['metadata']['name']
+            request_data = base64.b64decode(item['spec']['request'])
+            command = "openssl req -noout -subject"
+            # Ansible's module.run_command accepts data to pipe via stdin
+            # as the 'data' kwarg.
+            rc_opts = {'data': request_data, 'binary_data': True}
+            stdout = self.run_command(command, rc_opts=rc_opts)
+
+            # parse common_name from subject string.
+            common_name = parse_subject_cn(stdout)
+            if common_name and common_name.startswith('system:node:'):
+                # common name is typically prepended with system:node:.
+                common_name = common_name.split('system:node:')[1]
+            # we only want to approve CSRs from nodes we know about.
+            if common_name == self.nodename:
+                csr_dict[name] = common_name
+
+        return csr_dict
+
+    def approve_csrs(self, csr_pending_list, mode):
+        """Loop through csr_pending_list and call:
+           oc adm certificate approve <item>"""
+        results_mode = "{}_approve_results".format(mode)
+        base_command = "{} {} adm certificate approve {}"
+        approve_results = []
+        for csr in csr_pending_list:
+            command = base_command.format(self.oc_bin, self.kubeconfig, csr)
+            rtnc, stdout, err = self.module.run_command(command)
+            if rtnc:
+                self.result['failed'] = True
+                self.result['rc'] = rtnc
+                self.result['msg'] = str(err)
+                self.result[results_mode].extend(approve_results)
+                self.result['state'] = 'unknown'
+                self.module.fail_json(**self.result)
+            approve_results.append("{}: {}".format(csr_pending_list[csr], stdout))
+        self.result[results_mode].extend(approve_results)
+
+        # We set changed for approved client or server CSRs.
+        self.result['changed'] = bool(approve_results) or bool(self.result['changed'])
+
+    def node_is_ready(self, nodename):
+        """Determine if node has working server certificate
+        Returns True if the node is ready"""
+        base_command = "{} {} get --raw /api/v1/nodes/{}/proxy/healthz"
+        # need this to look like /api/v1/nodes/<node>/proxy/healthz
+        # if we can hit that api endpoint (rtnc=0), the node has a valid server cert.
+        command = base_command.format(self.oc_bin, self.kubeconfig, nodename)
+        rtnc, _, _ = self.module.run_command(command)
+        return not bool(rtnc)
+
+    def runner(self, attempts, mode):
+        """Approve CSRs if they are present for node"""
+        results_mode = "{}_approve_results".format(mode)
+        # Get all CSRs, no good way to filter on pending.
+        csrs = self.get_csrs()
+        # process data in CSRs and build a dictionary of requests
+        csr_dict = self.process_csrs(csrs, mode)
+
+        if csr_present_check(self.nodename, csr_dict):
+            # Approve outstanding CSRs for node
+            self.approve_csrs(csr_dict, mode)
+        else:
+            # CSR is not present, increment attempts and retry
+            if attempts < 36:  # 36 * 5 = 3 minutes waiting for CSRs
+                self.result[results_mode].append(
+                    "Attempt: {}, Node {} not present or CSR not yet available".format(attempts, self.nodename))
+                attempts += 1
+                time.sleep(5)
+            else:
+                # Retry limit reached; stop waiting for CSRs to appear and fail
+                # Using 'describe' to have the API provide the decoded results for all CSRs
+                command = "{} {} describe csr".format(self.oc_bin, self.kubeconfig)
+                stdout = self.run_command(command)
+                self.result['failed'] = True
+                self.result['rc'] = 1
+                self.result['msg'] = "Node {} not present or could not find {} CSR".format(self.nodename, mode)
+                self.result['oc_describe_csr'] = stdout
+                self.module.fail_json(**self.result)
+
+        return attempts
+
+    def run(self):
+        """execute the CSR approval process"""
+
+        # # Client Cert Section # #
+        mode = "client"
+        attempts = 1
+        while True:
+            # If the node is in the list of all nodes, we do not need to approve client CSRs
+            if self.nodename not in self.get_nodes():
+                attempts = self.runner(attempts, mode)
+            else:
+                self.result["{}_approve_results".format(mode)].append(
+                    "Node {} is present in node list".format(self.nodename))
+                break
+
+        # # Server Cert Section # #
+        mode = "server"
+        attempts = 1
+        while True:
+            # If the node API is healthy, we do not need to approve server CSRs
+            if not self.node_is_ready(self.nodename):
+                attempts = self.runner(attempts, mode)
+            else:
+                self.result["{}_approve_results".format(mode)].append(
+                    "Node {} API is ready".format(self.nodename))
+                break
+
+        self.module.exit_json(**self.result)
+
+
+def run_module():
+    """Run this module"""
+    module_args = dict(
+        oc_bin=dict(type='path', required=False, default='oc'),
+        kubeconfig=dict(type='path', required=True),
+        nodename=dict(type='str', required=True),
+    )
+    module = AnsibleModule(
+        supports_check_mode=False,
+        argument_spec=module_args
+    )
+    oc_bin = module.params['oc_bin']
+    kubeconfig = '--kubeconfig={}'.format(module.params['kubeconfig'])
+    nodename = module.params['nodename']
+
+    approver = CSRapprove(module, oc_bin, kubeconfig, nodename)
+    approver.run()
+
+
+def main():
+    """main"""
+    run_module()
+
+
+if __name__ == '__main__':
+    main()
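
Note: parse_subject_cn above accepts both the slash-delimited subject format printed by older OpenSSL releases and the comma-delimited format used by newer ones, which is why both styles appear in its docstring. For illustration (values taken from that docstring; assumes the module file is importable, as the unit tests below arrange by adding roles/openshift_node/library to sys.path):

from oc_csr_approve import parse_subject_cn

# Older OpenSSL output style (slash-delimited)
print(parse_subject_cn('subject=/C=US/CN=test.io/L=Raleigh/O=Red Hat/ST=North Carolina/OU=OpenShift\n'))  # test.io
# Newer OpenSSL output style (comma-delimited)
print(parse_subject_cn('subject=C = US, CN = test.io, L = City, O = Company, ST = State, OU = Dept\n'))   # test.io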

+ 1 - 2
roles/openshift_node/tasks/apply_machine_config.yml

@@ -86,7 +86,7 @@
   #  reboot_timeout: 600  # default, 10 minutes
 
 - block:
-  - name: Wait for nodes to report ready
+  - name: Wait for node to report ready
     command: >
       oc get node {{ ansible_nodename | lower }}
       --kubeconfig={{ openshift_node_kubeconfig_path }}
@@ -105,4 +105,3 @@
   - name: DEBUG - Node failed to report ready
     fail:
       msg: "Node failed to report ready"
-    delegate_to: localhost

+ 7 - 65
roles/openshift_node/tasks/config.yml

@@ -172,83 +172,26 @@
       msg: "Ignition apply failed"
 
 - block:
-  - name: Approve node-bootstrapper CSR
-    shell: >
-      count=0;
-      for csr in `oc --kubeconfig={{ openshift_node_kubeconfig_path }} get csr --no-headers \
-        | grep " system:serviceaccount:openshift-machine-config-operator:node-bootstrapper " \
-        | cut -d " " -f1`;
-      do
-        oc --kubeconfig={{ openshift_node_kubeconfig_path }} describe csr/$csr \
-          | grep " system:node:{{ hostvars[item].ansible_nodename | lower }}$";
-        if [ $? -eq 0 ];
-        then
-          oc --kubeconfig={{ openshift_node_kubeconfig_path }} adm certificate approve ${csr};
-          if [ $? -eq 0 ];
-          then
-            count=$((count+1));
-          fi;
-        fi;
-      done;
-      exit $((!count));
-    loop: "{{ ansible_play_batch }}"
+  - name: Approve node CSRs
+    oc_csr_approve:
+      kubeconfig: "{{ openshift_node_kubeconfig_path }}"
+      nodename: "{{ ansible_nodename | lower }}"
     delegate_to: localhost
-    run_once: true
-    register: oc_get
-    until:
-    - oc_get is success
-    retries: 6
-    delay: 5
 
   rescue:
   - import_tasks: gather_debug.yml
 
-  - name: DEBUG - Failed to approve node-bootstrapper CSR
+  - name: DEBUG - Failed to approve node CSRs
     fail:
       msg: "Failed to approve node-bootstrapper CSR"
-    delegate_to: localhost
 
 - block:
-  - name: Approve node CSR
-    shell: >
-      count=0;
-      for csr in `oc --kubeconfig={{ openshift_node_kubeconfig_path }} get csr --no-headers \
-        | grep " system:node:{{ hostvars[item].ansible_nodename | lower }} " \
-        | cut -d " " -f1`;
-      do
-        oc --kubeconfig={{ openshift_node_kubeconfig_path }} adm certificate approve ${csr};
-        if [ $? -eq 0 ];
-        then
-          count=$((count+1));
-        fi;
-      done;
-      exit $((!count));
-    loop: "{{ ansible_play_batch }}"
-    delegate_to: localhost
-    run_once: true
-    register: oc_get
-    until:
-    - oc_get is success
-    retries: 6
-    delay: 5
-
-  rescue:
-  - import_tasks: gather_debug.yml
-
-  - name: DEBUG - Failed to approve node CSR
-    fail:
-      msg: "Failed to approve node CSR"
-    delegate_to: localhost
-
-- block:
-  - name: Wait for nodes to report ready
+  - name: Wait for node to report ready
     command: >
-      oc get node {{ hostvars[item].ansible_nodename | lower }}
+      oc get node {{ ansible_nodename | lower }}
       --kubeconfig={{ openshift_node_kubeconfig_path }}
       --output=jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
-    loop: "{{ ansible_play_batch }}"
     delegate_to: localhost
-    run_once: true
     register: oc_get
     until:
     - oc_get.stdout == "True"
@@ -262,4 +205,3 @@
   - name: DEBUG - Node failed to report ready
     fail:
       msg: "Node failed to report ready"
-    delegate_to: localhost

+ 1 - 3
roles/openshift_node/tasks/gather_debug.yml

@@ -12,12 +12,10 @@
 
 - name: Gather Debug - Get complete node objects
   command: >
-    oc get node {{ hostvars[item].ansible_nodename | lower }}
+    oc get node {{ ansible_nodename | lower }}
     --kubeconfig={{ openshift_node_kubeconfig_path }}
     --output=json
-  loop: "{{ ansible_play_batch }}"
   delegate_to: localhost
-  run_once: true
   changed_when: false
   ignore_errors: true
   register: oc_get

File diff suppressed because it is too large
+ 38 - 0
roles/openshift_node/test/test_data/oc_csr_approve_pending.json


File diff suppressed because it is too large
+ 361 - 0
roles/openshift_node/test/test_data/oc_csr_server_multiple_pends_one_host.json


+ 450 - 0
roles/openshift_node/test/test_data/oc_get_nodes.json

@@ -0,0 +1,450 @@
+{
+    "apiVersion": "v1",
+    "items": [
+        {
+            "apiVersion": "v1",
+            "kind": "Node",
+            "metadata": {
+                "annotations": {
+                    "volumes.kubernetes.io/controller-managed-attach-detach": "true"
+                },
+                "creationTimestamp": "2018-08-10T23:50:59Z",
+                "labels": {
+                    "beta.kubernetes.io/arch": "amd64",
+                    "beta.kubernetes.io/os": "linux",
+                    "glusterfs": "storage-host",
+                    "kubernetes.io/hostname": "fedora1.openshift.io",
+                    "node-role.kubernetes.io/compute": "true",
+                    "node-role.kubernetes.io/infra": "true",
+                    "node-role.kubernetes.io/master": "true"
+                },
+                "name": "fedora1.openshift.io",
+                "namespace": "",
+                "resourceVersion": "1732411",
+                "selfLink": "/api/v1/nodes/fedora1.openshift.io",
+                "uid": "3b52eed5-9cf8-11e8-964a-525400650cba"
+            },
+            "spec": {
+                "externalID": "fedora1.openshift.io"
+            },
+            "status": {
+                "addresses": [
+                    {
+                        "address": "192.168.124.106",
+                        "type": "InternalIP"
+                    },
+                    {
+                        "address": "fedora1.openshift.io",
+                        "type": "Hostname"
+                    }
+                ],
+                "allocatable": {
+                    "cpu": "4",
+                    "hugepages-1Gi": "0",
+                    "hugepages-2Mi": "0",
+                    "memory": "8070076Ki",
+                    "pods": "250"
+                },
+                "capacity": {
+                    "cpu": "4",
+                    "hugepages-1Gi": "0",
+                    "hugepages-2Mi": "0",
+                    "memory": "8172476Ki",
+                    "pods": "250"
+                },
+                "conditions": [
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:16Z",
+                        "lastTransitionTime": "2018-08-10T23:50:53Z",
+                        "message": "kubelet has sufficient disk space available",
+                        "reason": "KubeletHasSufficientDisk",
+                        "status": "False",
+                        "type": "OutOfDisk"
+                    },
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:16Z",
+                        "lastTransitionTime": "2018-08-10T23:50:53Z",
+                        "message": "kubelet has sufficient memory available",
+                        "reason": "KubeletHasSufficientMemory",
+                        "status": "False",
+                        "type": "MemoryPressure"
+                    },
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:16Z",
+                        "lastTransitionTime": "2018-08-10T23:50:53Z",
+                        "message": "kubelet has no disk pressure",
+                        "reason": "KubeletHasNoDiskPressure",
+                        "status": "False",
+                        "type": "DiskPressure"
+                    },
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:16Z",
+                        "lastTransitionTime": "2018-08-10T23:50:53Z",
+                        "message": "kubelet has sufficient PID available",
+                        "reason": "KubeletHasSufficientPID",
+                        "status": "False",
+                        "type": "PIDPressure"
+                    },
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:16Z",
+                        "lastTransitionTime": "2018-08-11T00:01:06Z",
+                        "message": "kubelet is posting ready status",
+                        "reason": "KubeletReady",
+                        "status": "True",
+                        "type": "Ready"
+                    }
+                ],
+                "daemonEndpoints": {
+                    "kubeletEndpoint": {
+                        "Port": 10250
+                    }
+                },
+                "images": [
+                    {
+                        "names": [
+                            "docker.io/openshift/origin-node@sha256:d8e0b4e5912e12e84ccd2b72a90ce66ce6e5569dfcc62f9cd69f0315d59c6a91",
+                            "docker.io/openshift/origin-node:v3.10",
+                            "docker.io/openshift/origin-node:v3.10.0"
+                        ],
+                        "sizeBytes": 1281495850
+                    },
+                    {
+                        "names": [
+                            "docker.io/openshift/origin-control-plane@sha256:8a030a68593d64703c0572454d3fd9475bcfadf5d26d2899f92418516c1c49be",
+                            "docker.io/openshift/origin-control-plane:v3.10",
+                            "docker.io/openshift/origin-control-plane:v3.10.0"
+                        ],
+                        "sizeBytes": 815862538
+                    },
+                    {
+                        "names": [
+                            "docker.io/gluster/gluster-centos@sha256:850fd2399d254f678b40bebe1602aa0c46d60facc7804b922c81c1524e05903a",
+                            "docker.io/gluster/gluster-centos:latest"
+                        ],
+                        "sizeBytes": 328338103
+                    },
+                    {
+                        "names": [
+                            "docker.io/openshift/origin-pod@sha256:6ae0714fe9bf19f1312e2a869bc3d7b7cd01aea330c33675f1e215e3de857385",
+                            "docker.io/openshift/origin-pod:v3.10.0"
+                        ],
+                        "sizeBytes": 222597999
+                    },
+                    {
+                        "names": [
+                            "quay.io/coreos/etcd@sha256:43fbc8a457aa0cb887da63d74a48659e13947cb74b96a53ba8f47abb6172a948",
+                            "quay.io/coreos/etcd:v3.2.22"
+                        ],
+                        "sizeBytes": 37269372
+                    }
+                ],
+                "nodeInfo": {
+                    "architecture": "amd64",
+                    "bootID": "fc58c6b9-9f67-4377-8cbe-57f0c3f7a517",
+                    "containerRuntimeVersion": "docker://1.13.1",
+                    "kernelVersion": "4.13.9-300.fc27.x86_64",
+                    "kubeProxyVersion": "v1.10.0+b81c8f8",
+                    "kubeletVersion": "v1.10.0+b81c8f8",
+                    "machineID": "57f56a8c5aeb47a98ca1fd94281c64aa",
+                    "operatingSystem": "linux",
+                    "osImage": "Fedora 27 (Cloud Edition)",
+                    "systemUUID": "57F56A8C-5AEB-47A9-8CA1-FD94281C64AA"
+                }
+            }
+        },
+        {
+            "apiVersion": "v1",
+            "kind": "Node",
+            "metadata": {
+                "annotations": {
+                    "volumes.kubernetes.io/controller-managed-attach-detach": "true"
+                },
+                "creationTimestamp": "2018-08-10T23:53:32Z",
+                "labels": {
+                    "beta.kubernetes.io/arch": "amd64",
+                    "beta.kubernetes.io/os": "linux",
+                    "glusterfs": "storage-host",
+                    "kubernetes.io/hostname": "fedora2.openshift.io",
+                    "node-role.kubernetes.io/infra": "true"
+                },
+                "name": "fedora2.openshift.io",
+                "namespace": "",
+                "resourceVersion": "1732413",
+                "selfLink": "/api/v1/nodes/fedora2.openshift.io",
+                "uid": "965edafb-9cf8-11e8-964a-525400650cba"
+            },
+            "spec": {
+                "externalID": "fedora2.openshift.io"
+            },
+            "status": {
+                "addresses": [
+                    {
+                        "address": "192.168.124.48",
+                        "type": "InternalIP"
+                    },
+                    {
+                        "address": "fedora2.openshift.io",
+                        "type": "Hostname"
+                    }
+                ],
+                "allocatable": {
+                    "cpu": "4",
+                    "hugepages-1Gi": "0",
+                    "hugepages-2Mi": "0",
+                    "memory": "8070076Ki",
+                    "pods": "250"
+                },
+                "capacity": {
+                    "cpu": "4",
+                    "hugepages-1Gi": "0",
+                    "hugepages-2Mi": "0",
+                    "memory": "8172476Ki",
+                    "pods": "250"
+                },
+                "conditions": [
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:17Z",
+                        "lastTransitionTime": "2018-08-22T21:40:58Z",
+                        "message": "kubelet has sufficient disk space available",
+                        "reason": "KubeletHasSufficientDisk",
+                        "status": "False",
+                        "type": "OutOfDisk"
+                    },
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:17Z",
+                        "lastTransitionTime": "2018-08-22T21:40:58Z",
+                        "message": "kubelet has sufficient memory available",
+                        "reason": "KubeletHasSufficientMemory",
+                        "status": "False",
+                        "type": "MemoryPressure"
+                    },
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:17Z",
+                        "lastTransitionTime": "2018-08-22T21:40:58Z",
+                        "message": "kubelet has no disk pressure",
+                        "reason": "KubeletHasNoDiskPressure",
+                        "status": "False",
+                        "type": "DiskPressure"
+                    },
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:17Z",
+                        "lastTransitionTime": "2018-08-10T23:53:32Z",
+                        "message": "kubelet has sufficient PID available",
+                        "reason": "KubeletHasSufficientPID",
+                        "status": "False",
+                        "type": "PIDPressure"
+                    },
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:17Z",
+                        "lastTransitionTime": "2018-08-22T21:41:08Z",
+                        "message": "kubelet is posting ready status",
+                        "reason": "KubeletReady",
+                        "status": "False",
+                        "type": "Ready"
+                    }
+                ],
+                "daemonEndpoints": {
+                    "kubeletEndpoint": {
+                        "Port": 10250
+                    }
+                },
+                "images": [
+                    {
+                        "names": [
+                            "docker.io/openshift/origin-node@sha256:d8e0b4e5912e12e84ccd2b72a90ce66ce6e5569dfcc62f9cd69f0315d59c6a91",
+                            "docker.io/openshift/origin-node:v3.10",
+                            "docker.io/openshift/origin-node:v3.10.0"
+                        ],
+                        "sizeBytes": 1281495850
+                    },
+                    {
+                        "names": [
+                            "docker.io/heketi/heketi@sha256:d847e721966c6b6b09a50cbe3ec209d7d6cf4ad7cca204cf114028c98a39aecd",
+                            "docker.io/heketi/heketi:latest"
+                        ],
+                        "sizeBytes": 361586900
+                    },
+                    {
+                        "names": [
+                            "docker.io/gluster/gluster-centos@sha256:850fd2399d254f678b40bebe1602aa0c46d60facc7804b922c81c1524e05903a",
+                            "docker.io/gluster/gluster-centos:latest"
+                        ],
+                        "sizeBytes": 328338103
+                    },
+                    {
+                        "names": [
+                            "docker.io/openshift/origin-pod@sha256:6ae0714fe9bf19f1312e2a869bc3d7b7cd01aea330c33675f1e215e3de857385",
+                            "docker.io/openshift/origin-pod:v3.10.0"
+                        ],
+                        "sizeBytes": 222597999
+                    }
+                ],
+                "nodeInfo": {
+                    "architecture": "amd64",
+                    "bootID": "9bced612-abc1-4129-8d92-b17e786df8dd",
+                    "containerRuntimeVersion": "docker://1.13.1",
+                    "kernelVersion": "4.13.9-300.fc27.x86_64",
+                    "kubeProxyVersion": "v1.10.0+b81c8f8",
+                    "kubeletVersion": "v1.10.0+b81c8f8",
+                    "machineID": "a883f7e82e0645578114dafea6fca8bb",
+                    "operatingSystem": "linux",
+                    "osImage": "Fedora 27 (Cloud Edition)",
+                    "systemUUID": "A883F7E8-2E06-4557-8114-DAFEA6FCA8BB"
+                }
+            }
+        },
+        {
+            "apiVersion": "v1",
+            "kind": "Node",
+            "metadata": {
+                "annotations": {
+                    "volumes.kubernetes.io/controller-managed-attach-detach": "true"
+                },
+                "creationTimestamp": "2018-08-10T23:53:32Z",
+                "labels": {
+                    "beta.kubernetes.io/arch": "amd64",
+                    "beta.kubernetes.io/os": "linux",
+                    "glusterfs": "storage-host",
+                    "kubernetes.io/hostname": "fedora3.openshift.io",
+                    "node-role.kubernetes.io/infra": "true"
+                },
+                "name": "fedora3.openshift.io",
+                "namespace": "",
+                "resourceVersion": "1732410",
+                "selfLink": "/api/v1/nodes/fedora3.openshift.io",
+                "uid": "9646e307-9cf8-11e8-964a-525400650cba"
+            },
+            "spec": {
+                "externalID": "fedora3.openshift.io"
+            },
+            "status": {
+                "addresses": [
+                    {
+                        "address": "192.168.124.171",
+                        "type": "InternalIP"
+                    },
+                    {
+                        "address": "fedora3.openshift.io",
+                        "type": "Hostname"
+                    }
+                ],
+                "allocatable": {
+                    "cpu": "4",
+                    "hugepages-1Gi": "0",
+                    "hugepages-2Mi": "0",
+                    "memory": "8070068Ki",
+                    "pods": "250"
+                },
+                "capacity": {
+                    "cpu": "4",
+                    "hugepages-1Gi": "0",
+                    "hugepages-2Mi": "0",
+                    "memory": "8172468Ki",
+                    "pods": "250"
+                },
+                "conditions": [
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:16Z",
+                        "lastTransitionTime": "2018-08-10T23:53:32Z",
+                        "message": "kubelet has sufficient disk space available",
+                        "reason": "KubeletHasSufficientDisk",
+                        "status": "False",
+                        "type": "OutOfDisk"
+                    },
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:16Z",
+                        "lastTransitionTime": "2018-08-10T23:53:32Z",
+                        "message": "kubelet has sufficient memory available",
+                        "reason": "KubeletHasSufficientMemory",
+                        "status": "False",
+                        "type": "MemoryPressure"
+                    },
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:16Z",
+                        "lastTransitionTime": "2018-08-10T23:53:32Z",
+                        "message": "kubelet has no disk pressure",
+                        "reason": "KubeletHasNoDiskPressure",
+                        "status": "False",
+                        "type": "DiskPressure"
+                    },
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:16Z",
+                        "lastTransitionTime": "2018-08-10T23:53:32Z",
+                        "message": "kubelet has sufficient PID available",
+                        "reason": "KubeletHasSufficientPID",
+                        "status": "False",
+                        "type": "PIDPressure"
+                    },
+                    {
+                        "lastHeartbeatTime": "2018-08-23T20:01:16Z",
+                        "lastTransitionTime": "2018-08-11T00:01:06Z",
+                        "message": "kubelet is posting ready status",
+                        "reason": "KubeletReady",
+                        "status": "True",
+                        "type": "Ready"
+                    }
+                ],
+                "daemonEndpoints": {
+                    "kubeletEndpoint": {
+                        "Port": 10250
+                    }
+                },
+                "images": [
+                    {
+                        "names": [
+                            "docker.io/openshift/origin-node@sha256:d8e0b4e5912e12e84ccd2b72a90ce66ce6e5569dfcc62f9cd69f0315d59c6a91",
+                            "docker.io/openshift/origin-node:v3.10",
+                            "docker.io/openshift/origin-node:v3.10.0"
+                        ],
+                        "sizeBytes": 1281495850
+                    },
+                    {
+                        "names": [
+                            "docker.io/openshift/origin-deployer@sha256:72d013cdfdf3d16557b64ac0a459c2fc4e90e37422ceed1564a2f69d68607e2a",
+                            "docker.io/openshift/origin-deployer:v3.10.0"
+                        ],
+                        "sizeBytes": 815862538
+                    },
+                    {
+                        "names": [
+                            "docker.io/heketi/heketi@sha256:e6d0362d217573a3f92792e14c611d75df04eb7bc8f245e8c44c4a9c3a870ee1",
+                            "docker.io/heketi/heketi:latest"
+                        ],
+                        "sizeBytes": 384664289
+                    },
+                    {
+                        "names": [
+                            "docker.io/gluster/gluster-centos@sha256:850fd2399d254f678b40bebe1602aa0c46d60facc7804b922c81c1524e05903a",
+                            "docker.io/gluster/gluster-centos:latest"
+                        ],
+                        "sizeBytes": 328338103
+                    },
+                    {
+                        "names": [
+                            "docker.io/openshift/origin-pod@sha256:6ae0714fe9bf19f1312e2a869bc3d7b7cd01aea330c33675f1e215e3de857385",
+                            "docker.io/openshift/origin-pod:v3.10.0"
+                        ],
+                        "sizeBytes": 222597999
+                    }
+                ],
+                "nodeInfo": {
+                    "architecture": "amd64",
+                    "bootID": "a81e3aa0-bf11-432d-b671-aa7d86344c3f",
+                    "containerRuntimeVersion": "docker://1.13.1",
+                    "kernelVersion": "4.13.9-300.fc27.x86_64",
+                    "kubeProxyVersion": "v1.10.0+b81c8f8",
+                    "kubeletVersion": "v1.10.0+b81c8f8",
+                    "machineID": "95bf4677a2ac4f8daa29a31efdb09eed",
+                    "operatingSystem": "linux",
+                    "osImage": "Fedora 27 (Cloud Edition)",
+                    "systemUUID": "95BF4677-A2AC-4F8D-AA29-A31EFDB09EED"
+                }
+            }
+        }
+    ],
+    "kind": "List",
+    "metadata": {
+        "resourceVersion": "",
+        "selfLink": ""
+    }
+}

+ 1 - 0
roles/openshift_node/test/test_data/openssl1.txt

@@ -0,0 +1 @@
+subject=C = US, CN = fedora1.openshift.io, L = Raleigh, O = Red Hat, ST = North Carolina, OU = OpenShift

+ 161 - 0
roles/openshift_node/test/test_oc_csr_approve.py

@@ -0,0 +1,161 @@
+import os
+import sys
+from unittest.mock import patch
+
+from ansible.module_utils.basic import AnsibleModule
+
+MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library'))
+sys.path.insert(1, MODULE_PATH)
+
+import oc_csr_approve  # noqa
+from oc_csr_approve import CSRapprove  # noqa
+
+# base path for text files with sample outputs.
+ASSET_PATH = os.path.realpath(os.path.join(__file__, os.pardir, 'test_data'))
+
+RUN_CMD_MOCK = 'ansible.module_utils.basic.AnsibleModule.run_command'
+
+
+class DummyModule(AnsibleModule):
+    def _load_params(self):
+        self.params = {}
+
+    def exit_json(*args, **kwargs):
+        return 0
+
+    def fail_json(*args, **kwargs):
+        raise Exception(kwargs['msg'])
+
+
+def test_parse_subject_cn():
+    subject = 'subject=/C=US/CN=fedora1.openshift.io/L=Raleigh/O=Red Hat/ST=North Carolina/OU=OpenShift\n'
+    assert oc_csr_approve.parse_subject_cn(subject) == 'fedora1.openshift.io'
+
+    subject = 'subject=C = US, CN = test.io, L = City, O = Company, ST = State, OU = Dept\n'
+    assert oc_csr_approve.parse_subject_cn(subject) == 'test.io'
+
+
+def test_csr_present_check():
+    csr_dict = {'csr-1': 'fedora1.openshift.io'}
+
+    nodename = 'fedora1.openshift.io'
+    assert oc_csr_approve.csr_present_check(nodename, csr_dict) is True
+
+    nodename = 'fedora2.openshift.io'
+    assert oc_csr_approve.csr_present_check(nodename, csr_dict) is False
+
+
+def test_get_nodes():
+    output_file = os.path.join(ASSET_PATH, 'oc_get_nodes.json')
+    with open(output_file) as stdoutfile:
+        oc_get_nodes_stdout = stdoutfile.read()
+
+    module = DummyModule({})
+    approver = CSRapprove(module, 'oc', '/dev/null', [])
+
+    with patch(RUN_CMD_MOCK) as call_mock:
+        call_mock.return_value = (0, oc_get_nodes_stdout, '')
+        all_nodes = approver.get_nodes()
+    assert all_nodes == ['fedora1.openshift.io', 'fedora2.openshift.io', 'fedora3.openshift.io']
+
+
+def test_get_csrs_client():
+    module = DummyModule({})
+    approver = CSRapprove(module, 'oc', '/dev/null', [])
+    output_file = os.path.join(ASSET_PATH, 'oc_csr_approve_pending.json')
+    with open(output_file) as stdoutfile:
+        oc_get_csr_out = stdoutfile.read()
+
+    # mock oc get csr call to cluster
+    with patch(RUN_CMD_MOCK) as call_mock:
+        call_mock.return_value = (0, oc_get_csr_out, '')
+        csrs = approver.get_csrs()
+
+    assert csrs[0]['kind'] == "CertificateSigningRequest"
+
+    output_file = os.path.join(ASSET_PATH, 'openssl1.txt')
+    with open(output_file) as stdoutfile:
+        openssl_out = stdoutfile.read()
+
+    # mock openssl req call.
+    nodename = 'fedora1.openshift.io'
+    approver = CSRapprove(module, 'oc', '/dev/null', nodename)
+    with patch(RUN_CMD_MOCK) as call_mock:
+        call_mock.return_value = (0, openssl_out, '')
+        csr_dict = approver.process_csrs(csrs, "client")
+    # actually run openssl req call.
+    csr_dict = approver.process_csrs(csrs, "client")
+    assert csr_dict['node-csr-TkefytQp8Dz4Xp7uzcw605MocvI0gWuEOGNrHhOjGNQ'] == 'fedora1.openshift.io'
+
+
+def test_get_csrs_server():
+    module = DummyModule({})
+    output_file = os.path.join(ASSET_PATH, 'oc_csr_server_multiple_pends_one_host.json')
+    with open(output_file) as stdoutfile:
+        oc_get_csr_out = stdoutfile.read()
+
+    approver = CSRapprove(module, 'oc', '/dev/null', [])
+    # mock oc get csr call to cluster
+    with patch(RUN_CMD_MOCK) as call_mock:
+        call_mock.return_value = (0, oc_get_csr_out, '')
+        csrs = approver.get_csrs()
+
+    assert csrs[0]['kind'] == "CertificateSigningRequest"
+
+    output_file = os.path.join(ASSET_PATH, 'openssl1.txt')
+    with open(output_file) as stdoutfile:
+        openssl_out = stdoutfile.read()
+
+    nodename = 'fedora1.openshift.io'
+    approver = CSRapprove(module, 'oc', '/dev/null', nodename)
+    # mock openssl req call.
+    with patch(RUN_CMD_MOCK) as call_mock:
+        call_mock.return_value = (0, openssl_out, '')
+        csr_dict = approver.process_csrs(csrs, "server")
+
+    # actually run openssl req call.
+    nodename = 'fedora1.openshift.io'
+    approver = CSRapprove(module, 'oc', '/dev/null', nodename)
+    csr_dict = approver.process_csrs(csrs, "server")
+    assert csr_dict['csr-2cxkp'] == 'fedora1.openshift.io'
+
+
+def test_process_csrs():
+    module = DummyModule({})
+    approver = CSRapprove(module, 'oc', '/dev/null', 'fedora1.openshift.io')
+    output_file = os.path.join(ASSET_PATH, 'oc_csr_approve_pending.json')
+    with open(output_file) as stdoutfile:
+        oc_get_csr_out = stdoutfile.read()
+
+    # mock oc get csr call to cluster
+    with patch(RUN_CMD_MOCK) as call_mock:
+        call_mock.return_value = (0, oc_get_csr_out, '')
+        csrs = approver.get_csrs()
+
+    csr_dict = approver.process_csrs(csrs, "client")
+    assert csr_dict == {'node-csr-TkefytQp8Dz4Xp7uzcw605MocvI0gWuEOGNrHhOjGNQ': 'fedora1.openshift.io'}
+
+
+def test_approve_csrs():
+    module = DummyModule({})
+    csr_dict = {'csr-1': 'fedora1.openshift.io'}
+    approver = CSRapprove(module, 'oc', '/dev/null', '')
+    with patch(RUN_CMD_MOCK) as call_mock:
+        call_mock.return_value = (0, 'csr-1 ok', '')
+        approver.approve_csrs(csr_dict, 'client')
+    assert approver.result['client_approve_results'] == ['fedora1.openshift.io: csr-1 ok']
+
+
+def test_node_is_ready():
+    module = DummyModule({})
+    nodename = 'fedora1.openshift.io'
+    approver = CSRapprove(module, 'oc', '/dev/null', nodename)
+    with patch(RUN_CMD_MOCK) as call_mock:
+        call_mock.return_value = (0, 'ok', '')
+        result = approver.node_is_ready(nodename)
+    assert result is True
+
+    with patch(RUN_CMD_MOCK) as call_mock:
+        call_mock.return_value = (1, 'stdout fail', 'stderr fail')
+        result = approver.node_is_ready(nodename)
+    assert result is False
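
Note: the tests above patch AnsibleModule.run_command (RUN_CMD_MOCK) so that no oc or openssl binary is needed. The same pattern can exercise the failure path of approve_csrs; a sketch reusing DummyModule, CSRapprove, patch and RUN_CMD_MOCK from the file above (not part of the committed tests):

import pytest

def test_approve_csrs_failure():
    module = DummyModule({})
    approver = CSRapprove(module, 'oc', '/dev/null', '')
    with patch(RUN_CMD_MOCK) as call_mock:
        # A non-zero return code makes the module call fail_json, which
        # DummyModule converts into a plain Exception carrying the stderr text.
        call_mock.return_value = (1, '', 'certificate approval denied')
        with pytest.raises(Exception, match='certificate approval denied'):
            approver.approve_csrs({'csr-1': 'fedora1.openshift.io'}, 'client')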

+ 1 - 0
test-requirements.txt

@@ -6,5 +6,6 @@ flake8==3.7.9
 flake8-mutable==1.2.0
 flake8-print==3.1.4
 pylint==2.4.4
+pytest==5.4.1
 setuptools-lint==0.6.0
 yamllint==1.20.0

+ 2 - 1
tox.ini

@@ -1,6 +1,6 @@
 [tox]
 envlist =
-    py36-{flake8,pylint,yamllint,ansible_syntax}
+    py36-{unit,flake8,pylint,yamllint,ansible_syntax}
 skipsdist=True
 skip_missing_interpreters=True
 
@@ -18,6 +18,7 @@ deps =
     -rtest-requirements.txt
 
 commands =
+    unit: pytest {posargs}
     flake8: flake8 {posargs}
     pylint: python setup.py lint
     yamllint: python setup.py yamllint
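
Note: the new py36-unit environment runs pytest with the collection rules from pytest.ini above (only test_*.py files, skipping docs/ and playbooks/). Outside tox, the same tests can be launched directly from Python; a sketch, assuming pytest and the packages in test-requirements.txt are installed:

import pytest

# Equivalent to the "unit: pytest {posargs}" command added to tox.ini;
# pytest.ini restricts collection to the role's test_*.py files.
raise SystemExit(pytest.main(["-q", "roles/openshift_node/test"]))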