Merge pull request #2855 from detiber/updateSchedulerDefaults

Update scheduler defaults
Scott Dodson 8 years ago
Parent commit: e4a010aa08
42 changed files with 859 additions and 265 deletions
  1. .travis.yml (+0 -2)
  2. callback_plugins/default.py (+3 -2)
  3. filter_plugins/oo_filters.py (+30 -26)
  4. filter_plugins/oo_zabbix_filters.py (+2 -1)
  5. filter_plugins/openshift_master.py (+4 -6)
  6. filter_plugins/openshift_node.py (+2 -1)
  7. git/parent.py (+2 -1)
  8. git/yaml_validation.py (+1 -1)
  9. inventory/aws/hosts/ec2.py (+4 -3)
  10. inventory/gce/hosts/gce.py (+1 -0)
  11. inventory/libvirt/hosts/libvirt_generic.py (+1 -0)
  12. inventory/openstack/hosts/openstack.py (+1 -0)
  13. library/modify_yaml.py (+5 -4)
  14. library/rpm_q.py (+5 -3)
  15. lookup_plugins/oo_option.py (+2 -0)
  16. playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py (+0 -1)
  17. playbooks/aws/openshift-cluster/library/ec2_ami_find.py (+1 -0)
  18. playbooks/byo/openshift-cluster/config.yml (+1 -0)
  19. playbooks/common/openshift-cluster/config.yml (+2 -0)
  20. playbooks/common/openshift-cluster/evaluate_groups.yml (+11 -1)
  21. playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py (+5 -4)
  22. roles/etcd_common/library/delegated_serial_command.py (+9 -9)
  23. roles/kube_nfs_volumes/library/partitionpool.py (+14 -9)
  24. roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py (+0 -24)
  25. roles/openshift_certificate_expiry/library/openshift_cert_expiry.py (+2 -1)
  26. roles/openshift_cli/library/openshift_container_binary_sync.py (+3 -3)
  27. roles/openshift_facts/library/openshift_facts.py (+133 -105)
  28. roles/openshift_master/vars/main.yml (+2 -5)
  29. roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py (+96 -0)
  30. roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py (+87 -0)
  31. roles/openshift_master_facts/tasks/main.yml (+35 -2)
  32. roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py (+174 -0)
  33. roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py (+164 -0)
  34. roles/openshift_master_facts/vars/main.yml (+5 -0)
  35. roles/os_firewall/library/os_firewall_manage_iptables.py (+24 -24)
  36. setup.cfg (+0 -2)
  37. test/modify_yaml_tests.py (+2 -2)
  38. utils/Makefile (+8 -21)
  39. utils/setup.cfg (+13 -0)
  40. utils/setup.py (+1 -1)
  41. utils/src/ooinstall/utils.py (+3 -0)
  42. utils/test-requirements.txt (+1 -1)

+ 0 - 2
.travis.yml

@@ -10,6 +10,4 @@ install:
 script:
   # TODO(rhcarvalho): check syntax of other important entrypoint playbooks
   - ansible-playbook --syntax-check playbooks/byo/config.yml
-  # TODO(rhcarvalho): update make ci to pick up these tests
-  - nosetests --tests=test
   - cd utils && make ci

+ 3 - 2
callback_plugins/default.py

@@ -30,7 +30,7 @@ DEFAULT_MODULE = imp.load_source(
 try:
     from ansible.plugins.callback import CallbackBase
     BASECLASS = CallbackBase
-except ImportError: # < ansible 2.1
+except ImportError:  # < ansible 2.1
     BASECLASS = DEFAULT_MODULE.CallbackModule
 
 
@@ -46,6 +46,7 @@ class CallbackModule(DEFAULT_MODULE.CallbackModule):  # pylint: disable=too-few-
     CALLBACK_NAME = 'default'
 
     def __init__(self, *args, **kwargs):
+        # pylint: disable=non-parent-init-called
         BASECLASS.__init__(self, *args, **kwargs)
 
     def _dump_results(self, result):
@@ -57,7 +58,7 @@ class CallbackModule(DEFAULT_MODULE.CallbackModule):  # pylint: disable=too-few-
             if key in result:
                 save[key] = result.pop(key)
 
-        output = BASECLASS._dump_results(self, result) # pylint: disable=protected-access
+        output = BASECLASS._dump_results(self, result)  # pylint: disable=protected-access
 
         for key in ['stdout', 'stderr', 'msg']:
             if key in save and save[key]:
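The hunks above are mostly whitespace and lint-comment fixes around the plugin's version-fallback idiom: it prefers the CallbackBase class that ansible >= 2.1 ships and falls back to the stock default callback module on older releases, which is why __init__ has to call BASECLASS.__init__ directly and now carries a non-parent-init-called suppression. A simplified, self-contained sketch of the idiom (the real plugin falls back to DEFAULT_MODULE.CallbackModule rather than a stub):

    try:
        from ansible.plugins.callback import CallbackBase  # ansible >= 2.1
    except ImportError:  # < ansible 2.1: no importable plugin base class
        class CallbackBase(object):
            """Stand-in so the plugin still loads on old releases."""
            def __init__(self, *args, **kwargs):
                pass

    BASECLASS = CallbackBase  # resolved once, at import time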

+ 30 - 26
filter_plugins/oo_filters.py

@@ -1,32 +1,32 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 # vim: expandtab:tabstop=4:shiftwidth=4
+# pylint: disable=no-name-in-module, import-error, wrong-import-order, ungrouped-imports
 """
 Custom filters for use in openshift-ansible
 """
+import os
+import pdb
+import pkg_resources
+import re
+import json
+import yaml
 
 from ansible import errors
 from collections import Mapping
 from distutils.util import strtobool
 from distutils.version import LooseVersion
 from operator import itemgetter
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from urlparse import urlparse
 
-HAS_OPENSSL=False
+HAS_OPENSSL = False
 try:
     import OpenSSL.crypto
-    HAS_OPENSSL=True
+    HAS_OPENSSL = True
 except ImportError:
     pass
 
-import os
-import pdb
-import pkg_resources
-import re
-import json
-import yaml
-from ansible.parsing.yaml.dumper import AnsibleDumper
-from urlparse import urlparse
-
 try:
     # ansible-2.2
     # ansible.utils.unicode.to_unicode is deprecated in ansible-2.2,
@@ -36,6 +36,7 @@ except ImportError:
     # ansible-2.1
     from ansible.utils.unicode import to_unicode as to_text
 
+
 # Disabling too-many-public-methods, since filter methods are necessarily
 # public
 # pylint: disable=too-many-public-methods
@@ -74,7 +75,6 @@ class FilterModule(object):
 
         return ptr
 
-
     @staticmethod
     def oo_flatten(data):
         """ This filter plugin will flatten a list of lists
@@ -163,7 +163,7 @@ class FilterModule(object):
         else:
             retval = [FilterModule.get_attr(d, attribute) for d in data]
 
-        retval = [val for val in retval if val != None]
+        retval = [val for val in retval if val is not None]
 
         return retval
 
@@ -241,6 +241,7 @@ class FilterModule(object):
            arrange them as a string 'key=value key=value'
         """
         if not isinstance(data, dict):
+            # pylint: disable=line-too-long
             raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_combine_dict]. Got %s. Type: %s" % (str(data), str(type(data))))
 
         return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()])
@@ -293,6 +294,7 @@ class FilterModule(object):
                 }
         """
         if not isinstance(data, dict):
+            # pylint: disable=line-too-long
             raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_ec2_volume_def]. Got %s. Type: %s" % (str(data), str(type(data))))
         if host_type not in ['master', 'node', 'etcd']:
             raise errors.AnsibleFilterError("|failed expects etcd, master or node"
@@ -427,7 +429,6 @@ class FilterModule(object):
 
         return [n for n in nodes if label_filter(n)]
 
-
     @staticmethod
     def oo_parse_heat_stack_outputs(data):
         """ Formats the HEAT stack output into a usable form
@@ -592,8 +593,8 @@ class FilterModule(object):
                     returns 'value2'
             """
             for tag in tags:
-                if tag[:len(prefix)+len(key)] == prefix + key:
-                    return tag[len(prefix)+len(key)+1:]
+                if tag[:len(prefix) + len(key)] == prefix + key:
+                    return tag[len(prefix) + len(key) + 1:]
             raise KeyError(key)
 
         def _add_host(clusters,
@@ -675,7 +676,7 @@ class FilterModule(object):
         return facts
 
     @staticmethod
-    # pylint: disable=too-many-branches
+    # pylint: disable=too-many-branches, too-many-nested-blocks
     def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
         """ Generate list of persistent volumes based on oo_openshift_env
             storage options set in host variables.
@@ -684,10 +685,10 @@ class FilterModule(object):
             raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
         if not issubclass(type(groups), dict):
             raise errors.AnsibleFilterError("|failed expects groups is a dict")
-        if persistent_volumes != None and not issubclass(type(persistent_volumes), list):
+        if persistent_volumes is not None and not issubclass(type(persistent_volumes), list):
             raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list")
 
-        if persistent_volumes == None:
+        if persistent_volumes is None:
             persistent_volumes = []
         if 'hosted' in hostvars['openshift']:
             for component in hostvars['openshift']['hosted']:
@@ -695,10 +696,10 @@ class FilterModule(object):
                     params = hostvars['openshift']['hosted'][component]['storage']
                     kind = params['kind']
                     create_pv = params['create_pv']
-                    if kind != None and create_pv:
+                    if kind is not None and create_pv:
                         if kind == 'nfs':
                             host = params['host']
-                            if host == None:
+                            if host is None:
                                 if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
                                     host = groups['oo_nfs_to_config'][0]
                                 else:
@@ -746,10 +747,10 @@ class FilterModule(object):
         """
         if not issubclass(type(hostvars), dict):
             raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-        if persistent_volume_claims != None and not issubclass(type(persistent_volume_claims), list):
+        if persistent_volume_claims is not None and not issubclass(type(persistent_volume_claims), list):
             raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list")
 
-        if persistent_volume_claims == None:
+        if persistent_volume_claims is None:
             persistent_volume_claims = []
         if 'hosted' in hostvars['openshift']:
             for component in hostvars['openshift']['hosted']:
@@ -785,7 +786,7 @@ class FilterModule(object):
 
         rpms_31 = []
         for rpm in rpms:
-            if not 'atomic' in rpm:
+            if 'atomic' not in rpm:
                 rpm = rpm.replace("openshift", "atomic-openshift")
             if openshift_version:
                 rpm = rpm + openshift_version
@@ -816,7 +817,7 @@ class FilterModule(object):
             for container in pod['spec']['containers']:
                 if re.search(image_regex, container['image']):
                     matching_pods.append(pod)
-                    break # stop here, don't add a pod more than once
+                    break  # stop here, don't add a pod more than once
 
         return matching_pods
 
@@ -827,7 +828,7 @@ class FilterModule(object):
         for host in hosts:
             try:
                 retval.append(hostvars[host])
-            except errors.AnsibleError as _:
+            except errors.AnsibleError:
                 # host does not exist
                 pass
 
@@ -869,6 +870,7 @@ class FilterModule(object):
             # netloc wasn't parsed, assume url was missing scheme and path
             return parse_result.path
 
+    # pylint: disable=invalid-name, missing-docstring, unused-argument
     @staticmethod
     def oo_openshift_loadbalancer_frontends(api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
         loadbalancer_frontends = [{'name': 'atomic-openshift-api',
@@ -884,6 +886,7 @@ class FilterModule(object):
                                            'default_backend': 'nuage-monitor'})
         return loadbalancer_frontends
 
+    # pylint: disable=invalid-name, missing-docstring
     @staticmethod
     def oo_openshift_loadbalancer_backends(api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
         loadbalancer_backends = [{'name': 'atomic-openshift-api',
@@ -892,6 +895,7 @@ class FilterModule(object):
                                   'balance': 'source',
                                   'servers': FilterModule.oo_haproxy_backend_masters(servers_hostvars, api_port)}]
         if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
+            # pylint: disable=line-too-long
             loadbalancer_backends.append({'name': 'nuage-monitor',
                                           'mode': 'tcp',
                                           'option': 'tcplog',
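Most of the churn in oo_filters.py replaces equality comparisons against None (== None, != None) with identity comparisons (is None, is not None), per PEP 8. The two are not interchangeable whenever __eq__ is overridden; a hypothetical class (not from this repo) makes the difference visible:

    class AlwaysEqual(object):
        """Pathological __eq__ showing why equality tests against None can lie."""
        def __eq__(self, other):
            return True

    val = AlwaysEqual()
    print(val == None)  # True  -- __eq__ is consulted and can misreport
    print(val is None)  # False -- object identity cannot be overridden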

+ 2 - 1
filter_plugins/oo_zabbix_filters.py

@@ -7,6 +7,7 @@ Custom zabbix filters for use in openshift-ansible
 
 import pdb
 
+
 class FilterModule(object):
     ''' Custom zabbix ansible filters '''
 
@@ -107,7 +108,7 @@ class FilterModule(object):
             for results in data:
                 if cluster == results['item'][0]:
                     results = results['results']
-                    if results and len(results) > 0 and all([results[0].has_key(_key) for _key in keys]):
+                    if results and len(results) > 0 and all([_key in results[0] for _key in keys]):
                         tmp = {}
                         tmp['clusterid'] = cluster
                         for key in keys:
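The has_key() call removed above exists only on Python 2 dicts; the in operator is the portable spelling and works on both interpreter lines:

    results = [{'clusterid': 'prod', 'count': 3}]
    keys = ['clusterid', 'count']
    # Python 2 only:  results[0].has_key('clusterid')
    # Python 2 and 3:
    print(all(_key in results[0] for _key in keys))  # True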

+ 4 - 6
filter_plugins/openshift_master.py

@@ -9,20 +9,21 @@ import sys
 import yaml
 
 from ansible import errors
-from distutils.version import LooseVersion
 
-# pylint: disable=no-name-in-module,import-error
+# pylint: disable=no-name-in-module,import-error,wrong-import-order
+from distutils.version import LooseVersion
 try:
     # ansible-2.1
     from ansible.plugins.filter.core import to_bool as ansible_bool
 except ImportError:
     try:
-        #ansible-2.0.x
+        # ansible-2.0.x
         from ansible.runner.filter_plugins.core import bool as ansible_bool
     except ImportError:
         # ansible-1.9.x
         from ansible.plugins.filter.core import bool as ansible_bool
 
+
 class IdentityProviderBase(object):
     """ IdentityProviderBase
 
@@ -388,7 +389,6 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase):
                 val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
                 self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val
 
-
     def validate(self):
         ''' validate this idp instance '''
         IdentityProviderOauthBase.validate(self)
@@ -495,7 +495,6 @@ class FilterModule(object):
             idp_inst.set_provider_items()
             idp_list.append(idp_inst)
 
-
         IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type)
         return yaml.safe_dump([idp.to_dict() for idp in idp_list], default_flow_style=False)
 
@@ -575,7 +574,6 @@ class FilterModule(object):
             htpasswd_entries[user] = passwd
         return htpasswd_entries
 
-
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {"translate_idps": self.translate_idps,

+ 2 - 1
filter_plugins/openshift_node.py

@@ -6,6 +6,7 @@ Custom filters for use in openshift-node
 '''
 from ansible import errors
 
+
 class FilterModule(object):
     ''' Custom ansible filters for use by openshift_node role'''
 
@@ -23,7 +24,7 @@ class FilterModule(object):
             raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
 
         # We always use what they've specified if they've specified a value
-        if openshift_dns_ip != None:
+        if openshift_dns_ip is not None:
             return openshift_dns_ip
 
         if bool(hostvars['openshift']['common']['use_dnsmasq']):

+ 2 - 1
git/parent.py

@@ -1,4 +1,6 @@
 #!/usr/bin/env python
+# flake8: noqa
+# pylint: skip-file
 '''
   Script to determine if this commit has also
   been merged through the stage branch
@@ -93,4 +95,3 @@ def main():
 
 if __name__ == '__main__':
     main()
-

+ 1 - 1
git/yaml_validation.py

@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+# flake8: noqa
 #
 #  python yaml validator for a git commit
 #
@@ -70,4 +71,3 @@ def main():
 
 if __name__ == "__main__":
     main()
-
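The bare # flake8: noqa added here and in parent.py is the file-level escape hatch: flake8 skips the entire file when that marker appears on any line, which suits these vendored hook and inventory scripts (their pylint analogue is the # pylint: skip-file marker added below). Hand-maintained code in this PR instead gets targeted per-line codes, e.g.:

    import os  # noqa: F401 -- silences only "imported but unused" on this line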

+ 4 - 3
inventory/aws/hosts/ec2.py

@@ -1,4 +1,5 @@
 #!/usr/bin/env python2
+# pylint: skip-file
 
 '''
 EC2 external inventory script
@@ -482,7 +483,7 @@ class Ec2Inventory(object):
             if e.error_code == 'AuthFailure':
                 error = self.get_auth_error_message()
             else:
-                backend = 'Eucalyptus' if self.eucalyptus else 'AWS' 
+                backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
                 error = "Error connecting to %s backend.\n%s" % (backend, e.message)
             self.fail_with_error(error, 'getting EC2 instances')
 
@@ -700,7 +701,7 @@ class Ec2Inventory(object):
                     if self.nested_groups:
                         self.push_group(self.inventory, 'security_groups', key)
             except AttributeError:
-                self.fail_with_error('\n'.join(['Package boto seems a bit older.', 
+                self.fail_with_error('\n'.join(['Package boto seems a bit older.',
                                             'Please upgrade boto >= 2.3.0.']))
 
         # Inventory: Group by tag keys
@@ -798,7 +799,7 @@ class Ec2Inventory(object):
                         self.push_group(self.inventory, 'security_groups', key)
 
             except AttributeError:
-                self.fail_with_error('\n'.join(['Package boto seems a bit older.', 
+                self.fail_with_error('\n'.join(['Package boto seems a bit older.',
                                             'Please upgrade boto >= 2.3.0.']))
 
 

+ 1 - 0
inventory/gce/hosts/gce.py

@@ -1,4 +1,5 @@
 #!/usr/bin/env python2
+# pylint: skip-file
 # Copyright 2013 Google Inc.
 #
 # This file is part of Ansible

+ 1 - 0
inventory/libvirt/hosts/libvirt_generic.py

@@ -1,4 +1,5 @@
 #!/usr/bin/env python2
+# pylint: skip-file
 
 '''
 libvirt external inventory script

+ 1 - 0
inventory/openstack/hosts/openstack.py

@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+# pylint: skip-file
 
 # Copyright (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
 # Copyright (c) 2013, Jesse Keating <jesse.keating@rackspace.com>

+ 5 - 4
library/modify_yaml.py

@@ -30,7 +30,7 @@ def set_key(yaml_data, yaml_key, yaml_value):
             ptr[key] = {}
             ptr = ptr[key]
         elif key == yaml_key.split('.')[-1]:
-            if (key in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr):
+            if (key in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr):  # noqa: F405
                 ptr[key] = yaml_value
                 changes.append((yaml_key, yaml_value))
         else:
@@ -49,7 +49,7 @@ def main():
     # redefined-outer-name
     global module
 
-    module = AnsibleModule(
+    module = AnsibleModule(  # noqa: F405
         argument_spec=dict(
             dest=dict(required=True),
             yaml_key=dict(required=True),
@@ -94,10 +94,11 @@ def main():
     except Exception, e:
         return module.fail_json(msg=str(e))
 
+
 # ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
 # import module snippets
-from ansible.module_utils.basic import *
+from ansible.module_utils.basic import *  # noqa: F402,F403
 
 if __name__ == '__main__':
     main()
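The new noqa: F405 markers in this module stem from how old-style Ansible modules are assembled: AnsibleModule only enters the namespace through the wildcard from ansible.module_utils.basic import * at the bottom of the file (older Ansible releases replaced that line with the real module_utils source when shipping the module to a host), so flake8 cannot tell where the name comes from. A stripped-down skeleton of the layout (hypothetical module, assuming Ansible is installed):

    def main():
        module = AnsibleModule(  # noqa: F405 -- provided by the wildcard import below
            argument_spec=dict(name=dict(required=True)),
        )
        module.exit_json(changed=False, name=module.params['name'])

    # import module snippets -- kept at the bottom, as in the modules above
    # pylint: disable=wrong-import-position
    from ansible.module_utils.basic import *  # noqa: E402,F403

    if __name__ == '__main__':
        main()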

+ 5 - 3
library/rpm_q.py

@@ -9,7 +9,7 @@ available.
 """
 
 # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
-from ansible.module_utils.basic import *
+from ansible.module_utils.basic import *  # noqa: F403
 
 DOCUMENTATION = """
 ---
@@ -35,16 +35,17 @@ EXAMPLES = """
 
 RPM_BINARY = '/bin/rpm'
 
+
 def main():
     """
     Checks rpm -q for the named package and returns the installed packages
     or None if not installed.
     """
-    module = AnsibleModule(
+    module = AnsibleModule(  # noqa: F405
         argument_spec=dict(
             name=dict(required=True),
             state=dict(default='present', choices=['present', 'absent'])
-            ),
+        ),
         supports_check_mode=True
     )
 
@@ -66,5 +67,6 @@ def main():
     else:
         module.fail_json(msg="%s is installed", installed_versions=installed)
 
+
 if __name__ == '__main__':
     main()

+ 2 - 0
lookup_plugins/oo_option.py

@@ -30,9 +30,11 @@ except ImportError:
         def __init__(self, basedir=None, runner=None, **kwargs):
             self.runner = runner
             self.basedir = self.runner.basedir
+
             def get_basedir(self, variables):
                 return self.basedir
 
+
 # Reason: disable too-few-public-methods because the `run` method is the only
 #     one required by the Ansible API
 # Status: permanently disabled

+ 0 - 1
playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py

@@ -33,7 +33,6 @@ class FilterModule(object):
 
         return None
 
-
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {

+ 1 - 0
playbooks/aws/openshift-cluster/library/ec2_ami_find.py

@@ -1,5 +1,6 @@
 #!/usr/bin/python
 #pylint: skip-file
+# flake8: noqa
 #
 # This file is part of Ansible
 #

+ 1 - 0
playbooks/byo/openshift-cluster/config.yml

@@ -14,6 +14,7 @@
       name: "{{ item }}"
       groups: l_oo_all_hosts
     with_items: "{{ g_all_hosts | default([]) }}"
+    changed_when: no
 
 - name: Create initial host groups for all hosts
   hosts: l_oo_all_hosts

+ 2 - 0
playbooks/common/openshift-cluster/config.yml

@@ -12,6 +12,8 @@
   - node
 
 - include: initialize_openshift_version.yml
+  tags:
+  - always
 
 - name: Set oo_option facts
   hosts: oo_all_hosts

+ 11 - 1
playbooks/common/openshift-cluster/evaluate_groups.yml

@@ -36,6 +36,7 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ g_all_hosts | default([]) }}"
+    changed_when: no
 
   - name: Evaluate oo_masters
     add_host:
@@ -44,6 +45,7 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
+    changed_when: no
 
   - name: Evaluate oo_etcd_to_config
     add_host:
@@ -52,6 +54,7 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ g_etcd_hosts | default([]) }}"
+    changed_when: no
 
   - name: Evaluate oo_masters_to_config
     add_host:
@@ -60,6 +63,7 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
+    changed_when: no
 
   - name: Evaluate oo_nodes_to_config
     add_host:
@@ -68,9 +72,10 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
+    changed_when: no
 
   # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is
-  - name: Evaluate oo_nodes_to_config
+  - name: Add master to oo_nodes_to_config
     add_host:
       name: "{{ item }}"
       groups: oo_nodes_to_config
@@ -78,6 +83,7 @@
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ g_master_hosts | default([]) }}"
     when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
+    changed_when: no
 
   - name: Evaluate oo_first_etcd
     add_host:
@@ -85,6 +91,7 @@
       groups: oo_first_etcd
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
     when: g_etcd_hosts|length > 0
+    changed_when: no
 
   - name: Evaluate oo_first_master
     add_host:
@@ -93,6 +100,7 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
     when: g_master_hosts|length > 0
+    changed_when: no
 
   - name: Evaluate oo_lb_to_config
     add_host:
@@ -101,6 +109,7 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ g_lb_hosts | default([]) }}"
+    changed_when: no
 
   - name: Evaluate oo_nfs_to_config
     add_host:
@@ -109,3 +118,4 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ g_nfs_hosts | default([]) }}"
+    changed_when: no

+ 5 - 4
playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py

@@ -17,6 +17,7 @@ requirements: [ ]
 EXAMPLES = '''
 '''
 
+
 def modify_api_levels(level_list, remove, ensure, msg_prepend='',
                       msg_append=''):
     """ modify_api_levels """
@@ -62,7 +63,6 @@ def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
     config = yaml.safe_load(master_cfg_file.read())
     master_cfg_file.close()
 
-
     # Remove unsupported api versions and ensure supported api versions from
     # master config
     unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3']
@@ -118,7 +118,7 @@ def main():
     # redefined-outer-name
     global module
 
-    module = AnsibleModule(
+    module = AnsibleModule(  # noqa: F405
         argument_spec=dict(
             config_base=dict(required=True),
             from_version=dict(required=True, choices=['3.0']),
@@ -149,10 +149,11 @@ def main():
     except Exception, e:
         return module.fail_json(msg=str(e))
 
+
 # ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
 # import module snippets
-from ansible.module_utils.basic import *
+from ansible.module_utils.basic import *  # noqa: E402,F403
 
 if __name__ == '__main__':
     main()

+ 9 - 9
roles/etcd_common/library/delegated_serial_command.py

@@ -24,12 +24,9 @@
 
 ''' delegated_serial_command '''
 
-import copy
-import sys
 import datetime
+import errno
 import glob
-import traceback
-import re
 import shlex
 import os
 import fcntl
@@ -133,6 +130,7 @@ OPTIONS = {'chdir': None,
            'lockfile': None,
            'timeout': None}
 
+
 def check_command(commandline):
     ''' Check provided command '''
     arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
@@ -160,7 +158,7 @@ def check_command(commandline):
 # pylint: disable=too-many-statements,too-many-branches,too-many-locals
 def main():
     ''' Main module function '''
-    module = AnsibleModule(
+    module = AnsibleModule(  # noqa: F405
         argument_spec=dict(
             _uses_shell=dict(type='bool', default=False),
             command=dict(required=True),
@@ -220,9 +218,9 @@ def main():
             )
 
     if removes:
-    # do not run the command if the line contains removes=filename
-    # and the filename does not exist.  This allows idempotence
-    # of command executions.
+        # do not run the command if the line contains removes=filename
+        # and the filename does not exist.  This allows idempotence
+        # of command executions.
         path = os.path.expanduser(removes)
         if not glob.glob(path):
             module.exit_json(
@@ -268,7 +266,9 @@ def main():
         iterated=iterated
     )
 
+
 # import module snippets
-from ansible.module_utils.basic import *
+# pylint: disable=wrong-import-position
+from ansible.module_utils.basic import *  # noqa: F402,F403
 
 main()

+ 14 - 9
roles/kube_nfs_volumes/library/partitionpool.py

@@ -52,7 +52,7 @@ options:
         partitions. On 1 TiB disk, 10 partitions will be created.
 
       - Example 2: size=100G:1,10G:1 says that ratio of space occupied by 100 GiB
-        partitions and 10 GiB partitions is 1:1. Therefore, on 1 TiB disk, 500 GiB 
+        partitions and 10 GiB partitions is 1:1. Therefore, on 1 TiB disk, 500 GiB
         will be split into five 100 GiB partition and 500 GiB will be split into fifty
         10GiB partitions.
       - size=100G:1,10G:1 = 5x 100 GiB and 50x 10 GiB partitions (on 1 TiB disk).
@@ -73,7 +73,7 @@ options:
         and eight 50 GiB partitions (again, 400 GiB).
       - size=200G:1,100G:1,50G:1 = 1x 200 GiB, 4x 100 GiB and 8x 50 GiB partitions
         (on 1 TiB disk).
-        
+
   force:
     description:
       - If True, it will always overwite partition table on the disk and create new one.
@@ -81,6 +81,7 @@ options:
 
 """
 
+
 # It's not class, it's more a simple struct with almost no functionality.
 # pylint: disable=too-few-public-methods
 class PartitionSpec(object):
@@ -98,6 +99,7 @@ class PartitionSpec(object):
         """ Set count of parititions of this specification. """
         self.count = count
 
+
 def assign_space(total_size, specs):
     """
     Satisfy all the PartitionSpecs according to their weight.
@@ -113,6 +115,7 @@ def assign_space(total_size, specs):
         total_size -= num_blocks * spec.size
         total_weight -= spec.weight
 
+
 def partition(diskname, specs, force=False, check_mode=False):
     """
     Create requested partitions.
@@ -161,16 +164,17 @@ def partition(diskname, specs, force=False, check_mode=False):
         pass
     return count
 
+
 def parse_spec(text):
     """ Parse string with partition specification. """
     tokens = text.split(",")
     specs = []
     for token in tokens:
-        if not ":" in token:
+        if ":" not in token:
             token += ":1"
 
         (sizespec, weight) = token.split(':')
-        weight = float(weight) # throws exception with reasonable error string
+        weight = float(weight)  # throws exception with reasonable error string
 
         units = {"m": 1, "g": 1 << 10, "t": 1 << 20, "p": 1 << 30}
         unit = units.get(sizespec[-1].lower(), None)
@@ -184,6 +188,7 @@ def parse_spec(text):
         specs.append(spec)
     return specs
 
+
 def get_partitions(diskpath):
     """ Return array of partition names for given disk """
     dev = parted.getDevice(diskpath)
@@ -198,7 +203,7 @@ def get_partitions(diskpath):
 
 def main():
     """ Ansible module main method. """
-    module = AnsibleModule(
+    module = AnsibleModule(  # noqa: F405
         argument_spec=dict(
             disks=dict(required=True, type='str'),
             force=dict(required=False, default="no", type='bool'),
@@ -227,14 +232,14 @@ def main():
         except Exception, ex:
             err = "Error creating partitions on " + disk + ": " + str(ex)
             raise
-            #module.fail_json(msg=err)
+            # module.fail_json(msg=err)
         partitions += get_partitions(disk)
 
     module.exit_json(changed=(changed_count > 0), ansible_facts={"partition_pool": partitions})
 
+
 # ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-order, wrong-import-position
 # import module snippets
-from ansible.module_utils.basic import *
+from ansible.module_utils.basic import *  # noqa: E402,F403
 main()
-
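The options text above walks through the size= ratio arithmetic in prose; a self-contained re-implementation of just the parsing and the split (hypothetical helper, counting the "1 TiB" disk as 1000 GiB the way the examples do) lets the numbers be checked directly:

    def parse_spec(text):
        """Parse 'SIZE[:WEIGHT],...' into (size_in_MiB, weight) tuples."""
        units = {"m": 1, "g": 1 << 10, "t": 1 << 20, "p": 1 << 30}
        specs = []
        for token in text.split(","):
            if ":" not in token:
                token += ":1"  # weight defaults to 1
            sizespec, weight = token.split(":")
            specs.append((int(sizespec[:-1]) * units[sizespec[-1].lower()],
                          float(weight)))
        return specs

    # Example 2 from the documentation: 100 GiB and 10 GiB partitions at a 1:1
    # space ratio on a "1 TiB" (1000 GiB) disk -> 5x 100 GiB and 50x 10 GiB.
    specs = parse_spec("100g:1,10g:1")
    total_size = 1000 << 10  # disk size in MiB
    total_weight = sum(w for _, w in specs)
    for size, weight in specs:
        count = int(weight / total_weight * total_size / size)
        print("%dx %d MiB partitions" % (count, size))
        total_size -= count * size
        total_weight -= weight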

+ 0 - 24
roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py

@@ -5,29 +5,6 @@
 Custom filters for use in openshift-ansible
 """
 
-from ansible import errors
-from collections import Mapping
-from distutils.util import strtobool
-from distutils.version import LooseVersion
-from operator import itemgetter
-import OpenSSL.crypto
-import os
-import pdb
-import pkg_resources
-import re
-import json
-import yaml
-from ansible.parsing.yaml.dumper import AnsibleDumper
-from urlparse import urlparse
-
-try:
-    # ansible-2.2
-    # ansible.utils.unicode.to_unicode is deprecated in ansible-2.2,
-    # ansible.module_utils._text.to_text should be used instead.
-    from ansible.module_utils._text import to_text
-except ImportError:
-    # ansible-2.1
-    from ansible.utils.unicode import to_unicode as to_text
 
 # Disabling too-many-public-methods, since filter methods are necessarily
 # public
@@ -80,7 +57,6 @@ Example playbook usage:
 
         return json_result
 
-
     def filters(self):
         """ returns a mapping of filters to methods """
         return {

+ 2 - 1
roles/openshift_certificate_expiry/library/openshift_cert_expiry.py

@@ -628,10 +628,11 @@ an OpenShift Container Platform cluster
         changed=False
     )
 
+
 ######################################################################
 # It's just the way we do things in Ansible. So disable this warning
 #
 # pylint: disable=wrong-import-position,import-error
-from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import AnsibleModule  # noqa: E402
 if __name__ == '__main__':
     main()

+ 3 - 3
roles/openshift_cli/library/openshift_container_binary_sync.py

@@ -10,7 +10,7 @@ import shutil
 import os.path
 
 # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
-from ansible.module_utils.basic import *
+from ansible.module_utils.basic import *  # noqa: F403
 
 
 DOCUMENTATION = '''
@@ -40,7 +40,7 @@ class BinarySyncer(object):
         self.bin_dir = '/usr/local/bin'
         self.image = image
         self.tag = tag
-        self.temp_dir = None # TBD
+        self.temp_dir = None  # TBD
 
     def sync(self):
         container_name = "openshift-cli-%s" % random.randint(1, 100000)
@@ -110,7 +110,7 @@ class BinarySyncer(object):
 
 
 def main():
-    module = AnsibleModule(
+    module = AnsibleModule(  # noqa: F405
         argument_spec=dict(
             image=dict(required=True),
             tag=dict(required=True),

+ 133 - 105
roles/openshift_facts/library/openshift_facts.py

@@ -14,20 +14,35 @@ except ImportError:
     # python3
     import configparser as ConfigParser
 
+# pylint: disable=no-name-in-module, import-error, wrong-import-order
 import copy
+import errno
+import json
+import re
 import io
 import os
 import yaml
-from distutils.util import strtobool
-from distutils.version import LooseVersion
 import struct
 import socket
+from distutils.util import strtobool
+from distutils.version import LooseVersion
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import *  # noqa: F403
+from ansible.module_utils.facts import *  # noqa: F403
+from ansible.module_utils.urls import *  # noqa: F403
+from ansible.module_utils.six import iteritems, itervalues
+from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
+from ansible.module_utils._text import to_native
+
+HAVE_DBUS = False
 
-HAVE_DBUS=False
 try:
     from dbus import SystemBus, Interface
     from dbus.exceptions import DBusException
-    HAVE_DBUS=True
+    HAVE_DBUS = True
 except ImportError:
     pass
 
@@ -58,6 +73,7 @@ def migrate_docker_facts(facts):
     }
     if 'docker' not in facts:
         facts['docker'] = {}
+    # pylint: disable=consider-iterating-dictionary
     for role in params.keys():
         if role in facts:
             for param in params[role]:
@@ -76,6 +92,7 @@ def migrate_docker_facts(facts):
 
     return facts
 
+
 # TODO: We should add a generic migration function that takes source and destination
 # paths and does the right thing rather than one function for common, one for node, etc.
 def migrate_common_facts(facts):
@@ -86,6 +103,7 @@ def migrate_common_facts(facts):
     }
     if 'common' not in facts:
         facts['common'] = {}
+    # pylint: disable=consider-iterating-dictionary
     for role in params.keys():
         if role in facts:
             for param in params[role]:
@@ -93,6 +111,7 @@ def migrate_common_facts(facts):
                     facts['common'][param] = facts[role].pop(param)
     return facts
 
+
 def migrate_node_facts(facts):
     """ Migrate facts from various roles into node """
     params = {
@@ -100,6 +119,7 @@ def migrate_node_facts(facts):
     }
     if 'node' not in facts:
         facts['node'] = {}
+    # pylint: disable=consider-iterating-dictionary
     for role in params.keys():
         if role in facts:
             for param in params[role]:
@@ -125,7 +145,9 @@ def migrate_hosted_facts(facts):
             facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
     return facts
 
+
 def migrate_admission_plugin_facts(facts):
+    """ Apply migrations for admission plugin facts """
     if 'master' in facts:
         if 'kube_admission_plugin_config' in facts['master']:
             if 'admission_plugin_config' not in facts['master']:
@@ -139,6 +161,7 @@ def migrate_admission_plugin_facts(facts):
             facts['master'].pop('kube_admission_plugin_config', None)
     return facts
 
+
 def migrate_local_facts(facts):
     """ Apply migrations of local facts """
     migrated_facts = copy.deepcopy(facts)
@@ -149,6 +172,7 @@ def migrate_local_facts(facts):
     migrated_facts = migrate_admission_plugin_facts(migrated_facts)
     return migrated_facts
 
+
 def first_ip(network):
     """ Return the first IPv4 address in network
 
@@ -157,13 +181,14 @@ def first_ip(network):
         Returns:
             str: first IPv4 address
     """
-    atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
-    itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))
+    atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]  # noqa: E731
+    itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))  # noqa: E731
 
     (address, netmask) = network.split('/')
     netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
     return itoa((atoi(address) & netmask_i) + 1)
 
+
 def hostname_valid(hostname):
     """ Test if specified hostname should be considered valid
 
@@ -201,11 +226,8 @@ def choose_hostname(hostnames=None, fallback=''):
         return hostname
 
     ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
-    ips = [i for i in hostnames
-           if (i is not None and isinstance(i, basestring)
-               and re.match(ip_regex, i))]
-    hosts = [i for i in hostnames
-             if i is not None and i != '' and i not in ips]
+    ips = [i for i in hostnames if i is not None and isinstance(i, basestring) and re.match(ip_regex, i)]
+    hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]
 
     for host_list in (hosts, ips):
         for host in host_list:
@@ -225,11 +247,11 @@ def query_metadata(metadata_url, headers=None, expect_json=False):
         Returns:
             dict or list: metadata request result
     """
-    result, info = fetch_url(module, metadata_url, headers=headers)
+    result, info = fetch_url(module, metadata_url, headers=headers)  # noqa: F405
     if info['status'] != 200:
         raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
     if expect_json:
-        return module.from_json(to_native(result.read()))
+        return module.from_json(to_native(result.read()))  # noqa: F405
     else:
         return [to_native(line.strip()) for line in result.readlines()]
 
@@ -390,7 +412,7 @@ def normalize_openstack_facts(metadata, facts):
     facts['network']['ip'] = local_ipv4
     facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
 
-    for f_var, h_var, ip_var in [('hostname',        'hostname',        'local-ipv4'),
+    for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
                                  ('public_hostname', 'public-hostname', 'public-ipv4')]:
         try:
             if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
@@ -431,6 +453,7 @@ def normalize_provider_facts(provider, metadata):
         facts = normalize_openstack_facts(metadata, facts)
     return facts
 
+
 def set_flannel_facts_if_unset(facts):
     """ Set flannel facts if not already present in facts dict
             dict: the facts dict updated with the flannel facts if
@@ -448,6 +471,7 @@ def set_flannel_facts_if_unset(facts):
             facts['common']['use_flannel'] = use_flannel
     return facts
 
+
 def set_nuage_facts_if_unset(facts):
     """ Set nuage facts if not already present in facts dict
             dict: the facts dict updated with the nuage facts if
@@ -465,6 +489,7 @@ def set_nuage_facts_if_unset(facts):
             facts['common']['use_nuage'] = use_nuage
     return facts
 
+
 def set_node_schedulability(facts):
     """ Set schedulable facts if not already present in facts dict
         Args:
@@ -482,6 +507,7 @@ def set_node_schedulability(facts):
                 facts['node']['schedulable'] = True
     return facts
 
+
 def set_selectors(facts):
     """ Set selectors facts if not already present in facts dict
         Args:
@@ -518,6 +544,7 @@ def set_selectors(facts):
 
     return facts
 
+
 def set_dnsmasq_facts_if_unset(facts):
     """ Set dnsmasq facts if not already present in facts
     Args:
@@ -537,6 +564,7 @@ def set_dnsmasq_facts_if_unset(facts):
 
     return facts
 
+
 def set_project_cfg_facts_if_unset(facts):
     """ Set Project Configuration facts if not already present in facts dict
             dict:
@@ -564,6 +592,7 @@ def set_project_cfg_facts_if_unset(facts):
 
     return facts
 
+
 def set_identity_providers_if_unset(facts):
     """ Set identity_providers fact if not already present in facts dict
 
@@ -590,6 +619,7 @@ def set_identity_providers_if_unset(facts):
 
     return facts
 
+
 def set_url_facts_if_unset(facts):
     """ Set url facts if not already present in facts dict
 
@@ -649,7 +679,6 @@ def set_url_facts_if_unset(facts):
                                                                    host,
                                                                    ports[prefix]))
 
-
         r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
         r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
         facts['master'].setdefault('loopback_cluster_name', r_lhn)
@@ -665,6 +694,7 @@ def set_url_facts_if_unset(facts):
 
     return facts
 
+
 def set_aggregate_facts(facts):
     """ Set aggregate facts
 
@@ -760,6 +790,7 @@ def set_etcd_facts_if_unset(facts):
 
     return facts
 
+
 def set_deployment_facts_if_unset(facts):
     """ Set Facts that vary based on deployment_type. This currently
         includes common.service_type, common.config_base, master.registry_url,
@@ -840,6 +871,7 @@ def set_deployment_facts_if_unset(facts):
 
     return facts
 
+
 def set_version_facts_if_unset(facts):
     """ Set version facts. This currently includes common.version and
         common.version_gte_3_1_or_1_1.
@@ -851,21 +883,23 @@ def set_version_facts_if_unset(facts):
     """
     if 'common' in facts:
         deployment_type = facts['common']['deployment_type']
-        version = get_openshift_version(facts)
-        if version:
-            facts['common']['version'] = version
+        openshift_version = get_openshift_version(facts)
+        if openshift_version:
+            version = LooseVersion(openshift_version)
+            facts['common']['version'] = openshift_version
+            facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
             if deployment_type == 'origin':
-                version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0')
-                version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1')
-                version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0')
-                version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('1.3.0')
-                version_gte_3_4_or_1_4 = LooseVersion(version) >= LooseVersion('1.4.0')
+                version_gte_3_1_or_1_1 = version >= LooseVersion('1.1.0')
+                version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('1.1.1')
+                version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
+                version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
+                version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
             else:
-                version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905')
-                version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1')
-                version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.1.1.901')
-                version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('3.3.0')
-                version_gte_3_4_or_1_4 = LooseVersion(version) >= LooseVersion('3.4.0')
+                version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
+                version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
+                version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
+                version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
+                version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0')
         else:
             version_gte_3_1_or_1_1 = True
             version_gte_3_1_1_or_1_1_1 = True
@@ -878,7 +912,6 @@ def set_version_facts_if_unset(facts):
         facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
         facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
 
-
         if version_gte_3_4_or_1_4:
             examples_content_version = 'v1.4'
         elif version_gte_3_3_or_1_3:
@@ -894,6 +927,7 @@ def set_version_facts_if_unset(facts):
 
     return facts
 
+
 def set_manageiq_facts_if_unset(facts):
     """ Set manageiq facts. This currently includes common.use_manageiq.
 
@@ -914,6 +948,7 @@ def set_manageiq_facts_if_unset(facts):
 
     return facts
 
+
 def set_sdn_facts_if_unset(facts, system_facts):
     """ Set sdn facts if not already present in facts dict
 
@@ -924,6 +959,7 @@ def set_sdn_facts_if_unset(facts, system_facts):
             dict: the facts dict updated with the generated sdn facts if they
                   were not already present
     """
+    # pylint: disable=too-many-branches
     if 'common' in facts:
         use_sdn = facts['common']['use_openshift_sdn']
         if not (use_sdn == '' or isinstance(use_sdn, bool)):
@@ -973,7 +1009,9 @@ def set_sdn_facts_if_unset(facts, system_facts):
 
     return facts
 
+
 def set_nodename(facts):
+    """ set nodename """
     if 'node' in facts and 'common' in facts:
         if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
             facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
@@ -981,6 +1019,7 @@ def set_nodename(facts):
             facts['node']['nodename'] = facts['common']['hostname'].lower()
     return facts
 
+
 def migrate_oauth_template_facts(facts):
     """
     Migrate an old oauth template fact to a newer format if it's present.
@@ -1000,6 +1039,7 @@ def migrate_oauth_template_facts(facts):
             facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
     return facts
 
+
 def format_url(use_ssl, hostname, port, path=''):
     """ Format url based on ssl flag, hostname, port and path
 
@@ -1022,6 +1062,7 @@ def format_url(use_ssl, hostname, port, path=''):
         url = urlunparse((scheme, netloc, path, '', '', ''))
     return url
 
+
 def get_current_config(facts):
     """ Get current openshift config
 
@@ -1051,10 +1092,9 @@ def get_current_config(facts):
             )
 
         kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
-        if (os.path.isfile('/usr/bin/openshift')
-                and os.path.isfile(kubeconfig_path)):
+        if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
             try:
-                _, output, _ = module.run_command(
+                _, output, _ = module.run_command(  # noqa: F405
                     ["/usr/bin/openshift", "ex", "config", "view", "-o",
                      "json", "--kubeconfig=%s" % kubeconfig_path],
                     check_rc=False
@@ -1085,6 +1125,7 @@ def get_current_config(facts):
 
     return current_config
 
+
 def build_kubelet_args(facts):
     """Build node kubelet_args
 
@@ -1129,6 +1170,7 @@ values provided as a list. Hence the gratuitous use of ['foo'] below.
             #
             # map() seems to be returning an itertools.imap object
             # instead of a list. We cast it to a list ourselves.
+            # pylint: disable=unnecessary-lambda
             labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
             if labels_str != '':
                 kubelet_args['node-labels'] = labels_str
@@ -1139,6 +1181,7 @@ values provided as a list. Hence the gratuitous use of ['foo'] below.
             facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
     return facts
 
+
 def build_controller_args(facts):
     """ Build master controller_args """
     cloud_cfg_path = os.path.join(facts['common']['config_base'],
@@ -1160,6 +1203,7 @@ def build_controller_args(facts):
             facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
     return facts
 
+
 def build_api_server_args(facts):
     """ Build master api_server_args """
     cloud_cfg_path = os.path.join(facts['common']['config_base'],
@@ -1181,6 +1225,7 @@ def build_api_server_args(facts):
             facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
     return facts
 
+
 def is_service_running(service):
     """ Queries systemd through dbus to see if the service is running """
     service_running = False
@@ -1200,6 +1245,7 @@ def is_service_running(service):
 
     return service_running
 
+
 def get_version_output(binary, version_cmd):
     """ runs and returns the version output for a command """
     cmd = []
@@ -1210,9 +1256,10 @@ def get_version_output(binary, version_cmd):
             cmd.append(item)
 
     if os.path.isfile(cmd[0]):
-        _, output, _ = module.run_command(cmd)
+        _, output, _ = module.run_command(cmd)  # noqa: F405
     return output
 
+
 def get_docker_version_info():
     """ Parses and returns the docker version info """
     result = None
@@ -1225,6 +1272,7 @@ def get_docker_version_info():
             }
     return result
 
+
 def get_hosted_registry_insecure():
     """ Parses OPTIONS from /etc/sysconfig/docker to determine if the
         registry is currently insecure.
@@ -1239,10 +1287,12 @@ def get_hosted_registry_insecure():
             options = config.get('root', 'OPTIONS')
             if 'insecure-registry' in options:
                 hosted_registry_insecure = True
+        # pylint: disable=bare-except
         except:
             pass
     return hosted_registry_insecure
 
+
 def get_openshift_version(facts):
     """ Get current version of openshift on the host.
 
@@ -1264,7 +1314,7 @@ def get_openshift_version(facts):
             return chomp_commit_offset(facts['common']['version'])
 
     if os.path.isfile('/usr/bin/openshift'):
-        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
+        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])  # noqa: F405
         version = parse_openshift_version(output)
     elif 'common' in facts and 'is_containerized' in facts['common']:
         version = get_container_openshift_version(facts)
@@ -1273,7 +1323,7 @@ def get_openshift_version(facts):
     # This can be very slow and may get re-run multiple times, so we only use this
     # if other methods failed to find a version.
     if not version and os.path.isfile('/usr/local/bin/openshift'):
-        _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])
+        _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])  # noqa: F405
         version = parse_openshift_version(output)
 
     return chomp_commit_offset(version)
@@ -1363,9 +1413,10 @@ def apply_provider_facts(facts, provider_facts):
     facts['provider'] = provider_facts
     return facts
 
+
 # Disabling pylint too many branches. This function needs refactored
 # but is a very core part of openshift_facts.
-# pylint: disable=too-many-branches
+# pylint: disable=too-many-branches, too-many-nested-blocks
 def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
     """ Recursively merge facts dicts
 
@@ -1434,16 +1485,18 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
             elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
                 # The master count (int) can only increase unless it
                 # has been passed as a protected fact to overwrite.
-                if key == 'master_count':
+                if key == 'master_count' and new[key] is not None and new[key] is not '':
                     if int(value) <= int(new[key]):
                         facts[key] = copy.deepcopy(new[key])
                     else:
-                        module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count')
+                        # pylint: disable=line-too-long
+                        module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count')  # noqa: F405
                 # ha (bool) can not change unless it has been passed
                 # as a protected fact to overwrite.
                 if key == 'ha':
                     if safe_get_bool(value) != safe_get_bool(new[key]):
-                        module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')
+                        # pylint: disable=line-too-long
+                        module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')  # noqa: F405
                     else:
                         facts[key] = value
             # No other condition has been met. Overwrite the old fact
@@ -1463,6 +1516,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
             facts[key] = copy.deepcopy(new[key])
     return facts
 
+
 def save_local_facts(filename, facts):
     """ Save local facts
 
@@ -1478,7 +1532,7 @@ def save_local_facts(filename, facts):
             if exception.errno != errno.EEXIST:  # but it is okay if it is already there
                 raise  # pass any other exceptions up the chain
         with open(filename, 'w') as fact_file:
-            fact_file.write(module.jsonify(facts))
+            fact_file.write(module.jsonify(facts))  # noqa: F405
         os.chmod(filename, 0o600)
     except (IOError, OSError) as ex:
         raise OpenShiftFactsFileWriteError(
@@ -1514,6 +1568,7 @@ def get_local_facts_from_file(filename):
 
     return local_facts
 
+
 def sort_unique(alist):
     """ Sorts and de-dupes a list
 
@@ -1531,6 +1586,7 @@ def sort_unique(alist):
 
     return out
 
+
 def safe_get_bool(fact):
     """ Get a boolean fact safely.
 
@@ -1541,6 +1597,7 @@ def safe_get_bool(fact):
     """
     return bool(strtobool(str(fact)))
 
+
 def set_proxy_facts(facts):
     """ Set global proxy facts and promote defaults from http_proxy, https_proxy,
         no_proxy to the more specific builddefaults and builddefaults_git vars.
@@ -1556,13 +1613,11 @@ def set_proxy_facts(facts):
     if 'common' in facts:
         common = facts['common']
         if 'http_proxy' in common or 'https_proxy' in common:
-            if 'no_proxy' in common and \
-                isinstance(common['no_proxy'], basestring):
+            if 'no_proxy' in common and isinstance(common['no_proxy'], basestring):
                 common['no_proxy'] = common['no_proxy'].split(",")
             elif 'no_proxy' not in common:
                 common['no_proxy'] = []
-            if 'generate_no_proxy_hosts' in common and \
-                safe_get_bool(common['generate_no_proxy_hosts']):
+            if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
                 if 'no_proxy_internal_hostnames' in common:
                     common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
             # We always add local dns domain and ourselves no matter what
@@ -1593,13 +1648,14 @@ def set_proxy_facts(facts):
         # into admission_plugin_config
         if 'admission_plugin_config' not in facts['master']:
             facts['master']['admission_plugin_config'] = dict()
-        if 'config' in builddefaults and ('http_proxy' in builddefaults or \
-                'https_proxy' in builddefaults):
+        if 'config' in builddefaults and ('http_proxy' in builddefaults or
+                                          'https_proxy' in builddefaults):
             facts['master']['admission_plugin_config'].update(builddefaults['config'])
         facts['builddefaults'] = builddefaults
 
     return facts
 
+
 # pylint: disable=too-many-statements
 def set_container_facts_if_unset(facts):
     """ Set containerized facts.
@@ -1671,6 +1727,7 @@ def set_container_facts_if_unset(facts):
 
     return facts
 
+
 def set_installed_variant_rpm_facts(facts):
     """ Set RPM facts of installed variant
         Args:
@@ -1685,7 +1742,7 @@ def set_installed_variant_rpm_facts(facts):
                        ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
                        ['tuned-profiles-%s-node' % base_rpm]
         for rpm in variant_rpms:
-            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])
+            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])  # noqa: F405
             if exit_code == 0:
                 installed_rpms.append(rpm)
 
@@ -1693,7 +1750,6 @@ def set_installed_variant_rpm_facts(facts):
     return facts
 
 
-
 class OpenShiftFactsInternalError(Exception):
     """Origin Facts Error"""
     pass
@@ -1761,12 +1817,12 @@ class OpenShiftFacts(object):
         try:
             # ansible-2.1
             # pylint: disable=too-many-function-args,invalid-name
-            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])
+            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])  # noqa: F405
             for (k, v) in self.system_facts.items():
                 self.system_facts["ansible_%s" % k.replace('-', '_')] = v
         except UnboundLocalError:
             # ansible-2.2
-            self.system_facts = get_all_facts(module)['ansible_facts']
+            self.system_facts = get_all_facts(module)['ansible_facts']  # noqa: F405
 
         self.facts = self.generate_facts(local_facts,
                                          additive_facts_to_overwrite,
@@ -1799,7 +1855,6 @@ class OpenShiftFacts(object):
                                             protected_facts_to_overwrite)
         roles = local_facts.keys()
 
-
         if 'common' in local_facts and 'deployment_type' in local_facts['common']:
             deployment_type = local_facts['common']['deployment_type']
         else:
@@ -1854,7 +1909,7 @@ class OpenShiftFacts(object):
         """
         defaults = {}
         ip_addr = self.system_facts['ansible_default_ipv4']['address']
-        exit_code, output, _ = module.run_command(['hostname', '-f'])
+        exit_code, output, _ = module.run_command(['hostname', '-f'])  # noqa: F405
         hostname_f = output.strip() if exit_code == 0 else ''
         hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                            self.system_facts['ansible_fqdn']]
@@ -1873,22 +1928,6 @@ class OpenShiftFacts(object):
                                   debug_level=2)
 
         if 'master' in roles:
-            scheduler_predicates = [
-                {"name": "MatchNodeSelector"},
-                {"name": "PodFitsResources"},
-                {"name": "PodFitsPorts"},
-                {"name": "NoDiskConflict"},
-                {"name": "NoVolumeZoneConflict"},
-                {"name": "MaxEBSVolumeCount"},
-                {"name": "MaxGCEPDVolumeCount"},
-                {"name": "Region", "argument": {"serviceAffinity" : {"labels" : ["region"]}}}
-            ]
-            scheduler_priorities = [
-                {"name": "LeastRequestedPriority", "weight": 1},
-                {"name": "SelectorSpreadPriority", "weight": 1},
-                {"name": "Zone", "weight" : 2, "argument": {"serviceAntiAffinity" : {"label": "zone"}}}
-            ]
-
             defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                       controllers_port='8444',
                                       console_use_ssl=True,
@@ -1905,8 +1944,6 @@ class OpenShiftFacts(object):
                                       access_token_max_seconds=86400,
                                       auth_token_max_seconds=500,
                                       oauth_grant_method='auto',
-                                      scheduler_predicates=scheduler_predicates,
-                                      scheduler_priorities=scheduler_priorities,
                                       dynamic_provisioning_enabled=True,
                                       max_requests_inflight=500)
 
@@ -1930,7 +1967,7 @@ class OpenShiftFacts(object):
             defaults['docker'] = docker
 
         if 'clock' in roles:
-            exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])
+            exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])  # noqa: F405
             chrony_installed = bool(exit_code == 0)
             defaults['clock'] = dict(
                 enabled=True,
@@ -2015,7 +2052,7 @@ class OpenShiftFacts(object):
 
         # TODO: this is not exposed through module_utils/facts.py in ansible,
         # need to create PR for ansible to expose it
-        bios_vendor = get_file_content(
+        bios_vendor = get_file_content(  # noqa: F405
             '/sys/devices/virtual/dmi/id/bios_vendor'
         )
         if bios_vendor == 'Google':
@@ -2030,8 +2067,7 @@ class OpenShiftFacts(object):
             if metadata:
                 metadata['project']['attributes'].pop('sshKeys', None)
                 metadata['instance'].pop('serviceAccounts', None)
-        elif (virt_type == 'xen' and virt_role == 'guest'
-              and re.match(r'.*\.amazon$', product_version)):
+        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
             provider = 'aws'
             metadata_url = 'http://169.254.169.254/latest/meta-data/'
             metadata = get_provider_metadata(metadata_url)
@@ -2092,7 +2128,7 @@ class OpenShiftFacts(object):
 
         # Determine if any of the provided variable structures match the fact.
         matching_structure = None
-        if openshift_env_structures != None:
+        if openshift_env_structures is not None:
             for structure in openshift_env_structures:
                 if re.match(structure, openshift_env_fact):
                     matching_structure = structure
@@ -2116,7 +2152,7 @@ class OpenShiftFacts(object):
 
     # Disabling too-many-branches and too-many-locals.
     # This should be cleaned up as a TODO item.
-    #pylint: disable=too-many-branches, too-many-locals
+    # pylint: disable=too-many-branches, too-many-locals
     def init_local_facts(self, facts=None,
                          additive_facts_to_overwrite=None,
                          openshift_env=None,
@@ -2144,7 +2180,7 @@ class OpenShiftFacts(object):
         if facts is not None:
             facts_to_set[self.role] = facts
 
-        if openshift_env != {} and openshift_env != None:
+        if openshift_env != {} and openshift_env is not None:
             for fact, value in iteritems(openshift_env):
                 oo_env_facts = dict()
                 current_level = oo_env_facts
@@ -2173,7 +2209,7 @@ class OpenShiftFacts(object):
 
         if 'docker' in new_local_facts:
             # remove duplicate and empty strings from registry lists
-            for cat in  ['additional', 'blocked', 'insecure']:
+            for cat in ['additional', 'blocked', 'insecure']:
                 key = '{0}_registries'.format(cat)
                 if key in new_local_facts['docker']:
                     val = new_local_facts['docker'][key]
@@ -2190,7 +2226,7 @@ class OpenShiftFacts(object):
         if new_local_facts != local_facts:
             self.validate_local_facts(new_local_facts)
             changed = True
-            if not module.check_mode:
+            if not module.check_mode:  # noqa: F405
                 save_local_facts(self.filename, new_local_facts)
 
         self.changed = changed
@@ -2223,10 +2259,10 @@ class OpenShiftFacts(object):
         invalid_facts = self.validate_master_facts(facts, invalid_facts)
         if invalid_facts:
             msg = 'Invalid facts detected:\n'
+            # pylint: disable=consider-iterating-dictionary
             for key in invalid_facts.keys():
                 msg += '{0}: {1}\n'.format(key, invalid_facts[key])
-            module.fail_json(msg=msg,
-                             changed=self.changed)
+            module.fail_json(msg=msg, changed=self.changed)  # noqa: F405
 
     # disabling pylint errors for line-too-long since we're dealing
     # with best effort reduction of error messages here.
@@ -2278,13 +2314,14 @@ class OpenShiftFacts(object):
                                                                            'Secrets must be 16, 24, or 32 characters in length.')
         return invalid_facts
 
+
 def main():
     """ main """
     # disabling pylint errors for global-variable-undefined and invalid-name
     # for 'global module' usage, since it is required to use ansible_facts
     # pylint: disable=global-variable-undefined, invalid-name
     global module
-    module = AnsibleModule(
+    module = AnsibleModule(  # noqa: F405
         argument_spec=dict(
             role=dict(default='common', required=False,
                       choices=OpenShiftFacts.known_roles),
@@ -2299,18 +2336,18 @@ def main():
     )
 
     if not HAVE_DBUS:
-        module.fail_json(msg="This module requires dbus python bindings")
+        module.fail_json(msg="This module requires dbus python bindings")  # noqa: F405
 
-    module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']
-    module.params['gather_timeout'] = 10
-    module.params['filter'] = '*'
+    module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']  # noqa: F405
+    module.params['gather_timeout'] = 10  # noqa: F405
+    module.params['filter'] = '*'  # noqa: F405
 
-    role = module.params['role']
-    local_facts = module.params['local_facts']
-    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']
-    openshift_env = module.params['openshift_env']
-    openshift_env_structures = module.params['openshift_env_structures']
-    protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']
+    role = module.params['role']  # noqa: F405
+    local_facts = module.params['local_facts']  # noqa: F405
+    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']  # noqa: F405
+    openshift_env = module.params['openshift_env']  # noqa: F405
+    openshift_env_structures = module.params['openshift_env_structures']  # noqa: F405
+    protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']  # noqa: F405
 
     fact_file = '/etc/ansible/facts.d/openshift.fact'
 
@@ -2322,24 +2359,15 @@ def main():
                                      openshift_env_structures,
                                      protected_facts_to_overwrite)
 
-    file_params = module.params.copy()
+    file_params = module.params.copy()  # noqa: F405
     file_params['path'] = fact_file
-    file_args = module.load_file_common_arguments(file_params)
-    changed = module.set_fs_attributes_if_different(file_args,
+    file_args = module.load_file_common_arguments(file_params)  # noqa: F405
+    changed = module.set_fs_attributes_if_different(file_args,  # noqa: F405
                                                     openshift_facts.changed)
 
-    return module.exit_json(changed=changed,
+    return module.exit_json(changed=changed,  # noqa: F405
                             ansible_facts=openshift_facts.facts)
 
-# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.facts import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.six import iteritems, itervalues
-from ansible.module_utils._text import to_native
-from ansible.module_utils.six import b
 
 if __name__ == '__main__':
     main()
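
A hedged sketch of the protected-fact rule the hunks above enforce: openshift.master.master_count may only grow, and openshift.master.ha may not flip, unless the caller explicitly lists the fact in protected_facts_to_overwrite. The helper below is illustrative only; the real checks live inline in merge_facts.

    from distutils.util import strtobool

    def check_protected(key, old_value, new_value, protected_overwrites):
        # Illustrative names; not the module's real entry points.
        if key in protected_overwrites:
            return  # caller explicitly asked to overwrite this fact
        if key == 'master_count' and int(new_value) < int(old_value):
            raise ValueError('openshift_facts received a lower value for '
                             'openshift.master.master_count')
        if key == 'ha':
            # mirrors safe_get_bool: normalize strings like 'True'/'false'
            if bool(strtobool(str(old_value))) != bool(strtobool(str(new_value))):
                raise ValueError('openshift_facts received a different value '
                                 'for openshift.master.ha')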

+ 2 - 5
roles/openshift_master/vars/main.yml

@@ -1,17 +1,14 @@
 ---
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
 openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
 loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
-openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
 openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
 openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
 
 scheduler_config:
   kind: Policy
   apiVersion: v1
-  predicates: "{{ openshift.master.scheduler_predicates }}"
-  priorities: "{{ openshift.master.scheduler_priorities }}"
+  predicates: "{{ openshift_master_scheduler_predicates }}"
+  priorities: "{{ openshift_master_scheduler_priorities }}"
 
 openshift_master_valid_grant_methods:
 - auto

+ 96 - 0
roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py

@@ -0,0 +1,96 @@
+# pylint: disable=missing-docstring
+
+import re
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+    # pylint: disable=too-many-branches,too-many-statements
+
+    def run(self, terms, variables=None, regions_enabled=True, **kwargs):
+        if 'openshift' not in variables:
+            raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
+        if 'master' not in variables['openshift']:
+            raise AnsibleError("This lookup module is meant to be run against an OpenShift master host only")
+
+        if 'openshift_master_scheduler_predicates' in variables:
+            return variables['openshift_master_scheduler_predicates']
+        elif 'scheduler_predicates' in variables['openshift']['master']:
+            return variables['openshift']['master']['scheduler_predicates']
+        else:
+            predicates = []
+
+            if 'deployment_type' not in variables['openshift']['common']:
+                raise AnsibleError("This lookup module requires that the deployment_type be set")
+
+            deployment_type = variables['openshift']['common']['deployment_type']
+
+            if 'short_version' in variables['openshift']['common']:
+                short_version = variables['openshift']['common']['short_version']
+            elif 'openshift_release' in variables:
+                release = variables['openshift_release']
+                if release.startswith('v'):
+                    short_version = release[1:]
+                else:
+                    short_version = release
+            elif 'openshift_version' in variables:
+                version = variables['openshift_version']
+                short_version = '.'.join(version.split('.')[0:2])
+            else:
+                # pylint: disable=line-too-long
+                raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
+            if deployment_type not in ['origin', 'openshift-enterprise']:
+                raise AnsibleError("Unknown deployment_type %s" % deployment_type)
+
+            if deployment_type == 'origin':
+                if short_version not in ['1.1', '1.2', '1.3', '1.4']:
+                    raise AnsibleError("Unknown short_version %s" % short_version)
+            elif deployment_type == 'openshift-enterprise':
+                if short_version not in ['3.1', '3.2', '3.3', '3.4']:
+                    raise AnsibleError("Unknown short_version %s" % short_version)
+
+            if deployment_type == 'openshift-enterprise':
+                # convert short_version to origin short_version
+                short_version = re.sub(r'^3\.', '1.', short_version)
+
+            if short_version in ['1.1', '1.2']:
+                predicates.append({'name': 'PodFitsHostPorts'})
+                predicates.append({'name': 'PodFitsResources'})
+
+            # applies to all known versions
+            predicates.append({'name': 'NoDiskConflict'})
+
+            # only 1.1 didn't include NoVolumeZoneConflict
+            if short_version != '1.1':
+                predicates.append({'name': 'NoVolumeZoneConflict'})
+
+            if short_version in ['1.1', '1.2']:
+                predicates.append({'name': 'MatchNodeSelector'})
+                predicates.append({'name': 'Hostname'})
+
+            if short_version != '1.1':
+                predicates.append({'name': 'MaxEBSVolumeCount'})
+                predicates.append({'name': 'MaxGCEPDVolumeCount'})
+
+            if short_version not in ['1.1', '1.2']:
+                predicates.append({'name': 'GeneralPredicates'})
+                predicates.append({'name': 'PodToleratesNodeTaints'})
+                predicates.append({'name': 'CheckNodeMemoryPressure'})
+
+            if short_version not in ['1.1', '1.2', '1.3']:
+                predicates.append({'name': 'CheckNodeDiskPressure'})
+                predicates.append({'name': 'MatchInterPodAffinity'})
+
+            if regions_enabled:
+                region_predicate = {
+                    'name': 'Region',
+                    'argument': {
+                        'serviceAffinity': {
+                            'labels': ['region']
+                        }
+                    }
+                }
+                predicates.append(region_predicate)
+
+            return predicates
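
A quick way to sanity-check the new predicates lookup outside of Ansible is to import it directly and feed it a minimal facts dict, much as the tests added below do. This is a hedged sketch: the path and the facts dict are illustrative stand-ins for what openshift_facts would normally provide.

    import sys

    sys.path.insert(0, 'roles/openshift_master_facts/lookup_plugins')
    from openshift_master_facts_default_predicates import LookupModule

    facts = {
        'openshift': {
            'master': {},
            'common': {'deployment_type': 'origin', 'short_version': '1.3'},
        }
    }
    # regions_enabled defaults to True, so the Region predicate is appended
    # after the 1.3 defaults (NoDiskConflict through CheckNodeMemoryPressure).
    print(LookupModule().run(None, variables=facts))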

+ 87 - 0
roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py

@@ -0,0 +1,87 @@
+# pylint: disable=missing-docstring
+
+import re
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+    # pylint: disable=too-many-branches
+
+    def run(self, terms, variables=None, zones_enabled=True, **kwargs):
+        if 'openshift' not in variables:
+            raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
+        if 'master' not in variables['openshift']:
+            raise AnsibleError("This lookup module is meant to be run against an OpenShift master host only")
+
+        if 'openshift_master_scheduler_priorities' in variables:
+            return variables['openshift_master_scheduler_priorities']
+        elif 'scheduler_priorities' in variables['openshift']['master']:
+            return variables['openshift']['master']['scheduler_priorities']
+        else:
+            priorities = [
+                {'name': 'LeastRequestedPriority', 'weight': 1},
+                {'name': 'BalancedResourceAllocation', 'weight': 1},
+                {'name': 'SelectorSpreadPriority', 'weight': 1}
+            ]
+
+            if 'deployment_type' not in variables['openshift']['common']:
+                raise AnsibleError("This lookup module requires that the deployment_type be set")
+
+            deployment_type = variables['openshift']['common']['deployment_type']
+
+            if 'short_version' in variables['openshift']['common']:
+                short_version = variables['openshift']['common']['short_version']
+            elif 'openshift_release' in variables:
+                release = variables['openshift_release']
+                if release.startswith('v'):
+                    short_version = release[1:]
+                else:
+                    short_version = release
+            elif 'openshift_version' in variables:
+                version = variables['openshift_version']
+                short_version = '.'.join(version.split('.')[0:2])
+            else:
+                # pylint: disable=line-too-long
+                raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
+
+            if deployment_type not in ['origin', 'openshift-enterprise']:
+                raise AnsibleError("Unknown deployment_type %s" % deployment_type)
+
+            if deployment_type == 'origin':
+                if short_version not in ['1.1', '1.2', '1.3', '1.4']:
+                    raise AnsibleError("Unknown short_version %s" % short_version)
+            elif deployment_type == 'openshift-enterprise':
+                if short_version not in ['3.1', '3.2', '3.3', '3.4']:
+                    raise AnsibleError("Unknown short_version %s" % short_version)
+
+            if deployment_type == 'openshift-enterprise':
+                # convert short_version to origin short_version
+                short_version = re.sub(r'^3\.', '1.', short_version)
+
+            if short_version == '1.4':
+                priorities.append({'name': 'NodePreferAvoidPodsPriority', 'weight': 10000})
+
+            # only 1.1 didn't include NodeAffinityPriority
+            if short_version != '1.1':
+                priorities.append({'name': 'NodeAffinityPriority', 'weight': 1})
+
+            if short_version not in ['1.1', '1.2']:
+                priorities.append({'name': 'TaintTolerationPriority', 'weight': 1})
+
+            if short_version not in ['1.1', '1.2', '1.3']:
+                priorities.append({'name': 'InterPodAffinityPriority', 'weight': 1})
+
+            if zones_enabled:
+                zone_priority = {
+                    'name': 'Zone',
+                    'argument': {
+                        'serviceAntiAffinity': {
+                            'label': 'zone'
+                        }
+                    },
+                    'weight': 2
+                }
+                priorities.append(zone_priority)
+
+            return priorities
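
The priorities plugin can be exercised the same way, here with zones disabled so only the version-gated defaults come back. Again a sketch with illustrative facts, not a supported entry point.

    import sys

    sys.path.insert(0, 'roles/openshift_master_facts/lookup_plugins')
    from openshift_master_facts_default_priorities import LookupModule

    facts = {
        'openshift': {
            'master': {},
            'common': {'deployment_type': 'origin', 'short_version': '1.4'},
        }
    }
    # expected: the three base priorities plus NodePreferAvoidPodsPriority
    # (weight 10000), NodeAffinityPriority, TaintTolerationPriority and
    # InterPodAffinityPriority, with no trailing Zone priority.
    print(LookupModule().run(None, variables=facts, zones_enabled=False))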

+ 35 - 2
roles/openshift_master_facts/tasks/main.yml

@@ -64,8 +64,6 @@
       master_count: "{{ openshift_master_count | default(None) }}"
       controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
       master_image: "{{ osm_image | default(None) }}"
-      scheduler_predicates: "{{ openshift_master_scheduler_predicates | default(None) }}"
-      scheduler_priorities: "{{ openshift_master_scheduler_priorities | default(None) }}"
       admission_plugin_config: "{{openshift_master_admission_plugin_config | default(None) }}"
       kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
       oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
@@ -79,3 +77,38 @@
       audit_config: "{{ openshift_master_audit_config | default(None) }}"
       metrics_public_url: "{% if openshift_hosted_metrics_deploy | default(false) %}https://{{ metrics_hostname }}/hawkular/metrics{% endif %}"
       scheduler_args: "{{ openshift_master_scheduler_args | default(None) }}"
+
+- name: Determine if scheduler config present
+  stat:
+    path: "{{ openshift_master_scheduler_conf }}"
+  register: scheduler_config_stat
+
+- block:
+  - set_fact:
+      openshift_master_scheduler_predicates: "{{ lookup('openshift_master_facts_default_predicates') }}"
+    when: "{{ openshift_master_scheduler_predicates is not defined }}"
+
+  - set_fact:
+      openshift_master_scheduler_priorities: "{{ lookup('openshift_master_facts_default_priorities') }}"
+    when: "{{ openshift_master_scheduler_priorities is not defined }}"
+  when: "{{ not scheduler_config_stat.stat.exists }}"
+
+- block:
+  - name: Retrieve current scheduler config
+    slurp:
+      src: "{{ openshift_master_scheduler_conf }}"
+    register: current_scheduler_config
+
+  - fail:
+      msg: "Could not decode scheduler config"
+    when: "{{ (current_scheduler_config.content | b64decode | from_json).apiVersion | default(none) != 'v1' }}"
+
+  - set_fact:
+      openshift_master_scheduler_predicates: "{{ (current_scheduler_config.content | b64decode | from_json).predicates }}"
+    when: "{{ openshift_master_scheduler_predicates is not defined }}"
+
+  - set_fact:
+      openshift_master_scheduler_priorities: "{{ (current_scheduler_config.content | b64decode | from_json).priorities }}"
+    when: "{{ openshift_master_scheduler_priorities is not defined }}"
+
+  when: "{{ scheduler_config_stat.stat.exists }}"
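
The second block above round-trips the existing scheduler.json through slurp, b64decode and from_json before trusting it. A rough standalone Python equivalent of that pipeline (the config content below is made up for illustration):

    import base64
    import json

    # slurp returns file content base64-encoded, so fake that here
    slurped = base64.b64encode(json.dumps({
        'kind': 'Policy',
        'apiVersion': 'v1',
        'predicates': [{'name': 'NoDiskConflict'}],
        'priorities': [{'name': 'LeastRequestedPriority', 'weight': 1}],
    }).encode('utf-8'))

    config = json.loads(base64.b64decode(slurped).decode('utf-8'))
    if config.get('apiVersion') != 'v1':
        raise ValueError('Could not decode scheduler config')
    predicates = config['predicates']
    priorities = config['priorities']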

+ 174 - 0
roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py

@@ -0,0 +1,174 @@
+import copy
+import os
+import sys
+
+from ansible.errors import AnsibleError
+from nose.tools import raises, assert_equal
+
+sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../lookup_plugins/")] + sys.path
+
+from openshift_master_facts_default_predicates import LookupModule  # noqa: E402
+
+DEFAULT_PREDICATES_1_1 = [
+    {'name': 'PodFitsHostPorts'},
+    {'name': 'PodFitsResources'},
+    {'name': 'NoDiskConflict'},
+    {'name': 'MatchNodeSelector'},
+    {'name': 'Hostname'}
+]
+
+DEFAULT_PREDICATES_1_2 = [
+    {'name': 'PodFitsHostPorts'},
+    {'name': 'PodFitsResources'},
+    {'name': 'NoDiskConflict'},
+    {'name': 'NoVolumeZoneConflict'},
+    {'name': 'MatchNodeSelector'},
+    {'name': 'Hostname'},
+    {'name': 'MaxEBSVolumeCount'},
+    {'name': 'MaxGCEPDVolumeCount'}
+]
+
+DEFAULT_PREDICATES_1_3 = [
+    {'name': 'NoDiskConflict'},
+    {'name': 'NoVolumeZoneConflict'},
+    {'name': 'MaxEBSVolumeCount'},
+    {'name': 'MaxGCEPDVolumeCount'},
+    {'name': 'GeneralPredicates'},
+    {'name': 'PodToleratesNodeTaints'},
+    {'name': 'CheckNodeMemoryPressure'}
+]
+
+DEFAULT_PREDICATES_1_4 = [
+    {'name': 'NoDiskConflict'},
+    {'name': 'NoVolumeZoneConflict'},
+    {'name': 'MaxEBSVolumeCount'},
+    {'name': 'MaxGCEPDVolumeCount'},
+    {'name': 'GeneralPredicates'},
+    {'name': 'PodToleratesNodeTaints'},
+    {'name': 'CheckNodeMemoryPressure'},
+    {'name': 'CheckNodeDiskPressure'},
+    {'name': 'MatchInterPodAffinity'}
+]
+
+REGION_PREDICATE = {
+    'name': 'Region',
+    'argument': {
+        'serviceAffinity': {
+            'labels': ['region']
+        }
+    }
+}
+
+
+class TestOpenShiftMasterFactsDefaultPredicates(object):
+    def setUp(self):
+        self.lookup = LookupModule()
+        self.default_facts = {
+            'openshift': {
+                'master': {},
+                'common': {}
+            }
+        }
+
+    @raises(AnsibleError)
+    def test_missing_short_version_and_missing_openshift_release(self):
+        facts = copy.deepcopy(self.default_facts)
+        facts['openshift']['common']['deployment_type'] = 'origin'
+        self.lookup.run(None, variables=facts)
+
+    def check_defaults(self, release, deployment_type, default_predicates,
+                       regions_enabled, short_version):
+        facts = copy.deepcopy(self.default_facts)
+        if short_version:
+            facts['openshift']['common']['short_version'] = release
+        else:
+            facts['openshift_release'] = release
+        facts['openshift']['common']['deployment_type'] = deployment_type
+        results = self.lookup.run(None, variables=facts,
+                                  regions_enabled=regions_enabled)
+        if regions_enabled:
+            assert_equal(results, default_predicates + [REGION_PREDICATE])
+        else:
+            assert_equal(results, default_predicates)
+
+    def test_openshift_release_defaults(self):
+        test_vars = [
+            ('1.1', 'origin', DEFAULT_PREDICATES_1_1),
+            ('3.1', 'openshift-enterprise', DEFAULT_PREDICATES_1_1),
+            ('1.2', 'origin', DEFAULT_PREDICATES_1_2),
+            ('3.2', 'openshift-enterprise', DEFAULT_PREDICATES_1_2),
+            ('1.3', 'origin', DEFAULT_PREDICATES_1_3),
+            ('3.3', 'openshift-enterprise', DEFAULT_PREDICATES_1_3),
+            ('1.4', 'origin', DEFAULT_PREDICATES_1_4),
+            ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4)
+        ]
+
+        for regions_enabled in (True, False):
+            for release, deployment_type, default_predicates in test_vars:
+                for prepend_v in (True, False):
+                    release_str = 'v' + release if prepend_v else release
+                    yield (self.check_defaults, release_str, deployment_type,
+                           default_predicates, regions_enabled, False)
+
+    def test_short_version_defaults(self):
+        test_vars = [
+            ('1.1', 'origin', DEFAULT_PREDICATES_1_1),
+            ('3.1', 'openshift-enterprise', DEFAULT_PREDICATES_1_1),
+            ('1.2', 'origin', DEFAULT_PREDICATES_1_2),
+            ('3.2', 'openshift-enterprise', DEFAULT_PREDICATES_1_2),
+            ('1.3', 'origin', DEFAULT_PREDICATES_1_3),
+            ('3.3', 'openshift-enterprise', DEFAULT_PREDICATES_1_3),
+            ('1.4', 'origin', DEFAULT_PREDICATES_1_4),
+            ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4)
+        ]
+        for regions_enabled in (True, False):
+            for short_version, deployment_type, default_predicates in test_vars:
+                yield self.check_defaults, short_version, deployment_type, default_predicates, regions_enabled, True
+
+    @raises(AnsibleError)
+    def test_unknown_deployment_types(self):
+        facts = copy.deepcopy(self.default_facts)
+        facts['openshift']['common']['short_version'] = '1.1'
+        facts['openshift']['common']['deployment_type'] = 'bogus'
+        self.lookup.run(None, variables=facts)
+
+    @raises(AnsibleError)
+    def test_missing_deployment_type(self):
+        facts = copy.deepcopy(self.default_facts)
+        facts['openshift']['common']['short_version'] = '10.10'
+        self.lookup.run(None, variables=facts)
+
+    @raises(AnsibleError)
+    def test_missing_openshift_facts(self):
+        facts = {}
+        self.lookup.run(None, variables=facts)
+
+    @raises(AnsibleError)
+    def test_missing_master_role(self):
+        facts = {'openshift': {}}
+        self.lookup.run(None, variables=facts)
+
+    def test_pre_existing_predicates(self):
+        facts = {
+            'openshift': {
+                'master': {
+                    'scheduler_predicates': [
+                        {'name': 'pred_a'},
+                        {'name': 'pred_b'}
+                    ]
+                }
+            }
+        }
+        result = self.lookup.run(None, variables=facts)
+        assert_equal(result, facts['openshift']['master']['scheduler_predicates'])
+
+    def test_defined_predicates(self):
+        facts = {
+            'openshift': {'master': {}},
+            'openshift_master_scheduler_predicates': [
+                {'name': 'pred_a'},
+                {'name': 'pred_b'}
+            ]
+        }
+        result = self.lookup.run(None, variables=facts)
+        assert_equal(result, facts['openshift_master_scheduler_predicates'])

+ 164 - 0
roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py

@@ -0,0 +1,164 @@
+import copy
+import os
+import sys
+
+from ansible.errors import AnsibleError
+from nose.tools import raises, assert_equal
+
+sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../lookup_plugins/")] + sys.path
+
+from openshift_master_facts_default_priorities import LookupModule  # noqa: E402
+
+DEFAULT_PRIORITIES_1_1 = [
+    {'name': 'LeastRequestedPriority', 'weight': 1},
+    {'name': 'BalancedResourceAllocation', 'weight': 1},
+    {'name': 'SelectorSpreadPriority', 'weight': 1}
+]
+
+DEFAULT_PRIORITIES_1_2 = [
+    {'name': 'LeastRequestedPriority', 'weight': 1},
+    {'name': 'BalancedResourceAllocation', 'weight': 1},
+    {'name': 'SelectorSpreadPriority', 'weight': 1},
+    {'name': 'NodeAffinityPriority', 'weight': 1}
+]
+
+DEFAULT_PRIORITIES_1_3 = [
+    {'name': 'LeastRequestedPriority', 'weight': 1},
+    {'name': 'BalancedResourceAllocation', 'weight': 1},
+    {'name': 'SelectorSpreadPriority', 'weight': 1},
+    {'name': 'NodeAffinityPriority', 'weight': 1},
+    {'name': 'TaintTolerationPriority', 'weight': 1}
+]
+
+DEFAULT_PRIORITIES_1_4 = [
+    {'name': 'LeastRequestedPriority', 'weight': 1},
+    {'name': 'BalancedResourceAllocation', 'weight': 1},
+    {'name': 'SelectorSpreadPriority', 'weight': 1},
+    {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+    {'name': 'NodeAffinityPriority', 'weight': 1},
+    {'name': 'TaintTolerationPriority', 'weight': 1},
+    {'name': 'InterPodAffinityPriority', 'weight': 1}
+]
+
+ZONE_PRIORITY = {
+    'name': 'Zone',
+    'argument': {
+        'serviceAntiAffinity': {
+            'label': 'zone'
+        }
+    },
+    'weight': 2
+}
+
+
+class TestOpenShiftMasterFactsDefaultPriorities(object):
+    def setUp(self):
+        self.lookup = LookupModule()
+        self.default_facts = {
+            'openshift': {
+                'master': {},
+                'common': {}
+            }
+        }
+
+    @raises(AnsibleError)
+    def test_missing_short_version_and_missing_openshift_release(self):
+        facts = copy.deepcopy(self.default_facts)
+        facts['openshift']['common']['deployment_type'] = 'origin'
+        self.lookup.run(None, variables=facts)
+
+    def check_defaults(self, release, deployment_type, default_priorities,
+                       zones_enabled, short_version):
+        facts = copy.deepcopy(self.default_facts)
+        if short_version:
+            facts['openshift']['common']['short_version'] = release
+        else:
+            facts['openshift_release'] = release
+        facts['openshift']['common']['deployment_type'] = deployment_type
+        results = self.lookup.run(None, variables=facts, zones_enabled=zones_enabled)
+        if zones_enabled:
+            assert_equal(results, default_priorities + [ZONE_PRIORITY])
+        else:
+            assert_equal(results, default_priorities)
+
+    def test_openshift_release_defaults(self):
+        test_vars = [
+            ('1.1', 'origin', DEFAULT_PRIORITIES_1_1),
+            ('3.1', 'openshift-enterprise', DEFAULT_PRIORITIES_1_1),
+            ('1.2', 'origin', DEFAULT_PRIORITIES_1_2),
+            ('3.2', 'openshift-enterprise', DEFAULT_PRIORITIES_1_2),
+            ('1.3', 'origin', DEFAULT_PRIORITIES_1_3),
+            ('3.3', 'openshift-enterprise', DEFAULT_PRIORITIES_1_3),
+            ('1.4', 'origin', DEFAULT_PRIORITIES_1_4),
+            ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4)
+        ]
+
+        for zones_enabled in (True, False):
+            for release, deployment_type, default_priorities in test_vars:
+                for prepend_v in (True, False):
+                    release_str = 'v' + release if prepend_v else release
+                    yield (self.check_defaults, release_str, deployment_type,
+                           default_priorities, zones_enabled, False)
+
+    def test_short_version_defaults(self):
+        test_vars = [
+            ('1.1', 'origin', DEFAULT_PRIORITIES_1_1),
+            ('3.1', 'openshift-enterprise', DEFAULT_PRIORITIES_1_1),
+            ('1.2', 'origin', DEFAULT_PRIORITIES_1_2),
+            ('3.2', 'openshift-enterprise', DEFAULT_PRIORITIES_1_2),
+            ('1.3', 'origin', DEFAULT_PRIORITIES_1_3),
+            ('3.3', 'openshift-enterprise', DEFAULT_PRIORITIES_1_3),
+            ('1.4', 'origin', DEFAULT_PRIORITIES_1_4),
+            ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4)
+        ]
+        for zones_enabled in (True, False):
+            for short_version, deployment_type, default_priorities in test_vars:
+                yield self.check_defaults, short_version, deployment_type, default_priorities, zones_enabled, True
+
+    @raises(AnsibleError)
+    def test_unknown_deployment_types(self):
+        facts = copy.deepcopy(self.default_facts)
+        facts['openshift']['common']['short_version'] = '1.1'
+        facts['openshift']['common']['deployment_type'] = 'bogus'
+        self.lookup.run(None, variables=facts)
+
+    @raises(AnsibleError)
+    def test_missing_deployment_type(self):
+        facts = copy.deepcopy(self.default_facts)
+        facts['openshift']['common']['short_version'] = '10.10'
+        self.lookup.run(None, variables=facts)
+
+    @raises(AnsibleError)
+    def test_missing_openshift_facts(self):
+        facts = {}
+        self.lookup.run(None, variables=facts)
+
+    @raises(AnsibleError)
+    def test_missing_master_role(self):
+        facts = {'openshift': {}}
+        self.lookup.run(None, variables=facts)
+
+    def test_pre_existing_priorities(self):
+        facts = {
+            'openshift': {
+                'master': {
+                    'scheduler_priorities': [
+                        {'name': 'pri_a', 'weight': 1},
+                        {'name': 'pri_b', 'weight': 1}
+                    ]
+                }
+            }
+        }
+        result = self.lookup.run(None, variables=facts)
+        assert_equal(result, facts['openshift']['master']['scheduler_priorities'])
+
+    def test_defined_priorities(self):
+        facts = {
+            'openshift': {'master': {}},
+            'openshift_master_scheduler_priorities': [
+                {'name': 'pri_a', 'weight': 1},
+                {'name': 'pri_b', 'weight': 1}
+            ]
+        }
+        result = self.lookup.run(None, variables=facts)
+        assert_equal(result, facts['openshift_master_scheduler_priorities'])

+ 5 - 0
roles/openshift_master_facts/vars/main.yml

@@ -1,3 +1,8 @@
+---
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
+openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
+
 builddefaults_yaml:
   BuildDefaults:
     configuration:

+ 24 - 24
roles/os_firewall/library/os_firewall_manage_iptables.py

@@ -2,7 +2,7 @@
 # -*- coding: utf-8 -*-
 # vim: expandtab:tabstop=4:shiftwidth=4
 # pylint: disable=fixme, missing-docstring
-from subprocess import call, check_output
+import subprocess
 
 DOCUMENTATION = '''
 ---
@@ -29,7 +29,10 @@ class IpTablesAddRuleError(IpTablesError):
 
 
 class IpTablesRemoveRuleError(IpTablesError):
-    pass
+    def __init__(self, chain, msg, cmd, exit_code, output):  # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
+        super(IpTablesRemoveRuleError, self).__init__(msg, cmd, exit_code,
+                                                      output)
+        self.chain = chain
 
 
 class IpTablesSaveError(IpTablesError):
@@ -37,14 +40,14 @@ class IpTablesSaveError(IpTablesError):
 
 
 class IpTablesCreateChainError(IpTablesError):
-    def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
+    def __init__(self, chain, msg, cmd, exit_code, output):  # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
         super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,
                                                        output)
         self.chain = chain
 
 
 class IpTablesCreateJumpRuleError(IpTablesError):
-    def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
+    def __init__(self, chain, msg, cmd, exit_code, output):  # pylint: disable=too-many-arguments, line-too-long, redefined-outer-name
         super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,
                                                           output)
         self.chain = chain
@@ -53,7 +56,7 @@ class IpTablesCreateJumpRuleError(IpTablesError):
 # TODO: implement rollbacks for any events that were successful and an
 # exception was thrown later. For example, when the chain is created
 # successfully, but the add/remove rule fails.
-class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
+class IpTablesManager(object):  # pylint: disable=too-many-instance-attributes
     def __init__(self, module):
         self.module = module
         self.ip_version = module.params['ip_version']
@@ -68,8 +71,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
 
     def save(self):
         try:
-            self.output.append(check_output(self.save_cmd,
-                                            stderr=subprocess.STDOUT))
+            self.output.append(subprocess.check_output(self.save_cmd, stderr=subprocess.STDOUT))
         except subprocess.CalledProcessError as ex:
             raise IpTablesSaveError(
                 msg="Failed to save iptables rules",
@@ -92,7 +94,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
             else:
                 cmd = self.cmd + ['-A'] + rule
                 try:
-                    self.output.append(check_output(cmd))
+                    self.output.append(subprocess.check_output(cmd))
                     self.changed = True
                     self.save()
                 except subprocess.CalledProcessError as ex:
@@ -112,7 +114,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
             else:
                 cmd = self.cmd + ['-D'] + rule
                 try:
-                    self.output.append(check_output(cmd))
+                    self.output.append(subprocess.check_output(cmd))
                     self.changed = True
                     self.save()
                 except subprocess.CalledProcessError as ex:
@@ -123,7 +125,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
 
     def rule_exists(self, rule):
         check_cmd = self.cmd + ['-C'] + rule
-        return True if call(check_cmd) == 0 else False
+        return True if subprocess.call(check_cmd) == 0 else False
 
     def gen_rule(self, port, proto):
         return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
@@ -136,7 +138,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
         else:
             try:
                 cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']
-                output = check_output(cmd, stderr=subprocess.STDOUT)
+                output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
 
                 # break the input rules into rows and columns
                 input_rules = [s.split() for s in to_native(output).split('\n')]
@@ -155,8 +157,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
                 # Naively assume that if the last row is a REJECT or DROP rule,
                 # then we can insert our rule right before it, otherwise we
                 # assume that we can just append the rule.
-                if (last_rule_num and last_rule_target
-                        and last_rule_target in ['REJECT', 'DROP']):
+                if (last_rule_num and last_rule_target and last_rule_target in ['REJECT', 'DROP']):
                     # insert rule
                     cmd = self.cmd + ['-I', self.jump_rule_chain,
                                       str(last_rule_num)]
@@ -164,7 +165,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
                     # append rule
                     cmd = self.cmd + ['-A', self.jump_rule_chain]
                 cmd += ['-j', self.chain]
-                output = check_output(cmd, stderr=subprocess.STDOUT)
+                output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
                 self.changed = True
                 self.output.append(output)
                 self.save()
@@ -192,8 +193,7 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
         else:
             try:
                 cmd = self.cmd + ['-N', self.chain]
-                self.output.append(check_output(cmd,
-                                                stderr=subprocess.STDOUT))
+                self.output.append(subprocess.check_output(cmd, stderr=subprocess.STDOUT))
                 self.changed = True
                 self.output.append("Successfully created chain %s" %
                                    self.chain)
@@ -203,26 +203,26 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
                     chain=self.chain,
                     msg="Failed to create chain: %s" % self.chain,
                     cmd=ex.cmd, exit_code=ex.returncode, output=ex.output
-                    )
+                )
 
     def jump_rule_exists(self):
         cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
-        return True if call(cmd) == 0 else False
+        return True if subprocess.call(cmd) == 0 else False
 
     def chain_exists(self):
         cmd = self.cmd + ['-L', self.chain]
-        return True if call(cmd) == 0 else False
+        return True if subprocess.call(cmd) == 0 else False
 
     def gen_cmd(self):
         cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
         return ["/usr/sbin/%s" % cmd]
 
-    def gen_save_cmd(self): # pylint: disable=no-self-use
+    def gen_save_cmd(self):  # pylint: disable=no-self-use
         return ['/usr/libexec/iptables/iptables.init', 'save']
 
 
 def main():
-    module = AnsibleModule(
+    module = AnsibleModule(  # noqa: F405
         argument_spec=dict(
             name=dict(required=True),
             action=dict(required=True, choices=['add', 'remove',
@@ -266,9 +266,9 @@ def main():
                             output=iptables_manager.output)
 
 
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
 # import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import *  # noqa: F403,E402
+from ansible.module_utils._text import to_native  # noqa: E402
 if __name__ == '__main__':
     main()
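
One way to read the import change in this module: every call now goes through the subprocess namespace, which matches the existing references to subprocess.STDOUT and subprocess.CalledProcessError instead of relying on names leaked by the trailing wildcard import. A minimal sketch of the resulting pattern (the command is illustrative):

    import subprocess

    def run_or_capture(cmd):
        # mirror the module's style: fold stderr into the captured output
        # and surface failures via CalledProcessError
        try:
            return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as ex:
            return ex.output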

+ 0 - 2
setup.cfg

@@ -1,2 +0,0 @@
-[nosetests]
-tests=test,utils

+ 2 - 2
test/modify_yaml_tests.py

@@ -8,7 +8,8 @@ import unittest
 sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../library/")] + sys.path
 
 # pylint: disable=import-error
-from modify_yaml import set_key
+from modify_yaml import set_key  # noqa: E402
+
 
 class ModifyYamlTests(unittest.TestCase):
 
@@ -34,4 +35,3 @@ class ModifyYamlTests(unittest.TestCase):
         self.assertEquals(yaml_value, cfg['masterClients']
                           ['externalKubernetesClientConnectionOverrides']
                           ['acceptContentTypes'])
-

+ 8 - 21
utils/Makefile

@@ -31,8 +31,6 @@ ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $<
 MANPAGES := docs/man/man1/atomic-openshift-installer.1
 VERSION := 1.3
 
-PEPEXCLUDES := E501,E121,E124
-
 sdist: clean
 	python setup.py sdist
 	rm -fR $(SHORTNAME).egg-info
@@ -66,7 +64,7 @@ virtualenv:
 	@echo "# Creating a virtualenv"
 	@echo "#############################################"
 	virtualenv $(NAME)env
-	 . $(NAME)env/bin/activate && pip install setuptools==17.1.1
+	. $(NAME)env/bin/activate && pip install setuptools==17.1.1
 	. $(NAME)env/bin/activate && pip install -r test-requirements.txt
 #       If there are any special things to install do it here
 #       . $(NAME)env/bin/activate && INSTALL STUFF
@@ -75,14 +73,14 @@ ci-unittests:
 	@echo "#############################################"
 	@echo "# Running Unit Tests in virtualenv"
 	@echo "#############################################"
-	. $(NAME)env/bin/activate && nosetests -v --with-coverage --cover-html --cover-min-percentage=70 --cover-package=$(SHORTNAME) test/
+	. $(NAME)env/bin/activate && python setup.py nosetests
 	@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
 
 ci-pylint:
 	@echo "#############################################"
 	@echo "# Running PyLint Tests in virtualenv"
 	@echo "#############################################"
-	. $(NAME)env/bin/activate && python -m pylint --rcfile ../git/.pylintrc src/ooinstall/cli_installer.py src/ooinstall/oo_config.py src/ooinstall/openshift_ansible.py src/ooinstall/variants.py ../callback_plugins/openshift_quick_installer.py ../roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+	. $(NAME)env/bin/activate && python -m pylint --rcfile ../git/.pylintrc $(shell find ../ -name $(NAME)env -prune -o -name test -prune -o -name "*.py" -print)
 
 ci-list-deps:
 	@echo "#############################################"
@@ -90,23 +88,12 @@ ci-list-deps:
 	@echo "#############################################"
 	. $(NAME)env/bin/activate && pip freeze
 
-ci-pyflakes:
-	@echo "#################################################"
-	@echo "# Running Pyflakes Compliance Tests in virtualenv"
-	@echo "#################################################"
-	. $(NAME)env/bin/activate && pyflakes src/ooinstall/*.py
-	. $(NAME)env/bin/activate && pyflakes ../callback_plugins/openshift_quick_installer.py
-	. $(NAME)env/bin/activate && pyflakes ../roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
-
-ci-pep8:
+ci-flake8:
 	@echo "#############################################"
-	@echo "# Running PEP8 Compliance Tests in virtualenv"
+	@echo "# Running Flake8 Compliance Tests in virtualenv"
 	@echo "#############################################"
-	. $(NAME)env/bin/activate && pep8 --ignore=$(PEPEXCLUDES) src/$(SHORTNAME)/
-	. $(NAME)env/bin/activate && pep8 --ignore=$(PEPEXCLUDES) ../callback_plugins/openshift_quick_installer.py
-# This one excludes E402 because it is an ansible module and the
-# boilerplate import statement is expected to be at the bottom
-	. $(NAME)env/bin/activate && pep8 --ignore=$(PEPEXCLUDES),E402 ../roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+	. $(NAME)env/bin/activate && flake8 --config=setup.cfg ../ --exclude="utils,../inventory"
+	. $(NAME)env/bin/activate && python setup.py flake8
 
-ci: clean virtualenv ci-list-deps ci-pep8 ci-pylint ci-pyflakes ci-unittests
+ci: clean virtualenv ci-list-deps ci-flake8 ci-pylint ci-unittests
 	:

+ 13 - 0
utils/setup.cfg

@@ -3,3 +3,16 @@
 # 3. If at all possible, it is good practice to do this. If you cannot, you
 # will need to generate wheels for each Python version that you support.
 universal=1
+
+[nosetests]
+tests=../,../roles/openshift_master_facts/test/,test/
+verbosity=2
+with_coverage=1
+cover_html=1
+cover_package=ooinstall
+cover_min_percentage=70
+
+[flake8]
+max-line-length=120
+exclude=tests/*,setup.py
+ignore=E501,E121,E124

+ 1 - 1
utils/setup.py

@@ -47,7 +47,7 @@ setup(
     # your project is installed. For an analysis of "install_requires" vs pip's
     # requirements files see:
     # https://packaging.python.org/en/latest/requirements.html
-    install_requires=['click', 'PyYAML'],
+    install_requires=['click', 'PyYAML', 'ansible'],
 
     # List additional groups of dependencies here (e.g. development
     # dependencies). You can install these using the following syntax,

+ 3 - 0
utils/src/ooinstall/utils.py

@@ -1,3 +1,5 @@
+# pylint: disable=missing-docstring,invalid-name
+
 import logging
 import re
 
@@ -8,6 +10,7 @@ installer_log = logging.getLogger('installer')
 def debug_env(env):
     for k in sorted(env.keys()):
         if k.startswith("OPENSHIFT") or k.startswith("ANSIBLE") or k.startswith("OO"):
+            # pylint: disable=logging-format-interpolation
             installer_log.debug("{key}: {value}".format(
                 key=k, value=env[k]))
 

+ 1 - 1
utils/test-requirements.txt

@@ -1,7 +1,7 @@
+ansible
 enum
 configparser
 pylint
-pep8
 nose
 coverage
 mock