
openshift_facts and misc fixes

- Do not attempt to fetch a file onto its own location when the playbooks are
  run locally on the master

- Fix openshift_facts when run against a host in a VPC that does not assign internal/external hostnames or IPs

- Fix setting of labels and annotations on node instances and in
  openshift_facts
  - converted openshift_facts to use JSON for local_facts storage instead of
    an INI file; included code that should migrate existing INI users to JSON
    (a sketch of the conversion follows this list)
  - added region/zone settings to the byo inventory

- Fix a fact-related bug where deployment_type was being set on the node role
  instead of the common role for node hosts
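
A minimal sketch of the INI-to-JSON local_facts conversion mentioned above; the facts file path and helper names here are assumptions for illustration (the real logic is the ConfigParser/JSON fallback in init_local_facts() further down in openshift_facts.py):

# Illustrative sketch only, not the module code. Python 2, like the module.
import ConfigParser
import json

FACTS_FILE = '/etc/ansible/facts.d/openshift.fact'  # assumed path, not shown in this diff

def load_local_facts(filename=FACTS_FILE):
    """Return local facts as a dict, reading either legacy INI or new JSON."""
    facts = dict()
    try:
        parser = ConfigParser.SafeConfigParser()
        parser.read(filename)
        for section in parser.sections():
            facts[section] = dict(parser.items(section))
    except (ConfigParser.MissingSectionHeaderError, ConfigParser.ParsingError):
        # File is not INI; assume it has already been migrated to JSON.
        try:
            with open(filename, 'r') as facts_file:
                facts = json.load(facts_file)
        except (ValueError, IOError):
            pass
    return facts

def save_local_facts(facts, filename=FACTS_FILE):
    """Persist local facts in the new JSON format."""
    with open(filename, 'w') as facts_file:
        json.dump(facts, facts_file, indent=2)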
Jason DeTiberus, 10 years ago
commit 5f09b0e1d3
inventory/byo/hosts (+4 -2)

@@ -20,7 +20,8 @@ deployment_type=enterprise
 openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
 
 # Pre-release additional repo
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
 
 # Origin copr repo
 #openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
@@ -31,4 +32,5 @@ ose3-master-ansible.test.example.com
 
 # host group for nodes
 [nodes]
-ose3-node[1:2]-ansible.test.example.com
+ose3-master-ansible.test.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
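
The openshift_node_labels values above are Python-style dict strings in the inventory. A tiny illustration of the structure they are expected to produce once parsed (ast.literal_eval is used here only for demonstration; in the playbooks the value is handled by Ansible templating and, for plain strings, by module.from_json in openshift_facts):

# Demonstration only: shows the labels dict shape set per host above.
import ast

inventory_value = "{'region': 'primary', 'zone': 'default'}"
labels = ast.literal_eval(inventory_value)

assert labels == {'region': 'primary', 'zone': 'default'}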

playbooks/common/openshift-node/config.yml (+2 -3)

@@ -15,6 +15,7 @@
         local_facts:
           hostname: "{{ openshift_hostname | default(None) }}"
           public_hostname: "{{ openshift_public_hostname | default(None) }}"
+          deployment_type: "{{ openshift_deployment_type }}"
       - role: node
         local_facts:
           external_id: "{{ openshift_node_external_id | default(None) }}"
@@ -23,7 +24,6 @@
           pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
           labels: "{{ openshift_node_labels | default(None) }}"
           annotations: "{{ openshift_node_annotations | default(None) }}"
-          deployment_type: "{{ openshift_deployment_type }}"
 
 
 - name: Create temp directory for syncing certs
@@ -68,7 +68,6 @@
     fetch:
       src: "{{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz"
       dest: "{{ sync_tmpdir }}/"
-      flat: yes
       fail_on_missing: yes
       validate_checksum: yes
     with_items: openshift_nodes
@@ -79,7 +78,7 @@
   hosts: oo_nodes_to_config
   gather_facts: no
   vars:
-    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}/{{ groups['oo_first_master'][0] }}/{{ hostvars.localhost.mktemp.stdout }}"
     openshift_sdn_master_url: "https://{{ hostvars[groups['oo_first_master'][0]].openshift.common.hostname }}:4001"
   pre_tasks:
   - name: Ensure certificate directory exists
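
For context on the sync_tmpdir change: with flat: yes removed, Ansible's fetch module saves each file under dest/<inventory_hostname>/<full source path>, so the node play has to descend through the first master's inventory name and the master-side temp directory. A sketch of the resulting layout, with example values (the temp directory and hostnames are assumptions):

import os

localhost_tmpdir = '/tmp/openshift-ansible-abc123'      # hostvars.localhost.mktemp.stdout (example)
first_master = 'ose3-master-ansible.test.example.com'   # groups['oo_first_master'][0]
node_hostname = 'ose3-node1-ansible.test.example.com'   # item.openshift.common.hostname

# On the master, the cert tarball is created at:
src = '%s/%s.tgz' % (localhost_tmpdir, node_hostname)

# Without flat: yes, fetch saves it on the control host as dest/<host>/<src path>:
fetched = os.path.join(localhost_tmpdir, first_master, src.lstrip('/'))

# That matches the node play's sync_tmpdir plus the tarball name:
node_sync_tmpdir = os.path.join(localhost_tmpdir, first_master,
                                localhost_tmpdir.lstrip('/'))
assert fetched == os.path.join(node_sync_tmpdir, node_hostname + '.tgz')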

roles/openshift_facts/library/openshift_facts.py (+211 -126)

@@ -1,6 +1,15 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 # vim: expandtab:tabstop=4:shiftwidth=4
+# disable pylint checks
+# temporarily disabled until items can be addressed:
+#   fixme - until all TODO comments have been addressed
+# permanently disabled unless someone wants to refactor the object model:
+#   no-self-use
+#   too-many-locals
+#   too-many-branches
+# pylint:disable=fixme, no-self-use
+# pylint:disable=too-many-locals, too-many-branches
 
 DOCUMENTATION = '''
 ---
@@ -24,15 +33,18 @@ class OpenShiftFactsFileWriteError(Exception):
 class OpenShiftFactsMetadataUnavailableError(Exception):
     pass
 
-class OpenShiftFacts():
+class OpenShiftFacts(object):
     known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns']
 
     def __init__(self, role, filename, local_facts):
         self.changed = False
         self.filename = filename
         if role not in self.known_roles:
-            raise OpenShiftFactsUnsupportedRoleError("Role %s is not supported by this module" % role)
+            raise OpenShiftFactsUnsupportedRoleError(
+                "Role %s is not supported by this module" % role
+            )
         self.role = role
+        self.system_facts = ansible_facts(module)
         self.facts = self.generate_facts(local_facts)
 
     def generate_facts(self, local_facts):
@@ -42,7 +54,6 @@ class OpenShiftFacts():
         defaults = self.get_defaults(roles)
         provider_facts = self.init_provider_facts()
         facts = self.apply_provider_facts(defaults, provider_facts, roles)
-
         facts = self.merge_facts(facts, local_facts)
         facts['current_config'] = self.current_config(facts)
         self.set_url_facts_if_unset(facts)
@@ -53,35 +64,38 @@ class OpenShiftFacts():
         if 'master' in facts:
             for (url_var, use_ssl, port, default) in [
                     ('api_url',
-                        facts['master']['api_use_ssl'],
-                        facts['master']['api_port'],
-                        facts['common']['hostname']),
+                     facts['master']['api_use_ssl'],
+                     facts['master']['api_port'],
+                     facts['common']['hostname']),
                     ('public_api_url',
-                        facts['master']['api_use_ssl'],
-                        facts['master']['api_port'],
-                        facts['common']['public_hostname']),
+                     facts['master']['api_use_ssl'],
+                     facts['master']['api_port'],
+                     facts['common']['public_hostname']),
                     ('console_url',
-                        facts['master']['console_use_ssl'],
-                        facts['master']['console_port'],
-                        facts['common']['hostname']),
+                     facts['master']['console_use_ssl'],
+                     facts['master']['console_port'],
+                     facts['common']['hostname']),
                     ('public_console_url' 'console_use_ssl',
-                        facts['master']['console_use_ssl'],
-                        facts['master']['console_port'],
-                        facts['common']['public_hostname'])]:
+                     facts['master']['console_use_ssl'],
+                     facts['master']['console_port'],
+                     facts['common']['public_hostname'])]:
                 if url_var not in facts['master']:
                     scheme = 'https' if use_ssl else 'http'
                     netloc = default
-                    if (scheme == 'https' and port != '443') or (scheme == 'http' and port != '80'):
+                    if ((scheme == 'https' and port != '443')
+                            or (scheme == 'http' and port != '80')):
                         netloc = "%s:%s" % (netloc, port)
-                    facts['master'][url_var] = urlparse.urlunparse((scheme, netloc, '', '', '', ''))
+                    facts['master'][url_var] = urlparse.urlunparse(
+                        (scheme, netloc, '', '', '', '')
+                    )
 
 
     # Query current OpenShift config and return a dictionary containing
     # settings that may be valuable for determining actions that need to be
     # taken in the playbooks/roles
     def current_config(self, facts):
-        current_config=dict()
-        roles = [ role for role in facts if role not in ['common','provider'] ]
+        current_config = dict()
+        roles = [role for role in facts if role not in ['common', 'provider']]
         for role in roles:
             if 'roles' in current_config:
                 current_config['roles'].append(role)
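
For readers skimming the reindented block above: the URL facts are derived from the ssl/port/hostname facts roughly as follows (a standalone restatement of set_url_facts_if_unset with example inputs, not additional module code):

import urlparse

def build_url(use_ssl, port, host):
    scheme = 'https' if use_ssl else 'http'
    netloc = host
    # Only append the port when it is not the scheme's default.
    if (scheme == 'https' and port != '443') or (scheme == 'http' and port != '80'):
        netloc = '%s:%s' % (netloc, port)
    return urlparse.urlunparse((scheme, netloc, '', '', '', ''))

print build_url(True, '8443', 'ose3-master-ansible.test.example.com')
# -> https://ose3-master-ansible.test.example.com:8443
print build_url(True, '443', 'ose3-master-ansible.test.example.com')
# -> https://ose3-master-ansible.test.example.com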
@@ -94,31 +108,40 @@ class OpenShiftFacts():
             # Query kubeconfig settings
             kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates'
             if role == 'node':
-                kubeconfig_dir = os.path.join(kubeconfig_dir, "node-%s" % facts['common']['hostname'])
+                kubeconfig_dir = os.path.join(
+                    kubeconfig_dir, "node-%s" % facts['common']['hostname']
+                )
 
             kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
-            if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
+            if (os.path.isfile('/usr/bin/openshift')
+                    and os.path.isfile(kubeconfig_path)):
                 try:
-                    _, output, error = module.run_command(["/usr/bin/openshift", "ex",
-                                                           "config", "view", "-o",
-                                                           "json",
-                                                           "--kubeconfig=%s" % kubeconfig_path],
-                                                           check_rc=False)
+                    _, output, _ = module.run_command(
+                        ["/usr/bin/openshift", "ex", "config", "view", "-o",
+                         "json", "--kubeconfig=%s" % kubeconfig_path],
+                        check_rc=False
+                    )
                     config = json.loads(output)
 
+                    cad = 'certificate-authority-data'
                     try:
                         for cluster in config['clusters']:
-                            config['clusters'][cluster]['certificate-authority-data'] = 'masked'
+                            config['clusters'][cluster][cad] = 'masked'
                     except KeyError:
                         pass
                     try:
                         for user in config['users']:
-                            config['users'][user]['client-certificate-data'] = 'masked'
+                            config['users'][user][cad] = 'masked'
                             config['users'][user]['client-key-data'] = 'masked'
                     except KeyError:
                         pass
 
                     current_config['kubeconfig'] = config
+
+                # override pylint broad-except warning, since we do not want
+                # to bubble up any exceptions if openshift ex config view
+                # fails
+                # pylint: disable=broad-except
                 except Exception:
                     pass
 
@@ -139,7 +162,10 @@ class OpenShiftFacts():
             if ip_value:
                 facts['common'][ip_var] = ip_value
 
-            facts['common'][h_var] = self.choose_hostname([provider_facts['network'].get(h_var)], facts['common'][ip_var])
+            facts['common'][h_var] = self.choose_hostname(
+                [provider_facts['network'].get(h_var)],
+                facts['common'][ip_var]
+            )
 
         if 'node' in roles:
             ext_id = provider_facts.get('external_id')
@@ -158,32 +184,37 @@ class OpenShiftFacts():
 
         return True
 
-    def choose_hostname(self, hostnames=[], fallback=''):
+    def choose_hostname(self, hostnames=None, fallback=''):
         hostname = fallback
+        if hostnames is None:
+            return hostname
 
-        ips = [ i for i in hostnames if i is not None and re.match(r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z', i) ]
-        hosts = [ i for i in hostnames if i is not None and i not in set(ips) ]
+        ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
+        ips = [i for i in hostnames
+               if (i is not None and isinstance(i, basestring)
+                   and re.match(ip_regex, i))]
+        hosts = [i for i in hostnames
+                 if i is not None and i != '' and i not in ips]
 
         for host_list in (hosts, ips):
-            for h in host_list:
-                if self.hostname_valid(h):
-                    return h
+            for host in host_list:
+                if self.hostname_valid(host):
+                    return host
 
         return hostname
 
     def get_defaults(self, roles):
-        ansible_facts = self.get_ansible_facts()
-
         defaults = dict()
 
         common = dict(use_openshift_sdn=True)
-        ip = ansible_facts['default_ipv4']['address']
-        common['ip'] = ip
-        common['public_ip'] = ip
-
-        rc, output, error = module.run_command(['hostname', '-f'])
-        hostname_f = output.strip() if rc == 0 else ''
-        hostname_values = [hostname_f, ansible_facts['nodename'], ansible_facts['fqdn']]
+        ip_addr = self.system_facts['default_ipv4']['address']
+        common['ip'] = ip_addr
+        common['public_ip'] = ip_addr
+
+        exit_code, output, _ = module.run_command(['hostname', '-f'])
+        hostname_f = output.strip() if exit_code == 0 else ''
+        hostname_values = [hostname_f, self.system_facts['nodename'],
+                           self.system_facts['fqdn']]
         hostname = self.choose_hostname(hostname_values)
 
         common['hostname'] = hostname
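
The choose_hostname() change above is what addresses the VPC case from the commit message: empty strings and non-string metadata values are filtered out instead of winning over the IP fallback. A condensed, runnable restatement (hostname_valid() is a stand-in; the real implementation is not part of this diff):

import re

def hostname_valid(hostname):
    # Assumed stand-in: the module's hostname_valid() rejects localhost-style names.
    return hostname not in ('localhost', 'localhost.localdomain', '', None)

def choose_hostname(hostnames=None, fallback=''):
    """Prefer a valid DNS name, then a valid IP, then the fallback."""
    if hostnames is None:
        return fallback
    ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
    ips = [i for i in hostnames
           if i is not None and isinstance(i, basestring) and re.match(ip_regex, i)]
    hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]
    for host_list in (hosts, ips):
        for host in host_list:
            if hostname_valid(host):
                return host
    return fallback

# A VPC host with no assigned hostnames: the empty strings are dropped and the
# instance falls back to its IP rather than an empty hostname.
print choose_hostname(['', '', '10.3.9.14'], fallback='10.3.9.14')  # -> 10.3.9.14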
@@ -195,16 +226,18 @@ class OpenShiftFacts():
             # the urls, instead of forcing both, also to override the hostname
             # without having to re-generate these urls later
             master = dict(api_use_ssl=True, api_port='8443',
-                    console_use_ssl=True, console_path='/console',
-                    console_port='8443', etcd_use_ssl=False,
-                    etcd_port='4001', portal_net='172.30.17.0/24')
+                          console_use_ssl=True, console_path='/console',
+                          console_port='8443', etcd_use_ssl=False,
+                          etcd_port='4001', portal_net='172.30.17.0/24')
             defaults['master'] = master
 
         if 'node' in roles:
             node = dict(external_id=common['hostname'], pod_cidr='',
                         labels={}, annotations={})
-            node['resources_cpu'] = ansible_facts['processor_cores']
-            node['resources_memory'] = int(int(ansible_facts['memtotal_mb']) * 1024 * 1024 * 0.75)
+            node['resources_cpu'] = self.system_facts['processor_cores']
+            node['resources_memory'] = int(
+                int(self.system_facts['memtotal_mb']) * 1024 * 1024 * 0.75
+            )
             defaults['node'] = node
 
         return defaults
@@ -225,13 +258,13 @@ class OpenShiftFacts():
         return facts
 
     def query_metadata(self, metadata_url, headers=None, expect_json=False):
-        r, info = fetch_url(module, metadata_url, headers=headers)
+        result, info = fetch_url(module, metadata_url, headers=headers)
         if info['status'] != 200:
             raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
         if expect_json:
-            return module.from_json(r.read())
+            return module.from_json(result.read())
         else:
-            return [line.strip() for line in r.readlines()]
+            return [line.strip() for line in result.readlines()]
 
     def walk_metadata(self, metadata_url, headers=None, expect_json=False):
         metadata = dict()
@@ -239,49 +272,56 @@ class OpenShiftFacts():
         for line in self.query_metadata(metadata_url, headers, expect_json):
             if line.endswith('/') and not line == 'public-keys/':
                 key = line[:-1]
-                metadata[key]=self.walk_metadata(metadata_url + line, headers,
-                                                 expect_json)
+                metadata[key] = self.walk_metadata(metadata_url + line,
+                                                   headers, expect_json)
             else:
                 results = self.query_metadata(metadata_url + line, headers,
                                               expect_json)
                 if len(results) == 1:
+                    # disable pylint maybe-no-member because overloaded use of
+                    # the module name causes pylint to not detect that results
+                    # is an array or hash
+                    # pylint: disable=maybe-no-member
                     metadata[line] = results.pop()
                 else:
                     metadata[line] = results
         return metadata
 
     def get_provider_metadata(self, metadata_url, supports_recursive=False,
-                          headers=None, expect_json=False):
+                              headers=None, expect_json=False):
         try:
             if supports_recursive:
-                metadata = self.query_metadata(metadata_url, headers, expect_json)
+                metadata = self.query_metadata(metadata_url, headers,
+                                               expect_json)
             else:
-                metadata = self.walk_metadata(metadata_url, headers, expect_json)
-        except OpenShiftFactsMetadataUnavailableError as e:
+                metadata = self.walk_metadata(metadata_url, headers,
+                                              expect_json)
+        except OpenShiftFactsMetadataUnavailableError:
             metadata = None
         return metadata
 
-    def get_ansible_facts(self):
-        if not hasattr(self, 'ansible_facts'):
-            self.ansible_facts = ansible_facts(module)
-        return self.ansible_facts
-
+    # TODO: refactor to reduce the size of this method, potentially create
+    # sub-methods (or classes for the different providers)
+    # temporarily disable pylint too-many-statements
+    # pylint: disable=too-many-statements
     def guess_host_provider(self):
         # TODO: cloud provider facts should probably be submitted upstream
-        ansible_facts = self.get_ansible_facts()
-        product_name = ansible_facts['product_name']
-        product_version = ansible_facts['product_version']
-        virt_type = ansible_facts['virtualization_type']
-        virt_role = ansible_facts['virtualization_role']
+        product_name = self.system_facts['product_name']
+        product_version = self.system_facts['product_version']
+        virt_type = self.system_facts['virtualization_type']
+        virt_role = self.system_facts['virtualization_role']
         provider = None
         metadata = None
 
         # TODO: this is not exposed through module_utils/facts.py in ansible,
         # need to create PR for ansible to expose it
-        bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
+        bios_vendor = get_file_content(
+            '/sys/devices/virtual/dmi/id/bios_vendor'
+        )
         if bios_vendor == 'Google':
             provider = 'gce'
-            metadata_url = 'http://metadata.google.internal/computeMetadata/v1/?recursive=true'
+            metadata_url = ('http://metadata.google.internal/'
+                            'computeMetadata/v1/?recursive=true')
             headers = {'Metadata-Flavor': 'Google'}
             metadata = self.get_provider_metadata(metadata_url, True, headers,
                                                   True)
@@ -290,19 +330,28 @@ class OpenShiftFacts():
             if metadata:
                 metadata['project']['attributes'].pop('sshKeys', None)
                 metadata['instance'].pop('serviceAccounts', None)
-        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
+        elif (virt_type == 'xen' and virt_role == 'guest'
+              and re.match(r'.*\.amazon$', product_version)):
             provider = 'ec2'
             metadata_url = 'http://169.254.169.254/latest/meta-data/'
             metadata = self.get_provider_metadata(metadata_url)
         elif re.search(r'OpenStack', product_name):
             provider = 'openstack'
-            metadata_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
-            metadata = self.get_provider_metadata(metadata_url, True, None, True)
+            metadata_url = ('http://169.254.169.254/openstack/latest/'
+                            'meta_data.json')
+            metadata = self.get_provider_metadata(metadata_url, True, None,
+                                                  True)
 
             if metadata:
                 ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
-                metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url)
-
+                metadata['ec2_compat'] = self.get_provider_metadata(
+                    ec2_compat_url
+                )
+
+                # disable pylint maybe-no-member because overloaded use of
+                # the module name causes pylint to not detect that results
+                # is an array or hash
+                # pylint: disable=maybe-no-member
                 # Filter public_keys  and random_seed from openstack metadata
                 metadata.pop('public_keys', None)
                 metadata.pop('random_seed', None)
@@ -326,7 +375,8 @@ class OpenShiftFacts():
         if provider == 'gce':
             for interface in metadata['instance']['networkInterfaces']:
                 int_info = dict(ips=[interface['ip']], network_type=provider)
-                int_info['public_ips'] = [ ac['externalIp'] for ac in interface['accessConfigs'] ]
+                int_info['public_ips'] = [ac['externalIp'] for ac
+                                          in interface['accessConfigs']]
                 int_info['public_ips'].extend(interface['forwardedIps'])
                 _, _, network_id = interface['network'].rpartition('/')
                 int_info['network_id'] = network_id
@@ -346,15 +396,26 @@ class OpenShiftFacts():
             # TODO: attempt to resolve public_hostname
             network['public_hostname'] = network['public_ip']
         elif provider == 'ec2':
-            for interface in sorted(metadata['network']['interfaces']['macs'].values(),
-                                    key=lambda x: x['device-number']):
+            for interface in sorted(
+                    metadata['network']['interfaces']['macs'].values(),
+                    key=lambda x: x['device-number']
+            ):
                 int_info = dict()
                 var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
                 for ips_var, int_var in var_map.iteritems():
                     ips = interface[int_var]
-                    int_info[ips_var] = [ips] if isinstance(ips, basestring) else ips
-                int_info['network_type'] = 'vpc' if 'vpc-id' in interface else 'classic'
-                int_info['network_id'] = interface['subnet-id'] if int_info['network_type'] == 'vpc' else None
+                    if isinstance(ips, basestring):
+                        int_info[ips_var] = [ips]
+                    else:
+                        int_info[ips_var] = ips
+                if 'vpc-id' in interface:
+                    int_info['network_type'] = 'vpc'
+                else:
+                    int_info['network_type'] = 'classic'
+                if int_info['network_type'] == 'vpc':
+                    int_info['network_id'] = interface['subnet-id']
+                else:
+                    int_info['network_id'] = None
                 network['interfaces'].append(int_info)
             facts['zone'] = metadata['placement']['availability-zone']
             facts['external_id'] = metadata['instance-id']
@@ -384,7 +445,8 @@ class OpenShiftFacts():
             network['hostname'] = metadata['hostname']
 
             # TODO: verify that public hostname makes sense and is resolvable
-            network['public_hostname'] = metadata['ec2_compat']['public-hostname']
+            pub_h = metadata['ec2_compat']['public-hostname']
+            network['public_hostname'] = pub_h
 
         facts['network'] = network
         return facts
@@ -392,8 +454,8 @@ class OpenShiftFacts():
     def init_provider_facts(self):
         provider_info = self.guess_host_provider()
         provider_facts = self.normalize_provider_facts(
-                provider_info.get('name'),
-                provider_info.get('metadata')
+            provider_info.get('name'),
+            provider_info.get('metadata')
         )
         return provider_facts
 
@@ -402,56 +464,77 @@ class OpenShiftFacts():
         # of openshift.<blah>
         return self.facts
 
-    def init_local_facts(self, facts={}):
+    def init_local_facts(self, facts=None):
         changed = False
+        facts_to_set = {self.role: dict()}
+        if facts is not None:
+            facts_to_set[self.role] = facts
 
-        local_facts = ConfigParser.SafeConfigParser()
-        local_facts.read(self.filename)
-
-        section = self.role
-        if not local_facts.has_section(section):
-            local_facts.add_section(section)
+        # Handle conversion of INI style facts file to json style
+        local_facts = dict()
+        try:
+            ini_facts = ConfigParser.SafeConfigParser()
+            ini_facts.read(self.filename)
+            for section in ini_facts.sections():
+                local_facts[section] = dict()
+                for key, value in ini_facts.items(section):
+                    local_facts[section][key] = value
+
+        except (ConfigParser.MissingSectionHeaderError,
+                ConfigParser.ParsingError):
+            try:
+                with open(self.filename, 'r') as facts_file:
+                    local_facts = json.load(facts_file)
+
+            except (ValueError, IOError) as ex:
+                pass
+
+        for arg in ['labels', 'annotations']:
+            if arg in facts_to_set and isinstance(facts_to_set[arg],
+                                                  basestring):
+                facts_to_set[arg] = module.from_json(facts_to_set[arg])
+
+        new_local_facts = self.merge_facts(local_facts, facts_to_set)
+        for facts in new_local_facts.values():
+            keys_to_delete = []
+            for fact, value in facts.iteritems():
+                if value == "" or value is None:
+                    keys_to_delete.append(fact)
+            for key in keys_to_delete:
+                del facts[key]
+
+        if new_local_facts != local_facts:
             changed = True
 
-        for key, value in facts.iteritems():
-            if isinstance(value, bool):
-                value = str(value)
-            if not value:
-                continue
-            if not local_facts.has_option(section, key) or local_facts.get(section, key) != value:
-                local_facts.set(section, key, value)
-                changed = True
-
-        if changed and not module.check_mode:
-            try:
-                fact_dir = os.path.dirname(self.filename)
-                if not os.path.exists(fact_dir):
-                    os.makedirs(fact_dir)
-                with open(self.filename, 'w') as fact_file:
-                        local_facts.write(fact_file)
-            except (IOError, OSError) as e:
-                raise OpenShiftFactsFileWriteError("Could not create fact file: %s, error: %s" % (self.filename, e))
+            if not module.check_mode:
+                try:
+                    fact_dir = os.path.dirname(self.filename)
+                    if not os.path.exists(fact_dir):
+                        os.makedirs(fact_dir)
+                    with open(self.filename, 'w') as fact_file:
+                        fact_file.write(module.jsonify(new_local_facts))
+                except (IOError, OSError) as ex:
+                    raise OpenShiftFactsFileWriteError(
+                        "Could not create fact file: "
+                        "%s, error: %s" % (self.filename, ex)
+                    )
         self.changed = changed
-
-        role_facts = dict()
-        for section in local_facts.sections():
-            role_facts[section] = dict()
-            for opt, val in local_facts.items(section):
-                role_facts[section][opt] = val
-        return role_facts
+        return new_local_facts
 
 
 def main():
+    # disabling pylint errors for global-variable-undefined and invalid-name
+    # for 'global module' usage, since it is required to use ansible_facts
+    # pylint: disable=global-variable-undefined, invalid-name
     global module
     module = AnsibleModule(
-            argument_spec = dict(
-                    role=dict(default='common',
-                              choices=OpenShiftFacts.known_roles,
-                              required=False),
-                    local_facts=dict(default={}, type='dict', required=False),
-            ),
-            supports_check_mode=True,
-            add_file_common_args=True,
+        argument_spec=dict(
+            role=dict(default='common', required=False,
+                      choices=OpenShiftFacts.known_roles),
+            local_facts=dict(default=None, type='dict', required=False),
+        ),
+        supports_check_mode=True,
+        add_file_common_args=True,
     )
 
     role = module.params['role']
@@ -464,11 +547,13 @@ def main():
     file_params['path'] = fact_file
     file_args = module.load_file_common_arguments(file_params)
     changed = module.set_fs_attributes_if_different(file_args,
-            openshift_facts.changed)
+                                                    openshift_facts.changed)
 
     return module.exit_json(changed=changed,
-            ansible_facts=openshift_facts.get_facts())
+                            ansible_facts=openshift_facts.get_facts())
 
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
 # import module snippets
 from ansible.module_utils.basic import *
 from ansible.module_utils.facts import *
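
For reference, the generated facts end up exposed to the playbooks as a per-host openshift dict (for example item.openshift.common.hostname in the node certificate fetch task above). The values below are placeholders that show the role-scoped shape only, not real defaults:

# Placeholder values; the actual facts are generated per host by this module.
openshift = {
    'common': {
        'hostname': 'ose3-node1-ansible.test.example.com',
        'public_hostname': 'ose3-node1-ansible.test.example.com',
        'ip': '10.3.9.14',
        'public_ip': '203.0.113.10',
        'use_openshift_sdn': True,
        'deployment_type': 'enterprise',
    },
    'node': {
        'labels': {'region': 'primary', 'zone': 'default'},
        'annotations': {},
        'pod_cidr': '',
        'external_id': 'ose3-node1-ansible.test.example.com',
    },
}

print '%s.tgz' % openshift['common']['hostname']  # name of the fetched cert tarball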