
WIP: Scale node to new-installer cluster

This commit adds code to add hosts to a new-installer
cluster.
Michael Gugino 6 years ago
parent
commit
cbc1f4c718

+ 1 - 60
playbooks/init/basic_facts.yml

@@ -25,66 +25,7 @@
   - name: set openshift_deployment_type if unset
     set_fact:
       openshift_deployment_type: "{{ deployment_type }}"
+      openshift_is_atomic: False
     when:
     - openshift_deployment_type is undefined
     - deployment_type is defined
-
-- name: Retrieve existing master configs and validate
-  hosts: oo_masters_to_config
-  gather_facts: no
-  any_errors_fatal: true
-  roles:
-  - openshift_facts
-  tasks:
-  - import_role:
-      name: openshift_control_plane
-      tasks_from: check_existing_config.yml
-
-  - when:
-    - l_existing_config_master_config is defined
-    - l_existing_config_master_config.networkConfig is defined
-    block:
-    - set_fact:
-        openshift_portal_net: "{{ l_existing_config_master_config.networkConfig.serviceNetworkCIDR }}"
-
-    - set_fact:
-        osm_cluster_network_cidr: "{{ l_existing_config_master_config.networkConfig.clusterNetworks[0].cidr }}"
-        osm_host_subnet_length: "{{ l_existing_config_master_config.networkConfig.clusterNetworks[0].hostSubnetLength }}"
-      when:
-      - l_existing_config_master_config.networkConfig.clusterNetworks is defined
-      # End block
-
-- name: Initialize special first-master variables
-  hosts: oo_first_master
-  roles:
-  - role: openshift_facts
-  tasks:
-  - when: not (osm_default_node_selector is defined)
-    block:
-    - set_fact:
-        # l_existing_config_master_config is set in openshift_control_plane/tasks/check_existing_config.yml
-        openshift_master_config_node_selector: "{{ l_existing_config_master_config.projectConfig.defaultNodeSelector }}"
-      when:
-      - l_existing_config_master_config is defined
-      - l_existing_config_master_config.projectConfig is defined
-      - l_existing_config_master_config.projectConfig.defaultNodeSelector is defined
-      - l_existing_config_master_config.projectConfig.defaultNodeSelector != ''
-
-  - set_fact:
-      # We need to setup openshift_client_binary here for special uses of delegate_to in
-      # later roles and plays.
-      first_master_client_binary: "{{  openshift_client_binary }}"
-      #Some roles may require this to be set for first master
-      openshift_client_binary: "{{ openshift_client_binary }}"
-      # we need to know if a default node selector has been manually set outside the installer
-      l_osm_default_node_selector: '{{ osm_default_node_selector | default(openshift_master_config_node_selector) | default("node-role.kubernetes.io/compute=true") }}'
-
-- name: Disable web console if required
-  hosts: oo_masters_to_config
-  gather_facts: no
-  tasks:
-  - set_fact:
-      openshift_web_console_install: False
-    when:
-    - openshift_deployment_subtype is defined
-    - openshift_deployment_subtype == 'registry' or ( osm_disabled_features is defined and 'WebConsole' in osm_disabled_features )

+ 0 - 5
playbooks/init/main.yml

@@ -30,13 +30,8 @@
 - import_playbook: base_packages.yml
   when: l_install_base_packages | default(False) | bool
 
-- import_playbook: cluster_facts.yml
-
 - import_playbook: version.yml
 
-- import_playbook: sanity_checks.yml
-  when: not (skip_sanity_checks | default(False))
-
 - name: Initialization Checkpoint End
   hosts: all
   gather_facts: false

+ 1 - 17
playbooks/init/version.yml

@@ -1,23 +1,7 @@
 ---
 - name: Determine openshift_version to configure on first master
-  hosts: "{{ l_openshift_version_determine_hosts | default('oo_first_master') }}"
+  hosts: oo_nodes_to_config
   tasks:
   - include_role:
       name: openshift_version
       tasks_from: first_master.yml
-
-# NOTE: We set this even on etcd hosts as they may also later run as masters,
-# and we don't want to install wrong version of docker and have to downgrade
-# later.
-- name: Set openshift_version for etcd, node, and master hosts
-  hosts: "{{ l_openshift_version_set_hosts | default(l_default_version_set_hosts) }}"
-  vars:
-    l_default_version_set_hosts: "oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master"
-    l_first_master_openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
-    l_first_master_openshift_pkg_version: "{{ hostvars[groups.oo_first_master.0].openshift_pkg_version | default('') }}"
-    l_first_master_openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag}}"
-  tasks:
-  - set_fact:
-      openshift_version: "{{ l_first_master_openshift_version }}"
-      openshift_pkg_version: "{{ l_first_master_openshift_pkg_version }}"
-      openshift_image_tag: "{{ l_first_master_openshift_image_tag }}"

+ 12 - 8
playbooks/openshift-node/scaleup.yml

@@ -31,11 +31,15 @@
     l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
     l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
 
-- import_playbook: ../init/version.yml
-  vars:
-    l_openshift_version_set_hosts: "oo_nodes_to_config:!oo_first_master"
-
-- import_playbook: private/bootstrap.yml
-- import_playbook: private/join.yml
-
-- import_playbook: ../openshift-glusterfs/private/add_hosts.yml
+- name: install nodes
+  hosts: oo_nodes_to_config
+  tasks:
+  - import_role:
+      name: openshift_node40
+      tasks_from: install.yml
+  - import_role:
+      name: openshift_node40
+      tasks_from: config.yml
+  - import_role:
+      name: openshift_node40
+      tasks_from: systemd.yml

+ 0 - 3
playbooks/prerequisites.yml

@@ -13,9 +13,6 @@
     l_install_base_packages: True
     l_repo_hosts: "{{ l_scale_up_hosts | default('oo_all_hosts') }}"
 
-- import_playbook: init/validate_hostnames.yml
-  when: not (skip_validate_hostnames | default(False))
-
 # This is required for container runtime for crio, only needs to run once.
 - name: Configure os_firewall
   hosts: "{{ l_scale_up_hosts | default(l_default_firewall_hosts) }}"

+ 0 - 164
roles/container_runtime/tasks/package_docker.yml

@@ -1,165 +1 @@
 ---
-- import_tasks: common/pre.yml
-
-- name: Get current installed Docker version
-  command: "{{ repoquery_installed }} --qf '%{version}' docker"
-  register: curr_docker_version
-  retries: 4
-  until: curr_docker_version is succeeded
-  changed_when: false
-
-# Some basic checks to ensure the role will complete
-- import_tasks: docker_sanity.yml
-
-# Make sure Docker is installed, but does not update a running version.
-# Docker upgrades are handled by a separate playbook.
-# Note: The curr_docker_version.stdout check can be removed when https://github.com/ansible/ansible/issues/33187 gets fixed.
-- name: Install Docker
-  package:
-    name: "{{ pkg_list | join(',') }}"
-    state: present
-  register: result
-  until: result is succeeded
-  vars:
-    pkg_list:
-    - "docker{{ '-' + docker_version if docker_version is defined else '' }}"
-    - skopeo
-
-- block:
-  # Extend the default Docker service unit file when using iptables-services
-  - name: Ensure docker.service.d directory exists
-    file:
-      path: "{{ docker_systemd_dir }}"
-      state: directory
-
-  - name: Configure Docker service unit file
-    template:
-      dest: "{{ docker_systemd_dir }}/custom.conf"
-      src: custom.conf.j2
-    notify:
-    - restart container runtime
-  when: not (os_firewall_use_firewalld | default(False)) | bool
-
-- stat:
-    path: /etc/sysconfig/docker
-    get_checksum: false
-    get_mime: false
-  register: docker_check
-
-- name: Set registry params
-  lineinfile:
-    dest: /etc/sysconfig/docker
-    regexp: '^{{ item.reg_conf_var }}=.*$'
-    line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | lib_utils_oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
-  when:
-  - item.reg_fact_val != []
-  - docker_check.stat.isreg is defined
-  - docker_check.stat.isreg
-  with_items:
-  - reg_conf_var: ADD_REGISTRY
-    reg_fact_val: "{{ l2_docker_additional_registries }}"
-    reg_flag: --add-registry
-  - reg_conf_var: BLOCK_REGISTRY
-    reg_fact_val: "{{ l2_docker_blocked_registries }}"
-    reg_flag: --block-registry
-  - reg_conf_var: INSECURE_REGISTRY
-    reg_fact_val: "{{ l2_docker_insecure_registries }}"
-    reg_flag: --insecure-registry
-  notify:
-  - restart container runtime
-
-- name: Place additional/blocked/insecure registries in /etc/containers/registries.conf
-  template:
-    dest: "{{ containers_registries_conf_path }}"
-    src: registries.conf
-  when: openshift_docker_use_etc_containers | bool
-  notify:
-  - restart container runtime
-
-- name: Set Proxy Settings
-  lineinfile:
-    dest: /etc/sysconfig/docker
-    regexp: '^{{ item.reg_conf_var }}=.*$'
-    line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
-    state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
-  with_items:
-  - reg_conf_var: HTTP_PROXY
-    reg_fact_val: "{{ docker_http_proxy }}"
-  - reg_conf_var: HTTPS_PROXY
-    reg_fact_val: "{{ docker_https_proxy }}"
-  - reg_conf_var: NO_PROXY
-    reg_fact_val: "{{ docker_no_proxy }}"
-  notify:
-  - restart container runtime
-  when:
-  - docker_check.stat.isreg is defined
-  - docker_check.stat.isreg
-  - docker_http_proxy != '' or docker_https_proxy != ''
-
-- name: Set various Docker options
-  lineinfile:
-    dest: /etc/sysconfig/docker
-    regexp: '^OPTIONS=.*$'
-    line: "OPTIONS='\
-      {% if ansible_selinux.status | default(None) == 'enabled' and openshift_docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \
-      {% if openshift_docker_log_driver %} --log-driver {{ openshift_docker_log_driver }}{% endif %} \
-      {% if l2_docker_log_options != [] %} {{ l2_docker_log_options |  lib_utils_oo_split() | lib_utils_oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \
-      {% if (openshift_docker_hosted_registry_insecure | bool) and openshift_docker_hosted_registry_network %} --insecure-registry={{ openshift_docker_hosted_registry_network }} {% endif %} \
-      {% if docker_options is defined %} {{ docker_options }}{% endif %} \
-      {% if openshift_docker_options %} {{ openshift_docker_options }}{% endif %} \
-      --signature-verification={{ openshift_docker_signature_verification | bool }}'"
-  when: docker_check.stat.isreg is defined and docker_check.stat.isreg
-  notify:
-  - restart container runtime
-
-- stat:
-    path: /etc/sysconfig/docker-network
-    get_checksum: false
-    get_mime: false
-  register: sysconfig_docker_network_check
-
-- name: Configure Docker Network OPTIONS
-  lineinfile:
-    dest: /etc/sysconfig/docker-network
-    regexp: '^DOCKER_NETWORK_OPTIONS=.*$'
-    line: "DOCKER_NETWORK_OPTIONS='\
-      {% if openshift.node is defined and openshift.node.sdn_mtu is defined %} --mtu={{ openshift.node.sdn_mtu }}{% endif %}'"
-  when:
-  - sysconfig_docker_network_check.stat.isreg is defined
-  - sysconfig_docker_network_check.stat.isreg
-  notify:
-  - restart container runtime
-
-# The following task is needed as the systemd module may report a change in
-# state even though docker is already running.
-- name: Detect if docker is already started
-  command: "systemctl show docker -p ActiveState"
-  changed_when: False
-  register: r_docker_already_running_result
-
-- name: Start the Docker service
-  systemd:
-    name: docker
-    enabled: yes
-    state: started
-    daemon_reload: yes
-  register: r_docker_package_docker_start_result
-  until: not (r_docker_package_docker_start_result is failed)
-  retries: 3
-  delay: 30
-
-- set_fact:
-    docker_service_status_changed: "{{ (r_docker_package_docker_start_result is changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
-
-- name: Check for docker_storage_path/overlay2
-  stat:
-    path: "{{ docker_storage_path }}/overlay2"
-  register: dsp_stat
-
-- name: Fixup SELinux permissions for docker
-  shell: |
-           semanage fcontext -a -e /var/lib/docker/overlay2 "{{ docker_storage_path }}/overlay2"
-           restorecon -R -v "{{ docker_storage_path }}/overlay2"
-  when: dsp_stat.stat.exists
-
-- import_tasks: common/post.yml

+ 1 - 9
roles/container_runtime/templates/crio-network.j2

@@ -1,9 +1 @@
-{% if 'http_proxy' in openshift.common %}
-HTTP_PROXY={{ openshift.common.http_proxy }}
-{% endif %}
-{% if 'https_proxy' in openshift.common %}
-HTTPS_PROXY={{ openshift.common.https_proxy }}
-{% endif %}
-{% if 'no_proxy' in openshift.common %}
-NO_PROXY={{ openshift.common.no_proxy }}
-{% endif %}
+CRIO_NETWORK_OPTIONS="--cni-config-dir=/etc/kubernetes/cni/net.d --cni-plugin-dir=/var/lib/cni/bin"

+ 55 - 0
roles/lib_utils/action_plugins/parse_ignition.py

@@ -0,0 +1,55 @@
+"""Ansible action plugin to decode ignition payloads"""
+
+import os
+
+from ansible.plugins.action import ActionBase
+from ansible import errors
+from six.moves import urllib
+
+
+def get_files(files_dict, systemd_dict, dir_list, data):
+    """parse data to populate file_dict"""
+    for item in data['storage']['files']:
+        path = item["path"]
+        dir_list.add(os.path.dirname(path))
+        # remove prefix "data:,"
+        contents = urllib.parse.unquote(item['contents']['source'][6:])
+        mode = str(item["mode"])
+        inode = {"contents": contents, "mode": mode}
+        files_dict[path] = inode
+    # also collect the systemd unit files while we're here
+    for item in data['systemd']['units']:
+        contents = item['contents']
+        mode = "0644"
+        inode = {"contents": contents, "mode": mode}
+        name = item['name']
+        path = '/etc/systemd/system/' + name
+        dir_list.add(os.path.dirname(path))
+        files_dict[path] = inode
+        enabled = item['enabled']
+        systemd_dict[name] = enabled
+
+
+class ActionModule(ActionBase):
+    """ActionModule for parse_ignition.py"""
+
+    def run(self, tmp=None, task_vars=None):
+        """Run parse_ignition action plugin"""
+        result = super(ActionModule, self).run(tmp, task_vars)
+        result["changed"] = False
+        result["failed"] = False
+        result["msg"] = "Parsed successfully"
+        files_dict = {}
+        systemd_dict = {}
+        dir_list = set()
+        result["files_dict"] = files_dict
+        result["systemd_dict"] = systemd_dict
+
+        # self.task_vars holds all in-scope variables.
+        # Ignore setting self.task_vars outside of init.
+        # pylint: disable=W0201
+        self.task_vars = task_vars or {}
+        ign_file_contents = self._task.args.get('ign_file_contents')
+        get_files(files_dict, systemd_dict, dir_list, ign_file_contents)
+        result["dir_list"] = list(dir_list)
+        return result

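For reference, here is a minimal sketch of what the new plugin's `get_files` helper produces. The sample payload below is hypothetical and trimmed to the two sections the plugin reads; in the playbooks the real payload is fetched from the worker bootstrap endpoint.

```python
import sys

# Make the new action plugin importable, the same way test_parse_ignition.py
# does (run from the repository root).
sys.path.insert(0, "roles/lib_utils/action_plugins")
import parse_ignition  # noqa: E402

# Hypothetical, trimmed ignition payload: one file and one systemd unit.
sample_ign = {
    "storage": {"files": [
        {"path": "/etc/example.conf", "mode": 420,
         # the "data:," prefix is stripped and the rest URL-decoded
         "contents": {"source": "data:,key%3Dvalue%0A"}},
    ]},
    "systemd": {"units": [
        {"name": "example.service", "enabled": True,
         "contents": "[Unit]\nDescription=Example\n"},
    ]},
}

files_dict, systemd_dict, dir_list = {}, {}, set()
parse_ignition.get_files(files_dict, systemd_dict, dir_list, sample_ign)

print(files_dict["/etc/example.conf"])  # {'contents': 'key=value\n', 'mode': '420'}
print(systemd_dict["example.service"])  # True
print(sorted(dir_list))                 # ['/etc', '/etc/systemd/system']
```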
+ 88 - 0
roles/lib_utils/test/test_data/example.ign.json

File diff suppressed because it is too large

+ 33 - 0
roles/lib_utils/test/test_parse_ignition.py

@@ -0,0 +1,33 @@
+'''
+ Unit tests for parse_ignition
+'''
+import json
+import os
+import sys
+
+MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'action_plugins'))
+sys.path.insert(0, MODULE_PATH)
+
+# pylint: disable=import-error,wrong-import-position,missing-docstring
+import parse_ignition # noqa: E402
+
+
+def read_ign(path):
+    with open(path) as ign_in:
+        data = json.loads(ign_in.read())
+    return data
+
+
+def test_parse_json():
+    ign_data = read_ign('test_data/example.ign.json')
+    files_dict = {}
+    systemd_dict = {}
+    dir_list = set()
+    result = {}
+    result['files_dict'] = files_dict
+    result['systemd_dict'] = systemd_dict
+    parse_ignition.get_files(files_dict, systemd_dict, dir_list, ign_data)
+
+
+if __name__ == '__main__':
+    test_parse_json()

+ 2 - 2
roles/openshift_facts/defaults/main.yml

@@ -45,8 +45,8 @@ osm_image: "{{ l_osm_registry_url | regex_replace('${component}' | regex_escape,
 repoquery_cmd: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0', 'repoquery --plugins') }}"
 repoquery_installed: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed', 'repoquery --plugins --installed') }}"
 
-openshift_use_crio: False
-openshift_use_crio_only: False
+openshift_use_crio: True
+openshift_use_crio_only: True
 openshift_crio_enable_docker_gc: False
 openshift_crio_var_sock: "/var/run/crio/crio.sock"
 openshift_crio_pause_image: "{{ l_os_registry_url | regex_replace('${component}' | regex_escape, 'pod') }}"

+ 61 - 0
roles/openshift_node40/README.md

@@ -0,0 +1,61 @@
+OpenShift Node
+================================
+
+Node service installation
+
+Requirements
+------------
+
+* Ansible 2.2
+* One or more Master servers
+* A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
+rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos
+
+Role Variables
+--------------
+From this role:
+
+| Name                                     | Default value         | Description                                              |
+|------------------------------------------|-----------------------|----------------------------------------------------------|
+| openshift_node_start_options             | UNDEF (Optional)      | Options to pass to node start cmdline                    |
+| oreg_url                                 | UNDEF (Optional)      | Default docker registry to use                           |
+| openshift_persistentlocalstorage_enabled | false                 | Enable the persistent local storage                      |
+
+openshift_node_start_options can be used to pass any node start option, e.g.:
+
+--enable=kubelet,plugins
+
+which would run a node without kube-proxy and DNS.
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+
+Notes
+-----
+
+Currently we support re-labeling nodes, but we don't re-schedule running pods or remove existing labels. That means you will have to trigger the re-scheduling manually. To re-schedule your pods, just follow the steps below:
+
+```
+oc adm manage-node --schedulable=false ${NODE}
+oc adm manage-node --drain ${NODE}
+oc adm manage-node --schedulable=true ${NODE}
+````
+
+> If you are using a version older than 1.5/3.5, you must replace `--drain` with `--evacuate`.
+
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+TODO

+ 160 - 0
roles/openshift_node40/defaults/main.yml

@@ -0,0 +1,160 @@
+---
+openshift_node_debug_level: "{{ debug_level | default(2) }}"
+openshift_node_iptables_sync_period: '30s'
+osn_storage_plugin_deps:
+- ceph
+- glusterfs
+- iscsi
+openshift_node_local_quota_per_fsgroup: ""
+openshift_node_proxy_mode: iptables
+openshift_set_node_ip: False
+openshift_config_base: '/etc/origin'
+
+
+# Assume the images are already downloaded on the machine
+system_images_registry: "docker"
+l_osn_image: "{{ (system_images_registry == 'docker') | ternary(osn_image, (osn_image.split('/')|length==2) | ternary(system_images_registry + '/' + osn_image, osn_image)) }}"
+system_osn_image: "{{ (system_images_registry == 'docker') | ternary('docker:' + l_osn_image, l_osn_image) }}"
+
+openshift_node_env_vars: {}
+
+# lo must always be present in this list or dnsmasq will conflict with
+# the node's dns service.
+openshift_node_dnsmasq_except_interfaces:
+- lo
+
+# dnsmasq defaults to neg caching disabled
+openshift_node_dnsmasq_no_negcache: true
+# When openshift_node_dnsmasq_no_negcache is set to false, how many seconds to cache negative lookups.
+openshift_node_dnsmasq_neg_ttl: '1'
+
+r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+openshift_node_syscon_auth_mounts_l:
+- type: bind
+  source: "{{ oreg_auth_credentials_path }}"
+  destination: "/root/.docker"
+  options:
+  - ro
+  - bind
+
+# Additional mounts, in case we need to add new mounts in the future or the
+# user wants to mount data. This should be in the same format as auth_mounts_l above.
+openshift_node_syscon_add_mounts_l: []
+
+default_r_openshift_node_image_prep_packages:
+- "{{ openshift_service_type }}-node"
+- ansible
+- bash-completion
+- dnsmasq
+- ntp
+- logrotate
+- httpd-tools
+- bind-utils
+- firewalld
+- libselinux-python
+- conntrack-tools
+- openssl
+- iproute
+- python-dbus
+- PyYAML
+- yum-utils
+- glusterfs-fuse
+- device-mapper-multipath
+- nfs-utils
+- cockpit-ws
+- cockpit-system
+- cockpit-bridge
+- cockpit-docker
+- iscsi-initiator-utils
+- ceph-common
+- atomic
+r_openshift_node_image_prep_packages: "{{ default_r_openshift_node_image_prep_packages | union(openshift_node_image_prep_packages | default([])) }}"
+
+r_openshift_node_os_firewall_deny: []
+default_r_openshift_node_os_firewall_allow:
+- service: Kubernetes kubelet
+  port: 10250/tcp
+- service: Kubernetes kube-proxy health check for service load balancers
+  port: 10256/tcp
+- service: http
+  port: 80/tcp
+- service: https
+  port: 443/tcp
+- service: OpenShift OVS sdn
+  port: 4789/udp
+  cond: openshift_use_openshift_sdn | bool
+- service: Calico BGP Port
+  port: 179/tcp
+  cond: "{{ openshift_node_use_calico }}"
+- service: Kubernetes service NodePort TCP
+  port: "{{ openshift_node_port_range | default('') }}/tcp"
+  cond: "{{ openshift_node_port_range is defined }}"
+- service: Kubernetes service NodePort UDP
+  port: "{{ openshift_node_port_range | default('') }}/udp"
+  cond: "{{ openshift_node_port_range is defined }}"
+- service: Prometheus monitoring
+  port: 9000-10000/tcp
+# Allow multiple port ranges to be added to the role
+r_openshift_node_os_firewall_allow: "{{ default_r_openshift_node_os_firewall_allow | union(openshift_node_open_ports | default([])) }}"
+
+# oreg_url is defined by user input
+oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
+l_bind_docker_reg_auth: False
+
+openshift_docker_service_name: "docker"
+
+# These defaults assume forcing journald persistence, fsync to disk once
+# a second, rate-limiting to 10,000 logs a second, no forwarding to
+# syslog or wall, using 8GB of disk space maximum, using 10MB journal
+# files, keeping only a day's worth of logs per journal file, and
+# retaining journal files no longer than a month.
+journald_vars_to_replace:
+- { var: Storage, val: persistent }
+- { var: Compress, val: yes }
+- { var: SyncIntervalSec, val: 1s }
+- { var: RateLimitInterval, val: 1s }
+- { var: RateLimitBurst, val: 10000 }
+- { var: SystemMaxUse, val: 8G }
+- { var: SystemKeepFree, val: 20% }
+- { var: SystemMaxFileSize, val: 10M }
+- { var: MaxRetentionSec, val: 1month }
+- { var: MaxFileSec, val: 1day }
+- { var: ForwardToSyslog, val: no }
+- { var: ForwardToWall, val: no }
+
+# NOTE
+# r_openshift_node_*_default may be defined external to this role.
+# openshift_use_*, if defined, may affect other roles or play behavior.
+openshift_node_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}"
+openshift_node_use_openshift_sdn: "{{ openshift_node_use_openshift_sdn_default }}"
+
+openshift_node_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name }}"
+openshift_node_sdn_network_plugin_name: "{{ openshift_node_sdn_network_plugin_name_default }}"
+
+openshift_node_use_calico_default: "{{ openshift_use_calico | default(False) }}"
+openshift_node_use_calico: "{{ openshift_node_use_calico_default }}"
+
+openshift_node_use_nuage_default: "{{ openshift_use_nuage | default(False) }}"
+openshift_node_use_nuage: "{{ openshift_node_use_nuage_default }}"
+
+openshift_node_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
+openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}"
+
+openshift_node_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}"
+openshift_node_use_kuryr: "{{ openshift_node_use_kuryr_default }}"
+
+openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
+
+openshift_node_config_dir_default: "/etc/origin/node"
+openshift_node_config_dir: "{{ openshift_node_config_dir_default }}"
+
+openshift_node_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
+openshift_node_image_config_latest: "{{ openshift_node_image_config_latest_default }}"
+
+
+openshift_node_use_instance_profiles: False
+
+openshift_node_use_persistentlocalvolumes: "{{ openshift_persistentlocalstorage_enabled | default(False) | bool }}"

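The l_osn_image / system_osn_image ternaries above are dense; here is a rough Python rendering of the same resolution logic, assuming osn_image is a plain "repo/name" or "registry/repo/name" string:

```python
def resolve_osn_image(system_images_registry: str, osn_image: str) -> tuple:
    """Mirror the l_osn_image / system_osn_image Jinja ternaries."""
    if system_images_registry == "docker" or len(osn_image.split("/")) != 2:
        l_osn_image = osn_image
    else:
        # bare "repo/name": prefix the configured system-images registry
        l_osn_image = system_images_registry + "/" + osn_image
    # system containers pulled via docker get a "docker:" transport prefix
    if system_images_registry == "docker":
        return l_osn_image, "docker:" + l_osn_image
    return l_osn_image, l_osn_image

print(resolve_osn_image("docker", "openshift/node"))
# ('openshift/node', 'docker:openshift/node')
print(resolve_osn_image("registry.example.com", "openshift/node"))
# ('registry.example.com/openshift/node', 'registry.example.com/openshift/node')
```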
+ 24 - 0
roles/openshift_node40/files/clean-up-crio-pods.sh

@@ -0,0 +1,24 @@
+#!/bin/bash
+for c in $(runc list -q); do
+        output=$(runc state $c | grep io.kubernetes.cri-o.ContainerType)
+        if [[ "$output" =~ "container" ]]; then
+                runc delete -f $c
+        fi
+        for m in $(mount | grep $c | awk '{print $3}'); do
+                umount -R $m
+        done
+done
+for c in $(runc list -q); do
+        output=$(runc state $c | grep io.kubernetes.cri-o.ContainerType)
+        if [[ "$output" =~ "sandbox" ]]; then
+                runc delete -f $c
+        fi
+        for m in $(mount | grep $c | awk '{print $3}'); do
+                umount -R $m
+        done
+done
+mount | grep overlay | awk '{print $3}' | xargs umount | true
+umount -R /var/lib/containers/storage/overlay
+umount -R /var/lib/containers/storage
+rm -rf /var/run/containers/storage/*
+rm -rf /var/lib/containers/storage/*

+ 128 - 0
roles/openshift_node40/files/networkmanager/99-origin-dns.sh

@@ -0,0 +1,128 @@
+#!/bin/bash -x
+# -*- mode: sh; sh-indentation: 2 -*-
+
+# This NetworkManager dispatcher script replicates the functionality of
+# NetworkManager's dns=dnsmasq  however, rather than hardcoding the listening
+# address and /etc/resolv.conf to 127.0.0.1 it pulls the IP address from the
+# interface that owns the default route. This enables us to then configure pods
+# to use this IP address as their only resolver, where as using 127.0.0.1 inside
+# a pod would fail.
+#
+# To use this,
+# - If this host is also a master, reconfigure master dnsConfig to listen on
+#   8053 to avoid conflicts on port 53 and open port 8053 in the firewall
+# - Drop this script in /etc/NetworkManager/dispatcher.d/
+# - systemctl restart NetworkManager
+#
+# Test it:
+# host kubernetes.default.svc.cluster.local
+# host google.com
+#
+# TODO: I think this would be easy to add as a config option in NetworkManager
+# natively, look at hacking that up
+
+cd /etc/sysconfig/network-scripts
+. ./network-functions
+
+[ -f ../network ] && . ../network
+
+if [[ $2 =~ ^(up|dhcp4-change|dhcp6-change)$ ]]; then
+  # If the origin-upstream-dns config file changed we need to restart
+  NEEDS_RESTART=0
+  UPSTREAM_DNS='/etc/dnsmasq.d/origin-upstream-dns.conf'
+  # We'll regenerate the dnsmasq origin config in a temp file first
+  UPSTREAM_DNS_TMP=`mktemp`
+  UPSTREAM_DNS_TMP_SORTED=`mktemp`
+  CURRENT_UPSTREAM_DNS_SORTED=`mktemp`
+  NEW_RESOLV_CONF=`mktemp`
+  NEW_NODE_RESOLV_CONF=`mktemp`
+
+
+  ######################################################################
+  # couldn't find an existing method to determine if the interface owns the
+  # default route
+  def_route=$(/sbin/ip route list match 0.0.0.0/0 | awk '{print $3 }')
+  def_route_int=$(/sbin/ip route get to ${def_route} | awk -F 'dev' '{print $2}' | head -n1 | awk '{print $1}')
+  def_route_ip=$(/sbin/ip route get to ${def_route}  | awk -F 'src' '{print $2}' | head -n1 | awk '{print $1}')
+  if [[ ${DEVICE_IFACE} == ${def_route_int} ]]; then
+    if [ ! -f /etc/dnsmasq.d/origin-dns.conf ]; then
+      cat << EOF > /etc/dnsmasq.d/origin-dns.conf
+no-resolv
+domain-needed
+server=/cluster.local/172.30.0.1
+server=/30.172.in-addr.arpa/172.30.0.1
+enable-dbus
+dns-forward-max=5000
+cache-size=5000
+min-port=1024
+EOF
+      # New config file, must restart
+      NEEDS_RESTART=1
+    fi
+
+    # If network manager doesn't know about the nameservers then the best
+    # we can do is grab them from /etc/resolv.conf but only if we've got no
+    # watermark
+    if ! grep -q '99-origin-dns.sh' /etc/resolv.conf; then
+      if [[ -z "${IP4_NAMESERVERS}" || "${IP4_NAMESERVERS}" == "${def_route_ip}" ]]; then
+            IP4_NAMESERVERS=`grep '^nameserver[[:blank:]]' /etc/resolv.conf | awk '{ print $2 }'`
+      fi
+      ######################################################################
+      # Write out default nameservers for /etc/dnsmasq.d/origin-upstream-dns.conf
+      # and /etc/origin/node/resolv.conf in their respective formats
+      for ns in ${IP4_NAMESERVERS}; do
+        if [[ ! -z $ns ]]; then
+          echo "server=${ns}" >> $UPSTREAM_DNS_TMP
+          echo "nameserver ${ns}" >> $NEW_NODE_RESOLV_CONF
+        fi
+      done
+      # Sort it in case DNS servers arrived in a different order
+      sort $UPSTREAM_DNS_TMP > $UPSTREAM_DNS_TMP_SORTED
+      sort $UPSTREAM_DNS > $CURRENT_UPSTREAM_DNS_SORTED
+      # Compare to the current config file (sorted)
+      NEW_DNS_SUM=`md5sum ${UPSTREAM_DNS_TMP_SORTED} | awk '{print $1}'`
+      CURRENT_DNS_SUM=`md5sum ${CURRENT_UPSTREAM_DNS_SORTED} | awk '{print $1}'`
+      if [ "${NEW_DNS_SUM}" != "${CURRENT_DNS_SUM}" ]; then
+        # DNS has changed, copy the temp file to the proper location (-Z
+        # sets default selinux context) and set the restart flag
+        cp -Z $UPSTREAM_DNS_TMP $UPSTREAM_DNS
+        NEEDS_RESTART=1
+      fi
+      # compare /etc/origin/node/resolv.conf checksum and replace it if different
+      NEW_NODE_RESOLV_CONF_MD5=`md5sum ${NEW_NODE_RESOLV_CONF}`
+      OLD_NODE_RESOLV_CONF_MD5=`md5sum /etc/origin/node/resolv.conf`
+      if [ "${NEW_NODE_RESOLV_CONF_MD5}" != "${OLD_NODE_RESOLV_CONF_MD5}" ]; then
+        cp -Z $NEW_NODE_RESOLV_CONF /etc/origin/node/resolv.conf
+      fi
+    fi
+
+    if ! `systemctl -q is-active dnsmasq.service`; then
+      NEEDS_RESTART=1
+    fi
+
+    ######################################################################
+    if [ "${NEEDS_RESTART}" -eq "1" ]; then
+      systemctl restart dnsmasq
+    fi
+
+    # Only if dnsmasq is running properly make it our only nameserver and place
+    # a watermark on /etc/resolv.conf
+    if `systemctl -q is-active dnsmasq.service`; then
+      if ! grep -q '99-origin-dns.sh' /etc/resolv.conf; then
+          echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> ${NEW_RESOLV_CONF}
+      fi
+      sed -e '/^nameserver.*$/d' /etc/resolv.conf >> ${NEW_RESOLV_CONF}
+      echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF}
+      if ! grep -qw search ${NEW_RESOLV_CONF}; then
+        echo 'search cluster.local' >> ${NEW_RESOLV_CONF}
+      elif ! grep -q 'search cluster.local' ${NEW_RESOLV_CONF}; then
+        # cluster.local should be in first three DNS names so that glibc resolver would work
+        sed -i -e 's/^search[[:blank:]]\(.\+\)\( cluster\.local\)\{0,1\}$/search cluster.local \1/' ${NEW_RESOLV_CONF}
+      fi
+      cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf
+    fi
+  fi
+
+  # Clean up after yourself
+  rm -f $UPSTREAM_DNS_TMP $UPSTREAM_DNS_TMP_SORTED $CURRENT_UPSTREAM_DNS_SORTED $NEW_RESOLV_CONF
+fi

+ 18 - 0
roles/openshift_node40/files/openshift-node

@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# This launches the Kubelet by converting the node configuration into kube flags.
+
+set -euo pipefail
+
+if ! [[ -f /etc/origin/node/client-ca.crt ]]; then
+  if [[ -f /etc/origin/node/bootstrap.kubeconfig ]]; then
+    oc config --config=/etc/origin/node/bootstrap.kubeconfig view --raw --minify -o go-template='{{ index .clusters 0 "cluster" "certificate-authority-data" }}' | base64 -d - > /etc/origin/node/client-ca.crt
+  fi
+fi
+config=/etc/origin/node/bootstrap-node-config.yaml
+# TODO: remove when dynamic kubelet config is delivered
+if [[ -f /etc/origin/node/node-config.yaml ]]; then
+  config=/etc/origin/node/node-config.yaml
+fi
+flags=$( /usr/bin/openshift-node-config "--config=${config}" )
+eval "exec /usr/bin/hyperkube kubelet --v=${DEBUG_LOGLEVEL:-2} ${flags}"

+ 20 - 0
roles/openshift_node40/handlers/main.yml

@@ -0,0 +1,20 @@
+---
+- name: reload systemd units
+  command: systemctl daemon-reload
+  when:
+  - (not skip_node_svc_handlers | default(False) | bool)
+
+- name: restart NetworkManager
+  systemd:
+    name: NetworkManager
+    state: restarted
+    enabled: True
+  when:
+  - (not skip_node_svc_handlers | default(False) | bool)
+
+- name: restart dnsmasq
+  systemd:
+    name: dnsmasq
+    state: restarted
+  when:
+  - (not skip_node_svc_handlers | default(False) | bool)

+ 17 - 0
roles/openshift_node40/meta/main.yml

@@ -0,0 +1,17 @@
+---
+galaxy_info:
+  author: Jhon Honce
+  description: OpenShift Node
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 2.1
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+dependencies:
+- role: lib_openshift
+- role: lib_utils
+- role: openshift_facts

+ 51 - 0
roles/openshift_node40/tasks/config.yml

@@ -0,0 +1,51 @@
+---
+
+- name: get worker ignition file
+  command: >
+    curl -k {{ openshift_bootstrap_endpoint }}
+  register: l_worker_bootstrap
+
+- debug:
+    var: l_worker_bootstrap.stdout
+
+- name: parse ignition file
+  parse_ignition:
+    ign_file_contents: "{{ l_worker_bootstrap.stdout }}"
+  register: l_parse_ignition_res
+
+- name: Create all the directories we will need
+  command: "mkdir -p {{ item }}"
+  with_items: "{{ l_parse_ignition_res.dir_list }}"
+
+- name: create files from ignition contents
+  copy:
+    content: "{{ item.value.contents }}"
+    dest: "{{ item.key }}"
+    mode: "{{ l_file_mode }}"
+  with_dict: "{{ l_parse_ignition_res.files_dict }}"
+  vars:
+    l_mode_prepend: "{{ '0' if (item.value.mode | length < 4) else '' }}"
+    l_file_mode: "{{ l_mode_prepend ~ item.value.mode }}"
+
+#### Disable SWAP #####
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+# swapoff is a custom module in lib_utils that comments out swap entries in
+# /etc/fstab and runs swapoff -a, if necessary.
+- name: Disable swap
+  swapoff: {}
+  when: openshift_disable_swap | default(true) | bool
+
+# The atomic-openshift-node service will set this parameter on
+# startup, but if the network service is restarted this setting is
+# lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
+- sysctl:
+    name: net.ipv4.ip_forward
+    value: 1
+    sysctl_file: "/etc/sysctl.d/99-openshift.conf"
+    reload: yes
+
+- name: Setting sebool container_manage_cgroup
+  seboolean:
+    name: container_manage_cgroup
+    state: yes
+    persistent: yes

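The l_file_mode vars in the copy task above prepend a single "0" when the mode string returned by parse_ignition is shorter than four characters, so Ansible reads it as an octal file mode. A tiny sketch of that normalization, assuming the mode string already contains octal digits:

```python
def normalize_mode(mode: str) -> str:
    """Prefix one '0' when the mode is shorter than four characters."""
    return "0" + mode if len(mode) < 4 else mode

assert normalize_mode("644") == "0644"   # gains the leading zero
assert normalize_mode("0755") == "0755"  # already four characters, unchanged
```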
+ 10 - 0
roles/openshift_node40/tasks/install.yml

@@ -0,0 +1,10 @@
+---
+
+- name: Install openshift packages
+  package:
+    name: "{{ l_node_packages | join(',') }}"
+  vars:
+    l_node_packages:
+    - "origin-node{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
+    - "origin-clients{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
+    - "origin-hyperkube{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"

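The lib_utils_oo_image_tag_to_rpm_version filter is not part of this diff; as a rough, hedged sketch of its behavior, it strips a leading "v" (and any image-tag release suffix) and, with include_dash=True, prepends a dash so the result can be appended straight onto a package name:

```python
def image_tag_to_rpm_version(version: str, include_dash: bool = False) -> str:
    """Approximation of the lib_utils filter used in the install task above."""
    if version.startswith("v"):
        # "v3.11.0-alpha.0" -> "3.11.0"
        version = version[1:].split("-")[0]
    if include_dash and version and not version.startswith("-"):
        version = "-" + version
    return version

# "origin-node" + image_tag_to_rpm_version("v3.11.0", include_dash=True)
#   -> "origin-node-3.11.0"
# An empty openshift_pkg_version leaves the package name bare: "origin-node".
```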
+ 13 - 0
roles/openshift_node40/tasks/systemd.yml

@@ -0,0 +1,13 @@
+---
+
+- name: daemon reload
+  command: "systemctl daemon-reload"
+
+# Dictionary of key/value pairs, service name: enabled, e.g.:
+# {'kubernetes': "true"}
+- name: Start and enable services
+  systemd:
+    name: "{{ item.key }}"
+    state: "{{ 'restarted' if (item.value | bool) else 'stopped' }}"
+    enabled: "{{ item.value | bool }}"
+  with_dict: "{{ l_parse_ignition_res.systemd_dict }}"

+ 69 - 0
roles/openshift_node40/templates/bootstrap.yml.j2

@@ -0,0 +1,69 @@
+{% raw -%}
+#!/usr/bin/ansible-playbook
+---
+- hosts: localhost
+  gather_facts: yes
+  vars:
+    origin_dns:
+      file: /etc/dnsmasq.d/origin-dns.conf
+      lines:
+      - regex: ^listen-address
+        state: present
+        line: "listen-address={{ ansible_default_ipv4.address }}"
+
+  tasks:
+  - include_vars: openshift_settings.yaml
+
+  - name: set the data for origin_dns
+    lineinfile:
+      create: yes
+      state: "{{ item.state | default('present') }}"
+      insertafter: "{{ item.after | default(omit) }}"
+      path: "{{ origin_dns.file }}"
+      regexp: "{{ item.regex }}"
+      line: "{{ item.line | default(omit)}}"
+    with_items: "{{ origin_dns.lines }}"
+
+  - when:
+    - openshift_node_config_name is defined
+    - openshift_node_config_name != ''
+    block:
+    - name: determine the openshift_service_type
+      stat:
+        path: /etc/sysconfig/atomic-openshift-node
+        get_checksum: false
+        get_attributes: false
+        get_mime: false
+      register: service_type_results
+
+    - name: set openshift_service_type fact based on stat results
+      set_fact:
+        openshift_service_type: "{{ service_type_results.stat.exists | ternary('atomic-openshift', 'origin') }}"
+
+    - name: update the sysconfig to have necessary variables
+      lineinfile:
+        dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
+        line: "{{ item.line }}"
+        regexp: "{{ item.regexp }}"
+      with_items:
+      - line: "BOOTSTRAP_CONFIG_NAME={{ openshift_node_config_name }}"
+        regexp: "^BOOTSTRAP_CONFIG_NAME=.*"
+{% endraw %}
+
+{% if openshift_cloudprovider_kind | default('') == 'aws' %}
+  # need to update aws.conf file if the instance has come up in a new region
+  - name: set up aws.conf
+    block:
+    - name: get current AZ
+      uri:
+        url: http://169.254.169.254/latest/meta-data/placement/availability-zone
+        return_content: yes
+      register: aws_out
+
+    - name: set AZ in aws.conf
+      ini_file:
+        path: /etc/origin/cloudprovider/aws.conf
+        section: Global
+        option: Zone
+        value: "{% raw %}{{ aws_out.content }}{% endraw %}"
+{% endif %}

+ 20 - 0
roles/openshift_node40/templates/multipath.conf.j2

@@ -0,0 +1,20 @@
+# LIO iSCSI
+# TODO: Add env variables for tweaking
+devices {
+        device {
+                vendor "LIO-ORG"
+                user_friendly_names "yes" 
+                path_grouping_policy "failover"
+                path_selector "round-robin 0"
+                failback immediate
+                path_checker "tur"
+                prio "alua"
+                no_path_retry 120
+                rr_weight "uniform"
+        }
+}
+defaults {
+	user_friendly_names yes
+	find_multipaths yes
+}
+

+ 26 - 0
roles/openshift_node40/templates/node.service.j2

@@ -0,0 +1,26 @@
+[Unit]
+Description=OpenShift Node
+After={{ openshift_docker_service_name }}.service
+After=chronyd.service
+After=ntpd.service
+Wants={{ openshift_docker_service_name }}.service
+Documentation=https://github.com/openshift/origin
+Wants=dnsmasq.service
+After=dnsmasq.service
+{% if openshift_use_crio | bool %}Wants=cri-o.service{% endif %}
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node
+ExecStart=/usr/local/bin/openshift-node
+LimitNOFILE=65536
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier={{ openshift_service_type }}-node
+Restart=always
+RestartSec=5s
+TimeoutStartSec=300
+OOMScoreAdjust=-999
+
+[Install]
+WantedBy=multi-user.target

+ 17 - 0
roles/openshift_node40/templates/origin-dns.conf.j2

@@ -0,0 +1,17 @@
+no-resolv
+domain-needed
+{% if openshift_node_dnsmasq_no_negcache %}
+no-negcache
+{% else %}
+neg-ttl={{ openshift_node_dnsmasq_neg_ttl }}
+{% endif %}
+max-cache-ttl=1
+enable-dbus
+dns-forward-max=10000
+cache-size=10000
+bind-dynamic
+min-port=1024
+{% for interface in openshift_node_dnsmasq_except_interfaces %}
+except-interface={{ interface }}
+{% endfor %}
+# End of config

+ 1 - 11
roles/openshift_version/tasks/first_master.yml

@@ -1,15 +1,5 @@
 ---
-# Determine the openshift_version to configure if none has been specified or set previously.
-
-# Protect the installed version by default unless explicitly told not to, or given an
-# openshift_version already.
-- name: Use openshift_current_version fact as version to configure if already installed
-  set_fact:
-    openshift_version: "{{ openshift_current_version }}"
-  when:
-  - openshift_current_version is defined
-  - openshift_version is not defined or openshift_version == ""
-  - openshift_protect_installed_version | bool
+# Determine the openshift_version
 
 - name: Set openshift_version to openshift_release if undefined
   set_fact: