
Add new openshift_control_plane and openshift_sdn roles

This is a replacement for the openshift_master role. The openshift_sdn role
serves two purposes: managing the network (kube-proxy, DNS, and the SDN if
enabled) and temporarily running a sync loop.
Clayton Coleman, 7 years ago
Parent commit: b17728d542
36 changed files with 1,898 additions and 10 deletions
  1. playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml (+5 -5)
  2. playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml (+5 -5)
  3. roles/openshift_control_plane/README.md (+50 -0)
  4. roles/openshift_control_plane/defaults/main.yml (+152 -0)
  5. roles/openshift_control_plane/files/apiserver.yaml (+48 -0)
  6. roles/openshift_control_plane/files/controller.yaml (+45 -0)
  7. roles/openshift_control_plane/files/scripts/docker/master-logs (+16 -0)
  8. roles/openshift_control_plane/files/scripts/docker/master-restart (+14 -0)
  9. roles/openshift_control_plane/handlers/main.yml (+27 -0)
  10. roles/openshift_control_plane/meta/main.yml (+17 -0)
  11. roles/openshift_control_plane/tasks/bootstrap.yml (+15 -0)
  12. roles/openshift_control_plane/tasks/configure_external_etcd.yml (+17 -0)
  13. roles/openshift_control_plane/tasks/firewall.yml (+44 -0)
  14. roles/openshift_control_plane/tasks/journald.yml (+29 -0)
  15. roles/openshift_control_plane/tasks/main.yml (+224 -0)
  16. roles/openshift_control_plane/tasks/registry_auth.yml (+50 -0)
  17. roles/openshift_control_plane/tasks/restart.yml (+25 -0)
  18. roles/openshift_control_plane/tasks/set_loopback_context.yml (+34 -0)
  19. roles/openshift_control_plane/tasks/static.yml (+63 -0)
  20. roles/openshift_control_plane/tasks/static_shim.yml (+10 -0)
  21. roles/openshift_control_plane/tasks/update_etcd_client_urls.yml (+7 -0)
  22. roles/openshift_control_plane/tasks/upgrade.yml (+45 -0)
  23. roles/openshift_control_plane/tasks/upgrade/rpm_upgrade.yml (+36 -0)
  24. roles/openshift_control_plane/tasks/upgrade/upgrade_scheduler.yml (+175 -0)
  25. roles/openshift_control_plane/tasks/upgrade_facts.yml (+37 -0)
  26. roles/openshift_control_plane/templates/htpasswd.j2 (+5 -0)
  27. roles/openshift_control_plane/templates/master.env.j2 (+16 -0)
  28. roles/openshift_control_plane/templates/master.yaml.v1.j2 (+232 -0)
  29. roles/openshift_control_plane/templates/sessionSecretsFile.yaml.v1.j2 (+7 -0)
  30. roles/openshift_sdn/defaults/main.yml (+5 -0)
  31. roles/openshift_sdn/files/sdn-images.yaml (+9 -0)
  32. roles/openshift_sdn/files/sdn-ovs.yaml (+83 -0)
  33. roles/openshift_sdn/files/sdn-policy.yaml (+29 -0)
  34. roles/openshift_sdn/files/sdn.yaml (+251 -0)
  35. roles/openshift_sdn/meta/main.yaml (+19 -0)
  36. roles/openshift_sdn/tasks/main.yml (+52 -0)

+ 5 - 5
playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml

@@ -9,11 +9,11 @@
 - name: Restart containerized services
   service: name={{ item }} state=started
   with_items:
-    - etcd_container
-    - openvswitch
-    - "{{ openshift_service_type }}-master-api"
-    - "{{ openshift_service_type }}-master-controllers"
-    - "{{ openshift_service_type }}-node"
+  - etcd_container
+  - openvswitch
+  - "{{ openshift_service_type }}-master-api"
+  - "{{ openshift_service_type }}-master-controllers"
+  - "{{ openshift_service_type }}-node"
   failed_when: false
   when: openshift_is_containerized | bool
 

+ 5 - 5
playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml

@@ -4,11 +4,11 @@
 - name: Stop containerized services
   service: name={{ item }} state=stopped
   with_items:
-    - "{{ openshift_service_type }}-master-api"
-    - "{{ openshift_service_type }}-master-controllers"
-    - "{{ openshift_service_type }}-node"
-    - etcd_container
-    - openvswitch
+  - "{{ openshift_service_type }}-master-api"
+  - "{{ openshift_service_type }}-master-controllers"
+  - "{{ openshift_service_type }}-node"
+  - etcd_container
+  - openvswitch
   failed_when: false
   when: openshift_is_containerized | bool
 

+ 50 - 0
roles/openshift_control_plane/README.md

@@ -0,0 +1,50 @@
+OpenShift Control Plane
+==================================
+
+Installs the services that comprise the OpenShift control plane onto nodes that are preconfigured for
+bootstrapping.
+
+Requirements
+------------
+
+* Ansible 2.2
+* A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
+rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos.
+
+Role Variables
+--------------
+
+From this role:
+
+| Name                                              | Default value         | Description                                                                   |
+|---------------------------------------------------|-----------------------|-------------------------------------------------------------------------------|
+| openshift_node_ips                                | []                    | List of the openshift node ip addresses to pre-register when master starts up |
+| oreg_url                                          | UNDEF                 | Default docker registry to use                                                |
+| oreg_url_master                                   | UNDEF                 | Default docker registry to use, specifically on the master                    |
+| openshift_master_api_port                         | UNDEF                 |                                                                               |
+| openshift_master_console_port                     | UNDEF                 |                                                                               |
+| openshift_master_api_url                          | UNDEF                 |                                                                               |
+| openshift_master_console_url                      | UNDEF                 |                                                                               |
+| openshift_master_public_api_url                   | UNDEF                 |                                                                               |
+| openshift_master_public_console_url               | UNDEF                 |                                                                               |
+| openshift_master_saconfig_limit_secret_references | false                 |                                                                               |
+
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+TODO
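
A minimal sketch of applying the role while the example above remains TODO; the host group and variable values here are illustrative, not part of this commit:

    - hosts: masters
      roles:
      - role: openshift_control_plane
        openshift_master_api_port: 8443
        openshift_master_console_port: 8443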

+ 152 - 0
roles/openshift_control_plane/defaults/main.yml

@@ -0,0 +1,152 @@
+---
+# openshift_master_defaults_in_use is a workaround to detect whether we are
+# consuming the plays from within the role or from outside of it.
+openshift_master_defaults_in_use: True
+openshift_master_debug_level: "{{ debug_level | default(2) }}"
+
+r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+osm_image_default_dict:
+  origin: 'openshift/origin'
+  openshift-enterprise: 'openshift3/ose'
+osm_image_default: "{{ osm_image_default_dict[openshift_deployment_type] }}"
+osm_image: "{{ osm_image_default }}"
+
+l_openshift_master_images_dict:
+  origin: 'openshift/origin-${component}:${version}'
+  openshift-enterprise: 'openshift3/ose-${component}:${version}'
+l_osm_registry_url_default: "{{ l_openshift_master_images_dict[openshift_deployment_type] }}"
+l_osm_registry_url: "{{ oreg_url_master | default(oreg_url) | default(l_osm_registry_url_default) }}"
+
+system_images_registry_dict:
+  openshift-enterprise: "registry.access.redhat.com"
+  origin: "docker.io"
+
+system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
+
+l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
+openshift_master_dns_port: 8053
+osm_default_node_selector: ''
+osm_project_request_template: ''
+osm_mcs_allocator_range: 's0:/2'
+osm_mcs_labels_per_project: 5
+osm_uid_allocator_range: '1000000000-1999999999/10000'
+osm_project_request_message: ''
+
+openshift_node_ips: []
+r_openshift_master_clean_install: false
+r_openshift_master_os_firewall_enable: true
+r_openshift_master_os_firewall_deny: []
+default_r_openshift_master_os_firewall_allow:
+- service: api server https
+  port: "{{ openshift.master.api_port }}/tcp"
+- service: api controllers https
+  port: "{{ openshift.master.controllers_port }}/tcp"
+- service: skydns tcp
+  port: "{{ openshift_master_dns_port }}/tcp"
+- service: skydns udp
+  port: "{{ openshift_master_dns_port }}/udp"
+- service: etcd embedded
+  port: 4001/tcp
+  cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+r_openshift_master_os_firewall_allow: "{{ default_r_openshift_master_os_firewall_allow | union(openshift_master_open_ports | default([])) }}"
+
+# oreg_url is defined by user input
+oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
+oreg_auth_credentials_replace: False
+l_bind_docker_reg_auth: False
+openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False) | bool) or (openshift_use_crio_only | default(False)) }}"
+
+containerized_svc_dir: "/usr/lib/systemd/system"
+ha_svc_template_path: "native-cluster"
+
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
+
+openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
+loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
+openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
+openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
+
+scheduler_config:
+  kind: Policy
+  apiVersion: v1
+  predicates: "{{ openshift_master_scheduler_predicates
+                  | default(openshift_master_scheduler_current_predicates
+                            | default(openshift_master_scheduler_default_predicates)) }}"
+  priorities: "{{ openshift_master_scheduler_priorities
+                  | default(openshift_master_scheduler_current_priorities
+                            | default(openshift_master_scheduler_default_priorities)) }}"
+
+openshift_master_valid_grant_methods:
+- auto
+- prompt
+- deny
+
+openshift_master_is_scaleup_host: False
+
+# openshift_master_oauth_template is deprecated.  Should be added to deprecations
+# and removed.
+openshift_master_oauth_template: False
+openshift_master_oauth_templates_default:
+  login: "{{ openshift_master_oauth_template }}"
+openshift_master_oauth_templates: "{{ openshift_master_oauth_template | ternary(openshift_master_oauth_templates_default, False) }}"
+# Here we combine openshift_master_oauth_template into the 'login' key of openshift_master_oauth_templates, if not already present.
+l_openshift_master_oauth_templates: "{{ openshift_master_oauth_templates | default(openshift_master_oauth_templates_default) }}"
+
+# These defaults assume forcing journald persistence, fsync to disk once
+# a second, rate-limiting to 10,000 logs a second, no forwarding to
+# syslog or wall, using 8GB of disk space maximum, using 10MB journal
+# files, keeping only a day's worth of logs per journal file, and
+# retaining journal files no longer than a month.
+journald_vars_to_replace:
+- { var: Storage, val: persistent }
+- { var: Compress, val: yes }
+- { var: SyncIntervalSec, val: 1s }
+- { var: RateLimitInterval, val: 1s }
+- { var: RateLimitBurst, val: 10000 }
+- { var: SystemMaxUse, val: 8G }
+- { var: SystemKeepFree, val: 20% }
+- { var: SystemMaxFileSize, val: 10M }
+- { var: MaxRetentionSec, val: 1month }
+- { var: MaxFileSec, val: 1day }
+- { var: ForwardToSyslog, val: no }
+- { var: ForwardToWall, val: no }
+
+
+# NOTE
+# r_openshift_master_*_default may be defined external to this role.
+# openshift_use_*, if defined, may affect other roles or play behavior.
+r_openshift_master_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}"
+r_openshift_master_use_openshift_sdn: "{{ r_openshift_master_use_openshift_sdn_default }}"
+
+r_openshift_master_use_nuage_default: "{{ openshift_use_nuage | default(False) }}"
+r_openshift_master_use_nuage: "{{ r_openshift_master_use_nuage_default }}"
+
+r_openshift_master_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
+r_openshift_master_use_contiv: "{{ r_openshift_master_use_contiv_default }}"
+
+r_openshift_master_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}"
+r_openshift_master_use_kuryr: "{{ r_openshift_master_use_kuryr_default }}"
+
+r_openshift_master_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}"
+
+r_openshift_master_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"
+r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_plugin_name_default }}"
+
+openshift_master_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
+openshift_master_image_config_latest: "{{ openshift_master_image_config_latest_default }}"
+
+openshift_master_config_dir_default: "{{ openshift.common.config_base ~ '/master' if openshift is defined and 'common' in openshift else '/etc/origin/master' }}"
+openshift_master_config_dir: "{{ openshift_master_config_dir_default }}"
+
+openshift_master_bootstrap_enabled: False
+
+openshift_master_csr_sa: node-bootstrapper
+openshift_master_csr_namespace: openshift-infra
+
+openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
+openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
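
Because r_openshift_master_os_firewall_allow above is the union of the defaults and openshift_master_open_ports, additional ports can be opened from the inventory without restating the defaults. A hypothetical inventory snippet (the service name and port are made up for illustration):

    openshift_master_open_ports:
    - service: custom metrics
      port: 9090/tcp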

+ 48 - 0
roles/openshift_control_plane/files/apiserver.yaml

@@ -0,0 +1,48 @@
+kind: Pod
+apiVersion: v1
+metadata:
+  name: master-api
+  namespace: kube-system
+  labels:
+    openshift.io/control-plane: "true"
+    openshift.io/component: api
+spec:
+  restartPolicy: Always
+  hostNetwork: true
+  containers:
+  - name: api
+    image: openshift/origin:v3.9.0-alpha.4
+    command: ["/bin/bash", "-c"]
+    args:
+    - |
+      #!/bin/bash
+      set -euo pipefail
+      if [[ -f /etc/origin/master/master.env ]]; then
+        set -o allexport
+        source /etc/origin/master/master.env
+      fi
+      exec openshift start master api --config=/etc/origin/master/master-config.yaml
+    securityContext:
+      privileged: true
+    volumeMounts:
+     - mountPath: /etc/origin/master/
+       name: master-config
+     - mountPath: /etc/origin/cloudprovider/
+       name: master-cloud-provider
+     - mountPath: /var/lib/origin/
+       name: master-data
+    livenessProbe:
+      httpGet:
+        scheme: HTTPS
+        port: 8443
+        path: healthz
+  volumes:
+  - name: master-config
+    hostPath:
+      path: /etc/origin/master/
+  - name: master-cloud-provider
+    hostPath:
+      path: /etc/origin/cloudprovider
+  - name: master-data
+    hostPath:
+      path: /var/lib/origin

+ 45 - 0
roles/openshift_control_plane/files/controller.yaml

@@ -0,0 +1,45 @@
+kind: Pod
+apiVersion: v1
+metadata:
+  name: master-controllers
+  namespace: kube-system
+  labels:
+    openshift.io/control-plane: "true"
+    openshift.io/component: controllers
+spec:
+  restartPolicy: Always
+  hostNetwork: true
+  containers:
+  - name: controllers
+    image: openshift/origin:v3.9.0-alpha.4
+    command: ["/bin/bash", "-c"]
+    args:
+    - |
+      #!/bin/bash
+      set -euo pipefail
+      if [[ -f /etc/origin/master/master.env ]]; then
+        set -o allexport
+        source /etc/origin/master/master.env
+      fi
+      exec openshift start master controllers --config=/etc/origin/master/master-config.yaml --listen=https://0.0.0.0:8444
+    securityContext:
+      privileged: true
+    volumeMounts:
+     - mountPath: /etc/origin/master/
+       name: master-config
+     - mountPath: /etc/origin/cloudprovider/
+       name: master-cloud-provider
+    livenessProbe:
+      httpGet:
+        scheme: HTTPS
+        port: 8444
+        path: healthz
+  # second controllers container would be started here
+  # scheduler container started here
+  volumes:
+  - name: master-config
+    hostPath:
+      path: /etc/origin/master/
+  - name: master-cloud-provider
+    hostPath:
+      path: /etc/origin/cloudprovider

+ 16 - 0
roles/openshift_control_plane/files/scripts/docker/master-logs

@@ -0,0 +1,16 @@
+#!/bin/bash
+set -euo pipefail
+
+# Return the logs for a given static pod by component name and container name. Remaining arguments are passed to the
+# current container runtime.
+if [[ -z "${1-}" || -z "${2-}" ]]; then
+  echo "A component name like 'api', 'etcd', or 'controllers' must be specified along with the container name within that component." 1>&2
+  exit 1
+fi
+uid=$(docker ps -l -a --filter "label=openshift.io/component=${1}" --filter "label=io.kubernetes.container.name=POD" --format '{{ .Label "io.kubernetes.pod.uid" }}')
+if [[ -z "${uid}" ]]; then
+  echo "Component ${1} is stopped or not running" 1>&2
+  exit 0
+fi
+container=$(docker ps -l -a -q --filter "label=io.kubernetes.pod.uid=${uid}" --filter "label=io.kubernetes.container.name=${2}")
+exec docker logs "${@:3}" "${container}"
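
As a usage sketch for the script above: everything after the component and container names is passed straight through to docker logs, so tailing and following the API server container might look like this (the flags are standard docker logs options):

    master-logs api api --tail=100 -f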

+ 14 - 0
roles/openshift_control_plane/files/scripts/docker/master-restart

@@ -0,0 +1,14 @@
+#!/bin/bash
+set -euo pipefail
+
+# Restart the named component by stopping its base container.
+if [[ -z "${1-}" ]]; then
+  echo "A component name like 'api', 'etcd', or 'controllers' must be specified." 1>&2
+  exit 1
+fi
+container=$(docker ps -l -q --filter "label=openshift.io/component=${1}" --filter "label=io.kubernetes.container.name=POD")
+if [[ -z "${container}" ]]; then
+  echo "Component ${1} is already stopped" 1>&2
+  exit 0
+fi
+exec docker stop "${container}" --time 30 >/dev/null
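
Usage mirrors master-logs: stopping the pause (POD) container lets the kubelet recreate the static pod, since the pod manifests in this commit set restartPolicy: Always. For example:

    master-restart api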

+ 27 - 0
roles/openshift_control_plane/handlers/main.yml

@@ -0,0 +1,27 @@
+---
+- name: restart master
+  command: /usr/bin/master-restart "{{ item }}"
+  with_items:
+  - api
+  - controllers
+  when:
+  - not (master_api_service_status_changed | default(false) | bool)
+  notify:
+  - verify API server
+
+- name: verify API server
+  # Using curl here since the uri module requires python-httplib2 and
+  # wait_for port doesn't provide health information.
+  command: >
+    curl --silent --tlsv1.2
+    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+    {{ openshift.master.api_url }}/healthz/ready
+  args:
+    # Disables the following warning:
+    # Consider using get_url or uri module rather than running curl
+    warn: no
+  register: l_api_available_output
+  until: l_api_available_output.stdout == 'ok'
+  retries: 120
+  delay: 1
+  changed_when: false

+ 17 - 0
roles/openshift_control_plane/meta/main.yml

@@ -0,0 +1,17 @@
+---
+galaxy_info:
+  author: Clayton Coleman
+  description: OpenShift Control Plane
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 2.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+dependencies:
+- role: lib_openshift
+- role: lib_utils
+- role: openshift_facts

+ 15 - 0
roles/openshift_control_plane/tasks/bootstrap.yml

@@ -0,0 +1,15 @@
+---
+# TODO: create a module for this command.
+# oc_serviceaccounts_kubeconfig
+- name: create service account kubeconfig with csr rights
+  command: >
+    oc serviceaccounts create-kubeconfig {{ openshift_master_csr_sa }} -n {{ openshift_master_csr_namespace }}
+  register: kubeconfig_out
+  until: kubeconfig_out.rc == 0
+  retries: 24
+  delay: 5
+
+- name: put service account kubeconfig into a file on disk for bootstrap
+  copy:
+    content: "{{ kubeconfig_out.stdout }}"
+    dest: "{{ openshift_master_config_dir }}/bootstrap.kubeconfig"

+ 17 - 0
roles/openshift_control_plane/tasks/configure_external_etcd.yml

@@ -0,0 +1,17 @@
+---
+- name: Remove etcdConfig section
+  yedit:
+    src: /etc/origin/master/master-config.yaml
+    key: "etcdConfig"
+    state: absent
+- name: Set etcdClientInfo.ca to master.etcd-ca.crt
+  yedit:
+    src: /etc/origin/master/master-config.yaml
+    key: etcdClientInfo.ca
+    value: master.etcd-ca.crt
+- name: Set etcdClientInfo.urls to the external etcd
+  yedit:
+    src: /etc/origin/master/master-config.yaml
+    key: etcdClientInfo.urls
+    value:
+      - "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"

+ 44 - 0
roles/openshift_control_plane/tasks/firewall.yml

@@ -0,0 +1,44 @@
+---
+- when: r_openshift_master_firewall_enabled | bool and not r_openshift_master_use_firewalld | bool
+  block:
+  - name: Add iptables allow rules
+    os_firewall_manage_iptables:
+      name: "{{ item.service }}"
+      action: add
+      protocol: "{{ item.port.split('/')[1] }}"
+      port: "{{ item.port.split('/')[0] }}"
+    when:
+    - item.cond | default(True)
+    with_items: "{{ r_openshift_master_os_firewall_allow }}"
+
+  - name: Remove iptables rules
+    os_firewall_manage_iptables:
+      name: "{{ item.service }}"
+      action: remove
+      protocol: "{{ item.port.split('/')[1] }}"
+      port: "{{ item.port.split('/')[0] }}"
+    when:
+    - item.cond | default(True)
+    with_items: "{{ r_openshift_master_os_firewall_deny }}"
+
+- when: r_openshift_master_firewall_enabled | bool and r_openshift_master_use_firewalld | bool
+  block:
+  - name: Add firewalld allow rules
+    firewalld:
+      port: "{{ item.port }}"
+      permanent: true
+      immediate: true
+      state: enabled
+    when:
+    - item.cond | default(True)
+    with_items: "{{ r_openshift_master_os_firewall_allow }}"
+
+  - name: Remove firewalld allow rules
+    firewalld:
+      port: "{{ item.port }}"
+      permanent: true
+      immediate: true
+      state: disabled
+    when:
+    - item.cond | default(True)
+    with_items: "{{ r_openshift_master_os_firewall_deny }}"

+ 29 - 0
roles/openshift_control_plane/tasks/journald.yml

@@ -0,0 +1,29 @@
+---
+- name: Checking for journald.conf
+  stat: path=/etc/systemd/journald.conf
+  register: journald_conf_file
+
+- name: Create journald persistence directories
+  file:
+    path: /var/log/journal
+    state: directory
+
+- name: Update journald setup
+  replace:
+    dest: /etc/systemd/journald.conf
+    regexp: '^(\#| )?{{ item.var }}=\s*.*?$'
+    replace: ' {{ item.var }}={{ item.val }}'
+    backup: yes
+  with_items: "{{ journald_vars_to_replace | default([]) }}"
+  when: journald_conf_file.stat.exists
+  register: journald_update
+
+# We need to restart journald immediately, otherwise it gets in the way of
+# further steps in Ansible.
+- name: Restart journald
+  command: "systemctl restart systemd-journald"
+  retries: 3
+  delay: 5
+  register: result
+  until: result.rc == 0
+  when: journald_update is changed
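
A concrete example of the replace task above, using the role defaults against a stock /etc/systemd/journald.conf (the leading space in the result comes from the replacement template, and a backup of the original file is kept):

    # original line
    #Storage=auto
    # rewritten line
     Storage=persistent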

+ 224 - 0
roles/openshift_control_plane/tasks/main.yml

@@ -0,0 +1,224 @@
+---
+# TODO: add ability to configure certificates given either a local file to
+#       point to or certificate contents, set in default cert locations.
+
+# Authentication Variable Validation
+# TODO: validate the different identity provider kinds as well
+- fail:
+    msg: >
+      Invalid OAuth grant method: {{ openshift_master_oauth_grant_method }}
+  when:
+  - openshift_master_oauth_grant_method is defined
+  - openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
+
+- name: Open up firewall ports
+  import_tasks: firewall.yml
+
+- name: Create r_openshift_master_data_dir
+  file:
+    path: "{{ r_openshift_master_data_dir }}"
+    state: directory
+    mode: 0755
+    owner: root
+    group: root
+
+- name: Create config parent directory if it does not exist
+  file:
+    path: "{{ openshift_master_config_dir }}"
+    state: directory
+
+- name: Create the policy file if it does not already exist
+  command: >
+    {{ openshift_client_binary }} adm create-bootstrap-policy-file
+      --filename={{ openshift_master_policy }}
+  args:
+    creates: "{{ openshift_master_policy }}"
+
+- name: Create the scheduler config
+  copy:
+    content: "{{ scheduler_config | to_nice_json }}"
+    dest: "{{ openshift_master_scheduler_conf }}"
+    backup: true
+
+- name: Install httpd-tools if needed
+  package: name=httpd-tools state=present
+  when:
+  - item.kind == 'HTPasswdPasswordIdentityProvider'
+  - not openshift_is_atomic | bool
+  with_items: "{{ openshift_master_identity_providers }}"
+  register: result
+  until: result is succeeded
+
+- name: Ensure htpasswd directory exists
+  file:
+    path: "{{ item.filename | dirname }}"
+    state: directory
+  when:
+  - item.kind == 'HTPasswdPasswordIdentityProvider'
+  with_items: "{{ openshift_master_identity_providers }}"
+
+- name: Create the htpasswd file if needed
+  template:
+    dest: "{{ item.filename }}"
+    src: htpasswd.j2
+    backup: yes
+    mode: 0600
+  when:
+  - item.kind == 'HTPasswdPasswordIdentityProvider'
+  - openshift.master.manage_htpasswd | bool
+  with_items: "{{ openshift_master_identity_providers }}"
+
+- name: Ensure htpasswd file exists
+  copy:
+    dest: "{{ item.filename }}"
+    force: no
+    content: ""
+    mode: 0600
+  when:
+  - item.kind == 'HTPasswdPasswordIdentityProvider'
+  with_items: "{{ openshift_master_identity_providers }}"
+
+- name: Create the ldap ca file if needed
+  copy:
+    dest: "{{ item.ca if 'ca' in item and '/' in item.ca else openshift_master_config_dir ~ '/' ~ item.ca | default('ldap_ca.crt') }}"
+    content: "{{ openshift.master.ldap_ca }}"
+    mode: 0600
+    backup: yes
+  when:
+  - openshift.master.ldap_ca is defined
+  - item.kind == 'LDAPPasswordIdentityProvider'
+  with_items: "{{ openshift_master_identity_providers }}"
+
+- name: Create the openid ca file if needed
+  copy:
+    dest: "{{ item.ca if 'ca' in item and '/' in item.ca else openshift_master_config_dir ~ '/' ~ item.ca | default('openid_ca.crt') }}"
+    content: "{{ openshift.master.openid_ca }}"
+    mode: 0600
+    backup: yes
+  when:
+  - openshift.master.openid_ca is defined
+  - item.kind == 'OpenIDIdentityProvider'
+  - item.ca | default('') != ''
+  with_items: "{{ openshift_master_identity_providers }}"
+
+- name: Create the request header ca file if needed
+  copy:
+    dest: "{{ item.clientCA if 'clientCA' in item and '/' in item.clientCA else openshift_master_config_dir ~ '/' ~ item.clientCA | default('request_header_ca.crt') }}"
+    content: "{{ openshift.master.request_header_ca }}"
+    mode: 0600
+    backup: yes
+  when:
+  - openshift.master.request_header_ca is defined
+  - item.kind == 'RequestHeaderIdentityProvider'
+  - item.clientCA | default('') != ''
+  with_items: "{{ openshift_master_identity_providers }}"
+
+- name: Set fact of all etcd host IPs
+  openshift_facts:
+    role: common
+    local_facts:
+      no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
+
+- name: Update journald config
+  include_tasks: journald.yml
+
+- name: Create session secrets file
+  template:
+    dest: "{{ openshift.master.session_secrets_file }}"
+    src: sessionSecretsFile.yaml.v1.j2
+    owner: root
+    group: root
+    mode: 0600
+  when:
+  - openshift.master.session_auth_secrets is defined
+  - openshift.master.session_encryption_secrets is defined
+
+- set_fact:
+    # translate_idps is a custom filter in role lib_utils
+    translated_identity_providers: "{{ openshift_master_identity_providers | translate_idps('v1') }}"
+
+# TODO: add the validate parameter when there is a validation command to run
+- name: Create master config
+  template:
+    dest: "{{ openshift_master_config_file }}"
+    src: master.yaml.v1.j2
+    backup: true
+    owner: root
+    group: root
+    mode: 0600
+
+- include_tasks: set_loopback_context.yml
+
+- name: Create the master service env file
+  template:
+    src: "master.env.j2"
+    dest: /etc/origin/master/master.env
+    backup: true
+
+- include_tasks: static.yml
+
+- name: Start and enable self-hosting node
+  systemd:
+    name: "{{ openshift_service_type }}-node"
+    state: restarted
+    enabled: yes
+
+- name: Verify that the control plane is running
+  command: >
+    curl -k {{ openshift.master.api_url }}/healthz
+  args:
+    # Disables the following warning:
+    # Consider using get_url or uri module rather than running curl
+    warn: no
+  register: control_plane_health
+  until: control_plane_health.stdout == 'ok'
+  retries: 60
+  delay: 5
+  changed_when: false
+  # Ignore errors so we can log troubleshooting info on failures.
+  ignore_errors: yes
+
+# Capture debug output here to simplify triage
+- when: control_plane_health.stdout != 'ok'
+  block:
+  - name: Check status in the kube-system namespace
+    command: >
+      {{ openshift_client_binary }} status --config=/etc/origin/master/admin.kubeconfig -n kube-system
+    register: control_plane_status
+    ignore_errors: true
+  - debug:
+      msg: "{{ control_plane_status.stdout_lines }}"
+  - name: Get pods in the kube-system namespace
+    command: >
+      {{ openshift_client_binary }} get pods --config=/etc/origin/master/admin.kubeconfig -n kube-system -o wide
+    register: control_plane_pods
+    ignore_errors: true
+  - debug:
+      msg: "{{ control_plane_pods.stdout_lines }}"
+  - name: Get events in the kube-system namespace
+    command: >
+      {{ openshift_client_binary }} get events --config=/etc/origin/master/admin.kubeconfig -n kube-system
+    register: control_plane_events
+    ignore_errors: true
+  - debug:
+      msg: "{{ control_plane_events.stdout_lines }}"
+  - name: Get API logs
+    command: >
+      /usr/local/bin/master-logs api api
+    register: control_plane_logs_api
+    ignore_errors: true
+  - debug:
+      msg: "{{ control_plane_logs_api.stdout_lines }}"
+  - name: Get node logs
+    command: journalctl --no-pager -n 300 -u {{ openshift_service_type }}-node
+    register: control_plane_logs_node
+    ignore_errors: true
+  - debug:
+      msg: "{{ control_plane_logs_node.stdout_lines }}"
+
+- name: Report control plane errors
+  fail:
+    msg: Control plane install failed.
+  when: control_plane_health.stdout != 'ok'
+
+- include_tasks: bootstrap.yml

+ 50 - 0
roles/openshift_control_plane/tasks/registry_auth.yml

@@ -0,0 +1,50 @@
+---
+- name: Check for credentials file for registry auth
+  stat:
+    path: "{{ oreg_auth_credentials_path }}"
+  when: oreg_auth_user is defined
+  register: master_oreg_auth_credentials_stat
+
+- name: Create credentials for registry auth
+  command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+  when:
+  - not (openshift_docker_alternative_creds | default(False))
+  - oreg_auth_user is defined
+  - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+  register: master_oreg_auth_credentials_create
+  retries: 3
+  delay: 5
+  until: master_oreg_auth_credentials_create.rc == 0
+  notify:
+  - restart master
+
+# docker_creds is a custom module from lib_utils
+# 'docker login' requires a docker.service running on the local host, this is an
+# alternative implementation for non-docker hosts.  This implementation does not
+# check the registry to determine whether or not the credentials will work.
+- name: Create credentials for registry auth (alternative)
+  docker_creds:
+    path: "{{ oreg_auth_credentials_path }}"
+    registry: "{{ oreg_host }}"
+    username: "{{ oreg_auth_user }}"
+    password: "{{ oreg_auth_password }}"
+  when:
+  - openshift_docker_alternative_creds | default(False) | bool
+  - oreg_auth_user is defined
+  - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+  register: master_oreg_auth_credentials_create_alt
+  notify:
+  - restart master
+
+# Container images may need the registry credentials
+- name: Setup ro mount of /root/.docker for containerized hosts
+  set_fact:
+    l_bind_docker_reg_auth: True
+  when:
+  - openshift_is_containerized | bool
+  - oreg_auth_user is defined
+  - >
+      (master_oreg_auth_credentials_stat.stat.exists
+      or oreg_auth_credentials_replace
+      or master_oreg_auth_credentials_create.changed
+      or master_oreg_auth_credentials_create_alt.changed) | bool
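
Both code paths above end up writing Docker-format credentials under oreg_auth_credentials_path; for reference, that file conventionally has this shape (the registry host and base64-encoded user:password are placeholders):

    {
      "auths": {
        "registry.example.com": {
          "auth": "dXNlcjpwYXNzd29yZA=="
        }
      }
    }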

+ 25 - 0
roles/openshift_control_plane/tasks/restart.yml

@@ -0,0 +1,25 @@
+---
+- name: restart master
+  command: /usr/bin/master-restart "{{ item }}"
+  with_items:
+  - api
+  - controllers
+  notify:
+  - verify API server
+
+- name: verify API server
+  # Using curl here since the uri module requires python-httplib2 and
+  # wait_for port doesn't provide health information.
+  command: >
+    curl --silent --tlsv1.2
+    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+    {{ openshift.master.api_url }}/healthz/ready
+  args:
+    # Disables the following warning:
+    # Consider using get_url or uri module rather than running curl
+    warn: no
+  register: l_api_available_output
+  until: l_api_available_output.stdout == 'ok'
+  retries: 120
+  delay: 1
+  changed_when: false

+ 34 - 0
roles/openshift_control_plane/tasks/set_loopback_context.yml

@@ -0,0 +1,34 @@
+---
+- name: Test local loopback context
+  command: >
+    {{ openshift_client_binary }} config view
+    --config={{ openshift_master_loopback_config }}
+  changed_when: false
+  register: l_loopback_config
+
+- command: >
+    {{ openshift_client_binary }} config set-cluster
+    --certificate-authority={{ openshift_master_config_dir }}/ca.crt
+    --embed-certs=true --server={{ openshift.master.loopback_api_url }}
+    {{ openshift.master.loopback_cluster_name }}
+    --config={{ openshift_master_loopback_config }}
+  when:
+  - loopback_context_string not in l_loopback_config.stdout
+  register: set_loopback_cluster
+
+- command: >
+    {{ openshift_client_binary }} config set-context
+    --cluster={{ openshift.master.loopback_cluster_name }}
+    --namespace=default --user={{ openshift.master.loopback_user }}
+    {{ openshift.master.loopback_context_name }}
+    --config={{ openshift_master_loopback_config }}
+  when:
+  - set_loopback_cluster is changed
+  register: l_set_loopback_context
+
+- command: >
+    {{ openshift_client_binary }} config use-context {{ openshift.master.loopback_context_name }}
+    --config={{ openshift_master_loopback_config }}
+  when:
+  - l_set_loopback_context is changed
+  register: set_current_context

+ 63 - 0
roles/openshift_control_plane/tasks/static.yml

@@ -0,0 +1,63 @@
+---
+- name: Enable bootstrapping in the master config
+  yedit:
+    src: /etc/origin/master/master-config.yaml
+    edits:
+    - key: kubernetesMasterConfig.controllerArguments.cluster-signing-cert-file
+      value:
+      - /etc/origin/master/ca.crt
+    - key: kubernetesMasterConfig.controllerArguments.cluster-signing-key-file
+      value:
+      - /etc/origin/master/ca.key
+
+- name: Create temp directory for static pods
+  command: mktemp -d /tmp/openshift-ansible-XXXXXX
+  register: mktemp
+  changed_when: false
+
+- name: Prepare master static pods
+  copy:
+    src: "{{ item }}"
+    dest: "{{ mktemp.stdout }}"
+    mode: 0600
+  with_items:
+  - apiserver.yaml
+  - controller.yaml
+
+- name: Update master static pods
+  yedit:
+    src: "{{ mktemp.stdout }}/{{ item }}"
+    edits:
+    - key: spec.containers[0].image
+      value: "{{ osm_image }}:{{ openshift_image_tag }}"
+  with_items:
+  - apiserver.yaml
+  - controller.yaml
+
+- name: Update master static pods
+  copy:
+    remote_src: true
+    src: "{{ mktemp.stdout }}/{{ item }}"
+    dest: "/etc/origin/node/pods/"
+    mode: 0600
+  with_items:
+  - apiserver.yaml
+  - controller.yaml
+
+- name: Remove temporary directory
+  file:
+    name: "{{ mktemp.stdout }}"
+    state: absent
+  changed_when: False
+
+- name: Establish the default bootstrap kubeconfig for masters
+  copy:
+    remote_src: true
+    src: "/etc/origin/master/admin.kubeconfig"
+    dest: "{{ item }}"
+    mode: 0600
+  with_items:
+  # bootstrap as an admin
+  - /etc/origin/node/bootstrap.kubeconfig
+  # copy to this location to bypass initial bootstrap request
+  - /etc/origin/node/node.kubeconfig

+ 10 - 0
roles/openshift_control_plane/tasks/static_shim.yml

@@ -0,0 +1,10 @@
+---
+# TODO: package this?
+- name: Copy static master scripts
+  copy:
+    src: "{{ item }}"
+    dest: "/usr/bin/"
+    mode: 0500
+  with_items:
+  - scripts/docker/master-logs
+  - scripts/docker/master-restart

+ 7 - 0
roles/openshift_control_plane/tasks/update_etcd_client_urls.yml

@@ -0,0 +1,7 @@
+---
+- yedit:
+    src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+    key: 'etcdClientInfo.urls'
+    value: "{{ openshift.master.etcd_urls }}"
+  notify:
+  - restart master

+ 45 - 0
roles/openshift_control_plane/tasks/upgrade.yml

@@ -0,0 +1,45 @@
+---
+- include_tasks: upgrade/rpm_upgrade.yml
+  when: not openshift_is_containerized | bool
+
+- include_tasks: upgrade/upgrade_scheduler.yml
+
+# master_config_hook is passed in from upgrade play.
+- include_tasks: "upgrade/{{ master_config_hook }}"
+  when: master_config_hook is defined
+
+- include_tasks: journald.yml
+
+- name: Check for ca-bundle.crt
+  stat:
+    path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
+  register: ca_bundle_stat
+  failed_when: false
+
+- name: Check for ca.crt
+  stat:
+    path: "{{ openshift.common.config_base }}/master/ca.crt"
+  register: ca_crt_stat
+  failed_when: false
+
+- name: Migrate ca.crt to ca-bundle.crt
+  command: mv ca.crt ca-bundle.crt
+  args:
+    chdir: "{{ openshift.common.config_base }}/master"
+  when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
+
+- name: Link ca.crt to ca-bundle.crt
+  file:
+    src: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
+    path: "{{ openshift.common.config_base }}/master/ca.crt"
+    state: link
+  when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
+
+- name: Update oreg value
+  yedit:
+    src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+    key: 'imageConfig.format'
+    value: "{{ oreg_url | default(oreg_url_master) }}"
+  when: oreg_url is defined or oreg_url_master is defined
+
+- include_tasks: static.yml

+ 36 - 0
roles/openshift_control_plane/tasks/upgrade/rpm_upgrade.yml

@@ -0,0 +1,36 @@
+---
+# When we update package "a-${version}" and a requires b >= ${version}, yum
+# will pick the latest available version of b unless we pin it, and the whole
+# set of dependencies ends up at the latest version. Since the package module,
+# unlike the yum module, doesn't flatten a list of packages into one
+# transaction, we need to do that explicitly. The Ansible core team tells us
+# not to rely on yum module transaction flattening anyway.
+
+# TODO: If the sdn package isn't already installed this will install it, we
+# should fix that
+
+- import_tasks: ../static.yml
+
+- name: Upgrade master packages
+  command:
+    yum install -y {{ master_pkgs | join(' ') }} \
+    {{ ' --exclude *' ~ openshift_service_type ~ '*3.9*' if openshift_release | version_compare('3.9','<') else '' }}
+  vars:
+    master_pkgs:
+      - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
+      - "{{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }}"
+  register: result
+  until: result is succeeded
+  when: ansible_pkg_mgr == 'yum'
+
+- name: Upgrade master packages - dnf
+  dnf:
+    name: "{{ master_pkgs | join(',') }}"
+    state: present
+  vars:
+    master_pkgs:
+      - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+      - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}"
+  register: result
+  until: result is succeeded
+  when: ansible_pkg_mgr == 'dnf'
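
For the yum branch above, with illustrative values openshift_service_type=origin, openshift_pkg_version='-3.7.2', and openshift_release='3.7', the command renders roughly as:

    yum install -y origin-node-3.7.2 origin-clients-3.7.2 --exclude *origin*3.9*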

+ 175 - 0
roles/openshift_control_plane/tasks/upgrade/upgrade_scheduler.yml

@@ -0,0 +1,175 @@
+---
+# Upgrade predicates
+- vars:
+    # openshift_master_facts_default_predicates is a custom lookup plugin in
+    # role lib_utils
+    prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
+    prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}"
+    default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}"
+    # older_predicates are the set of predicates that have previously been
+    # hard-coded into openshift_facts
+    older_predicates:
+    - - name: MatchNodeSelector
+      - name: PodFitsResources
+      - name: PodFitsPorts
+      - name: NoDiskConflict
+      - name: NoVolumeZoneConflict
+      - name: MaxEBSVolumeCount
+      - name: MaxGCEPDVolumeCount
+      - name: Region
+        argument:
+          serviceAffinity:
+            labels:
+            - region
+    - - name: MatchNodeSelector
+      - name: PodFitsResources
+      - name: PodFitsPorts
+      - name: NoDiskConflict
+      - name: NoVolumeZoneConflict
+      - name: Region
+        argument:
+          serviceAffinity:
+            labels:
+            - region
+    - - name: MatchNodeSelector
+      - name: PodFitsResources
+      - name: PodFitsPorts
+      - name: NoDiskConflict
+      - name: Region
+        argument:
+          serviceAffinity:
+            labels:
+            - region
+    # older_predicates_no_region are the set of predicates that have previously
+    # been hard-coded into openshift_facts, with the Region predicate removed
+    older_predicates_no_region:
+    - - name: MatchNodeSelector
+      - name: PodFitsResources
+      - name: PodFitsPorts
+      - name: NoDiskConflict
+      - name: NoVolumeZoneConflict
+      - name: MaxEBSVolumeCount
+      - name: MaxGCEPDVolumeCount
+    - - name: MatchNodeSelector
+      - name: PodFitsResources
+      - name: PodFitsPorts
+      - name: NoDiskConflict
+      - name: NoVolumeZoneConflict
+    - - name: MatchNodeSelector
+      - name: PodFitsResources
+      - name: PodFitsPorts
+      - name: NoDiskConflict
+  block:
+
+  # Handle case where openshift_master_predicates is defined
+  - block:
+    - debug:
+        msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
+      when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]
+
+    - debug:
+        msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
+      when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates
+    when: openshift_master_scheduler_predicates | default(none) is not none
+
+  # Handle cases where openshift_master_predicates is not defined
+  - block:
+    - debug:
+        msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
+      when:
+      - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
+      - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates]
+
+    - set_fact:
+        openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
+      when:
+      - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
+      - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates]
+
+    - set_fact:
+        openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
+      when:
+      - openshift_master_scheduler_current_predicates != default_predicates_no_region
+      - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region]
+
+    when: openshift_master_scheduler_predicates | default(none) is none
+
+
+# Upgrade priorities
+- vars:
+    prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
+    prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}"
+    default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}"
+    # older_priorities are the set of priorities that have previously been
+    # hard-coded into openshift_facts
+    older_priorities:
+    - - name: LeastRequestedPriority
+        weight: 1
+      - name: SelectorSpreadPriority
+        weight: 1
+      - name: Zone
+        weight: 2
+        argument:
+          serviceAntiAffinity:
+            label: zone
+    # older_priorities_no_zone are the set of priorities that have previously
+    # been hard-coded into openshift_facts, with the Zone priority removed
+    older_priorities_no_zone:
+    - - name: LeastRequestedPriority
+        weight: 1
+      - name: SelectorSpreadPriority
+        weight: 1
+  block:
+
+  # Handle case where openshift_master_priorities is defined
+  - block:
+    - debug:
+        msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
+      when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]
+
+    - debug:
+        msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
+      when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities
+    when: openshift_master_scheduler_priorities | default(none) is not none
+
+  # Handle cases where openshift_master_priorities is not defined
+  - block:
+    - debug:
+        msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
+      when:
+      - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
+      - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities]
+
+    - set_fact:
+        openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
+      when:
+      - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
+      - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities]
+
+    - set_fact:
+        openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
+      when:
+      - openshift_master_scheduler_current_priorities != default_priorities_no_zone
+      - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone]
+
+    when: openshift_master_scheduler_priorities | default(none) is none
+
+
+# Update scheduler
+- vars:
+    scheduler_config:
+      kind: Policy
+      apiVersion: v1
+      predicates: "{{ openshift_upgrade_scheduler_predicates
+                      | default(openshift_master_scheduler_current_predicates) }}"
+      priorities: "{{ openshift_upgrade_scheduler_priorities
+                      | default(openshift_master_scheduler_current_priorities) }}"
+  block:
+  - name: Update scheduler config
+    copy:
+      content: "{{ scheduler_config | to_nice_json }}"
+      dest: "{{ openshift_master_scheduler_conf }}"
+      backup: true
+  when: >
+    openshift_upgrade_scheduler_predicates is defined or
+    openshift_upgrade_scheduler_priorities is defined
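
When either openshift_upgrade_scheduler_* fact was set above, the copy task emits the merged policy as JSON; a sketch of the resulting scheduler.json (the entries shown are illustrative, not the full default sets):

    {
        "kind": "Policy",
        "apiVersion": "v1",
        "predicates": [
            {"name": "MatchNodeSelector"},
            {"name": "PodFitsResources"}
        ],
        "priorities": [
            {"name": "LeastRequestedPriority", "weight": 1}
        ]
    }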

+ 37 - 0
roles/openshift_control_plane/tasks/upgrade_facts.yml

@@ -0,0 +1,37 @@
+---
+# This file exists because we call systemd_units.yml from outside of the role
+# during upgrades.  When we remove this pattern, we can probably
+# eliminate most of these set_fact items.
+
+- name: Set openshift_master_config_dir if unset
+  set_fact:
+    openshift_master_config_dir: '/etc/origin/master'
+  when: openshift_master_config_dir is not defined
+
+- name: Set r_openshift_master_data_dir if unset
+  set_fact:
+    r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+  when: r_openshift_master_data_dir is not defined
+
+- set_fact:
+    oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
+  when: oreg_auth_credentials_path is not defined
+
+- set_fact:
+    oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+  when: oreg_host is not defined
+
+- set_fact:
+    oreg_auth_credentials_replace: False
+  when: oreg_auth_credentials_replace is not defined
+
+- name: Set openshift_master_debug_level
+  set_fact:
+    openshift_master_debug_level: "{{ debug_level | default(2) }}"
+  when:
+  - openshift_master_debug_level is not defined
+
+- name: Init HA Service Info
+  set_fact:
+    containerized_svc_dir: "{{ containerized_svc_dir | default('/usr/lib/systemd/system') }}"
+    ha_svc_template_path: "{{ ha_svc_template_path | default('native-cluster') }}"

+ 5 - 0
roles/openshift_control_plane/templates/htpasswd.j2

@@ -0,0 +1,5 @@
+{% if 'htpasswd_users' in openshift.master %}
+{%   for user,pass in openshift.master.htpasswd_users.items() %}
+{{     user ~ ':' ~ pass }}
+{%   endfor %}
+{% endif %}
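
The rendered output is one user:hash pair per line; for example, with openshift.master.htpasswd_users containing a single entry (the hash is a placeholder):

    alice:$apr1$examplehash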

+ 16 - 0
roles/openshift_control_plane/templates/master.env.j2

@@ -0,0 +1,16 @@
+{% if openshift_cloudprovider_kind | default('') == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined %}
+AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key }}
+AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }}
+{% endif %}
+
+# Proxy configuration
+# See https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#configuring-global-proxy
+{% if 'http_proxy' in openshift.common %}
+HTTP_PROXY={{ openshift.common.http_proxy | default('') }}
+{% endif %}
+{% if 'https_proxy' in openshift.common %}
+HTTPS_PROXY={{ openshift.common.https_proxy | default('')}}
+{% endif %}
+{% if 'no_proxy' in openshift.common %}
+NO_PROXY={{ openshift.common.no_proxy | default('') }},{{ openshift.common.portal_net }},{{ openshift.master.sdn_cluster_network_cidr }}
+{% endif %}
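
A sketch of the rendered master.env for an AWS cluster behind a proxy; every value below is a placeholder:

    AWS_ACCESS_KEY_ID=AKIAEXAMPLEKEY
    AWS_SECRET_ACCESS_KEY=exampleSecretKey

    # Proxy configuration
    # See https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#configuring-global-proxy
    HTTP_PROXY=http://proxy.example.com:3128
    HTTPS_PROXY=http://proxy.example.com:3128
    NO_PROXY=.example.com,172.30.0.0/16,10.128.0.0/14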

+ 232 - 0
roles/openshift_control_plane/templates/master.yaml.v1.j2

@@ -0,0 +1,232 @@
+kind: MasterConfig
+apiVersion: v1
+admissionConfig:
+{% if 'admission_plugin_config' in openshift.master %}
+  pluginConfig:{{ openshift.master.admission_plugin_config | lib_utils_to_padded_yaml(level=2) }}
+{% endif %}
+apiLevels:
+- v1
+{% if not openshift_version_gte_3_9 %}
+assetConfig:
+  logoutURL: "{{ openshift.master.logout_url | default('') }}"
+  masterPublicURL: {{ openshift.master.public_api_url }}
+  publicURL: {{ openshift.master.public_console_url }}/
+{% if 'logging_public_url' in openshift.master %}
+  loggingPublicURL: {{ openshift.master.logging_public_url }}
+{% endif %}
+{% if openshift_hosted_metrics_deploy_url is defined %}
+  metricsPublicURL: {{ openshift_hosted_metrics_deploy_url }}
+{% endif %}
+{% if 'extension_scripts' in openshift.master %}
+  extensionScripts: {{ openshift.master.extension_scripts | lib_utils_to_padded_yaml(1, 2) }}
+{% endif %}
+{% if 'extension_stylesheets' in openshift.master %}
+  extensionStylesheets: {{ openshift.master.extension_stylesheets | lib_utils_to_padded_yaml(1, 2) }}
+{% endif %}
+{% if 'extensions' in openshift.master %}
+  extensions: {{ openshift.master.extensions | lib_utils_to_padded_yaml(1, 2) }}
+{% endif %}
+  servingInfo:
+    bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }}
+    bindNetwork: tcp4
+    certFile: master.server.crt
+    clientCA: ""
+    keyFile: master.server.key
+    maxRequestsInFlight: 0
+    requestTimeoutSeconds: 0
+{% if openshift_master_min_tls_version is defined %}
+    minTLSVersion: {{ openshift_master_min_tls_version }}
+{% endif %}
+{% if openshift_master_cipher_suites is defined %}
+    cipherSuites:
+{% for cipher_suite in openshift_master_cipher_suites %}
+    - {{ cipher_suite }}
+{% endfor %}
+{% endif %}
+# assetconfig end
+{% endif %}
+{% if openshift.master.audit_config | default(none) is not none %}
+auditConfig:{{ openshift.master.audit_config | lib_utils_to_padded_yaml(level=1) }}
+{% endif %}
+controllerConfig:
+  election:
+    lockName: openshift-master-controllers
+  serviceServingCert:
+    signer:
+      certFile: service-signer.crt
+      keyFile: service-signer.key
+controllers: '*'
+corsAllowedOrigins:
+  # anchor with start (\A) and end (\z) of the string, make the check case insensitive ((?i)) and escape hostname
+{% for origin in ['127.0.0.1', 'localhost', openshift.common.ip, openshift.common.public_ip] | union(openshift.common.all_hostnames) | unique %}
+  - (?i)//{{ origin | regex_escape() }}(:|\z)
+{% endfor %}
+{% for custom_origin in openshift.master.custom_cors_origins | default("") %}
+  - (?i)//{{ custom_origin | regex_escape() }}(:|\z)
+{% endfor %}
+{% if 'disabled_features' in openshift.master %}
+disabledFeatures: {{ openshift.master.disabled_features | to_json }}
+{% endif %}
+{% if openshift.master.embedded_dns | bool %}
+dnsConfig:
+  bindAddress: {{ openshift.master.bind_addr }}:{{ openshift_master_dns_port }}
+  bindNetwork: tcp4
+{% endif %}
+etcdClientInfo:
+  ca: master.etcd-ca.crt
+  certFile: master.etcd-client.crt
+  keyFile: master.etcd-client.key
+  urls:
+{% for etcd_url in openshift.master.etcd_urls %}
+    - {{ etcd_url }}
+{% endfor %}
+etcdStorageConfig:
+  kubernetesStoragePrefix: kubernetes.io
+  kubernetesStorageVersion: v1
+  openShiftStoragePrefix: openshift.io
+  openShiftStorageVersion: v1
+imageConfig:
+  format: {{ l_osm_registry_url }}
+  latest: {{ openshift_master_image_config_latest }}
+{% if 'image_policy_config' in openshift.master %}
+imagePolicyConfig:{{ openshift.master.image_policy_config | lib_utils_to_padded_yaml(level=1) }}
+{% endif %}
+kubeletClientInfo:
+{# TODO: allow user specified kubelet port #}
+  ca: ca-bundle.crt
+  certFile: master.kubelet-client.crt
+  keyFile: master.kubelet-client.key
+  port: 10250
+{% if openshift.master.embedded_kube | bool %}
+kubernetesMasterConfig:
+  apiServerArguments: {{ openshift.master.api_server_args | default(None) | lib_utils_to_padded_yaml( level=2 ) }}
+    storage-backend:
+    - etcd3
+    storage-media-type:
+    - application/vnd.kubernetes.protobuf
+  controllerArguments: {{ openshift.master.controller_args | default(None) | lib_utils_to_padded_yaml( level=2 ) }}
+  masterCount: {{ openshift.master.master_count }}
+  masterIP: {{ openshift.common.ip }}
+  podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }}
+  proxyClientInfo:
+    certFile: master.proxy-client.crt
+    keyFile: master.proxy-client.key
+  schedulerArguments: {{ openshift_master_scheduler_args | default(None) | lib_utils_to_padded_yaml( level=3 ) }}
+  schedulerConfigFile: {{ openshift_master_scheduler_conf }}
+  servicesNodePortRange: "{{ openshift_node_port_range | default("") }}"
+  servicesSubnet: {{ openshift.common.portal_net }}
+  staticNodeNames: {{ openshift_node_ips | default([], true) }}
+{% endif %}
+masterClients:
+{# TODO: allow user to set externalKubernetesKubeConfig #}
+  externalKubernetesClientConnectionOverrides:
+    acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
+    contentType: application/vnd.kubernetes.protobuf
+    burst: {{ openshift_master_external_ratelimit_burst | default(400) }}
+    qps: {{ openshift_master_external_ratelimit_qps | default(200) }}
+  externalKubernetesKubeConfig: ""
+  openshiftLoopbackClientConnectionOverrides:
+    acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
+    contentType: application/vnd.kubernetes.protobuf
+    burst: {{ openshift_master_loopback_ratelimit_burst | default(600) }}
+    qps: {{ openshift_master_loopback_ratelimit_qps | default(300) }}
+  openshiftLoopbackKubeConfig: openshift-master.kubeconfig
+masterPublicURL: {{ openshift.master.public_api_url }}
+networkConfig:
+  clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
+  hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
+{% if openshift_version_gte_3_7 | bool %}
+  clusterNetworks:
+  - cidr: {{ openshift.master.sdn_cluster_network_cidr }}
+    hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
+{% endif %}
+{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_use_kuryr or r_openshift_master_sdn_network_plugin_name == 'cni' %}
+  networkPluginName: {{ r_openshift_master_sdn_network_plugin_name_default }}
+{% endif %}
+# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
+  serviceNetworkCIDR: {{ openshift.common.portal_net }}
+  externalIPNetworkCIDRs: {{ openshift_master_external_ip_network_cidrs | default(["0.0.0.0/0"]) | lib_utils_to_padded_yaml(1,2) }}
+{% if openshift_master_ingress_ip_network_cidr is defined %}
+  ingressIPNetworkCIDR: {{ openshift_master_ingress_ip_network_cidr }}
+{% endif %}
+oauthConfig:
+{% if 'oauth_always_show_provider_selection' in openshift.master %}
+  alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }}
+{% endif %}
+{% if l_openshift_master_oauth_templates %}
+  templates:{{ l_openshift_master_oauth_templates | lib_utils_to_padded_yaml(level=2) }}
+{% endif %}
+  assetPublicURL: {{ openshift.master.public_console_url }}/
+  grantConfig:
+    method: {{ openshift.master.oauth_grant_method }}
+  identityProviders:
+{% for line in translated_identity_providers.splitlines() %}
+  {{ line }}
+{% endfor %}
+  masterCA: ca-bundle.crt
+  masterPublicURL: {{ openshift.master.public_api_url }}
+  masterURL: {{ openshift.master.api_url }}
+  sessionConfig:
+    sessionMaxAgeSeconds: {{ openshift.master.session_max_seconds }}
+    sessionName: {{ openshift.master.session_name }}
+{% if openshift.master.session_auth_secrets is defined and openshift.master.session_encryption_secrets is defined %}
+    sessionSecretsFile: {{ openshift.master.session_secrets_file }}
+{% endif %}
+  tokenConfig:
+    accessTokenMaxAgeSeconds: {{ openshift.master.access_token_max_seconds }}
+    authorizeTokenMaxAgeSeconds: {{ openshift.master.auth_token_max_seconds }}
+pauseControllers: false
+policyConfig:
+  bootstrapPolicyFile: {{ openshift_master_policy }}
+  openshiftInfrastructureNamespace: openshift-infra
+  openshiftSharedResourcesNamespace: openshift
+projectConfig:
+  defaultNodeSelector: "{{ osm_default_node_selector }}"
+  projectRequestMessage: "{{ osm_project_request_message }}"
+  projectRequestTemplate: "{{ osm_project_request_template }}"
+  securityAllocator:
+    mcsAllocatorRange: "{{ osm_mcs_allocator_range }}"
+    mcsLabelsPerProject: {{ osm_mcs_labels_per_project }}
+    uidAllocatorRange: "{{ osm_uid_allocator_range }}"
+routingConfig:
+  subdomain: "{{ openshift_master_default_subdomain }}"
+serviceAccountConfig:
+  limitSecretReferences: {{ openshift_master_saconfig_limitsecretreferences | default(false) }}
+  managedNames:
+  - default
+  - builder
+  - deployer
+  masterCA: ca-bundle.crt
+  privateKeyFile: serviceaccounts.private.key
+  publicKeyFiles:
+  - serviceaccounts.public.key
+servingInfo:
+  bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }}
+  bindNetwork: tcp4
+  certFile: master.server.crt
+  clientCA: ca.crt
+  keyFile: master.server.key
+  maxRequestsInFlight: {{ openshift.master.max_requests_inflight }}
+  requestTimeoutSeconds: 3600
+{% if openshift.master.named_certificates | default([]) | length > 0 %}
+  namedCertificates:
+{% for named_certificate in openshift.master.named_certificates %}
+  - certFile: {{ named_certificate['certfile'] }}
+    keyFile: {{ named_certificate['keyfile'] }}
+    names:
+{% for name in named_certificate['names'] %}
+    - "{{ name }}"
+{% endfor %}
+{% endfor %}
+{% endif %}
+{% if openshift_master_min_tls_version is defined %}
+  minTLSVersion: {{ openshift_master_min_tls_version }}
+{% endif %}
+{% if openshift_master_cipher_suites is defined %}
+  cipherSuites:
+{% for cipher_suite in openshift_master_cipher_suites %}
+  - {{ cipher_suite }}
+{% endfor %}
+{% endif %}
+volumeConfig:
+  dynamicProvisioningEnabled: {{ openshift.master.dynamic_provisioning_enabled }}
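
For orientation, here is a minimal excerpt of the master-config.yaml this template renders, assuming a hypothetical single master at master.example.com (the hostname and etcd URL are illustrative, not role defaults):

  # illustrative rendered output; master.example.com is hypothetical
  corsAllowedOrigins:
  - (?i)//127\.0\.0\.1(:|\z)
  - (?i)//localhost(:|\z)
  - (?i)//master\.example\.com(:|\z)
  etcdClientInfo:
    ca: master.etcd-ca.crt
    certFile: master.etcd-client.crt
    keyFile: master.etcd-client.key
    urls:
    - https://master.example.com:2379
  masterPublicURL: https://master.example.com:8443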

+ 7 - 0
roles/openshift_control_plane/templates/sessionSecretsFile.yaml.v1.j2

@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: SessionSecrets
+secrets:
+{% for secret in openshift.master.session_auth_secrets %}
+- authentication: "{{ secret }}"
+  encryption: "{{ openshift.master.session_encryption_secrets[loop.index0] }}"
+{% endfor %}
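
Rendered, the file pairs each authentication secret with the encryption secret at the same index, so the two inventory lists must be the same length. A sketch with placeholder values:

  apiVersion: v1
  kind: SessionSecrets
  secrets:
  # placeholder secrets for illustration only
  - authentication: "auth-secret-1"
    encryption: "encryption-secret-1"
  - authentication: "auth-secret-2"
    encryption: "encryption-secret-2"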

+ 5 - 0
roles/openshift_sdn/defaults/main.yml

@@ -0,0 +1,5 @@
+---
+openshift_node_image_dict:
+  origin: 'openshift/node'
+  openshift-enterprise: 'openshift3/node'
+osn_image: "{{ openshift_node_image_dict[openshift_deployment_type | default('origin')] }}"
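
The image name is keyed by deployment type, so a hypothetical inventory setting of:

  openshift_deployment_type: openshift-enterprise

makes osn_image evaluate to 'openshift3/node', while the default ('origin') yields 'openshift/node'.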

+ 9 - 0
roles/openshift_sdn/files/sdn-images.yaml

@@ -0,0 +1,9 @@
+apiVersion: image.openshift.io/v1
+kind: ImageStreamTag
+metadata:
+  name: node:v3.9
+  namespace: openshift-sdn
+tag:
+  from:
+    kind: DockerImage
+    name: openshift/node:v3.9
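
Note that tasks/main.yml below rewrites tag.from.name with yedit when oreg_url is set; with a hypothetical registry prefix and an openshift_image_tag of v3.9.0, the applied tag would become:

  tag:
    from:
      kind: DockerImage
      name: registry.example.com/openshift3/node:v3.9.0  # hypothetical values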

+ 83 - 0
roles/openshift_sdn/files/sdn-ovs.yaml

@@ -0,0 +1,83 @@
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: ovs
+  namespace: openshift-sdn
+  annotations:
+    kubernetes.io/description: |
+      This daemon set launches the openvswitch daemon.
+    image.openshift.io/triggers: |
+      [{"from":{"kind":"ImageStreamTag","name":"node:v3.9"},"fieldPath":"spec.template.spec.containers[?(@.name==\"openvswitch\")].image"}]
+spec:
+  selector:
+    matchLabels:
+      app: ovs
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: ovs
+        component: network
+        type: infra
+        openshift.io/component: network
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # Requires fairly broad permissions - ability to read all services and network functions as well
+      # as all pods.
+      serviceAccountName: sdn
+      hostNetwork: true
+      containers:
+      - name: openvswitch
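+        # Intentionally blank; the image trigger annotation above injects the node image.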
+        image: " "
+        command:
+        - /bin/bash
+        - -c
+        - |
+          #!/bin/bash
+          set -euo pipefail
+          function quit {
+              /usr/share/openvswitch/scripts/ovs-ctl stop
+              exit 0
+          }
+          trap quit SIGTERM
+          /usr/share/openvswitch/scripts/ovs-ctl start --system-id=random
+          while true; do sleep 5; done
+        securityContext:
+          runAsUser: 0
+          privileged: true
+        volumeMounts:
+        - mountPath: /lib/modules
+          name: host-modules
+          readOnly: true
+        - mountPath: /run/openvswitch
+          name: host-run-ovs
+        - mountPath: /var/run/openvswitch
+          name: host-run-ovs
+        - mountPath: /sys
+          name: host-sys
+          readOnly: true
+        - mountPath: /etc/openvswitch
+          name: host-config-openvswitch
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+          limits:
+            cpu: 200m
+            memory: 300Mi
+
+      volumes:
+      - name: host-modules
+        hostPath:
+          path: /lib/modules
+      - name: host-run-ovs
+        hostPath:
+          path: /run/openvswitch
+      - name: host-sys
+        hostPath:
+          path: /sys
+      - name: host-config-openvswitch
+        hostPath:
+          path: /etc/origin/openvswitch

+ 29 - 0
roles/openshift_sdn/files/sdn-policy.yaml

@@ -0,0 +1,29 @@
+kind: List
+apiVersion: v1
+items:
+- kind: ServiceAccount
+  apiVersion: v1
+  metadata:
+    name: sdn
+    namespace: openshift-sdn
+- apiVersion: authorization.openshift.io/v1
+  kind: ClusterRoleBinding
+  metadata:
+    name: sdn-cluster-reader
+  roleRef:
+    name: cluster-reader
+  subjects:
+  - kind: ServiceAccount
+    name: sdn
+    namespace: openshift-sdn
+- apiVersion: authorization.openshift.io/v1
+  kind: ClusterRoleBinding
+  metadata:
+    name: sdn-reader
+  roleRef:
+    name: system:sdn-reader
+  subjects:
+  - kind: ServiceAccount
+    name: sdn
+    namespace: openshift-sdn
+# TODO: PSP binding

+ 251 - 0
roles/openshift_sdn/files/sdn.yaml

@@ -0,0 +1,251 @@
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: sdn
+  namespace: openshift-sdn
+  annotations:
+    kubernetes.io/description: |
+      This daemon set launches the OpenShift networking components (kube-proxy, DNS, and openshift-sdn).
+      It expects that OVS is running on the node.
+    image.openshift.io/triggers: |
+      [
+        {"from":{"kind":"ImageStreamTag","name":"node:v3.9"},"fieldPath":"spec.template.spec.containers[?(@.name==\"sync\")].image"},
+        {"from":{"kind":"ImageStreamTag","name":"node:v3.9"},"fieldPath":"spec.template.spec.containers[?(@.name==\"sdn\")].image"}
+      ]
+spec:
+  selector:
+    matchLabels:
+      app: sdn
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: sdn
+        component: network
+        type: infra
+        openshift.io/component: network
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # Requires fairly broad permissions - ability to read all services and network functions as well
+      # as all pods.
+      serviceAccountName: sdn
+      hostNetwork: true
+      # Must be hostPID because it invokes operations on processes in the host space
+      hostPID: true
+      containers:
+
+      # The sync container is a temporary config loop until Kubelet dynamic config is implemented. It refreshes
+      # the contents of /etc/origin/node/ with the config map ${BOOTSTRAP_CONFIG_NAME} from the openshift-node
+      # namespace. It will restart the Kubelet on the host if it detects the node-config.yaml has changed.
+      #
+      # This workaround can be dropped once:
+      # 1. Dynamic Kubelet config must pull down a full configmap
+      # 2. Nodes must relabel themselves https://github.com/kubernetes/kubernetes/issues/59314
+      #
+      - name: sync
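+        # Image left blank; injected by the image trigger annotation above.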
+        image: " "
+        command:
+        - /bin/bash
+        - -c
+        - |
+          #!/bin/bash
+          set -euo pipefail
+
+          # loop until BOOTSTRAP_CONFIG_NAME is set
+          set -o allexport
+          while true; do
+            if [[ -f /etc/sysconfig/origin-node ]]; then
+              source /etc/sysconfig/origin-node
+              if [[ -z "${BOOTSTRAP_CONFIG_NAME-}" ]]; then
+                echo "info: Waiting for BOOTSTRAP_CONFIG_NAME to be set" 2>&1
+                sleep 15
+                continue
+              fi
+              break
+            fi
+          done
+
+          # track the current state of the config
+          if [[ -f /etc/origin/node/node-config.yaml ]]; then
+            md5sum /etc/origin/node/node-config.yaml > /tmp/.old
+          else
+            touch /tmp/.old
+          fi
+
+          # periodically refresh both node-config.yaml and relabel the node
+          while true; do
+            if ! oc extract --config=/etc/origin/node/node.kubeconfig "cm/${BOOTSTRAP_CONFIG_NAME}" -n openshift-node --to=/etc/origin/node --confirm; then
+              echo "error: Unable to retrieve latest config for node" 2>&1
+              sleep 15
+              continue
+            fi
+            # detect whether the node-config.yaml has changed, and if so trigger a restart of the kubelet.
+            md5sum /etc/origin/node/node-config.yaml > /tmp/.new
+            if [[ "$( cat /tmp/.old )" != "$( cat /tmp/.new )" ]]; then
+              echo "info: Configuration changed, restarting kubelet" 2>&1
+              # TODO: kubelet doesn't relabel nodes, best effort for now
+              # https://github.com/kubernetes/kubernetes/issues/59314
+              if args="$(openshift start node --write-flags --config /etc/origin/node/node-config.yaml)"; then
+                labels=' --node-labels=([^ ]+) '
+                if [[ ${args} =~ ${labels} ]]; then
+                  labels="${BASH_REMATCH[1]//,/ }"
+                  echo "info: Applying node labels $labels" 2>&1
+                  if ! oc label --config=/etc/origin/node/node.kubeconfig "node/${NODE_NAME}" ${labels} --overwrite; then
+                    echo "error: Unable to apply labels, will retry in 10" 2>&1
+                    sleep 10
+                    continue
+                  fi
+                fi
+              fi
+              if ! pgrep -U 0 -f 'hyperkube kubelet ' | xargs kill; then
+                echo "error: Unable to restart Kubelet" 2>&1
+              fi
+            fi
+            cp -f /tmp/.new /tmp/.old
+            sleep 180
+          done
+
+        env:
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        securityContext:
+          runAsUser: 0
+          # Permission could be reduced by selecting an appropriate SELinux policy
+          privileged: true
+        volumeMounts:
+        # Directory which contains the host configuration. We write to this directory
+        - mountPath: /etc/origin/node/
+          name: host-config
+        - mountPath: /etc/sysconfig/origin-node
+          name: host-sysconfig-node
+          readOnly: true
+
+      # The network container launches the openshift-sdn process, the kube-proxy, and the local DNS service.
+      # It relies on an up to date node-config.yaml being present.
+      - name: sdn
+        image: " "
+        command:
+        - /bin/bash
+        - -c
+        - |
+          #!/bin/bash
+          set -euo pipefail
+          # Take over network functions on the node
+          rm -Rf /etc/cni/net.d/*
+          rm -Rf /host/opt/cni/bin/*
+          cp -Rf /opt/cni/bin/* /host/opt/cni/bin/
+
+          if [[ -f /etc/sysconfig/origin-node ]]; then
+            set -o allexport
+            source /etc/sysconfig/origin-node
+          fi
+
+          # use either the bootstrapped node kubeconfig or the static configuration
+          file=/etc/origin/node/node.kubeconfig
+          if [[ ! -f "${file}" ]]; then
+            # use the static node config if it exists
+            # TODO: remove when static node configuration is no longer supported
+            for f in /etc/origin/node/system*.kubeconfig; do
+              echo "info: Using ${f} for node configuration" 1>&2
+              file="${f}"
+              break
+            done
+          fi
+          # Use the same config as the node, but with the service account token
+          oc config "--config=${file}" view --flatten > /tmp/kubeconfig
+          oc config --config=/tmp/kubeconfig set-credentials sa "--token=$( cat /var/run/secrets/kubernetes.io/serviceaccount/token )"
+          oc config --config=/tmp/kubeconfig set-context "$( oc config --config=/tmp/kubeconfig current-context )" --user=sa
+          # Launch the network process
+          exec openshift start network --config=/etc/origin/node/node-config.yaml --kubeconfig=/tmp/kubeconfig --loglevel=${DEBUG_LOGLEVEL:-2}
+
+        securityContext:
+          runAsUser: 0
+          # Permission could be reduced by selecting an appropriate SELinux policy
+          privileged: true
+
+        volumeMounts:
+        # Directory which contains the host configuration.
+        - mountPath: /etc/origin/node/
+          name: host-config
+          readOnly: true
+        - mountPath: /etc/sysconfig/origin-node
+          name: host-sysconfig-node
+          readOnly: true
+        # Run directories where we need to be able to access sockets
+        - mountPath: /var/run/dbus/
+          name: host-var-run-dbus
+          readOnly: true
+        - mountPath: /var/run/openvswitch/
+          name: host-var-run-ovs
+          readOnly: true
+        - mountPath: /var/run/kubernetes/
+          name: host-var-run-kubernetes
+          readOnly: true
+        # We mount our socket here
+        - mountPath: /var/run/openshift-sdn
+          name: host-var-run-openshift-sdn
+        # CNI related mounts which we take over
+        - mountPath: /host/opt/cni/bin
+          name: host-opt-cni-bin
+        - mountPath: /etc/cni/net.d
+          name: host-etc-cni-netd
+        - mountPath: /var/lib/cni/networks/openshift-sdn
+          name: host-var-lib-cni-networks-openshift-sdn
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        env:
+        - name: OPENSHIFT_DNS_DOMAIN
+          value: cluster.local
+        ports:
+        - name: healthz
+          containerPort: 10256
+        livenessProbe:
+          initialDelaySeconds: 10
+          httpGet:
+            path: /healthz
+            port: 10256
+            scheme: HTTP
+
+      volumes:
+      # In bootstrap mode, the host config contains information not easily available
+      # from other locations.
+      - name: host-config
+        hostPath:
+          path: /etc/origin/node
+      - name: host-sysconfig-node
+        hostPath:
+          path: /etc/sysconfig/origin-node
+      - name: host-modules
+        hostPath:
+          path: /lib/modules
+
+      - name: host-var-run-ovs
+        hostPath:
+          path: /var/run/openvswitch
+      - name: host-var-run-kubernetes
+        hostPath:
+          path: /var/run/kubernetes
+      - name: host-var-run-dbus
+        hostPath:
+          path: /var/run/dbus
+      - name: host-var-run-openshift-sdn
+        hostPath:
+          path: /var/run/openshift-sdn
+
+      - name: host-opt-cni-bin
+        hostPath:
+          path: /opt/cni/bin
+      - name: host-etc-cni-netd
+        hostPath:
+          path: /etc/cni/net.d
+      - name: host-var-lib-cni-networks-openshift-sdn
+        hostPath:
+          path: /var/lib/cni/networks/openshift-sdn
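
The sync loop above pulls its configuration from a config map in the openshift-node namespace named by BOOTSTRAP_CONFIG_NAME. A minimal sketch of such an object (the name and contents are hypothetical):

  kind: ConfigMap
  apiVersion: v1
  metadata:
    name: node-config-compute   # hypothetical BOOTSTRAP_CONFIG_NAME
    namespace: openshift-node
  data:
    node-config.yaml: |
      kind: NodeConfig
      apiVersion: v1
      # kubelet and networking settings consumed by "openshift start node"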

+ 19 - 0
roles/openshift_sdn/meta/main.yaml

@@ -0,0 +1,19 @@
+---
+galaxy_info:
+  author: OpenShift Development <dev@lists.openshift.redhat.com>
+  description: Deploy OpenShift SDN
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 2.4
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  - name: Fedora
+    versions:
+    - all
+  categories:
+  - openshift
+dependencies:
+- role: lib_openshift
+- role: openshift_facts

+ 52 - 0
roles/openshift_sdn/tasks/main.yml

@@ -0,0 +1,52 @@
+---
+# Fact setting
+# - name: Set default image variables based on deployment type
+#   include_vars: "{{ item }}"
+#   with_first_found:
+#     - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+#     - "default_images.yml"
+
+- name: Ensure openshift-sdn project exists
+  oc_project:
+    name: openshift-sdn
+    state: present
+    node_selector:
+      - ""
+
+- name: Make temp directory for templates
+  command: mktemp -d /tmp/sdn-ansible-XXXXXX
+  register: mktemp
+  changed_when: False
+
+- name: Copy SDN templates to temp directory
+  copy:
+    src: "{{ item }}"
+    dest: "{{ mktemp.stdout }}/{{ item | basename }}"
+  with_fileglob:
+    - "files/*.yaml"
+
+- name: Update the image tag
+  yedit:
+    src: "{{ mktemp.stdout }}/sdn-images.yaml"
+    key: 'tag.from.name'
+    # TODO: this should be easier to replace
+    value: "{{ osn_image }}:{{ openshift_image_tag }}"
+  when: oreg_url is defined
+
+- name: Ensure the SDN can run privileged
+  oc_adm_policy_user:
+    namespace: "openshift-sdn"
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+    user: "system:serviceaccount:openshift-sdn:sdn"
+
+- name: Apply the SDN config
+  shell: >
+    {{ openshift_client_binary }} apply -f {{ mktemp.stdout }}
+
+- name: Remove temp directory
+  file:
+    state: absent
+    name: "{{ mktemp.stdout }}"
+  changed_when: False
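
A minimal sketch of wiring this role into a playbook, assuming the control plane is already running and using oo_first_master, the usual openshift-ansible host group:

  - name: Deploy OpenShift SDN
    hosts: oo_first_master
    roles:
    - role: openshift_sdn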