فهرست منبع

Refactored to use Ansible systemd module

* Ansible systemd module used in place of service module
* Refactored command tasks which are no longer necessary
* Applying rules from openshift-ansible Best Practices Guide
Russell Teague 8 سال پیش
والد
کامیت
ec9c22ca6a
56 فایل تغییر یافته به همراه 230 افزوده شده و 189 حذف شده
  1. 1 1
      roles/cockpit/meta/main.yml
  2. 1 1
      roles/cockpit/tasks/main.yml
  3. 1 1
      roles/dns/README.md
  4. 2 1
      roles/dns/handlers/main.yml
  5. 1 0
      roles/dns/meta/main.yml
  6. 2 7
      roles/dns/tasks/main.yml
  7. 3 2
      roles/docker/handlers/main.yml
  8. 1 7
      roles/docker/tasks/udev_workaround.yml
  9. 2 1
      roles/etcd/README.md
  10. 1 1
      roles/etcd/handlers/main.yml
  11. 1 1
      roles/etcd/meta/main.yml
  12. 11 21
      roles/etcd/tasks/main.yml
  13. 2 1
      roles/flannel/README.md
  14. 2 2
      roles/flannel/handlers/main.yml
  15. 1 1
      roles/flannel/meta/main.yml
  16. 1 1
      roles/flannel/tasks/main.yml
  17. 1 1
      roles/kube_nfs_volumes/README.md
  18. 1 1
      roles/kube_nfs_volumes/handlers/main.yml
  19. 1 1
      roles/kube_nfs_volumes/meta/main.yml
  20. 8 2
      roles/kube_nfs_volumes/tasks/nfs.yml
  21. 3 1
      roles/nuage_master/README.md
  22. 12 6
      roles/nuage_master/handlers/main.yaml
  23. 3 3
      roles/nuage_master/meta/main.yml
  24. 2 1
      roles/nuage_node/README.md
  25. 2 2
      roles/nuage_node/handlers/main.yaml
  26. 3 3
      roles/nuage_node/meta/main.yml
  27. 2 0
      roles/openshift_loadbalancer/README.md
  28. 1 1
      roles/openshift_loadbalancer/handlers/main.yml
  29. 1 1
      roles/openshift_loadbalancer/meta/main.yml
  30. 2 6
      roles/openshift_loadbalancer/tasks/main.yml
  31. 2 1
      roles/openshift_master/README.md
  32. 3 3
      roles/openshift_master/handlers/main.yml
  33. 1 1
      roles/openshift_master/meta/main.yml
  34. 36 28
      roles/openshift_master/tasks/main.yml
  35. 1 1
      roles/openshift_master_cluster/README.md
  36. 1 1
      roles/openshift_master_cluster/meta/main.yml
  37. 4 2
      roles/openshift_metrics/README.md
  38. 3 3
      roles/openshift_metrics/handlers/main.yml
  39. 15 1
      roles/openshift_metrics/meta/main.yaml
  40. 4 4
      roles/openshift_node/README.md
  41. 2 2
      roles/openshift_node/handlers/main.yml
  42. 40 29
      roles/openshift_node/tasks/main.yml
  43. 2 0
      roles/openshift_node_certificates/README.md
  44. 2 2
      roles/openshift_node_certificates/handlers/main.yml
  45. 1 1
      roles/openshift_node_certificates/meta/main.yml
  46. 2 2
      roles/openshift_node_dnsmasq/handlers/main.yml
  47. 1 1
      roles/openshift_node_dnsmasq/meta/main.yml
  48. 6 6
      roles/openshift_node_dnsmasq/tasks/main.yml
  49. 3 3
      roles/openshift_storage_nfs/README.md
  50. 1 1
      roles/openshift_storage_nfs/handlers/main.yml
  51. 1 1
      roles/openshift_storage_nfs/meta/main.yml
  52. 8 10
      roles/openshift_storage_nfs/tasks/main.yml
  53. 2 3
      roles/openshift_storage_nfs_lvm/README.md
  54. 1 1
      roles/openshift_storage_nfs_lvm/handlers/main.yml
  55. 1 1
      roles/openshift_storage_nfs_lvm/meta/main.yml
  56. 12 3
      roles/openshift_storage_nfs_lvm/tasks/nfs.yml

+ 1 - 1
roles/cockpit/meta/main.yml

@@ -4,7 +4,7 @@ galaxy_info:
   description: Deploy and Enable cockpit-ws plus optional plugins
   company: Red Hat, Inc.
   license: Apache License, Version 2.0
-  min_ansible_version: 1.7
+  min_ansible_version: 2.2
   platforms:
   - name: EL
     versions:

+ 1 - 1
roles/cockpit/tasks/main.yml

@@ -10,7 +10,7 @@
   when: not openshift.common.is_containerized | bool
 
 - name: Enable cockpit-ws
-  service:
+  systemd:
     name: cockpit.socket
     enabled: true
     state: started

+ 1 - 1
roles/dns/README.md

@@ -6,7 +6,7 @@ Configure a DNS server serving IPs of all the nodes of the cluster
 Requirements
 ------------
 
-None
+Ansible 2.2
 
 Role Variables
 --------------

+ 2 - 1
roles/dns/handlers/main.yml

@@ -1,4 +1,5 @@
+---
 - name: restart bind
-  service:
+  systemd:
     name: named
     state: restarted

+ 1 - 0
roles/dns/meta/main.yml

@@ -4,5 +4,6 @@ galaxy_info:
   description: Deploy and configure a DNS server
   company: Amadeus SAS
   license: ASL 2.0
+  min_ansible_version: 2.2
 dependencies:
 - { role: openshift_facts }

+ 2 - 7
roles/dns/tasks/main.yml

@@ -11,7 +11,6 @@
   template:
     dest: "/tmp/dockerbuild/Dockerfile"
     src: Dockerfile
-  register: install_result
   when: openshift.common.is_containerized | bool
 
 - name: Build Bind image
@@ -22,13 +21,8 @@
   template:
     dest: "/etc/systemd/system/named.service"
     src: named.service.j2
-  register: install_result
   when: openshift.common.is_containerized | bool
 
-- name: reload systemd
-  command: /usr/bin/systemctl --system daemon-reload
-  when: openshift.common.is_containerized | bool and install_result | changed
-
 - name: Create bind zone dir
   file: path=/var/named state=directory
   when: openshift.common.is_containerized | bool
@@ -45,7 +39,8 @@
   notify: restart bind
 
 - name: Enable Bind
-  service:
+  systemd:
     name: named
     state: started
     enabled: yes
+    daemon_reload: yes

+ 3 - 2
roles/docker/handlers/main.yml

@@ -1,12 +1,13 @@
 ---
 
 - name: restart docker
-  service:
+  systemd:
     name: docker
     state: restarted
   when: not docker_service_status_changed | default(false) | bool
 
 - name: restart udev
-  service:
+  systemd:
     name: systemd-udevd
     state: restarted
+    daemon_reload: yes

+ 1 - 7
roles/docker/tasks/udev_workaround.yml

@@ -21,10 +21,4 @@
     owner: root
     mode: "0644"
   notify:
-  - restart udev
-  register: udevw_override_conf
-
-- name: reload systemd config files
-  command: systemctl daemon-reload
-  when: udevw_override_conf | changed
- 
+    - restart udev

+ 2 - 1
roles/etcd/README.md

@@ -6,7 +6,8 @@ Configures an etcd cluster for an arbitrary number of hosts
 Requirements
 ------------
 
-This role assumes it's being deployed on a RHEL/Fedora based host with package
+* Ansible 2.2
+* This role assumes it's being deployed on a RHEL/Fedora based host with package
 named 'etcd' available via yum or dnf (conditionally).
 
 Role Variables

+ 1 - 1
roles/etcd/handlers/main.yml

@@ -1,5 +1,5 @@
 ---
 
 - name: restart etcd
-  service: name={{ etcd_service }} state=restarted
+  systemd: name={{ etcd_service }} state=restarted
   when: not (etcd_service_status_changed | default(false) | bool)

+ 1 - 1
roles/etcd/meta/main.yml

@@ -7,7 +7,7 @@ galaxy_info:
   description: etcd management
   company: Red Hat, Inc.
   license: Apache License, Version 2.0
-  min_ansible_version: 2.1
+  min_ansible_version: 2.2
   platforms:
   - name: EL
     versions:

+ 11 - 21
roles/etcd/tasks/main.yml

@@ -20,36 +20,25 @@
   template:
     dest: "/etc/systemd/system/etcd_container.service"
     src: etcd.docker.service
-  register: install_etcd_result
   when: etcd_is_containerized | bool
 
-- name: Ensure etcd datadir exists
-  when: etcd_is_containerized | bool
+- name: Ensure etcd datadir exists when containerized
   file:
     path: "{{ etcd_data_dir }}"
     state: directory
     mode: 0700
-
-- name: Check for etcd service presence
-  command: systemctl show etcd.service
-  register: etcd_show
-  changed_when: false
-  failed_when: false
+  when: etcd_is_containerized | bool
 
 - name: Disable system etcd when containerized
-  when: etcd_is_containerized | bool and etcd_show.rc == 0 and 'LoadState=not-found' not in etcd_show.stdout
-  service:
+  systemd:
     name: etcd
     state: stopped
     enabled: no
-
-- name: Mask system etcd when containerized
-  when: etcd_is_containerized | bool and etcd_show.rc == 0 and 'LoadState=not-found' not in etcd_show.stdout
-  command: systemctl mask etcd
-
-- name: Reload systemd units
-  command: systemctl daemon-reload
-  when: etcd_is_containerized | bool and ( install_etcd_result | changed )
+    masked: yes
+    daemon_reload: yes
+  when: etcd_is_containerized | bool
+  register: task_result
+  failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
 
 - name: Validate permissions on the config dir
   file:
@@ -68,7 +57,7 @@
     - restart etcd
 
 - name: Enable etcd
-  service:
+  systemd:
     name: "{{ etcd_service }}"
     state: started
     enabled: yes
@@ -77,5 +66,6 @@
 - include: etcdctl.yml
   when: openshift_etcd_etcdctl_profile | default(true) | bool
 
-- set_fact:
+- name: Set fact etcd_service_status_changed
+  set_fact:
     etcd_service_status_changed: "{{ start_result | changed }}"

+ 2 - 1
roles/flannel/README.md

@@ -6,7 +6,8 @@ Configure flannel on openshift nodes
 Requirements
 ------------
 
-This role assumes it's being deployed on a RHEL/Fedora based host with package
+* Ansible 2.2
+* This role assumes it's being deployed on a RHEL/Fedora based host with package
 named 'flannel' available via yum or dnf (conditionally), in version superior
 to 0.3.
 

+ 2 - 2
roles/flannel/handlers/main.yml

@@ -1,8 +1,8 @@
 ---
 - name: restart flanneld
   become: yes
-  service: name=flanneld state=restarted
+  systemd: name=flanneld state=restarted
 
 - name: restart docker
   become: yes
-  service: name=docker state=restarted
+  systemd: name=docker state=restarted

+ 1 - 1
roles/flannel/meta/main.yml

@@ -4,7 +4,7 @@ galaxy_info:
   description: flannel management
   company: Red Hat, Inc.
   license: Apache License, Version 2.0
-  min_ansible_version: 2.1
+  min_ansible_version: 2.2
   platforms:
   - name: EL
     versions:

+ 1 - 1
roles/flannel/tasks/main.yml

@@ -27,7 +27,7 @@
 
 - name: Enable flanneld
   become: yes
-  service:
+  systemd:
     name: flanneld
     state: started
     enabled: yes

+ 1 - 1
roles/kube_nfs_volumes/README.md

@@ -11,8 +11,8 @@ system) on the disks!
 
 ## Requirements
 
+* Ansible 2.2
 * Running Kubernetes with NFS persistent volume support (on a remote machine).
-
 * Works only on RHEL/Fedora-like distros.
 
 ## Role Variables

+ 1 - 1
roles/kube_nfs_volumes/handlers/main.yml

@@ -1,3 +1,3 @@
 ---
 - name: restart nfs
-  service: name=nfs-server state=restarted
+  systemd: name=nfs-server state=restarted

+ 1 - 1
roles/kube_nfs_volumes/meta/main.yml

@@ -4,7 +4,7 @@ galaxy_info:
   description: Partition disks and use them as Kubernetes NFS physical volumes.
   company: Red Hat, Inc.
   license: license (Apache)
-  min_ansible_version: 1.4
+  min_ansible_version: 2.2
   platforms:
   - name: EL
     versions:

+ 8 - 2
roles/kube_nfs_volumes/tasks/nfs.yml

@@ -4,10 +4,16 @@
   when: not openshift.common.is_containerized | bool
 
 - name: Start rpcbind on Fedora/Red Hat
-  service: name=rpcbind state=started enabled=yes
+  systemd:
+    name: rpcbind
+    state: started
+    enabled: yes
 
 - name: Start nfs on Fedora/Red Hat
-  service: name=nfs-server state=started enabled=yes
+  systemd:
+    name: nfs-server
+    state: started
+    enabled: yes
 
 - name: Export the directories
   lineinfile: dest=/etc/exports

+ 3 - 1
roles/nuage_master/README.md

@@ -5,4 +5,6 @@ Setup Nuage Kubernetes Monitor on the Master node
 
 Requirements
 ------------
-This role assumes it has been deployed on RHEL/Fedora
+
+* Ansible 2.2
+* This role assumes it has been deployed on RHEL/Fedora

+ 12 - 6
roles/nuage_master/handlers/main.yaml

@@ -1,18 +1,24 @@
 ---
 - name: restart nuage-openshift-monitor
   become: yes
-  service: name=nuage-openshift-monitor state=restarted
+  systemd: name=nuage-openshift-monitor state=restarted
 
 - name: restart master
-  service: name={{ openshift.common.service_type }}-master state=restarted
+  systemd: name={{ openshift.common.service_type }}-master state=restarted
   when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false))
 
 - name: restart master api
-  service: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+  systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+  when: >
+    (openshift_master_ha | bool) and
+    (not master_api_service_status_changed | default(false)) and
+    openshift.master.cluster_method == 'native'
 
 # TODO: need to fix up ignore_errors here
 - name: restart master controllers
-  service: name={{ openshift.common.service_type }}-master-controllers state=restarted
-  when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+  when: >
+    (openshift_master_ha | bool) and
+    (not master_controllers_service_status_changed | default(false)) and
+    openshift.master.cluster_method == 'native'
   ignore_errors: yes

+ 3 - 3
roles/nuage_master/meta/main.yml

@@ -1,10 +1,10 @@
 ---
 galaxy_info:
-  author: Vishal Patil 
+  author: Vishal Patil
   description:
   company: Nuage Networks
   license: Apache License, Version 2.0
-  min_ansible_version: 1.8
+  min_ansible_version: 2.2
   platforms:
   - name: EL
     versions:
@@ -18,5 +18,5 @@ dependencies:
   - role: openshift_etcd_client_certificates
   - role: os_firewall
     os_firewall_allow:
-    - service: openshift-monitor 
+    - service: openshift-monitor
       port: "{{ nuage_mon_rest_server_port }}/tcp"

+ 2 - 1
roles/nuage_node/README.md

@@ -6,4 +6,5 @@ Setup Nuage VRS (Virtual Routing Switching) on the Openshift Node
 Requirements
 ------------
 
-This role assumes it has been deployed on RHEL/Fedora
+* Ansible 2.2
+* This role assumes it has been deployed on RHEL/Fedora

+ 2 - 2
roles/nuage_node/handlers/main.yaml

@@ -1,11 +1,11 @@
 ---
 - name: restart vrs
   become: yes
-  service: name=openvswitch state=restarted
+  systemd: name=openvswitch state=restarted
 
 - name: restart node
   become: yes
-  service: name={{ openshift.common.service_type }}-node state=restarted
+  systemd: name={{ openshift.common.service_type }}-node state=restarted
 
 - name: save iptable rules
   become: yes

+ 3 - 3
roles/nuage_node/meta/main.yml

@@ -1,10 +1,10 @@
 ---
 galaxy_info:
-  author: Vishal Patil 
+  author: Vishal Patil
   description:
   company: Nuage Networks
   license: Apache License, Version 2.0
-  min_ansible_version: 1.8
+  min_ansible_version: 2.2
   platforms:
   - name: EL
     versions:
@@ -17,7 +17,7 @@ dependencies:
   - role: nuage_ca
   - role: os_firewall
     os_firewall_allow:
-    - service: vxlan 
+    - service: vxlan
       port: 4789/udp
     - service: nuage-monitor
       port: "{{ nuage_mon_rest_server_port }}/tcp"

+ 2 - 0
roles/openshift_loadbalancer/README.md

@@ -6,6 +6,8 @@ OpenShift HaProxy Loadbalancer Configuration
 Requirements
 ------------
 
+* Ansible 2.2
+
 This role is intended to be applied to the [lb] host group which is
 separate from OpenShift infrastructure components.
 

+ 1 - 1
roles/openshift_loadbalancer/handlers/main.yml

@@ -1,6 +1,6 @@
 ---
 - name: restart haproxy
-  service:
+  systemd:
     name: haproxy
     state: restarted
   when: not (haproxy_start_result_changed | default(false) | bool)

+ 1 - 1
roles/openshift_loadbalancer/meta/main.yml

@@ -4,7 +4,7 @@ galaxy_info:
   description: OpenShift haproxy loadbalancer
   company: Red Hat, Inc.
   license: Apache License, Version 2.0
-  min_ansible_version: 1.9
+  min_ansible_version: 2.2
   platforms:
   - name: EL
     versions:

+ 2 - 6
roles/openshift_loadbalancer/tasks/main.yml

@@ -27,11 +27,6 @@
     option: LimitNOFILE
     value: "{{ openshift_loadbalancer_limit_nofile | default(100000) }}"
   notify: restart haproxy
-  register: nofile_limit_result
-
-- name: Reload systemd if needed
-  command: systemctl daemon-reload
-  when: nofile_limit_result | changed
 
 - name: Configure haproxy
   template:
@@ -43,10 +38,11 @@
   notify: restart haproxy
 
 - name: Enable and start haproxy
-  service:
+  systemd:
     name: haproxy
     state: started
     enabled: yes
+    daemon_reload: yes
   register: start_result
 
 - set_fact:

+ 2 - 1
roles/openshift_master/README.md

@@ -6,7 +6,8 @@ Master service installation
 Requirements
 ------------
 
-A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
+* Ansible 2.2
+* A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
 rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos.
 
 Role Variables

+ 3 - 3
roles/openshift_master/handlers/main.yml

@@ -1,16 +1,16 @@
 ---
 - name: restart master
-  service: name={{ openshift.common.service_type }}-master state=restarted
+  systemd: name={{ openshift.common.service_type }}-master state=restarted
   when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
   notify: Verify API Server
 
 - name: restart master api
-  service: name={{ openshift.common.service_type }}-master-api state=restarted
+  systemd: name={{ openshift.common.service_type }}-master-api state=restarted
   when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
   notify: Verify API Server
 
 - name: restart master controllers
-  service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
   when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
 
 - name: Verify API Server

+ 1 - 1
roles/openshift_master/meta/main.yml

@@ -4,7 +4,7 @@ galaxy_info:
   description: Master
   company: Red Hat, Inc.
   license: Apache License, Version 2.0
-  min_ansible_version: 2.1
+  min_ansible_version: 2.2
   platforms:
   - name: EL
     versions:

+ 36 - 28
roles/openshift_master/tasks/main.yml

@@ -64,9 +64,9 @@
   args:
     creates: "{{ openshift_master_policy }}"
   notify:
-  - restart master
-  - restart master api
-  - restart master controllers
+    - restart master
+    - restart master api
+    - restart master controllers
 
 - name: Create the scheduler config
   copy:
@@ -74,9 +74,9 @@
     dest: "{{ openshift_master_scheduler_conf }}"
     backup: true
   notify:
-  - restart master
-  - restart master api
-  - restart master controllers
+    - restart master
+    - restart master api
+    - restart master controllers
 
 - name: Install httpd-tools if needed
   package: name=httpd-tools state=present
@@ -147,8 +147,8 @@
     mode: 0600
   when: openshift.master.session_auth_secrets is defined and openshift.master.session_encryption_secrets is defined
   notify:
-  - restart master
-  - restart master api
+    - restart master
+    - restart master api
 
 - set_fact:
     translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1', openshift.common.version, openshift.common.deployment_type) }}"
@@ -163,9 +163,9 @@
     group: root
     mode: 0600
   notify:
-  - restart master
-  - restart master api
-  - restart master controllers
+    - restart master
+    - restart master api
+    - restart master controllers
 
 - include: set_loopback_context.yml
   when: openshift.common.version_gte_3_2_or_1_2
@@ -179,7 +179,10 @@
 # https://github.com/openshift/origin/issues/6065
 # https://github.com/openshift/origin/issues/6447
 - name: Start and enable master
-  service: name={{ openshift.common.service_type }}-master enabled=yes state=started
+  systemd:
+    name: "{{ openshift.common.service_type }}-master"
+    enabled: yes
+    state: started
   when: not openshift_master_ha | bool
   register: start_result
   until: not start_result | failed
@@ -187,29 +190,30 @@
   delay: 60
   notify: Verify API Server
 
-- name: Check for non-HA master service presence
-  command: systemctl show {{ openshift.common.service_type }}-master.service
-  register: master_svc_show
-  changed_when: false
-  failed_when: false
-
 - name: Stop and disable non-HA master when running HA
-  service:
+  systemd:
     name: "{{ openshift.common.service_type }}-master"
     enabled: no
     state: stopped
-  when: openshift_master_ha | bool and master_svc_show.rc == 0 and 'LoadState=not-found' not in master_svc_show.stdout
+  when: openshift_master_ha | bool
+  register: task_result
+  failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
 
 - set_fact:
     master_service_status_changed: "{{ start_result | changed }}"
   when: not openshift_master_ha | bool
 
 - name: Mask master service
-  command: systemctl mask {{ openshift.common.service_type }}-master
-  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and not openshift.common.is_containerized | bool
+  systemd:
+    name: "{{ openshift.common.service_type }}-master"
+    masked: yes
+  when: >
+    openshift_master_ha | bool and
+    openshift.master.cluster_method == 'native' and
+    not openshift.common.is_containerized | bool
 
 - name: Start and enable master api on first master
-  service:
+  systemd:
     name: "{{ openshift.common.service_type }}-master-api"
     enabled: yes
     state: started
@@ -228,7 +232,7 @@
   when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
 
 - name: Start and enable master api all masters
-  service:
+  systemd:
     name: "{{ openshift.common.service_type }}-master-api"
     enabled: yes
     state: started
@@ -264,7 +268,7 @@
   when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool
 
 - name: Start and enable master controller on first master
-  service:
+  systemd:
     name: "{{ openshift.common.service_type }}-master-controllers"
     enabled: yes
     state: started
@@ -274,12 +278,13 @@
   retries: 1
   delay: 60
 
-- pause:
+- name: Wait for master controller service to start on first master
+  pause:
     seconds: 15
   when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
 
 - name: Start and enable master controller on all masters
-  service:
+  systemd:
     name: "{{ openshift.common.service_type }}-master-controllers"
     enabled: yes
     state: started
@@ -300,7 +305,10 @@
   register: install_result
 
 - name: Start and enable cluster service
-  service: name=pcsd enabled=yes state=started
+  systemd:
+    name: pcsd
+    enabled: yes
+    state: started
   when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
     and not openshift.common.is_containerized | bool
 

+ 1 - 1
roles/openshift_master_cluster/README.md

@@ -6,7 +6,7 @@ TODO
 Requirements
 ------------
 
-TODO
+* Ansible 2.2
 
 Role Variables
 --------------

+ 1 - 1
roles/openshift_master_cluster/meta/main.yml

@@ -4,7 +4,7 @@ galaxy_info:
   description:
   company: Red Hat, Inc.
   license: Apache License, Version 2.0
-  min_ansible_version: 1.8
+  min_ansible_version: 2.2
   platforms:
   - name: EL
     versions:

+ 4 - 2
roles/openshift_metrics/README.md

@@ -5,8 +5,10 @@ OpenShift Metrics Installation
 
 Requirements
 ------------
-It requires subdomain fqdn to be set.
-If persistence is enabled, then it also requires NFS.
+
+* Ansible 2.2
+* It requires subdomain fqdn to be set.
+* If persistence is enabled, then it also requires NFS.
 
 Role Variables
 --------------

+ 3 - 3
roles/openshift_metrics/handlers/main.yml

@@ -1,16 +1,16 @@
 ---
 - name: restart master
-  service: name={{ openshift.common.service_type }}-master state=restarted
+  systemd: name={{ openshift.common.service_type }}-master state=restarted
   when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
   notify: Verify API Server
 
 - name: restart master api
-  service: name={{ openshift.common.service_type }}-master-api state=restarted
+  systemd: name={{ openshift.common.service_type }}-master-api state=restarted
   when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
   notify: Verify API Server
 
 - name: restart master controllers
-  service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
   when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
 
 - name: Verify API Server

+ 15 - 1
roles/openshift_metrics/meta/main.yaml

@@ -1,3 +1,17 @@
+---
+galaxy_info:
+  author: David Martín
+  description:
+  company:
+  license: Apache License, Version 2.0
+  min_ansible_version: 2.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+  - system
 dependencies:
 - { role: openshift_examples }
-- { role: openshift_facts }
+- { role: openshift_facts }

+ 4 - 4
roles/openshift_node/README.md

@@ -6,10 +6,10 @@ Node service installation
 Requirements
 ------------
 
-One or more Master servers.
-
-A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
-rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos.
+* Ansible 2.2
+* One or more Master servers
+* A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
+rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos
 
 Role Variables
 --------------

+ 2 - 2
roles/openshift_node/handlers/main.yml

@@ -1,6 +1,6 @@
 ---
 - name: restart openvswitch
-  service: name=openvswitch state=restarted
+  systemd: name=openvswitch state=restarted
   when: not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
   notify:
   - restart openvswitch pause
@@ -10,5 +10,5 @@
   when: openshift.common.is_containerized | bool
 
 - name: restart node
-  service: name={{ openshift.common.service_type }}-node state=restarted
+  systemd: name={{ openshift.common.service_type }}-node state=restarted
   when: not (node_service_status_changed | default(false) | bool)

+ 40 - 29
roles/openshift_node/tasks/main.yml

@@ -2,35 +2,37 @@
 # TODO: allow for overriding default ports where possible
 - fail:
     msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
-  when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
+  when: >
+    (not ansible_selinux or ansible_selinux.status != 'enabled') and
+    deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
 
 - name: Set node facts
   openshift_facts:
     role: "{{ item.role }}"
     local_facts: "{{ item.local_facts }}"
   with_items:
-  # Reset node labels to an empty dictionary.
-  - role: node
-    local_facts:
-      labels: {}
-  - role: node
-    local_facts:
-      annotations: "{{ openshift_node_annotations | default(none) }}"
-      debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
-      iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
-      kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
-      labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
-      registry_url: "{{ oreg_url | default(none) }}"
-      schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
-      sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
-      storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
-      set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
-      node_image: "{{ osn_image | default(None) }}"
-      ovs_image: "{{ osn_ovs_image | default(None) }}"
-      proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
-      local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
-      dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
-      env_vars: "{{ openshift_node_env_vars | default(None) }}"
+    # Reset node labels to an empty dictionary.
+    - role: node
+      local_facts:
+        labels: {}
+    - role: node
+      local_facts:
+        annotations: "{{ openshift_node_annotations | default(none) }}"
+        debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
+        iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
+        kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
+        labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
+        registry_url: "{{ oreg_url | default(none) }}"
+        schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+        sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
+        storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
+        set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
+        node_image: "{{ osn_image | default(None) }}"
+        ovs_image: "{{ osn_ovs_image | default(None) }}"
+        proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
+        local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
+        dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
+        env_vars: "{{ openshift_node_env_vars | default(None) }}"
 
 # We have to add tuned-profiles in the same transaction otherwise we run into depsolving
 # problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
@@ -80,7 +82,10 @@
   sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes
 
 - name: Start and enable openvswitch docker service
-  service: name=openvswitch.service enabled=yes state=started
+  systemd:
+    name: openvswitch.service
+    enabled: yes
+    state: started
   when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
   register: ovs_start_result
 
@@ -102,7 +107,7 @@
     group: root
     mode: 0600
   notify:
-  - restart node
+    - restart node
 
 - name: Configure AWS Cloud Provider Settings
   lineinfile:
@@ -118,7 +123,7 @@
   no_log: True
   when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined"
   notify:
-  - restart node
+    - restart node
 
 - name: Configure Node Environment Variables
   lineinfile:
@@ -128,7 +133,7 @@
     create: true
   with_dict: "{{ openshift.node.env_vars | default({}) }}"
   notify:
-  - restart node
+    - restart node
 
 - name: NFS storage plugin configuration
   include: storage_plugins/nfs.yml
@@ -168,11 +173,17 @@
   when: openshift.common.is_containerized | bool
 
 - name: Start and enable node dep
-  service: name={{ openshift.common.service_type }}-node-dep enabled=yes state=started
+  systemd:
+    name: "{{ openshift.common.service_type }}-node-dep"
+    enabled: yes
+    state: started
   when: openshift.common.is_containerized | bool
 
 - name: Start and enable node
-  service: name={{ openshift.common.service_type }}-node enabled=yes state=started
+  systemd:
+    name: "{{ openshift.common.service_type }}-node"
+    enabled: yes
+    state: started
   register: node_start_result
   until: not node_start_result | failed
   retries: 1

+ 2 - 0
roles/openshift_node_certificates/README.md

@@ -6,6 +6,8 @@ This role determines if OpenShift node certificates must be created, delegates c
 Requirements
 ------------
 
+* Ansible 2.2
+
 Role Variables
 --------------
 

+ 2 - 2
roles/openshift_node_certificates/handlers/main.yml

@@ -2,9 +2,9 @@
 - name: update ca trust
   command: update-ca-trust
   notify:
-  - restart docker after updating ca trust
+    - restart docker after updating ca trust
 
 - name: restart docker after updating ca trust
-  service:
+  systemd:
     name: docker
     state: restarted

+ 1 - 1
roles/openshift_node_certificates/meta/main.yml

@@ -4,7 +4,7 @@ galaxy_info:
   description: OpenShift Node Certificates
   company: Red Hat, Inc.
   license: Apache License, Version 2.0
-  min_ansible_version: 2.1
+  min_ansible_version: 2.2
   platforms:
   - name: EL
     versions:

+ 2 - 2
roles/openshift_node_dnsmasq/handlers/main.yml

@@ -1,10 +1,10 @@
 ---
 - name: restart NetworkManager
-  service:
+  systemd:
     name: NetworkManager
     state: restarted
 
 - name: restart dnsmasq
-  service:
+  systemd:
     name: dnsmasq
     state: restarted

+ 1 - 1
roles/openshift_node_dnsmasq/meta/main.yml

@@ -4,7 +4,7 @@ galaxy_info:
   description: OpenShift Node DNSMasq support
   company: Red Hat, Inc.
   license: Apache License, Version 2.0
-  min_ansible_version: 1.7
+  min_ansible_version: 2.2
   platforms:
   - name: EL
     versions:

+ 6 - 6
roles/openshift_node_dnsmasq/tasks/main.yml

@@ -22,16 +22,16 @@
 
 - name: Deploy additional dnsmasq.conf
   template:
-   src: "{{ openshift_node_dnsmasq_additional_config_file }}"
-   dest: /etc/dnsmasq.d/openshift-ansible.conf
-   owner: root
-   group: root
-   mode: 0644
+    src: "{{ openshift_node_dnsmasq_additional_config_file }}"
+    dest: /etc/dnsmasq.d/openshift-ansible.conf
+    owner: root
+    group: root
+    mode: 0644
   when: openshift_node_dnsmasq_additional_config_file is defined
   notify: restart dnsmasq
 
 - name: Enable dnsmasq
-  service:
+  systemd:
     name: dnsmasq
     enabled: yes
     state: started

+ 3 - 3
roles/openshift_storage_nfs/README.md

@@ -6,10 +6,10 @@ OpenShift NFS Server Installation
 Requirements
 ------------
 
-This role is intended to be applied to the [nfs] host group which is
+* Ansible 2.2
+* This role is intended to be applied to the [nfs] host group which is
 separate from OpenShift infrastructure components.
-
-Requires access to the 'nfs-utils' package.
+* Requires access to the 'nfs-utils' package.
 
 Role Variables
 --------------

+ 1 - 1
roles/openshift_storage_nfs/handlers/main.yml

@@ -1,6 +1,6 @@
 ---
 - name: restart nfs-server
-  service:
+  systemd:
     name: nfs-server
     state: restarted
   when: not (nfs_service_status_changed | default(false))

+ 1 - 1
roles/openshift_storage_nfs/meta/main.yml

@@ -4,7 +4,7 @@ galaxy_info:
   description: OpenShift NFS Server
   company: Red Hat, Inc.
   license: Apache License, Version 2.0
-  min_ansible_version: 1.9
+  min_ansible_version: 2.2
   platforms:
   - name: EL
     versions:

+ 8 - 10
roles/openshift_storage_nfs/tasks/main.yml

@@ -10,7 +10,7 @@
   register: nfs_config
 
 - name: Restart nfs-config
-  service: name=nfs-config state=restarted
+  systemd: name=nfs-config state=restarted
   when: nfs_config | changed
 
 - name: Ensure exports directory exists
@@ -26,9 +26,9 @@
     owner: nfsnobody
     group: nfsnobody
   with_items:
-  - "{{ openshift.hosted.registry }}"
-  - "{{ openshift.hosted.metrics }}"
-  - "{{ openshift.hosted.logging }}"
+    - "{{ openshift.hosted.registry }}"
+    - "{{ openshift.hosted.metrics }}"
+    - "{{ openshift.hosted.logging }}"
 
 
 - name: Configure exports
@@ -36,7 +36,7 @@
     dest: /etc/exports.d/openshift-ansible.exports
     src: exports.j2
   notify:
-  - restart nfs-server
+    - restart nfs-server
 
 # Now that we're putting our exports in our own file clean up the old ones
 - name: register exports
@@ -51,16 +51,14 @@
   with_items: "{{ exports_out.stdout_lines | default([]) }}"
   when: exports_out.rc == 0
   notify:
-  - restart nfs-server
+    - restart nfs-server
 
 - name: Enable and start services
-  service:
-    name: "{{ item }}"
+  systemd:
+    name: nfs-server
     state: started
     enabled: yes
   register: start_result
-  with_items:
-  - nfs-server
 
 - set_fact:
     nfs_service_status_changed: "{{ start_result | changed }}"

+ 2 - 3
roles/openshift_storage_nfs_lvm/README.md

@@ -8,10 +8,9 @@ create persistent volumes.
 
 ## Requirements
 
-* NFS server with NFS, iptables, and everything setup.
-
+* Ansible 2.2
+* NFS server with NFS, iptables, and everything setup
 * A lvm volume group created on the nfs server (default: openshiftvg)
-
 * The lvm volume needs to have as much free space as you are allocating
 
 ## Role Variables

+ 1 - 1
roles/openshift_storage_nfs_lvm/handlers/main.yml

@@ -1,3 +1,3 @@
 ---
 - name: restart nfs
-  service: name=nfs-server state=restarted
+  systemd: name=nfs-server state=restarted

+ 1 - 1
roles/openshift_storage_nfs_lvm/meta/main.yml

@@ -4,7 +4,7 @@ galaxy_info:
   description: Create LVM volumes and use them as openshift persistent volumes.
   company: Red Hat, Inc.
   license: license (Apache)
-  min_ansible_version: 1.4
+  min_ansible_version: 2.2
   platforms:
   - name: EL
     versions:

+ 12 - 3
roles/openshift_storage_nfs_lvm/tasks/nfs.yml

@@ -4,14 +4,23 @@
   when: not openshift.common.is_containerized | bool
 
 - name: Start rpcbind
-  service: name=rpcbind state=started enabled=yes
+  systemd:
+    name: rpcbind
+    state: started
+    enabled: yes
 
 - name: Start nfs
-  service: name=nfs-server state=started enabled=yes
+  systemd:
+    name: nfs-server
+    state: started
+    enabled: yes
 
 - name: Export the directories
   lineinfile: dest=/etc/exports
               regexp="^{{ osnl_mount_dir }}/{{ item }} "
               line="{{ osnl_mount_dir }}/{{ item }} {{osnl_nfs_export_options}}"
-  with_sequence: start={{osnl_volume_num_start}} count={{osnl_number_of_volumes}} format={{osnl_volume_prefix}}{{osnl_volume_size}}g%04d
+  with_sequence:
+    start: "{{osnl_volume_num_start}}"
+    count: "{{osnl_number_of_volumes}}"
+    format: "{{osnl_volume_prefix}}{{osnl_volume_size}}g%04d"
   notify: restart nfs