
Merge pull request #11389 from jstuever/cherrypick_11343_11386

Cherrypick 11343 11386
OpenShift Merge Robot 6 years ago
parent
commit
172e2b8aca

+ 2 - 2
images/installer/root/usr/local/bin/entrypoint-provider

@@ -24,9 +24,9 @@ if ! whoami &>/dev/null; then
   echo "${USER:-default}:x:$(id -u):$(id -g):Default User:$HOME:/sbin/nologin" >> /etc/passwd
 fi
 
-# Provide a "files_dir" variable that points to inventory/dynamic/injected
+# Provide a path to the pull secret
 mkdir -p "${WORK}/inventory/dynamic/${TYPE}/group_vars/all"
-echo "files_dir: \"${FILES}\"" > "${WORK}/inventory/dynamic/${TYPE}/group_vars/all/00_default_files_dir.yml"
+echo "openshift_pull_secret_path: \"${FILES}/pull-secret\"" > "${WORK}/inventory/dynamic/${TYPE}/group_vars/all/00_pull_secret_path.yml"
 # Add any injected variable files into the group vars directory
 find "${FILES}" \( -name '*.yml' -or -name '*.yaml' -or -name vars \) -print0 | xargs -0 -L1 -I {} ln -fs {} "${WORK}/inventory/dynamic/${TYPE}/group_vars/all"
 # Avoid sudo when running locally - nothing in the image requires it.
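For orientation (not part of the commit): the new `echo` line writes a single variable into the group_vars directory. With the injected files directory mounted at, say, `/assets` (an assumed path, used here only for illustration), the generated file would look roughly like:

```
# inventory/dynamic/<type>/group_vars/all/00_pull_secret_path.yml (illustrative only)
openshift_pull_secret_path: "/assets/pull-secret"
```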

+ 1 - 1
playbooks/openshift-node/scaleup.yml

@@ -13,4 +13,4 @@
 - name: install nodes
   hosts: new_workers
   roles:
-  - openshift_node40
+  - openshift_node

+ 61 - 0
roles/openshift_node/README.md

@@ -0,0 +1,61 @@
+OpenShift Node
+================================
+
+Node service installation
+
+Requirements
+------------
+
+* Ansible 2.2
+* One or more Master servers
+* A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
+rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos
+
+Role Variables
+--------------
+From this role:
+
+| Name                                     | Default value         | Description                                              |
+|------------------------------------------|-----------------------|----------------------------------------------------------|
+| openshift_node_start_options             | UNDEF (Optional)      | Options to pass to node start cmdline                    |
+| oreg_url                                 | UNDEF (Optional)      | Default docker registry to use                           |
+| openshift_persistentlocalstorage_enabled | false                 | Enable the persistent local storage                      |
+
+openshift_node_start_options can be used to pass any node start option, e.g.:
+
+--enable=kubelet,plugins
+
+which would run the node without kube-proxy and DNS.
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+
+Notes
+-----
+
+Currently we support re-labeling nodes, but we do not re-schedule running pods or remove existing labels. That means you will have to trigger the re-scheduling manually. To re-schedule your pods, follow the steps below:
+
+```
+oc adm manage-node --schedulable=false ${NODE}
+oc adm manage-node --drain ${NODE}
+oc adm manage-node --schedulable=true ${NODE}
+```
+
+> If you are using a version earlier than 1.5/3.5, you must replace `--drain` with `--evacuate`.
+
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+TODO
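A minimal sketch (not part of the commit) of how the variables documented in this README might be supplied when calling the role from a scaleup playbook; the kubeconfig and pull secret paths are hypothetical:

```
- name: install nodes
  hosts: new_workers
  vars:
    openshift_kubeconfig_path: "~/clusters/demo/auth/kubeconfig"   # hypothetical path
    openshift_pull_secret_path: "~/clusters/demo/pull-secret.txt"  # hypothetical path
    openshift_node_machineconfigpool: "worker"
  roles:
  - openshift_node
```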

roles/openshift_node40/callback_plugins/aa_version_requirement.py → roles/openshift_node/callback_plugins/aa_version_requirement.py


+ 89 - 0
roles/openshift_node/defaults/main.yml

@@ -0,0 +1,89 @@
+---
+openshift_kubeconfig_path: '~/.kube/config'
+openshift_pull_secret_path: '~/pull-secret.txt'
+
+openshift_node_machineconfigpool: 'worker'
+openshift_node_tls_verify: false
+
+openshift_node_kubeconfig: "{{ lookup('file', openshift_kubeconfig_path) | from_yaml }}"
+openshift_node_bootstrap_port: 22623
+openshift_node_bootstrap_server: "{{ openshift_node_kubeconfig.clusters.0.cluster.server.split(':')[0:-1] | join(':') }}:{{ openshift_node_bootstrap_port }}"
+openshift_node_bootstrap_endpoint: "{{ openshift_node_bootstrap_server }}/config/{{ openshift_node_machineconfigpool }}"
+
+openshift_node_install_packages:
+  # Packages from redhat-coreos.git manifest-base.yaml
+  - kernel
+  - irqbalance
+  - microcode_ctl
+  - systemd
+  - systemd-journal-gateway
+  #- rpm-ostree
+  #- nss-altfiles
+  - selinux-policy-targeted
+  - setools-console
+  #- ignition
+  #- ignition-dracut
+  - dracut-network
+  - passwd
+  #- grub2
+  #- grub2-efi
+  #- ostree-grub2
+  #- efibootmgr
+  #- shim
+  - openssh-server
+  - openssh-clients
+  - podman
+  - skopeo
+  - runc
+  - containernetworking-plugins
+  #- cri-o
+  - cri-tools
+  #- toolbox
+  - nfs-utils
+  - NetworkManager
+  - dnsmasq
+  - lvm2
+  - iscsi-initiator-utils
+  - sg3_utils
+  - device-mapper-multipath
+  - xfsprogs
+  - e2fsprogs
+  - mdadm
+  - cryptsetup
+  - chrony
+  #- coreos-metadata
+  - logrotate
+  - sssd
+  - shadow-utils
+  - sudo
+  - coreutils
+  - less
+  - tar
+  - xz
+  - gzip
+  - bzip2
+  - rsync
+  - tmux
+  - nmap-ncat
+  - net-tools
+  - bind-utils
+  - strace
+  - bash-completion
+  - vim-minimal
+  - nano
+  #- openshift-hyperkube
+  #- openshift-clients
+  #- pivot
+  #- subscription-manager-rhsm-certificates
+  #
+  # Packages from redhat-coreos.git maipo/manifest.yaml
+  #- redhat-release-coreos
+  - authconfig
+  - policycoreutils-python
+  - iptables-services
+  - bridge-utils
+  - biosdevname
+  - container-storage-setup
+  - cloud-utils-growpart
+  - ceph-common
+  - glusterfs-fuse
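For context (not part of the commit): the bootstrap server default strips the port from the kubeconfig's cluster server URL and substitutes the machine-config server port. With a hypothetical API server URL, the expressions above evaluate roughly as follows:

```
# Illustrative only: assuming the kubeconfig's cluster server is
# "https://api.demo.example.com:6443", the defaults above resolve to:
openshift_node_bootstrap_server: "https://api.demo.example.com:22623"
openshift_node_bootstrap_endpoint: "https://api.demo.example.com:22623/config/worker"
```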

roles/openshift_node40/library/swapoff.py → roles/openshift_node/library/swapoff.py


roles/openshift_node40/meta/main.yml → roles/openshift_node/meta/main.yml


+ 11 - 11
roles/openshift_node40/tasks/config.yml

@@ -22,6 +22,11 @@
     state: yes
     persistent: yes
 
+- name: create temp directory
+  tempfile:
+    state: directory
+  register: tempfile
+
 - name: Wait for bootstrap endpoint to show up
   uri:
     url: "{{ openshift_node_bootstrap_endpoint }}"
@@ -36,22 +41,17 @@
 - name: Fetch bootstrap ignition file locally
   uri:
     url: "{{ openshift_node_bootstrap_endpoint }}"
-    dest: "{{ ign_file }}"
+    dest: "{{ tempfile.path }}/bootstrap.ign"
     validate_certs: false
 
-- name: create temp directory
-  tempfile:
-    state: directory
-  register: tempfile
-
 - name: Copy pull secret in the directory
   copy:
-    src: "{{ pull_secret }}"
+    src: "{{ openshift_pull_secret_path }}"
     dest: "{{ tempfile.path }}/pull-secret.json"
 
 - name: Get release image
   k8s_facts:
-    kubeconfig: "{{ kubeconfig_path }}"
+    kubeconfig: "{{ openshift_kubeconfig_path }}"
     kind: ClusterVersion
     name: version
   delegate_to: localhost
@@ -70,7 +70,7 @@
     openshift_release_image: "{{ clusterversion.resources[0].status.desired.image }}"
 
 - name: Pull release image
-  command: "podman pull --tls-verify={{ tls_verify }} --authfile {{ tempfile.path }}/pull-secret.json {{ openshift_release_image }}"
+  command: "podman pull --tls-verify={{ openshift_node_tls_verify }} --authfile {{ tempfile.path }}/pull-secret.json {{ openshift_release_image }}"
 
 - name: Get machine controller daemon image from release image
   command: "podman run --rm {{ openshift_release_image }} image machine-config-daemon"
@@ -78,14 +78,14 @@
 
 - block:
   - name: Pull MCD image
-    command: "podman pull --tls-verify={{ tls_verify }} --authfile {{ tempfile.path }}/pull-secret.json {{ release_image_mcd.stdout }}"
+    command: "podman pull --tls-verify={{ openshift_node_tls_verify }} --authfile {{ tempfile.path }}/pull-secret.json {{ release_image_mcd.stdout }}"
 
   - name: Apply ignition manifest
     command: "podman run {{ podman_mounts }} {{ podman_flags }} {{ mcd_command }}"
     vars:
       podman_flags: "--privileged --rm -ti {{ release_image_mcd.stdout }}"
       podman_mounts: "-v /:/rootfs -v /var/run/dbus:/var/run/dbus -v /run/systemd:/run/systemd"
-      mcd_command: "start --node-name {{ ansible_hostname }} --once-from {{ ign_file }}"
+      mcd_command: "start --node-name {{ ansible_hostname }} --once-from {{ tempfile.path }}/bootstrap.ign"
     # MCD reboots the machine, run the task but do not wait for completion
     register: manifest_apply
     async: 900  # 15 minutes
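For context (not part of the commit), a rough expansion of the composed `podman run` invocation above; the host name, temp directory, and MCD image reference are hypothetical:

```
# Illustrative sketch of the task above with its vars substituted.
- name: Apply ignition manifest (expanded illustration, hypothetical values)
  command: >-
    podman run
    -v /:/rootfs -v /var/run/dbus:/var/run/dbus -v /run/systemd:/run/systemd
    --privileged --rm -ti registry.example.com/machine-config-daemon:latest
    start --node-name worker-0.example.com --once-from /tmp/ansible.abc123/bootstrap.ign
  register: manifest_apply
  async: 900  # MCD reboots the machine, so do not wait for completion
  poll: 0
```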

+ 6 - 5
roles/openshift_node40/tasks/install.yml

@@ -6,11 +6,6 @@
   async: 3600
   poll: 30
 
-- name: Enable the CRI-O service
-  systemd:
-    name: "cri-o"
-    enabled: yes
-
 - name: Install openshift packages
   package:
     name: "{{ l_node_packages | join(',') }}"
@@ -18,5 +13,11 @@
   poll: 30
   vars:
     l_node_packages:
+    - cri-o
     - openshift-clients
     - openshift-hyperkube
+
+- name: Enable the CRI-O service
+  systemd:
+    name: "cri-o"
+    enabled: yes

roles/openshift_node40/tasks/main.yml → roles/openshift_node/tasks/main.yml


+ 0 - 26
roles/openshift_node40/defaults/main.yml

@@ -1,26 +0,0 @@
----
-openshift_node_kubeconfig: "{{ lookup('file', kubeconfig_path) | from_yaml }}"
-openshift_node_bootstrap_port: 22623
-openshift_node_bootstrap_machineconfigpool: 'worker'
-openshift_node_bootstrap_server: "{{ openshift_node_kubeconfig.clusters.0.cluster.server.split(':')[0:-1] | join(':') }}:{{ openshift_node_bootstrap_port }}"
-openshift_node_bootstrap_endpoint: "{{ openshift_node_bootstrap_server }}/config/{{ openshift_node_bootstrap_machineconfigpool }}"
-
-openshift_release_image: "registry.svc.ci.openshift.org/openshift/origin-release:v4.0"
-ign_file: "/tmp/bootstrap.ign"
-pull_secret: "{{ files_dir }}/pull-secret"
-tls_verify: false
-
-openshift_node_install_packages:
-  # Packages from old init/base_packages
-  - iproute
-  - dbus-python
-  - PyYAML
-  - libsemanage-python
-  - yum-utils
-  - python-docker-py
-  - systemd-journal-gateway
-  - python-ipaddress
-  # Packages from old roles/container_runtime
-  - cri-o
-  - cri-tools
-  - podman

+ 3 - 1
test/aws/scaleup.yml

@@ -32,7 +32,9 @@
     vars:
       openshift_version: "4.0"
 
-- import_playbook: ../../playbooks/openshift-node/scaleup.yml
+- import_playbook: ../../playbooks/scaleup.yml
+  vars:
+    openshift_kubeconfig_path: "{{ kubeconfig_path }}"
 
 - name: wait for nodes to join
   hosts: new_workers

+ 1 - 1
test/libvirt/node_scaleup.sh

@@ -4,4 +4,4 @@ APB3="`which python3` `which ansible-playbook`"
 WORKDIR=$PWD
 
 cd ../..
-$APB3 -vvv -i $WORKDIR/inventory.txt playbooks/openshift-node/scaleup.yml
+$APB3 -vvv -i $WORKDIR/inventory.txt playbooks/scaleup.yml