Browse Source

Remove k8s modules from test playbooks

The k8s Ansible modules were only used in the test playbooks, which have
now been refactored to use the `command` module with `oc` to be
consistent with the main playbooks.

Summary of changes:
- Removed ssh bastion creation playbooks and files
- ssh bastion creation is now done via a script in CI
- Creating machine sets is broken out into a separately included playbook
- Preparation of nodes has been consolidated
- Wait for new worker configs to roll out before removing CoreOS nodes
- Removed python2-openshift from the -test package; it was only required by the `k8s*` modules
- Playbooks in test/ are now syntax checked
Russell Teague 5 years ago
parent
commit
55ddf4cf7b

+ 0 - 1
openshift-ansible.spec

@@ -61,7 +61,6 @@ cp -rp test %{buildroot}%{_datadir}/ansible/%{name}/
 Summary:       Openshift and Atomic Enterprise Ansible Test Playbooks
 Requires:      %{name} = %{version}-%{release}
 Requires:      ansible = 2.9.5
-Requires:      python2-openshift
 Requires:      openssh-clients
 BuildArch:     noarch
 

+ 7 - 3
setup.py

@@ -67,14 +67,14 @@ def recursive_search(search_list, field):
     return fields_found
 
 
-def find_playbooks():
+def find_playbooks(base_dir):
     ''' find Ansible playbooks'''
     all_playbooks = set()
     included_playbooks = set()
 
     exclude_dirs = ('adhoc', 'tasks', 'ovirt')
     for yaml_file in find_files(
-            os.path.join(os.getcwd(), 'playbooks'),
+            os.path.join(os.getcwd(), base_dir),
             exclude_dirs, None, r'^[^\.].*\.ya?ml$'):
         with open(yaml_file, 'r') as contents:
             for task in yaml.safe_load(contents) or {}:
@@ -288,7 +288,7 @@ class OpenShiftAnsibleSyntaxCheck(Command):
         if not has_errors:
             print('...PASSED')
 
-        all_playbooks, included_playbooks = find_playbooks()
+        all_playbooks, included_playbooks = find_playbooks('playbooks')
 
         print('#' * 60)
         print('Invalid Playbook Include Checks')
@@ -323,6 +323,10 @@ class OpenShiftAnsibleSyntaxCheck(Command):
         print('Ansible Playbook Entry Point Syntax Checks')
         # Evaluate the difference between all playbooks and included playbooks
         entrypoint_playbooks = sorted(all_playbooks.difference(included_playbooks))
+        # Add ci test playbooks
+        test_playbooks, test_included_playbooks = find_playbooks('test')
+        test_entrypoint_playbooks = sorted(test_playbooks.difference(test_included_playbooks))
+        entrypoint_playbooks.extend(test_entrypoint_playbooks)
         print('Entry point playbook count: {}'.format(len(entrypoint_playbooks)))
         for playbook in entrypoint_playbooks:
             print('-' * 60)

+ 55 - 0
test/aws/create_machines.yml

@@ -0,0 +1,55 @@
+---
+- name: Create AWS instances using machine sets
+  hosts: localhost
+  connection: local
+
+  vars:
+    new_workers_list: []
+
+  tasks:
+  - import_tasks: tasks/get_machinesets.yml
+
+  - include_tasks: tasks/create_machineset.yml
+    loop: "{{ (machineset.stdout | from_json)['items'] }}"
+    when:
+    - item.status.replicas is defined
+    - item.status.replicas != 0
+
+- name: Prepare new nodes
+  hosts: new_workers
+  gather_facts: false
+  tasks:
+  - wait_for_connection: {}
+  - setup: {}
+  - name: Copy ops-mirror.pem
+    copy:
+      src: ../../inventory/dynamic/injected/ops-mirror.pem
+      dest: /var/lib/yum/ops-mirror.pem
+      owner: root
+      group: root
+      mode: 0644
+  - name: Initialize openshift repos
+    import_tasks: tasks/additional_repos.yml
+
+  # Preparation required for e2e tests
+  - name: Disable selinux
+    selinux:
+      policy: targeted
+      state: permissive
+  - name: Create core user for storage tests to pass
+    user:
+      name: core
+      group: wheel
+  - name: Make sure core user has ssh config directory
+    file:
+      name: /home/core/.ssh
+      state: directory
+      owner: core
+      group: wheel
+      mode: 0700
+  - name: Install nfs-utils for storage tests
+    package:
+      name: nfs-utils
+      state: present
+    register: result
+    until: result is succeeded

+ 0 - 20
test/aws/files/01_service.yml

@@ -1,20 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  labels:
-    run: ssh-bastion
-  name: ssh-bastion
-  namespace: byoh-ssh-bastion
-  annotations:
-    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"
-    service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
-spec:
-  externalTrafficPolicy: Local
-  ports:
-  - name: ssh
-    port: 22
-    protocol: TCP
-    targetPort: ssh
-  selector:
-    run: ssh-bastion
-  type: LoadBalancer

+ 0 - 5
test/aws/files/02_serviceaccount.yml

@@ -1,5 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: ssh-bastion
-  namespace: byoh-ssh-bastion

+ 0 - 14
test/aws/files/03_role.yml

@@ -1,14 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: ssh-bastion
-  namespace: byoh-ssh-bastion
-rules:
-- apiGroups:
-  - security.openshift.io
-  resources:
-  - securitycontextconstraints
-  verbs:
-  - use
-  resourceNames:
-  - privileged

+ 0 - 15
test/aws/files/04_rolebinding.yml

@@ -1,15 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  annotations:
-    openshift.io/description: Allows ssh-pod to run as root
-  name: ssh-bastion
-  namespace: byoh-ssh-bastion
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: ssh-bastion
-subjects:
-- apiGroup: rbac.authorization.k8s.io
-  kind: User
-  name: system:serviceaccount:byoh-ssh-bastion:ssh-bastion

+ 0 - 18
test/aws/files/05_clusterrole.yml

@@ -1,18 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: ssh-bastion
-rules:
-- apiGroups:
-  - "machineconfiguration.openshift.io"
-  resources:
-  - "machineconfigs"
-  verbs:
-  - get
-- apiGroups:
-  - ""
-  resources:
-  - "nodes"
-  verbs:
-  - list
-  - get

+ 0 - 14
test/aws/files/06_clusterrolebinding.yml

@@ -1,14 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  annotations:
-    openshift.io/description: Allows ssh-pod to read nodes and machineconfigs
-  name: ssh-bastion
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: ssh-bastion
-subjects:
-- apiGroup: rbac.authorization.k8s.io
-  kind: User
-  name: system:serviceaccount:byoh-ssh-bastion:ssh-bastion

+ 0 - 43
test/aws/files/07_deployment.yml

@@ -1,43 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  labels:
-    run: ssh-bastion
-  name: ssh-bastion
-  namespace: byoh-ssh-bastion
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      run: ssh-bastion
-  template:
-    metadata:
-      labels:
-        run: ssh-bastion
-    spec:
-      serviceAccountName: "ssh-bastion"
-      containers:
-      - image: quay.io/eparis/ssh:latest
-        imagePullPolicy: Always
-        name: ssh-bastion
-        securityContext:
-          privileged: true
-        ports:
-        - containerPort: 22
-          name: ssh
-          protocol: TCP
-        volumeMounts:
-        - name: ssh-host-keys
-          mountPath: "/etc/ssh/"
-          readOnly: true
-      volumes:
-      - name: ssh-host-keys
-        secret:
-          secretName: ssh-host-keys
-          items:
-          - key: ssh_host_rsa_key
-            path: ssh_host_rsa_key
-            mode: 256
-          - key: sshd_config
-            path: sshd_config
-      restartPolicy: Always

+ 0 - 20
test/aws/files/sshd_config

@@ -1,20 +0,0 @@
-HostKey /etc/ssh/ssh_host_rsa_key
-HostKey /etc/ssh/ssh_host_ecdsa_key
-HostKey /etc/ssh/ssh_host_ed25519_key
-SyslogFacility AUTHPRIV
-PermitRootLogin no
-AuthorizedKeysFile	/home/core/.ssh/authorized_keys
-PasswordAuthentication no
-ChallengeResponseAuthentication no
-GSSAPIAuthentication yes
-GSSAPICleanupCredentials no
-UsePAM yes
-X11Forwarding yes
-PrintMotd no
-AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
-AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
-AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
-AcceptEnv XMODIFIERS
-Subsystem	sftp	/usr/libexec/openssh/sftp-server
-ClientAliveCountMax 5
-ClientAliveInterval 5

+ 0 - 34
test/aws/get_machinesets.yml

@@ -1,34 +0,0 @@
----
-- name: List existing worker nodes
-  k8s_facts:
-    kubeconfig: "{{ kubeconfig_path }}"
-    kind: Node
-    label_selectors:
-    - "node-role.kubernetes.io/worker"
-  register: pre_scaleup_workers
-  until:
-  - pre_scaleup_workers.resources is defined
-  - pre_scaleup_workers.resources | length > 0
-  retries: 36
-  delay: 5
-
-- name: Set fact pre_scaleup_workers_name
-  set_fact:
-    pre_scaleup_workers_name: "{{ pre_scaleup_workers.resources | map(attribute='metadata.name') | list }}"
-
-- name: Get existing worker machinesets
-  k8s_facts:
-    api_version: machine.openshift.io/v1beta1
-    kubeconfig: "{{ kubeconfig_path }}"
-    namespace: openshift-machine-api
-    kind: MachineSet
-  register: machineset
-  until:
-  - machineset.resources is defined
-  - machineset.resources | length > 0
-  retries: 36
-  delay: 5
-
-- name: Set fact pre_scaleup_machineset_names
-  set_fact:
-    pre_scaleup_machineset_names: "{{ machineset.resources | map(attribute='metadata.name') | list }}"

+ 44 - 99
test/aws/scaleup.yml

@@ -1,116 +1,40 @@
 ---
-- name: create new nodes
-  hosts: localhost
-  connection: local
-  vars:
-    new_workers_list: []
-
-  tasks:
-  - import_tasks: ssh_bastion.yml
 
-  - import_tasks: get_machinesets.yml
-
-  - include_tasks: create_machineset.yml
-    loop: "{{ machineset.resources }}"
-    when:
-    - item.status.replicas is defined
-    - item.status.replicas != 0
-
-- name: wait for nodes to become available
-  hosts: new_workers
-  gather_facts: false
-  tasks:
-  - wait_for_connection: {}
-  - setup: {}
-  - name: Copy ops-mirror.pem
-    copy:
-      src: ../../inventory/dynamic/injected/ops-mirror.pem
-      dest: /var/lib/yum/ops-mirror.pem
-      owner: root
-      group: root
-      mode: 0644
-  - name: Initialize openshift repos
-    import_tasks: additional_repos.yml
+- import_playbook: create_machines.yml
 
 - import_playbook: ../../playbooks/scaleup.yml
   vars:
     openshift_kubeconfig_path: "{{ kubeconfig_path }}"
 
-- name: wait for nodes to join
+- name: Wait for nodes to join
   hosts: new_workers
   tasks:
-  - name: HACK disable selinux
-    selinux:
-      policy: targeted
-      state: permissive
-  - name: Create core user for storage tests to pass
-    user:
-      name: core
-      group: wheel
-  - name: Make sure core user has ssh config directory
-    file:
-      name: /home/core/.ssh
-      state: directory
-      owner: core
-      group: wheel
-      mode: 0700
-  - name: Install nfs-utils for storage tests
-    package:
-      name: nfs-utils
-      state: present
-    register: result
-    until: result is succeeded
-  - name: Wait for new nodes to be ready
-    k8s_facts:
-      kubeconfig: "{{ kubeconfig_path }}"
-      kind: Node
-      name: "{{ node_name }}"
-    delegate_to: localhost
-    register: new_machine
-    until:
-    - new_machine.resources is defined
-    - new_machine.resources | length > 0
-    - new_machine.resources[0].status is defined
-    - new_machine.resources[0].status.conditions is defined
-    - new_machine.resources[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
-    # Give the node three minutes to come back online.
-    retries: 48
-    delay: 30
-    ignore_errors: true
-  - when: new_machine is failed
-    block:
-    - include_tasks: gather_logs.yml
-    - fail:
-        msg: Node failed to become Ready.
+  - block:
+    - name: Wait for nodes to report ready
+      command: >
+        oc get node {{ hostvars[item].ansible_nodename | lower }}
+        --kubeconfig={{ kubeconfig_path }}
+        --output=jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
+      loop: "{{ ansible_play_batch }}"
+      delegate_to: localhost
+      run_once: true
+      register: oc_get
+      until:
+      - oc_get.stdout == "True"
+      retries: 36
+      delay: 5
+      changed_when: false
+
+    rescue:
+    - include_tasks: tasks/gather_logs.yml
+    - name: DEBUG - Node failed to report ready
+      fail:
+        msg: "Node failed to report Ready"
 
 - name: Remove CoreOS nodes
   hosts: localhost
   connection: local
   tasks:
-  - name: Mark CoreOS nodes as unschedulable
-    command: >
-      oc adm cordon {{ item | lower }}
-      --kubeconfig={{ kubeconfig_path }}
-    with_items: "{{ pre_scaleup_workers_name }}"
-
-  - name: Drain CoreOS nodes
-    command: >
-      oc adm drain {{ item | lower }}
-      --kubeconfig={{ kubeconfig_path }}
-      --force --delete-local-data --ignore-daemonsets
-      --timeout=0s
-    with_items: "{{ pre_scaleup_workers_name }}"
-
-  - name: remove existing machinesets
-    k8s:
-      api_version: machine.openshift.io/v1beta1
-      kubeconfig: "{{ kubeconfig_path }}"
-      namespace: openshift-machine-api
-      kind: MachineSet
-      name: "{{ item }}"
-      state: absent
-    with_items: "{{ pre_scaleup_machineset_names }}"
-
   - block:
     - name: Wait for worker configs to roll out
       command: >
@@ -129,3 +53,24 @@
     - name: DEBUG - Worker config rollout failed
       fail:
         msg: "Worker config rollout failed"
+
+  - name: Mark pre-scaleup worker nodes as unschedulable
+    command: >
+      oc adm cordon {{ item | lower }}
+      --kubeconfig={{ kubeconfig_path }}
+    loop: "{{ pre_scaleup_workers_name }}"
+
+  - name: Drain pre-scaleup worker nodes
+    command: >
+      oc adm drain {{ item | lower }}
+      --kubeconfig={{ kubeconfig_path }}
+      --force --delete-local-data --ignore-daemonsets
+      --timeout=0s
+    loop: "{{ pre_scaleup_workers_name }}"
+
+  - name: Remove pre-scaleup machinesets
+    command: >
+      oc delete machinesets {{ item }}
+      --kubeconfig={{ kubeconfig_path }}
+      --namespace=openshift-machine-api
+    loop: "{{ pre_scaleup_machineset_names }}"

+ 0 - 94
test/aws/ssh_bastion.yml

@@ -1,94 +0,0 @@
----
-- name: Create ssh bastion namespace
-  k8s:
-    kubeconfig: "{{ kubeconfig_path }}"
-    kind: Namespace
-    name: byoh-ssh-bastion
-    state: present
-
-- name: Create ssh bastion keys secret
-  k8s:
-    kubeconfig: "{{ kubeconfig_path }}"
-    resource_definition:
-      apiVersion: v1
-      kind: Secret
-      metadata:
-        name: ssh-host-keys
-        namespace: byoh-ssh-bastion
-      data:
-        ssh_host_rsa_key: "{{ lookup('file', '../../inventory/dynamic/injected/ssh-privatekey') | b64encode }}"
-        sshd_config: "{{ lookup('file', 'files/sshd_config') | b64encode }}"
-  no_log: true
-
-- name: Create ssh bastion service
-  k8s:
-    kubeconfig: "{{ kubeconfig_path }}"
-    src: files/01_service.yml
-
-- name: Create ssh bastion service account
-  k8s:
-    kubeconfig: "{{ kubeconfig_path }}"
-    src: files/02_serviceaccount.yml
-
-- name: Create ssh bastion role
-  k8s:
-    kubeconfig: "{{ kubeconfig_path }}"
-    src: files/03_role.yml
-
-- name: Create ssh bastion role binding
-  k8s:
-    kubeconfig: "{{ kubeconfig_path }}"
-    src: files/04_rolebinding.yml
-
-- name: Create ssh bastion cluster role
-  k8s:
-    kubeconfig: "{{ kubeconfig_path }}"
-    src: files/05_clusterrole.yml
-
-- name: Create ssh bastion cluster role binding
-  k8s:
-    kubeconfig: "{{ kubeconfig_path }}"
-    src: files/06_clusterrolebinding.yml
-
-- name: Create ssh bastion deployment
-  k8s:
-    kubeconfig: "{{ kubeconfig_path }}"
-    src: files/07_deployment.yml
-
-- name: Wait for ssh bastion deployment to rollout
-  k8s_facts:
-    kubeconfig: "{{ kubeconfig_path }}"
-    namespace: byoh-ssh-bastion
-    kind: Deployment
-    name: ssh-bastion
-  register: k8s_result
-  until:
-  - k8s_result.resources is defined
-  - k8s_result.resources | length > 0
-  - k8s_result.resources[0].status is defined
-  - k8s_result.resources[0].status.availableReplicas is defined
-  - k8s_result.resources[0].status.availableReplicas > 0
-  retries: 36
-  delay: 5
-
-- name: Get ssh bastion address
-  k8s_facts:
-    kubeconfig: "{{ kubeconfig_path }}"
-    namespace: byoh-ssh-bastion
-    kind: Service
-    name: ssh-bastion
-  register: k8s_result
-  until:
-  - k8s_result.resources is defined
-  - k8s_result.resources | length > 0
-  - k8s_result.resources[0].status is defined
-  - k8s_result.resources[0].status.loadBalancer is defined
-  - k8s_result.resources[0].status.loadBalancer.ingress is defined
-  - k8s_result.resources[0].status.loadBalancer.ingress | length > 0
-  - k8s_result.resources[0].status.loadBalancer.ingress[0].hostname is defined
-  retries: 36
-  delay: 5
-
-- name: Set fact ssh_bastion
-  set_fact:
-    ssh_bastion: "{{ k8s_result.resources[0].status.loadBalancer.ingress[0].hostname }}"

+ 1 - 1
test/aws/additional_repos.yml

@@ -12,7 +12,7 @@
     sslcacert: "{{ item.sslcacert | default(omit) }}"
     file: "{{ item.name }}"
     enabled: "{{ item.enabled | default('no')}}"
-  with_items: "{{ openshift_additional_repos }}"
+  loop: "{{ openshift_additional_repos }}"
   when:
   - openshift_additional_repos is defined
   - openshift_additional_repos | length > 0

+ 24 - 3
test/aws/create_machineset.yml

@@ -27,9 +27,15 @@
                 keyName: "{{ openshift_aws_scaleup_key }}"
 
 - name: Import machineset definition
-  k8s:
-    kubeconfig: "{{ kubeconfig_path }}"
-    definition: "{{ machineset | to_yaml }}"
+  command: >
+    oc apply -f -
+    --kubeconfig={{ kubeconfig_path }}
+  register: oc_apply
+  args:
+    stdin: "{{ machineset | to_yaml }}"
+  changed_when:
+  - ('created' in oc_apply.stdout) or
+    ('configured' in oc_apply.stdout)
 
 - block:
   - name: Get machines in the machineset
@@ -82,6 +88,21 @@
     fail:
       msg: "Machine creation failed, error: {{ new_machine.resources[0].status.errorMessage }}"
 
+- name: Get ssh bastion address
+  command: >
+    oc get service ssh-bastion
+    --kubeconfig={{ kubeconfig_path }}
+    --namespace=test-ssh-bastion
+    --output=jsonpath='{.status.loadBalancer.ingress[0].hostname}'
+  register: oc_get
+  until:
+  - oc_get.stdout != ''
+  changed_when: false
+
+- name: Set fact ssh_bastion
+  set_fact:
+    ssh_bastion: "{{ oc_get.stdout }}"
+
 - name: Add machine to the inventory
   add_host:
     name: "{{ item }}"

+ 1 - 1
test/aws/gather_logs.yml

@@ -17,7 +17,7 @@
   command: "crictl logs {{ item }}"
   register: crictl_logs_output
   no_log: true
-  with_items: "{{ crictl_ps_output.stdout_lines }}"
+  loop: "{{ crictl_ps_output.stdout_lines }}"
   ignore_errors: true
 - name: Write container logs locally
   local_action:

+ 30 - 0
test/aws/tasks/get_machinesets.yml

@@ -0,0 +1,30 @@
+---
+- name: List existing worker nodes
+  command: >
+    oc get nodes
+    --kubeconfig={{ kubeconfig_path }}
+    --selector="node-role.kubernetes.io/worker"
+    --output=json
+  register: oc_get
+  until:
+  - oc_get.stdout != ''
+  changed_when: false
+
+- name: Set fact pre_scaleup_workers_name
+  set_fact:
+    pre_scaleup_workers_name: "{{ (oc_get.stdout | from_json)['items'] | map(attribute='metadata.name') | list }}"
+
+- name: Get existing worker machinesets
+  command: >
+    oc get machinesets
+    --kubeconfig={{ kubeconfig_path }}
+    --namespace=openshift-machine-api
+    --output=json
+  register: machineset
+  until:
+  - machineset.stdout != ''
+  changed_when: false
+
+- name: Set fact pre_scaleup_machineset_names
+  set_fact:
+    pre_scaleup_machineset_names: "{{ (machineset.stdout | from_json)['items'] | map(attribute='metadata.name') | list }}"