Browse Source

Merge remote-tracking branch 'origin/master' into cns_secgrp

Emilio Garcia 6 years ago
parent
commit
0b6616e37e

+ 3 - 3
playbooks/azure/openshift-cluster/group_vars/all/yum_repos.yml

@@ -40,9 +40,9 @@ azure_node_repos:
     sslclientkey: /var/lib/yum/client-key.pem
     enabled: yes
 
-  # TODO: Replace me post GA with https://mirror.openshift.com/libra/rhui-rhel-server-7-ose-3.11/
-  - name: rhel-server-7-ose-3.11
-    baseurl: https://mirror.openshift.com/enterprise/all/3.11/latest/x86_64/os
+  # TODO: Replace me post GA with https://mirror.openshift.com/libra/rhui-rhel-server-7-ose-4.0/
+  - name: rhel-server-7-ose-4.0
+    baseurl: https://mirror.openshift.com/enterprise/all/4.0/latest/x86_64/os
     gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
     # TODO: Remove me post GA
     gpgcheck: false

+ 0 - 131
playbooks/azure/openshift-cluster/launch.yml

@@ -1,131 +0,0 @@
----
-- hosts: localhost
-  gather_facts: no
-  tasks:
-  - import_role:
-      name: lib_utils
-
-  - name: create temporary directory
-    tempfile:
-      state: directory
-    register: tmp
-
-  - name: download acs-engine
-    get_url:
-      url: "{{ item }}"
-      dest: "{{ tmp.path }}/"
-    with_list:
-    - "http://acs-engine-build-azure.svc.ci.openshift.org/acs-engine"
-    - "http://acs-engine-build-azure.svc.ci.openshift.org/openshift.json"
-
-  - name: make acs-engine executable
-    file:
-      path: "{{ tmp.path }}/acs-engine"
-      mode: 0755
-
-  - name: configure acs-engine
-    yedit:
-      content_type: json
-      src: "{{ tmp.path }}/openshift.json"
-      edits:
-      - key: properties.orchestratorProfile.openShiftConfig.clusterUsername
-        value: demo
-      - key: properties.orchestratorProfile.openShiftConfig.clusterPassword
-        value: "{{ 16 | lib_utils_oo_random_word }}"
-      - key: properties.orchestratorProfile.orchestratorVersion
-        value: unstable
-      # azProfile
-      - key: properties.azProfile.tenantId
-        value: "{{ lookup('env', 'AZURE_TENANT') }}"
-      - key: properties.azProfile.subscriptionId
-        value: "{{ lookup('env', 'AZURE_SUBSCRIPTION_ID') }}"
-      - key: properties.azProfile.resourceGroup
-        value: "{{ openshift_azure_resource_group_name }}"
-      - key: properties.azProfile.location
-        value: "{{ openshift_azure_resource_location }}"
-      # masterProfile
-      - key: properties.masterProfile.dnsPrefix
-        value: "a{{ 16 | lib_utils_oo_random_word }}a"
-      - key: properties.masterProfile.imageReference.name
-        value: "{{ openshift_azure_input_image_name }}"
-      - key: properties.masterProfile.imageReference.resourceGroup
-        value: "{{ openshift_azure_input_image_ns }}"
-      - key: properties.masterProfile.vmSize
-        value: "{{ openshift_azure_vm_size | default('Standard_D4s_v3') }}"
-      # agentpool compute
-      - key: properties.agentPoolProfiles[0].imageReference.name
-        value: "{{ openshift_azure_input_image_name }}"
-      - key: properties.agentPoolProfiles[0].imageReference.resourceGroup
-        value: "{{ openshift_azure_input_image_ns }}"
-      - key: properties.agentPoolProfiles[0].vmSize
-        value: "{{ openshift_azure_vm_size | default('Standard_D4s_v3') }}"
-      # agentpool infra
-      - key: properties.agentPoolProfiles[1].imageReference.name
-        value: "{{ openshift_azure_input_image_name }}"
-      - key: properties.agentPoolProfiles[1].imageReference.resourceGroup
-        value: "{{ openshift_azure_input_image_ns }}"
-      - key: properties.agentPoolProfiles[1].vmSize
-        value: "{{ openshift_azure_vm_size | default('Standard_D4s_v3') }}"
-      # linuxprofile
-      - key: properties.linuxProfile.adminUsername
-        value: "cloud-user"
-      - key: properties.linuxProfile.ssh.publicKeys[0].keyData
-        value: "{{ openshift_azure_vm_ssh_public_key }}"
-      # serviceprincipal
-      - key: properties.servicePrincipalProfile.clientId
-        value: "{{ lookup('env', 'AZURE_CLIENT_ID') }}"
-      - key: properties.servicePrincipalProfile.secret
-        value: "{{ lookup('env', 'AZURE_SECRET') }}"
-
-  - name: run acs-engine deploy
-    command: |
-      {{ tmp.path }}/acs-engine deploy \
-        --resource-group {{ openshift_azure_resource_group_name }} \
-        --location {{ openshift_azure_resource_location }} \
-        --subscription-id {{ lookup('env', 'AZURE_SUBSCRIPTION_ID') }} \
-        --auth-method client_secret \
-        --client-id {{ lookup('env', 'AZURE_CLIENT_ID') }} \
-        --client-secret {{ lookup('env', 'AZURE_SECRET') }} \
-        --output-directory {{ tmp.path }}/deploy \
-        {{ tmp.path }}/openshift.json
-    no_log: true
-    ignore_errors: yes
-    register: deploy
-
-  - debug:
-      msg: "{{ deploy.stdout }}"
-
-  - debug:
-      msg: "{{ deploy.stderr }}"
-
-  # This code attempts to persist the data to /var/tmp which is bind
-  # mounted into the calling container.  This enables the CI to reuse
-  # the cluster created in the previous steps to perform the e2e tests
-  - name: persist the acs-engine generated artifacts
-    copy:
-      src: "{{ tmp.path }}/deploy"
-      dest: /var/tmp/
-    when: openshift_ci_persist_artifacts | default(False)
-
-  - name: delete temporary directory
-    file:
-      path: "{{ tmp.path }}"
-      state: absent
-
-  - block:
-    - name: get azure deployment message
-      command: >
-        az group deployment list
-        -g "{{ openshift_azure_resource_group_name }}"
-        --query "[0].properties.additionalProperties.error.details[0].message"
-        -o tsv
-      register: message
-
-    - debug:
-        msg: "{{ (message.stdout | from_json).error.details[0].message }}"
-      when: message.stdout != ""
-
-    - assert:
-        that: "{{ not deploy.failed }}"
-
-    when: deploy.failed

+ 0 - 1
playbooks/openshift-checks/certificate_expiry/default.yaml

@@ -5,6 +5,5 @@
 - name: Check cert expirys
   hosts: nodes:masters:etcd
   become: yes
-  gather_facts: no
   roles:
     - role: openshift_certificate_expiry

+ 0 - 1
playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml

@@ -11,7 +11,6 @@
 ---
 - name: Generate certificate expiration reports
   hosts: nodes:masters:etcd
-  gather_facts: no
   vars:
     openshift_certificate_expiry_save_json_results: yes
     openshift_certificate_expiry_generate_html_report: yes

+ 0 - 1
playbooks/openshift-checks/certificate_expiry/easy-mode.yaml

@@ -8,7 +8,6 @@
 
 - name: Check cert expirys
   hosts: nodes:masters:etcd
-  gather_facts: no
   vars:
     openshift_certificate_expiry_save_json_results: yes
     openshift_certificate_expiry_generate_html_report: yes

+ 0 - 1
playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml

@@ -3,7 +3,6 @@
 
 - name: Check cert expirys
   hosts: nodes:masters:etcd
-  gather_facts: no
   vars:
     openshift_certificate_expiry_generate_html_report: yes
     openshift_certificate_expiry_save_json_results: yes

+ 0 - 1
playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml

@@ -3,7 +3,6 @@
 
 - name: Check cert expirys
   hosts: nodes:masters:etcd
-  gather_facts: no
   vars:
     openshift_certificate_expiry_generate_html_report: yes
     openshift_certificate_expiry_save_json_results: yes

+ 0 - 1
playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml

@@ -4,7 +4,6 @@
 
 - name: Check cert expirys
   hosts: nodes:masters:etcd
-  gather_facts: no
   vars:
     openshift_certificate_expiry_warning_days: 1500
     openshift_certificate_expiry_save_json_results: yes

+ 0 - 1
playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml

@@ -4,7 +4,6 @@
 
 - name: Check cert expirys
   hosts: nodes:masters:etcd
-  gather_facts: no
   vars:
     openshift_certificate_expiry_warning_days: 1500
   roles:

+ 14 - 18
roles/lib_openshift/library/oc_csr_approve.py

@@ -105,12 +105,11 @@ class CSRapprove(object):
             self.module.fail_json(**self.result)
         return stdout
 
-    def get_ready_nodes(self):
-        '''Get list of nodes currently ready vi oc'''
+    def get_nodes(self):
+        '''Get all nodes via oc get nodes -ojson'''
         # json output is necessary for consistency here.
         command = "{} {} get nodes -ojson".format(self.oc_bin, self.oc_conf)
         stdout = self.run_command(command)
-
         try:
             data = json.loads(stdout)
         except JSONDecodeError as err:
@@ -119,14 +118,7 @@ class CSRapprove(object):
             self.result['state'] = 'unknown'
             self.module.fail_json(**self.result)
         self.result['oc_get_nodes'] = data
-        ready_nodes = []
-        for node in data['items']:
-            if node.get('status') and node['status'].get('conditions'):
-                for condition in node['status']['conditions']:
-                    # "True" is a string here, not a boolean.
-                    if condition['type'] == "Ready" and condition['status'] == 'True':
-                        ready_nodes.append(node['metadata']['name'])
-        return ready_nodes
+        return [node['metadata']['name'] for node in data['items']]
 
     def get_csrs(self):
         '''Retrieve csrs from cluster using oc get csr -ojson'''
@@ -252,10 +244,12 @@ class CSRapprove(object):
 
     def run(self):
         '''execute the csr approval process'''
-        nodes_ready = self.get_ready_nodes()
-        # don't need to check nodes that are already ready.
-        client_not_ready_nodes = [item for item in self.node_list
-                                  if item not in nodes_ready]
+        all_nodes = self.get_nodes()
-        # don't need to check nodes that have already joined the cluster,
-        # because a node's client csr must already have been approved for the
-        # node to appear in the output of oc get nodes.
+        not_found_nodes = [item for item in self.node_list
+                           if item not in all_nodes]
 
         # Get all csrs, no good way to filter on pending.
         client_csrs = self.get_csrs()
@@ -263,11 +257,13 @@ class CSRapprove(object):
         client_csr_dict = self.process_csrs(client_csrs, "client")
         self.result['client_csrs'] = client_csr_dict
 
-        # This method is fail-happy and expects all non-Ready nodes have available
+        # This method is fail-happy and expects all not-found nodes to have available
         # csrs.  Handle failure for this method via ansible retry/until.
-        self.confirm_needed_requests_present(client_not_ready_nodes,
+        self.confirm_needed_requests_present(not_found_nodes,
                                              client_csr_dict)
-
+        # If for some reason a node is found in oc get nodes but it still needs
+        # a client csr approved, this method will approve all outstanding
+        # client csrs for any node in our self.node_list.
         self.approve_csrs(client_csr_dict, 'client')
 
         # # Server Cert Section # #

+ 8 - 8
roles/lib_openshift/test/test_oc_csr_approve.py

@@ -43,7 +43,7 @@ def test_parse_subject_cn():
     assert oc_csr_approve.parse_subject_cn(subject) == 'test.io'
 
 
-def test_get_ready_nodes():
+def test_get_nodes():
     output_file = os.path.join(ASSET_PATH, 'oc_get_nodes.json')
     with open(output_file) as stdoutfile:
         oc_get_nodes_stdout = stdoutfile.read()
@@ -53,8 +53,8 @@ def test_get_ready_nodes():
 
     with patch(RUN_CMD_MOCK) as call_mock:
         call_mock.return_value = (0, oc_get_nodes_stdout, '')
-        ready_nodes = approver.get_ready_nodes()
-    assert ready_nodes == ['fedora1.openshift.io', 'fedora3.openshift.io']
+        all_nodes = approver.get_nodes()
+    assert all_nodes == ['fedora1.openshift.io', 'fedora2.openshift.io', 'fedora3.openshift.io']
 
 
 def test_get_csrs():
@@ -89,15 +89,15 @@ def test_get_csrs():
 def test_confirm_needed_requests_present():
     module = DummyModule({})
     csr_dict = {'some-csr': 'fedora1.openshift.io'}
-    not_ready_nodes = ['host1']
+    not_found_nodes = ['host1']
     approver = CSRapprove(module, 'oc', '/dev/null', [])
     with pytest.raises(Exception) as err:
-        approver.confirm_needed_requests_present(not_ready_nodes, csr_dict)
+        approver.confirm_needed_requests_present(not_found_nodes, csr_dict)
         assert 'Exception: Could not find csr for nodes: host1' in str(err)
 
-    not_ready_nodes = ['fedora1.openshift.io']
+    not_found_nodes = ['fedora1.openshift.io']
     # this should complete silently
-    approver.confirm_needed_requests_present(not_ready_nodes, csr_dict)
+    approver.confirm_needed_requests_present(not_found_nodes, csr_dict)
 
 
 def test_approve_csrs():
@@ -173,7 +173,7 @@ def test_verify_server_csrs():
 
 if __name__ == '__main__':
     test_parse_subject_cn()
-    test_get_ready_nodes()
+    test_get_nodes()
     test_get_csrs()
     test_confirm_needed_requests_present()
     test_approve_csrs()

+ 3 - 3
roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py

@@ -27,11 +27,11 @@ class LookupModule(LookupBase):
                 # pylint: disable=line-too-long
                 raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
 
-        if short_version not in ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11', 'latest']:
+        if short_version not in ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11', '4.0', '4.1', '4.2', 'latest']:
             raise AnsibleError("Unknown short_version %s" % short_version)
 
         if short_version == 'latest':
-            short_version = '3.11'
+            short_version = '4.0'
 
         # Predicates ordered according to OpenShift Origin source:
         # origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
@@ -64,7 +64,7 @@ class LookupModule(LookupBase):
                 {'name': 'NoVolumeNodeConflict'},
             ])
 
-        if short_version in ['3.9', '3.10', '3.11']:
+        if short_version in ['3.9', '3.10', '3.11', '4.0', '4.1', '4.2']:
             predicates.extend([
                 {'name': 'NoVolumeZoneConflict'},
                 {'name': 'MaxEBSVolumeCount'},

+ 3 - 3
roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py

@@ -27,13 +27,13 @@ class LookupModule(LookupBase):
                 # pylint: disable=line-too-long
                 raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
 
-        if short_version not in ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11', 'latest']:
+        if short_version not in ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11', '4.0', '4.1', '4.2', 'latest']:
             raise AnsibleError("Unknown short_version %s" % short_version)
 
         if short_version == 'latest':
-            short_version = '3.11'
+            short_version = '4.0'
 
-        if short_version in ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11']:
+        if short_version in ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11', '4.0', '4.1', '4.2']:
             priorities.extend([
                 {'name': 'SelectorSpreadPriority', 'weight': 1},
                 {'name': 'InterPodAffinityPriority', 'weight': 1},

+ 2 - 1
roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py

@@ -46,7 +46,7 @@ DEFAULT_PREDICATES_3_9 = [
     {'name': 'CheckVolumeBinding'},
 ]
 
-DEFAULT_PREDICATES_3_11 = DEFAULT_PREDICATES_3_10 = DEFAULT_PREDICATES_3_9
+DEFAULT_PREDICATES_4_0 = DEFAULT_PREDICATES_3_11 = DEFAULT_PREDICATES_3_10 = DEFAULT_PREDICATES_3_9
 
 REGION_PREDICATE = {
     'name': 'Region',
@@ -64,6 +64,7 @@ TEST_VARS = [
     ('3.9', DEFAULT_PREDICATES_3_9),
     ('3.10', DEFAULT_PREDICATES_3_10),
     ('3.11', DEFAULT_PREDICATES_3_11),
+    ('4.0', DEFAULT_PREDICATES_4_0),
 ]
 
 

+ 2 - 1
roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py

@@ -11,7 +11,7 @@ DEFAULT_PRIORITIES_3_6 = [
     {'name': 'TaintTolerationPriority', 'weight': 1}
 ]
 DEFAULT_PRIORITIES_3_8 = DEFAULT_PRIORITIES_3_7 = DEFAULT_PRIORITIES_3_6
-DEFAULT_PRIORITIES_3_11 = DEFAULT_PRIORITIES_3_10 = DEFAULT_PRIORITIES_3_9 = DEFAULT_PRIORITIES_3_8
+DEFAULT_PRIORITIES_4_0 = DEFAULT_PRIORITIES_3_11 = DEFAULT_PRIORITIES_3_10 = DEFAULT_PRIORITIES_3_9 = DEFAULT_PRIORITIES_3_8
 
 ZONE_PRIORITY = {
     'name': 'Zone',
@@ -30,6 +30,7 @@ TEST_VARS = [
     ('3.9', DEFAULT_PRIORITIES_3_9),
     ('3.10', DEFAULT_PRIORITIES_3_10),
     ('3.11', DEFAULT_PRIORITIES_3_11),
+    ('4.0', DEFAULT_PRIORITIES_4_0),
 ]
 
 

+ 2 - 2
roles/openshift_console/defaults/main.yml

@@ -23,6 +23,6 @@ l_openshift_console_branding_dict:
 openshift_console_branding: "{{ l_openshift_console_branding_dict[openshift_deployment_type] }}"
 
 l_openshift_console_documentation_url_dict:
-  origin: 'https://docs.okd.io/3.11/'
-  openshift-enterprise: 'https://docs.openshift.com/container-platform/3.11/'
+  origin: 'https://docs.okd.io/4.0/'
+  openshift-enterprise: 'https://docs.openshift.com/container-platform/4.0/'
 openshift_console_documentation_base_url: "{{ l_openshift_console_documentation_url_dict[openshift_deployment_type] }}"

+ 4 - 4
roles/openshift_control_plane/tasks/upgrade.yml

@@ -84,11 +84,11 @@
   yedit:
     src: "{{ openshift.common.config_base }}/master/master-config.yaml"
     key: "kubernetesMasterConfig.controllerArguments.pv-recycler-pod-template-filepath-nfs"
-    value: "/etc/origin/master/recycler_pod.yaml"
-    value_type: list
+    value:
+    - /etc/origin/master/recycler_pod.yaml
 - name: Update controller-manager to have hostpath recycler pod
   yedit:
     src: "{{ openshift.common.config_base }}/master/master-config.yaml"
     key: "kubernetesMasterConfig.controllerArguments.pv-recycler-pod-template-filepath-hostpath"
-    value: "/etc/origin/master/recycler_pod.yaml"
-    value_type: list
+    value:
+    - /etc/origin/master/recycler_pod.yaml

+ 18 - 0
roles/openshift_manage_node/tasks/config.yml

@@ -9,3 +9,21 @@
   until: node_schedulable is succeeded
   when: "'nodename' in openshift.node"
   delegate_to: "{{ openshift_master_host }}"
+
+- name: Wait for sync DS to set annotations on all nodes
+  oc_obj:
+    state: list
+    kind: node
+    selector: ""
+  register: node_status
+  until:
+    - node_status.results is defined
+    - node_status.results.results is defined
+    - node_status.results.results | length > 0
+    - node_status.results.results[0]['items']
+        | map(attribute='metadata.annotations') | map('list') | flatten
+        | select('match', '[\"node.openshift.io/md5sum\"]') | list | length ==
+      node_status.results.results[0]['items'] | length
+  retries: 60
+  delay: 10
+  delegate_to: "{{ openshift_master_host }}"

+ 4 - 3
roles/openshift_node_group/tasks/sync.yml

@@ -72,7 +72,7 @@
   retries: 60
   delay: 10
 
-- name: Wait for sync DS to set annotations on all nodes
+- name: Wait for sync DS to set annotations on master nodes
   oc_obj:
     state: list
     kind: node
@@ -83,8 +83,9 @@
     - node_status.results.results is defined
     - node_status.results.results | length > 0
     - node_status.results.results[0]['items']
-     | map(attribute='metadata.annotations') | map('list') | flatten
-     | select('match', '[\"node.openshift.io/md5sum\"]') | list | length == node_status.results.results | length
+        | map(attribute='metadata.annotations') | map('list') | flatten
+        | select('match', '[\"node.openshift.io/md5sum\"]') | list | length ==
+      node_status.results.results[0]['items'] | length
   retries: 60
   delay: 10
 

+ 1 - 4
roles/openshift_storage_nfs_lvm/tasks/nfs.yml

@@ -23,8 +23,5 @@
   lineinfile: dest=/etc/exports
               regexp="^{{ osnl_mount_dir }}/{{ item }} "
               line="{{ osnl_mount_dir }}/{{ item }} {{osnl_nfs_export_options}}"
-  with_sequence:
-    start: "{{osnl_volume_num_start}}"
-    count: "{{osnl_number_of_volumes}}"
-    format: "{{osnl_volume_prefix}}{{osnl_volume_size}}g%04d"
+  with_sequence: start={{osnl_volume_num_start}} count={{osnl_number_of_volumes}} format={{osnl_volume_prefix}}{{osnl_volume_size}}g%04d
   notify: restart nfs

+ 1 - 1
roles/openshift_version/defaults/main.yml

@@ -1,3 +1,3 @@
 ---
 openshift_protect_installed_version: True
-openshift_release: '3.11'
+openshift_release: '4.0'