
Merge pull request #9744 from vrutkovs/prow-aws-playbooks

Add playbooks to provision AWS VMs for CI
OpenShift Merge Robot 6 years ago
commit 5b57dd216d

+ 0 - 1
.dockerignore

@@ -3,7 +3,6 @@ bin
 docs
 hack
 inventory/hosts.*
-test
 utils
 **/*.md
 *.spec

+ 1 - 0
images/installer/root/usr/local/bin/entrypoint-provider

@@ -25,6 +25,7 @@ if ! whoami &>/dev/null; then
 fi
 
 # Provide a "files_dir" variable that points to inventory/dynamic/injected
+mkdir -p "${WORK}/inventory/dynamic/${TYPE}/group_vars/all"
 echo "files_dir: \"${FILES}\"" > "${WORK}/inventory/dynamic/${TYPE}/group_vars/all/00_default_files_dir.yml"
 # Add any injected variable files into the group vars directory
 find "${FILES}" \( -name '*.yml' -or -name '*.yaml' -or -name vars \) -print0 | xargs -0 -L1 -I {} ln -fs {} "${WORK}/inventory/dynamic/${TYPE}/group_vars/all"

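For context, a minimal sketch of what this hunk does (the `WORK` and `FILES` values below are hypothetical stand-ins, not the actual paths baked into the installer image): the aws inventory directory added in this PR only tracks a `.gitkeep`, so without the new `mkdir -p` the redirect that writes `00_default_files_dir.yml` would fail with "No such file or directory".

```bash
# Sketch only: WORK/FILES values are assumed for illustration.
TYPE=aws
WORK=/opt/app-root/src                              # hypothetical work dir
FILES="${WORK}/inventory/dynamic/injected"          # injected variable files

# New in this PR: make sure the group_vars directory exists for providers
# (like aws) whose inventory only ships a .gitkeep.
mkdir -p "${WORK}/inventory/dynamic/${TYPE}/group_vars/all"

# Existing behaviour: point "files_dir" at the injected files ...
echo "files_dir: \"${FILES}\"" \
  > "${WORK}/inventory/dynamic/${TYPE}/group_vars/all/00_default_files_dir.yml"

# ... and symlink any injected *.yml/*.yaml/vars files next to it.
find "${FILES}" \( -name '*.yml' -or -name '*.yaml' -or -name vars \) -print0 \
  | xargs -0 -L1 -I {} ln -fs {} "${WORK}/inventory/dynamic/${TYPE}/group_vars/all"
```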
+ 2 - 1
inventory/.gitignore

@@ -1,2 +1,3 @@
 hosts
-/dynamic/gcp/group_vars/all/00_default_files_dir.yml
+/dynamic/gcp/group_vars/all/00_default_files_dir.yml
+/dynamic/aws/group_vars/all/00_default_files_dir.yml

+ 0 - 0
inventory/dynamic/aws/group_vars/all/.gitkeep


+ 6 - 13
test/ci/README.md

@@ -1,14 +1,7 @@
-This directory contains scripts and other files that are executed by our
-CI integration tests.
+* Copy `test/ci/vars.yml.sample` to `test/ci/vars.yml`
+* Adjust it your liking - this would be the host configuration
+* Adjust `inventory/group_vars/OSEv3/vars.yml` - this would be Origin-specific config
+* Provision instances via `ansible-playbook -vv -i test/ci/inventory/ test/ci/launch.yml`
+  This would place inventory file in `test/ci/inventory/hosts` and run prerequisites and deploy.
 
-CI should call a script.  The only arguments that each script should accept
-are:
-
-1) Path to openshift-ansible/playbooks
-2) Inventory path.
-3) Extra vars path.
-
-Ideally, inventory path and extra vars should live somewhere in this
-subdirectory instead of the CI's source.
-
-Extravars should typically be unnecessary.
+* Once the setup is complete, run `ansible-playbook -vv -i test/ci/inventory/ test/ci/deprovision.yml` to deprovision the instances

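Put together, the workflow the new README describes looks roughly like this (a sketch; it assumes you run from the repository root and that your environment provides valid AWS credentials, and that `test/ci/vars.yml` carries a region/subnet/key pair that exist in your account):

```bash
# 1. Describe the hosts to provision (sample file shipped in this PR).
cp test/ci/vars.yml.sample test/ci/vars.yml
$EDITOR test/ci/vars.yml                                   # AWS/instance settings
$EDITOR test/ci/inventory/group_vars/OSEv3/vars.yml        # Origin-specific settings

# 2. Provision EC2 instances, write test/ci/inventory/hosts, then run the
#    prerequisites and deploy_cluster playbooks against them.
ansible-playbook -vv -i test/ci/inventory/ test/ci/launch.yml

# 3. When done, stop the instances and retag them with a "-terminate" suffix.
ansible-playbook -vv -i test/ci/inventory/ test/ci/deprovision.yml
```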
+ 34 - 0
test/ci/deprovision.yml

@@ -0,0 +1,34 @@
+---
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+    - include_vars: "{{ item }}"
+      with_first_found:
+        - vars.yml
+        - vars.yaml
+
+    - name: Gather ec2 facts
+      ec2_instance_facts:
+        region: "{{ aws_region }}"
+        filters:
+          tag-key: "kubernetes.io/cluster/{{ aws_cluster_id }}"
+      register: ec2
+
+    - name: Stop VMs
+      ec2:
+        instance_ids: "{{ item.instance_id }}"
+        region: "{{ aws_region }}"
+        state: stopped
+        wait: no
+      with_items: "{{ ec2.instances }}"
+      ignore_errors: true
+
+    - name: Rename VMs
+      ec2_tag:
+        resource: "{{ item.instance_id }}"
+        region: "{{ aws_region }}"
+        tags:
+          Name: "{{ item.tags.Name }}-terminate"
+      when: "'-terminate' not in item.tags.Name"
+      with_items: "{{ ec2.instances }}"

+ 0 - 4
test/ci/extra_vars/default.yml

@@ -1,4 +0,0 @@
----
-# Using extra_vars is typically not ideal.  Please don't use extra_vars
-# unless there is no other way to accomplish a task.
-openshift_this_var_is_not_used: True

+ 0 - 33
test/ci/install.sh

@@ -1,33 +0,0 @@
-#!/bin/bash
-
-set -x
-
-# Argument 1: path to openshift-ansible/playbooks
-# Argument 2: inventory path
-# Argument 3: Extra vars path
-
-echo "Running prerequisites"
-
-ansible-playbook -vv            \
-                 --inventory $2 \
-                 --e @$3        \
-                 $1/prerequisites.yml
-
-echo "Running network_manager setup"
-
-if [[ -s "$1/openshift-node/network_manager.yml" ]]; then
-   playbook="$1/openshift-node/network_manager.yml"
-else
-   playbook="$1/byo/openshift-node/network_manager.yml"
-fi
-ansible-playbook -vv            \
-                 --inventory $2 \
-                 --e @$3       \
-                ${playbook}
-
-echo "Running openshift-ansible deploy_cluster"
-
-ansible-playbook -vv            \
-                 --inventory $2 \
-                 --e @$3        \
-                 $1/deploy_cluster.yml

+ 0 - 4
test/ci/inventory/group_vars/OSEv3/checks.yml

@@ -1,4 +0,0 @@
----
-openshift_check_min_host_disk_gb: 10
-openshift_check_min_host_memory_gb: 8
-openshift_disable_check: package_update,package_availability

+ 0 - 2
test/ci/inventory/group_vars/OSEv3/general.yml

@@ -1,2 +0,0 @@
----
-debug_level: 5

+ 0 - 37
test/ci/inventory/group_vars/OSEv3/logging.yml

@@ -1,37 +0,0 @@
----
-openshift_logging_use_mux: false
-openshift_logging_use_ops: true
-openshift_logging_es_log_appenders:
-  - "console"
-openshift_logging_fluentd_journal_read_from_head: false
-openshift_logging_fluentd_audit_container_engine: true
-
-openshift_logging_curator_cpu_request: "100m"
-openshift_logging_curator_memory_limit: "32Mi"
-openshift_logging_curator_ops_cpu_request: "100m"
-openshift_logging_curator_ops_memory_limit: "32Mi"
-openshift_logging_elasticsearch_proxy_cpu_request: "100m"
-openshift_logging_elasticsearch_proxy_memory_limit: "32Mi"
-openshift_logging_es_cpu_request: "400m"
-openshift_logging_es_memory_limit: "4Gi"
-openshift_logging_es_ops_cpu_request: "400m"
-openshift_logging_es_ops_memory_limit: "4Gi"
-openshift_logging_eventrouter_cpu_request: "100m"
-openshift_logging_eventrouter_memory_limit: "64Mi"
-openshift_logging_fluentd_cpu_request: "100m"
-openshift_logging_fluentd_memory_limit: "256Mi"
-openshift_logging_kibana_cpu_request: "100m"
-openshift_logging_kibana_memory_limit: "128Mi"
-openshift_logging_kibana_ops_cpu_request: "100m"
-openshift_logging_kibana_ops_memory_limit: "128Mi"
-openshift_logging_kibana_ops_proxy_cpu_request: "100m"
-openshift_logging_kibana_ops_proxy_memory_limit: "64Mi"
-openshift_logging_kibana_proxy_cpu_request: "100m"
-openshift_logging_kibana_proxy_memory_limit: "64Mi"
-openshift_logging_mux_cpu_request: "400m"
-openshift_logging_mux_memory_limit: "256Mi"
-
-# TODO: remove this once we have oauth-proxy images built that are in step
-#       with the logging images (version and prefix)
-openshift_logging_elasticsearch_proxy_image_prefix: "docker.io/openshift/"
-openshift_logging_elasticsearch_proxy_image_version: "v1.0.0"

+ 114 - 0
test/ci/inventory/group_vars/OSEv3/vars.yml

@@ -0,0 +1,114 @@
+---
+ansible_become: true
+ansible_become_sudo: true
+
+openshift_deployment_type: origin
+openshift_repos_enable_testing: false
+
+#Minimal set of services
+openshift_web_console_install: true
+openshift_console_install: true
+openshift_metrics_install_metrics: false
+openshift_metrics_install_logging: false
+openshift_logging_install_logging: false
+openshift_management_install_management: false
+openshift_hosted_prometheus_deploy: false
+template_service_broker_install: false
+ansible_service_broker_install: false
+openshift_enable_service_catalog: false
+osm_use_cockpit: false
+openshift_monitoring_deploy: false
+openshift_metering_install: false
+openshift_metrics_server_install: false
+openshift_monitor_availability_install: false
+openshift_enable_olm: false
+openshift_descheduler_install: false
+openshift_node_problem_detector_install: false
+openshift_autoheal_deploy: false
+openshift_cluster_autoscaler_install: false
+
+# debugging
+debug_level: 4
+etcd_debug: true
+etcd_log_package_levels: 'auth=INFO,etcdmain=DEBUG,etcdserver=DEBUG'
+openshift_docker_options: "--log-driver=journald"
+
+#Disable journald persistence
+journald_vars_to_replace:
+  - { var: Storage, val: volatile }
+  - { var: Compress, val: no }
+  - { var: SyncIntervalSec, val: 1s }
+  - { var: RateLimitInterval, val: 1s }
+  - { var: RateLimitBurst, val: 10000 }
+  - { var: SystemMaxUse, val: 8G }
+  - { var: SystemKeepFree, val: 20% }
+  - { var: SystemMaxFileSize, val: 10M }
+  - { var: MaxRetentionSec, val: 1month }
+  - { var: MaxFileSec, val: 1day }
+  - { var: ForwardToSyslog, val: no }
+  - { var: ForwardToWall, val: no }
+
+#Other settings
+openshift_enable_origin_repo: false
+osm_default_node_selector: "node-role.kubernetes.io/compute=true"
+openshift_hosted_infra_selector: "node-role.kubernetes.io/infra=true"
+openshift_logging_es_nodeselector:
+  node-role.kubernetes.io/infra: "true"
+openshift_logging_es_ops_nodeselector:
+  node-role.kubernetes.io/infra: "true"
+osm_controller_args:
+  enable-hostpath-provisioner:
+    - "true"
+openshift_hosted_router_create_certificate: true
+openshift_master_audit_config:
+  enabled: true
+openshift_master_identity_providers:
+  - name: "allow_all"
+    login: "true"
+    challenge: "true"
+    kind: "AllowAllPasswordIdentityProvider"
+openshift_template_service_broker_namespaces:
+  - "openshift"
+enable_excluders: "true"
+osm_cluster_network_cidr: "10.128.0.0/14"
+openshift_portal_net: "172.30.0.0/16"
+osm_host_subnet_length: 9
+openshift_check_min_host_disk_gb: 1.5
+openshift_check_min_host_memory_gb: 1.9
+openshift_disable_check: package_update,package_availability,memory_availability,disk_availability
+
+openshift_logging_use_mux: false
+openshift_logging_use_ops: true
+openshift_logging_es_log_appenders:
+  - "console"
+openshift_logging_fluentd_journal_read_from_head: false
+openshift_logging_fluentd_audit_container_engine: true
+
+openshift_logging_curator_cpu_request: "100m"
+openshift_logging_curator_memory_limit: "32Mi"
+openshift_logging_curator_ops_cpu_request: "100m"
+openshift_logging_curator_ops_memory_limit: "32Mi"
+openshift_logging_elasticsearch_proxy_cpu_request: "100m"
+openshift_logging_elasticsearch_proxy_memory_limit: "32Mi"
+openshift_logging_es_cpu_request: "400m"
+openshift_logging_es_memory_limit: "4Gi"
+openshift_logging_es_ops_cpu_request: "400m"
+openshift_logging_es_ops_memory_limit: "4Gi"
+openshift_logging_eventrouter_cpu_request: "100m"
+openshift_logging_eventrouter_memory_limit: "64Mi"
+openshift_logging_fluentd_cpu_request: "100m"
+openshift_logging_fluentd_memory_limit: "256Mi"
+openshift_logging_kibana_cpu_request: "100m"
+openshift_logging_kibana_memory_limit: "128Mi"
+openshift_logging_kibana_ops_cpu_request: "100m"
+openshift_logging_kibana_ops_memory_limit: "128Mi"
+openshift_logging_kibana_ops_proxy_cpu_request: "100m"
+openshift_logging_kibana_ops_proxy_memory_limit: "64Mi"
+openshift_logging_kibana_proxy_cpu_request: "100m"
+openshift_logging_kibana_proxy_memory_limit: "64Mi"
+openshift_logging_mux_cpu_request: "400m"
+openshift_logging_mux_memory_limit: "256Mi"
+
+openshift_master_cluster_method: native
+
+openshift_node_port_range: '30000-32000'

+ 0 - 34
test/ci/inventory/group_vars/all.yml

@@ -1,34 +0,0 @@
----
-# The variables in this file may be overridden by different group vars.
-# The 'all' group is overridden by any specific group that a host is a part of,
-# and the values here serve as 'defaults' for the CI testing.
-
-openshift_deployment_type: origin
-etcd_data_dir: "{{ lookup('env', 'ETCD_DATA_DIR') | default('/var/lib/etcd/') }}"
-openshift_node_port_range: '30000-32000'
-
-# These env vars are created by the CI.  This allows us
-# to test specific versions of openshift.
-openshift_pkg_version: "{{ lookup('env', 'ORIGIN_TAG') }}"
-openshift_release: "{{ lookup('env', 'ORIGIN_RELEASE') }}"
-oreg_url: "docker.io/openshift/origin-${component}:{{ lookup('env', 'ORIGIN_COMMIT') }}"
-
-osm_default_node_selector: "node-role.kubernetes.io/infra=true"
-osm_controller_args:
-  enable-hostpath-provisioner:
-    - "true"
-openshift_hosted_router_selector: "node-role.kubernetes.io/infra=true"
-openshift_hosted_router_create_certificate: true
-openshift_hosted_registry_selector: "node-role.kubernetes.io/infra=true"
-openshift_master_audit_config:
-  enabled: true
-openshift_master_identity_providers:
-  - name: "allow_all"
-    login: "true"
-    challenge: "true"
-    kind: "AllowAllPasswordIdentityProvider"
-openshift_template_service_broker_namespaces:
-  - "openshift"
-ansible_ssh_user: "ec2-user"
-enable_excluders: "false"
-openshift_portal_net: "172.30.0.0/16"

+ 0 - 4
test/ci/inventory/group_vars/containerized_hosts.yml

@@ -1,4 +0,0 @@
----
-openshift_image_tag: "{{ lookup('env', 'OPENSHIFT_IMAGE_TAG') }}"
-oreg_url: "docker.io/openshift/origin-${component}:{{ openshift_image_tag }}"
-containerized: True

+ 0 - 9
test/ci/inventory/host_vars/localhost.yml

@@ -1,9 +0,0 @@
----
-openshift_node_labels:
-  region: infra
-  zone: default
-  node-role.kubernetes.io/infra: "true"
-openshift_schedulable: True
-ansible_become: True
-ansible_become_user: root
-ansible_connection: local

+ 0 - 23
test/ci/inventory/local.txt

@@ -1,23 +0,0 @@
-[OSEv3]
-
-[OSEv3:children]
-masters
-nodes
-etcd
-lb
-nfs
-
-[lb]
-# Empty, but present to pass integration tests.
-
-[nfs]
-# Empty, but present to pass integration tests.
-
-[masters]
-localhost
-
-[nodes]
-localhost
-
-[etcd]
-localhost

+ 0 - 30
test/ci/inventory/local_containerized.txt

@@ -1,30 +0,0 @@
-[containerized_hosts]
-
-[containerized_hosts:children]
-masters
-nodes
-etcd
-
-[OSEv3]
-
-[OSEv3:children]
-masters
-nodes
-etcd
-lb
-nfs
-
-[lb]
-# Empty, but present to pass integration tests.
-
-[nfs]
-# Empty, but present to pass integration tests.
-
-[masters]
-localhost
-
-[nodes]
-localhost
-
-[etcd]
-localhost

+ 80 - 0
test/ci/launch.yml

@@ -0,0 +1,80 @@
+---
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+    - include_vars: "{{ item }}"
+      with_first_found:
+        - vars.yml
+        - vars.yaml
+
+    - name: list available AMIs
+      ec2_ami_facts:
+        region: "{{ aws_region }}"
+        filters: "{{ aws_ami_tags }}"
+      register: ami_facts
+      when: aws_image is not defined
+
+    - name: determine which AMI to use
+      set_fact:
+        aws_image: "{{ ami_facts.images[-1].image_id }}"
+      when: aws_image is not defined
+
+    - name: determine which AMI to use
+      set_fact:
+        aws_image: "{{ ami_facts.images[-1].image_id }}"
+      when: aws_image is not defined
+
+    - name: Create EC2 instance
+      ec2:
+        region: "{{ aws_region }}"
+        key_name: "{{ aws_key }}"
+        instance_type: "{{ item.aws_flavor }}"
+        image: "{{ item.aws_image | default(aws_image) }}"
+        wait: yes
+        group: "{{ item.aws_security_group }}"
+        count: 1
+        vpc_subnet_id: "{{ aws_subnet }}"
+        assign_public_ip: yes
+        instance_tags: "{{ aws_instance_tags }}"
+        volumes: "{{ item.aws_volumes | default(omit) }}"
+      register: ec2
+      with_items: "{{ aws_instances }}"
+      vars:
+        aws_instance_tags: |
+          {
+            "kubernetes.io/cluster/{{ aws_cluster_id }}": "true",
+            "Name": "{{ item.name }}",
+            "ansible-groups": "{{ item.ansible_groups | join(',') }}",
+            "ansible-node-group": "{{ item.node_group }}",
+          }
+
+    - name: Add machine to inventory
+      add_host:
+        name: "{{ item.instances.0.tags['Name'] }}"
+        ansible_host: "{{ item.instances.0.dns_name }}"
+        ansible_user: "{{ item.instances.0.aws_user | default(aws_user)}}"
+        groups: "{{ item.instances.0.tags['ansible-groups'].split(',') }}"
+        aws_region: "{{ aws_region }}"
+        aws_ip: "{{ item.instances.0.public_ip }}"
+        aws_id: "{{ item.instances.0.id }}"
+        openshift_node_group_name: "{{ item.instances.0.tags['ansible-node-group'] }}"
+      with_items: "{{ ec2.results }}"
+
+    - name: write the inventory
+      template:
+        src: ./template-inventory.j2
+        dest: "inventory/hosts"
+
+    - name: Refresh inventory to ensure new instances exist in inventory
+      meta: refresh_inventory
+
+- hosts: all
+  gather_facts: no
+  become: true
+  tasks:
+    - wait_for_connection: {}
+    - setup: {}
+
+- import_playbook: ../../playbooks/prerequisites.yml
+- import_playbook: ../../playbooks/deploy_cluster.yml

+ 26 - 0
test/ci/template-inventory.j2

@@ -0,0 +1,26 @@
+[OSEv3:vars]
+ansible_python_interpreter="{{ python }}"
+ansible_user="{{ aws_user }}"
+aws_region="{{ aws_region }}"
+openshift_master_default_subdomain="{{ hostvars[groups[('lb' in groups) | ternary('lb', 'masters')][0]]["aws_ip"] }}.xip.io"
+
+[OSEv3:children]
+{% for group in groups %}
+{% if group not in ["all", "ungrouped", "OSEv3"] %}
+{{group}}
+{% endif %}
+{% endfor %}
+
+{% for group in groups %}
+{% if group not in ["all", "ungrouped", "OSEv3"] %}
+[{{group}}]
+{% for entry in groups[group] %}
+{% set addon_opts = "" %}
+{% if group == "nodes" %}
+{% set addon_opts = addon_opts + " openshift_node_group_name='" + hostvars[entry]['openshift_node_group_name'] + "'" %}
+{% endif %}
+{{ entry }} ansible_host='{{ hostvars[entry]['ansible_host'] }}' aws_id='{{ hostvars[entry]['aws_id'] }}' {{ addon_opts }}
+{% endfor %}
+{% endif %}
+
+{% endfor %}

+ 38 - 0
test/ci/vars.yml.sample

@@ -0,0 +1,38 @@
+---
+vm_prefix: "ci_test"
+
+type: aws
+aws_user: "ec2-user"
+python: "/usr/bin/python"
+aws_key: "libra"
+aws_region: "us-east-1"
+aws_cluster_id: "ci"
+# us-east-1d
+aws_subnet: "subnet-cf57c596"
+
+aws_ami_tags:
+  "tag:operating_system": "rhel"
+  "tag:image_stage": "base"
+  "tag:ready": "yes"
+
+aws_instances:
+- name: "{{ vm_prefix }}-master"
+  ansible_groups:
+    - masters
+    - etcd
+    - nodes
+  aws_flavor: t2.large
+  aws_security_group: public
+  node_group: "node-config-all-in-one"
+  # Use custom AMI tags
+  # aws_ami_tags:
+  #   operating_system: "rhel"
+  #   image_stage: "base"
+  #   ready: "yes"
+  # Use custom AMI
+  #aws_image: "ami-70e8fd66"
+  # Attach custom volumes
+  #aws_volumes:
+  # - device_name: /dev/sdb
+  #   volume_size: 50
+  #   delete_on_termination: yes