Merge pull request #11435 from patrickdillon/byoh-224-cleanup-test

Clean up test dir: delete 3.X CI & move libvirt to hack
OpenShift Merge Robot · 6 years ago · commit 536ac8a793

41 changed files with 0 additions and 349 deletions
  1. hack/libvirt/README.md (+0 -0)
  2. hack/libvirt/cleanup.sh (+0 -0)
  3. hack/libvirt/deploy.sh (+0 -0)
  4. hack/libvirt/deploy_centos.sh (+0 -0)
  5. hack/libvirt/dnsmasq_setup.sh (+0 -0)
  6. hack/libvirt/generate_assets.sh (+0 -0)
  7. hack/libvirt/generate_inventory.sh (+0 -0)
  8. hack/libvirt/group_vars/nodes.yml (+0 -0)
  9. hack/libvirt/install-config.yml.template (+0 -0)
  10. hack/libvirt/installrc (+0 -0)
  11. hack/libvirt/installrc_centos (+0 -0)
  12. hack/libvirt/inv.txt.template (+0 -0)
  13. hack/libvirt/node_scaleup.sh (+0 -0)
  14. hack/libvirt/playbooks/files/openshift-local.repo (+0 -0)
  15. hack/libvirt/playbooks/localrepo.yml (+0 -0)
  16. hack/libvirt/playbooks/prep.yml (+0 -0)
  17. hack/libvirt/playbooks/rhel_prep.yml (+0 -0)
  18. hack/libvirt/playbooks/templates/buildah_repo.sh (+0 -0)
  19. hack/libvirt/rhel_setup.sh (+0 -0)
  20. hack/libvirt/run_ansible.sh (+0 -0)
  21. hack/libvirt/ssh_config.sh (+0 -0)
  22. hack/libvirt/terraform/bootstrap/README.md (+0 -0)
  23. hack/libvirt/terraform/bootstrap/main.tf (+0 -0)
  24. hack/libvirt/terraform/bootstrap/meta-data.tpl (+0 -0)
  25. hack/libvirt/terraform/bootstrap/user-data.tpl (+0 -0)
  26. hack/libvirt/terraform/bootstrap/variables.tf (+0 -0)
  27. hack/libvirt/terraform/config.tf (+0 -0)
  28. hack/libvirt/terraform/main.tf (+0 -0)
  29. hack/libvirt/terraform/terraform.tfvars.template (+0 -0)
  30. hack/libvirt/terraform/user-data.tpl (+0 -0)
  31. hack/libvirt/terraform/variables-libvirt.tf (+0 -0)
  32. hack/libvirt/terraform/volume/main.tf (+0 -0)
  33. hack/libvirt/terraform/volume/outputs.tf (+0 -0)
  34. hack/libvirt/terraform/volume/variables.tf (+0 -0)
  35. hack/libvirt/terraform_provision.sh (+0 -0)
  36. test/ci/README.md (+0 -7)
  37. test/ci/deprovision.yml (+0 -45)
  38. test/ci/inventory/group_vars/OSEv3/vars.yml (+0 -113)
  39. test/ci/launch.yml (+0 -112)
  40. test/ci/template-inventory.j2 (+0 -26)
  41. test/ci/vars.yml.sample (+0 -46)

test/libvirt/README.md → hack/libvirt/README.md
test/libvirt/cleanup.sh → hack/libvirt/cleanup.sh
test/libvirt/deploy.sh → hack/libvirt/deploy.sh
test/libvirt/deploy_centos.sh → hack/libvirt/deploy_centos.sh
test/libvirt/dnsmasq_setup.sh → hack/libvirt/dnsmasq_setup.sh
test/libvirt/generate_assets.sh → hack/libvirt/generate_assets.sh
test/libvirt/generate_inventory.sh → hack/libvirt/generate_inventory.sh
test/libvirt/group_vars/nodes.yml → hack/libvirt/group_vars/nodes.yml
test/libvirt/install-config.yml.template → hack/libvirt/install-config.yml.template
test/libvirt/installrc → hack/libvirt/installrc
test/libvirt/installrc_centos → hack/libvirt/installrc_centos
test/libvirt/inv.txt.template → hack/libvirt/inv.txt.template
test/libvirt/node_scaleup.sh → hack/libvirt/node_scaleup.sh
test/libvirt/playbooks/files/openshift-local.repo → hack/libvirt/playbooks/files/openshift-local.repo
test/libvirt/playbooks/localrepo.yml → hack/libvirt/playbooks/localrepo.yml
test/libvirt/playbooks/prep.yml → hack/libvirt/playbooks/prep.yml
test/libvirt/playbooks/rhel_prep.yml → hack/libvirt/playbooks/rhel_prep.yml
test/libvirt/playbooks/templates/buildah_repo.sh → hack/libvirt/playbooks/templates/buildah_repo.sh
test/libvirt/rhel_setup.sh → hack/libvirt/rhel_setup.sh
test/libvirt/run_ansible.sh → hack/libvirt/run_ansible.sh
test/libvirt/ssh_config.sh → hack/libvirt/ssh_config.sh
test/libvirt/terraform/bootstrap/README.md → hack/libvirt/terraform/bootstrap/README.md
test/libvirt/terraform/bootstrap/main.tf → hack/libvirt/terraform/bootstrap/main.tf
test/libvirt/terraform/bootstrap/meta-data.tpl → hack/libvirt/terraform/bootstrap/meta-data.tpl
test/libvirt/terraform/bootstrap/user-data.tpl → hack/libvirt/terraform/bootstrap/user-data.tpl
test/libvirt/terraform/bootstrap/variables.tf → hack/libvirt/terraform/bootstrap/variables.tf
test/libvirt/terraform/config.tf → hack/libvirt/terraform/config.tf
test/libvirt/terraform/main.tf → hack/libvirt/terraform/main.tf
test/libvirt/terraform/terraform.tfvars.template → hack/libvirt/terraform/terraform.tfvars.template
test/libvirt/terraform/user-data.tpl → hack/libvirt/terraform/user-data.tpl
test/libvirt/terraform/variables-libvirt.tf → hack/libvirt/terraform/variables-libvirt.tf
test/libvirt/terraform/volume/main.tf → hack/libvirt/terraform/volume/main.tf
test/libvirt/terraform/volume/outputs.tf → hack/libvirt/terraform/volume/outputs.tf
test/libvirt/terraform/volume/variables.tf → hack/libvirt/terraform/volume/variables.tf
test/libvirt/terraform_provision.sh → hack/libvirt/terraform_provision.sh
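
The 35 renames above and the six deletions below amount to a directory move plus a removal. A minimal sketch of the equivalent git operations, assuming a checkout of the parent commit (illustrative only, not the commands actually run):

```sh
# Move the libvirt helper scripts out of test/ and into hack/
git mv test/libvirt hack/libvirt

# Drop the OpenShift 3.x CI provisioning playbooks entirely
git rm -r test/ci

git commit -m "Clean up test dir: delete 3.X CI & move libvirt to hack"
```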


test/ci/README.md (+0 -7)

@@ -1,7 +0,0 @@
-* Copy `test/ci/vars.yml.sample` to `test/ci/vars.yml`
-* Adjust it to your liking - this is the host configuration
-* Adjust `inventory/group_vars/OSEv3/vars.yml` - this is the Origin-specific config
-* Provision instances via `ansible-playbook -vv -i test/ci/inventory/ test/ci/launch.yml`
-  This will place the inventory file in `test/ci/inventory/hosts` and run prerequisites and deploy.
-
-* Once the setup is complete, run `ansible-playbook -vv -i test/ci/inventory/ test/ci/deprovision.yml`
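
Put together, the workflow this README described looks like the following sketch (paths are relative to the repository root before this commit; `$EDITOR` stands in for whatever adjustments the configuration needs):

```sh
# Host-level configuration (copied from the sample, then adjusted)
cp test/ci/vars.yml.sample test/ci/vars.yml
$EDITOR test/ci/vars.yml                              # host configuration
$EDITOR test/ci/inventory/group_vars/OSEv3/vars.yml   # Origin-specific config

# Provision EC2 instances; writes test/ci/inventory/hosts,
# then runs the prerequisites and deploy playbooks
ansible-playbook -vv -i test/ci/inventory/ test/ci/launch.yml

# Tear everything down once finished
ansible-playbook -vv -i test/ci/inventory/ test/ci/deprovision.yml
```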

test/ci/deprovision.yml (+0 -45)

@@ -1,45 +0,0 @@
----
-- hosts: localhost
-  connection: local
-  gather_facts: false
-  tasks:
-    - include_vars: "{{ item }}"
-      with_first_found:
-        - vars.yml
-        - vars.yaml
-
-    - name: Gather ec2 facts
-      ec2_instance_facts:
-        region: "{{ aws_region }}"
-        filters:
-          tag-key: "kubernetes.io/cluster/{{ aws_cluster_id }}"
-      register: ec2
-
-    - name: Terminate instances
-      ec2:
-        instance_ids: "{{ item.instance_id }}"
-        region: "{{ aws_region }}"
-        state: absent
-        wait: no
-      with_items: "{{ ec2.instances }}"
-      when: not aws_use_auto_terminator | default(true)
-
-    - when: aws_use_auto_terminator | default(true)
-      block:
-        - name: Stop VMs
-          ec2:
-            instance_ids: "{{ item.instance_id }}"
-            region: "{{ aws_region }}"
-            state: stopped
-            wait: no
-          with_items: "{{ ec2.instances }}"
-          ignore_errors: true
-
-        - name: Rename VMs
-          ec2_tag:
-            resource: "{{ item.instance_id }}"
-            region: "{{ aws_region }}"
-            tags:
-              Name: "{{ item.tags.Name }}-terminate"
-          when: "'-terminate' not in item.tags.Name"
-          with_items: "{{ ec2.instances }}"
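
Worth noting: this play has two teardown paths. With `aws_use_auto_terminator` at its default of `true`, instances are only stopped and renamed with a `-terminate` suffix (for an external terminator to collect); setting it to `false` terminates them directly. A usage sketch, assuming the IAM permissions each path needs:

```sh
# Default path: stop the instances and rename them "<Name>-terminate"
ansible-playbook -vv -i test/ci/inventory/ test/ci/deprovision.yml

# Terminate directly (requires permission to terminate instances)
ansible-playbook -vv -i test/ci/inventory/ test/ci/deprovision.yml \
  -e aws_use_auto_terminator=false
```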

test/ci/inventory/group_vars/OSEv3/vars.yml (+0 -113)

@@ -1,113 +0,0 @@
----
-ansible_become: true
-ansible_become_method: sudo
-
-openshift_deployment_type: origin
-openshift_repos_enable_testing: false
-
-#Minimal set of services
-openshift_web_console_install: true
-openshift_console_install: true
-openshift_metrics_install_metrics: false
-openshift_metrics_install_logging: false
-openshift_logging_install_logging: false
-openshift_management_install_management: false
-template_service_broker_install: false
-ansible_service_broker_install: false
-openshift_enable_service_catalog: false
-osm_use_cockpit: false
-openshift_monitoring_deploy: false
-openshift_metering_install: false
-openshift_metrics_server_install: false
-openshift_monitor_availability_install: false
-openshift_enable_olm: false
-openshift_descheduler_install: false
-openshift_node_problem_detector_install: false
-openshift_autoheal_deploy: false
-openshift_cluster_autoscaler_install: false
-
-# debugging
-debug_level: 4
-etcd_debug: true
-etcd_log_package_levels: 'auth=INFO,etcdmain=DEBUG,etcdserver=DEBUG'
-openshift_docker_options: "--log-driver=journald"
-
-#Disable journald persistence
-journald_vars_to_replace:
-  - { var: Storage, val: volatile }
-  - { var: Compress, val: no }
-  - { var: SyncIntervalSec, val: 1s }
-  - { var: RateLimitInterval, val: 1s }
-  - { var: RateLimitBurst, val: 10000 }
-  - { var: SystemMaxUse, val: 8G }
-  - { var: SystemKeepFree, val: 20% }
-  - { var: SystemMaxFileSize, val: 10M }
-  - { var: MaxRetentionSec, val: 1month }
-  - { var: MaxFileSec, val: 1day }
-  - { var: ForwardToSyslog, val: no }
-  - { var: ForwardToWall, val: no }
-
-#Other settings
-openshift_enable_origin_repo: false
-osm_default_node_selector: "node-role.kubernetes.io/compute=true"
-openshift_hosted_infra_selector: "node-role.kubernetes.io/infra=true"
-openshift_logging_es_nodeselector:
-  node-role.kubernetes.io/infra: "true"
-openshift_logging_es_ops_nodeselector:
-  node-role.kubernetes.io/infra: "true"
-osm_controller_args:
-  enable-hostpath-provisioner:
-    - "true"
-openshift_hosted_router_create_certificate: true
-openshift_master_audit_config:
-  enabled: true
-openshift_master_identity_providers:
-  - name: "allow_all"
-    login: "true"
-    challenge: "true"
-    kind: "AllowAllPasswordIdentityProvider"
-openshift_template_service_broker_namespaces:
-  - "openshift"
-enable_excluders: "true"
-osm_cluster_network_cidr: "10.128.0.0/14"
-openshift_portal_net: "172.30.0.0/16"
-osm_host_subnet_length: 9
-openshift_check_min_host_disk_gb: 1.5
-openshift_check_min_host_memory_gb: 1.9
-openshift_disable_check: package_update,package_availability,memory_availability,disk_availability
-
-openshift_logging_use_mux: false
-openshift_logging_use_ops: true
-openshift_logging_es_log_appenders:
-  - "console"
-openshift_logging_fluentd_journal_read_from_head: false
-openshift_logging_fluentd_audit_container_engine: true
-
-openshift_logging_curator_cpu_request: "100m"
-openshift_logging_curator_memory_limit: "32Mi"
-openshift_logging_curator_ops_cpu_request: "100m"
-openshift_logging_curator_ops_memory_limit: "32Mi"
-openshift_logging_elasticsearch_proxy_cpu_request: "100m"
-openshift_logging_elasticsearch_proxy_memory_limit: "32Mi"
-openshift_logging_es_cpu_request: "400m"
-openshift_logging_es_memory_limit: "4Gi"
-openshift_logging_es_ops_cpu_request: "400m"
-openshift_logging_es_ops_memory_limit: "4Gi"
-openshift_logging_eventrouter_cpu_request: "100m"
-openshift_logging_eventrouter_memory_limit: "64Mi"
-openshift_logging_fluentd_cpu_request: "100m"
-openshift_logging_fluentd_memory_limit: "256Mi"
-openshift_logging_kibana_cpu_request: "100m"
-openshift_logging_kibana_memory_limit: "128Mi"
-openshift_logging_kibana_ops_cpu_request: "100m"
-openshift_logging_kibana_ops_memory_limit: "128Mi"
-openshift_logging_kibana_ops_proxy_cpu_request: "100m"
-openshift_logging_kibana_ops_proxy_memory_limit: "64Mi"
-openshift_logging_kibana_proxy_cpu_request: "100m"
-openshift_logging_kibana_proxy_memory_limit: "64Mi"
-openshift_logging_mux_cpu_request: "400m"
-openshift_logging_mux_memory_limit: "256Mi"
-
-openshift_master_cluster_method: native
-
-openshift_node_port_range: '30000-32000'

test/ci/launch.yml (+0 -112)

@@ -1,112 +0,0 @@
----
-- hosts: localhost
-  connection: local
-  gather_facts: false
-  tasks:
-    - include_vars: "{{ item }}"
-      with_first_found:
-        - vars.yml
-        - vars.yaml
-
-    - name: list available AMIs
-      ec2_ami_facts:
-        region: "{{ aws_region }}"
-        filters: "{{ aws_ami_tags }}"
-      register: ami_facts
-      when: aws_image is not defined
-
-    - name: determine which AMI to use
-      set_fact:
-        aws_image: "{{ ami_facts.images[-1].image_id }}"
-      when: aws_image is not defined
-
-    - name: Create EC2 instance
-      ec2:
-        region: "{{ aws_region }}"
-        key_name: "{{ aws_key }}"
-        instance_type: "{{ item.aws_flavor }}"
-        image: "{{ item.aws_image | default(aws_image) }}"
-        wait: yes
-        group: "{{ item.aws_security_group }}"
-        count: 1
-        vpc_subnet_id: "{{ aws_subnet }}"
-        assign_public_ip: yes
-        instance_tags: "{{ aws_instance_tags }}"
-        volumes: "{{ item.aws_volumes | default(omit) }}"
-      register: ec2
-      with_items: "{{ aws_instances }}"
-      vars:
-        aws_instance_tags: |
-          {
-            "kubernetes.io/cluster/{{ aws_cluster_id }}": "true",
-            "Name": "{{ item.name }}",
-            "ansible-groups": "{{ item.ansible_groups | join(',') }}",
-            "ansible-node-group": "{{ item.node_group }}",
-            "expirationDate": "{{ item.aws_expiration_date | default(aws_expiration_date) }}"
-          }
-
-    - name: Add machine to inventory
-      add_host:
-        name: "{{ item.instances.0.tags['Name'] }}"
-        ansible_host: "{{ item.instances.0.dns_name }}"
-        ansible_user: "{{ item.instances.0.aws_user | default(aws_user) }}"
-        groups: "{{ item.instances.0.tags['ansible-groups'].split(',') }}"
-        aws_region: "{{ aws_region }}"
-        aws_ip: "{{ item.instances.0.public_ip }}"
-        aws_id: "{{ item.instances.0.id }}"
-        openshift_node_group_name: "{{ item.instances.0.tags['ansible-node-group'] }}"
-      with_items: "{{ ec2.results }}"
-
-    - name: write the inventory
-      template:
-        src: ./template-inventory.j2
-        dest: "inventory/hosts"
-
-    - name: Refresh inventory to ensure new instances exist in inventory
-      meta: refresh_inventory
-
-- hosts: all
-  gather_facts: no
-  become: true
-  tasks:
-    - wait_for_connection: {}
-    - name: Make sure hostname is set to public ansible host
-      hostname:
-        name: "{{ ansible_host }}"
-    - name: Detecting Operating System
-      shell: ls /run/ostree-booted
-      ignore_errors: yes
-      failed_when: false
-      register: ostree_output
-    - name: Update all packages
-      package:
-        name: '*'
-        state: latest
-      when: ostree_output.rc != 0
-      register: yum_update
-    - name: Update Atomic system
-      command: atomic host upgrade
-      when: ostree_output.rc == 0
-      register: ostree_update
-    - name: Reboot machines
-      shell: sleep 5 && systemctl reboot
-      async: 1
-      poll: 0
-      ignore_errors: true
-      when: yum_update | changed or ostree_update | changed
-    - name: Wait for connection
-      wait_for_connection:
-        connect_timeout: 20
-        sleep: 5
-        delay: 5
-        timeout: 300
-    - setup: {}
-
-- import_playbook: ../../playbooks/openshift-node/network_manager.yml
-- import_playbook: ../../playbooks/prerequisites.yml
-- import_playbook: ../../playbooks/deploy_cluster.yml
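
To inspect what `launch.yml` created (and what `deprovision.yml` will act on), the cluster tag gives a convenient filter. A sketch using the AWS CLI, assuming the sample values `aws_region: us-east-1` and `aws_cluster_id: ci` from `vars.yml.sample`:

```sh
# List every instance carrying the cluster tag used by these playbooks
aws ec2 describe-instances \
  --region us-east-1 \
  --filters "Name=tag-key,Values=kubernetes.io/cluster/ci" \
  --query 'Reservations[].Instances[].[InstanceId,State.Name,Tags]'
```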

test/ci/template-inventory.j2 (+0 -26)

@@ -1,26 +0,0 @@
-[OSEv3:vars]
-ansible_python_interpreter="{{ python }}"
-ansible_user="{{ aws_user }}"
-aws_region="{{ aws_region }}"
-openshift_master_default_subdomain="{{ hostvars[groups[('lb' in groups) | ternary('lb', 'masters')][0]]["aws_ip"] }}.xip.io"
-
-[OSEv3:children]
-{% for group in groups %}
-{% if group not in ["all", "ungrouped", "OSEv3"] %}
-{{group}}
-{% endif %}
-{% endfor %}
-
-{% for group in groups %}
-{% if group not in ["all", "ungrouped", "OSEv3"] %}
-[{{group}}]
-{% for entry in groups[group] %}
-{% set addon_opts = "" %}
-{% if group == "nodes" %}
-{% set addon_opts = addon_opts + " openshift_node_group_name='" + hostvars[entry]['openshift_node_group_name'] + "'" %}
-{% endif %}
-{{ entry }} ansible_host='{{ hostvars[entry]['ansible_host'] }}' aws_id='{{ hostvars[entry]['aws_id'] }}' {{ addon_opts }}
-{% endfor %}
-{% endif %}
-
-{% endfor %}
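
Rendered against the single all-in-one host from `vars.yml.sample` (with no `lb` group, the `masters` group supplies the wildcard subdomain), the template would emit an `inventory/hosts` roughly like the following; all concrete values are hypothetical:

```ini
[OSEv3:vars]
ansible_python_interpreter="/usr/bin/python"
ansible_user="ec2-user"
aws_region="us-east-1"
openshift_master_default_subdomain="198.51.100.10.xip.io"

[OSEv3:children]
masters
etcd
nodes

[masters]
ci_test-master ansible_host='ec2-198-51-100-10.compute-1.amazonaws.com' aws_id='i-0123456789abcdef0'

[etcd]
ci_test-master ansible_host='ec2-198-51-100-10.compute-1.amazonaws.com' aws_id='i-0123456789abcdef0'

[nodes]
ci_test-master ansible_host='ec2-198-51-100-10.compute-1.amazonaws.com' aws_id='i-0123456789abcdef0' openshift_node_group_name='node-config-all-in-one'
```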

test/ci/vars.yml.sample (+0 -46)

@@ -1,46 +0,0 @@
----
-vm_prefix: "ci_test"
-# aws_use_auto_terminator is set to true by default, as the rh-dev account doesn't have
-# permission to terminate instances. These should instead be stopped and renamed to
-# include 'terminate'.
-#aws_use_auto_terminator: false
-
-type: aws
-aws_user: "ec2-user"
-python: "/usr/bin/python"
-
-aws_key: "libra"
-aws_region: "us-east-1"
-aws_cluster_id: "ci"
-# us-east-1d
-aws_subnet: "subnet-cf57c596"
-
-aws_expiration_date: "{{ lookup('pipe','date -d \"4 hours\" --iso=minutes --utc') }}"
-
-aws_ami_tags:
-  "tag:operating_system": "rhel"
-  "tag:image_stage": "base"
-  "tag:ready": "yes"
-
-aws_instances:
-- name: "{{ vm_prefix }}-master"
-  ansible_groups:
-    - masters
-    - etcd
-    - nodes
-  aws_flavor: t2.large
-  aws_security_group: public
-  node_group: "node-config-all-in-one"
-  # Use custom AMI tags
-  # aws_ami_tags:
-  #   operating_system: "rhel"
-  #   image_stage: "base"
-  #   ready: "yes"
-  # Use custom AMI
-  #aws_image: "ami-70e8fd66"
-  # Attach custom volumes
-  #aws_volumes:
-  # - device_name: /dev/sdb
-  #   volume_size: 50
-  #   delete_on_termination: yes
-  #Set expiration date for instances on CI namespace
-  #aws_expiration_date: "{{ lookup('pipe','date -d \"4 hours\" --iso=minutes --utc') }}"