
Merge branch 'devel-40' into devel-40-master-rebase

Vadim Rutkovsky 6 years ago
parent
commit
5a02a2e31a

+ 2 - 4
playbooks/deploy_cluster_40.yml

@@ -44,7 +44,7 @@
   tasks:
   # This is required for openshift_node40/config.yml
   - set_fact:
-      openshift_bootstrap_endpoint: "https://{{ mcd_endpoint }}/config/master"
+      openshift_bootstrap_endpoint: "https://{{ openshift_master_cluster_hostname }}:{{ mcd_port }}/config/master"
   - name: Wait for bootstrap endpoint to show up
     uri:
       url: "{{ openshift_bootstrap_endpoint }}"
@@ -70,7 +70,7 @@
   tasks:
   # This is required for openshift_node40/config.yml
   - set_fact:
-      openshift_bootstrap_endpoint: "https://{{ mcd_endpoint }}/config/worker"
+      openshift_bootstrap_endpoint: "https://{{ openshift_master_cluster_hostname }}:{{ mcd_port }}/config/worker"
   - name: Wait for bootstrap endpoint to show up
     uri:
       url: "{{ openshift_bootstrap_endpoint }}"
@@ -170,5 +170,3 @@
     - fail:
         msg: Required operators didn't complete the install
     when: operator.failed
-
-  - pause: {}

+ 3 - 0
roles/openshift_gcp/defaults/main.yml

@@ -122,6 +122,9 @@ openshift_gcp_firewall_rules:
     allowed:
       - ip_protocol: 'tcp'
         ports:
+          - '80'
+          - '443'
+          - '1936'
           - "{{ openshift_gcp_master_healthcheck_port }}"
           - "{{ openshift_gcp_kubernetes_api_port }}"
           - "{{ openshift_master_api_port }}"

+ 0 - 2
roles/openshift_gcp/templates/remove.j2.sh

@@ -14,7 +14,6 @@ if gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${
         # export all dns records that match into a zone format, and turn each line into a set of args for
         # record-sets transaction.
         gcloud dns record-sets export --project "{{ openshift_gcp_project }}" -z "${dns_zone}" --zone-file-format "${dns}"
-
         # Write the header
         ETCD_DNS_NAME="_etcd-server-ssl._tcp.{{ lookup('env', 'INSTANCE_PREFIX') | mandatory }}.{{ public_hosted_zone }}."
         grep -F -e "${ETCD_DNS_NAME}" "${dns}" | awk '{ print "--name", $1, "--ttl", $2, "--type", $4 }' | head -n1 | xargs echo -n > "${dns}.input"
@@ -22,7 +21,6 @@ if gcloud --project "{{ openshift_gcp_project }}" dns managed-zones describe "${
         grep -F -e "${ETCD_DNS_NAME}" "${dns}" | awk '{ print " \x27"$5" "$6" "$7" "$8"\x27"; }' | tr -d '\n\r' >> "${dns}.input" || true
         echo >> "${dns}.input"
 
-
         if [ -s "${dns}.input" ]; then
             rm -f "${dns}"
             gcloud --project "{{ openshift_gcp_project }}" dns record-sets transaction --transaction-file=$dns start -z "${dns_zone}"

+ 19 - 0
roles/openshift_node40/tasks/aws.yml

@@ -0,0 +1,19 @@
+---
+- name: Configure AWS Cloud Provider Settings
+  lineinfile:
+    dest: /etc/kubernetes/kubelet-env
+    regexp: "{{ item.regex }}"
+    line: "{{ item.line }}"
+    create: true
+  with_items:
+    - regex: '^AWS_ACCESS_KEY_ID='
+      line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}"
+    - regex: '^AWS_SECRET_ACCESS_KEY='
+      line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
+  register: sys_env_update
+  no_log: True
+  when:
+    - openshift_cloudprovider_kind is defined
+    - openshift_cloudprovider_kind == 'aws'
+    - openshift_cloudprovider_aws_access_key is defined
+    - openshift_cloudprovider_aws_secret_key is defined
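For reference, when `openshift_cloudprovider_kind` is `aws` and both credential variables are supplied, the `lineinfile` loop above leaves `/etc/kubernetes/kubelet-env` with two lines of this shape (the values shown are AWS's documented example placeholders, not real credentials):

```
AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE
AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
```

Because the task sets `no_log: True`, the rendered values are not echoed in the Ansible output.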

+ 7 - 0
test/aws/README.md

@@ -0,0 +1,7 @@
+* Copy `test/ci/vars.yml.sample` to `test/ci/vars.yml`
+* Adjust it to your liking - this is the host configuration
+* Adjust `inventory/group_vars/OSEv3/vars.yml` - this is the Origin-specific config
+* Provision instances via `ansible-playbook -vv -i test/ci/inventory/ test/ci/launch.yml`
+  This will place the inventory file in `test/ci/inventory/hosts` and run prerequisites and deploy.
+
+* Once the setup is complete, run `ansible-playbook -vv -i test/ci/inventory/ test/ci/deprovision.yml`
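Taken together, the steps in this README boil down to a short shell session. The sketch below simply strings together the commands from the bullets above; the editor invocation is illustrative:

```sh
# Copy the sample vars and adjust the host configuration
cp test/ci/vars.yml.sample test/ci/vars.yml
vi test/ci/vars.yml inventory/group_vars/OSEv3/vars.yml

# Provision instances; this also writes test/ci/inventory/hosts and runs prerequisites + deploy
ansible-playbook -vv -i test/ci/inventory/ test/ci/launch.yml

# Tear everything down when finished
ansible-playbook -vv -i test/ci/inventory/ test/ci/deprovision.yml
```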

+ 45 - 0
test/aws/deprovision.yml

@@ -0,0 +1,45 @@
+---
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+    - include_vars: "{{ item }}"
+      with_first_found:
+        - vars.yml
+        - vars.yaml
+
+    - name: Gather ec2 facts
+      ec2_instance_facts:
+        region: "{{ aws_region }}"
+        filters:
+          tag-key: "kubernetes.io/cluster/{{ aws_cluster_id }}"
+      register: ec2
+
+    - name: Terminate instances
+      ec2:
+        instance_ids: "{{ item.instance_id }}"
+        region: "{{ aws_region }}"
+        state: absent
+        wait: no
+      with_items: "{{ ec2.instances }}"
+      when: not aws_use_auto_terminator | default(true)
+
+    - when: aws_use_auto_terminator | default(true)
+      block:
+        - name: Stop VMs
+          ec2:
+            instance_ids: "{{ item.instance_id }}"
+            region: "{{ aws_region }}"
+            state: stopped
+            wait: no
+          with_items: "{{ ec2.instances }}"
+          ignore_errors: true
+
+        - name: Rename VMs
+          ec2_tag:
+            resource: "{{ item.instance_id }}"
+            region: "{{ aws_region }}"
+            tags:
+              Name: "{{ item.tags.Name }}-terminate"
+          when: "'-terminate' not in item.tags.Name"
+          with_items: "{{ ec2.instances }}"

+ 113 - 0
test/aws/inventory/group_vars/OSEv3/vars.yml

@@ -0,0 +1,113 @@
+---
+ansible_become: true
+ansible_become_sudo: true
+
+openshift_deployment_type: origin
+openshift_repos_enable_testing: false
+
+#Minimal set of services
+openshift_web_console_install: true
+openshift_console_install: true
+openshift_metrics_install_metrics: false
+openshift_metrics_install_logging: false
+openshift_logging_install_logging: false
+openshift_management_install_management: false
+template_service_broker_install: false
+ansible_service_broker_install: false
+openshift_enable_service_catalog: false
+osm_use_cockpit: false
+openshift_monitoring_deploy: false
+openshift_metering_install: false
+openshift_metrics_server_install: false
+openshift_monitor_availability_install: false
+openshift_enable_olm: false
+openshift_descheduler_install: false
+openshift_node_problem_detector_install: false
+openshift_autoheal_deploy: false
+openshift_cluster_autoscaler_install: false
+
+# debugging
+debug_level: 4
+etcd_debug: true
+etcd_log_package_levels: 'auth=INFO,etcdmain=DEBUG,etcdserver=DEBUG'
+openshift_docker_options: "--log-driver=journald"
+
+#Disable journald persistence
+journald_vars_to_replace:
+  - { var: Storage, val: volatile }
+  - { var: Compress, val: no }
+  - { var: SyncIntervalSec, val: 1s }
+  - { var: RateLimitInterval, val: 1s }
+  - { var: RateLimitBurst, val: 10000 }
+  - { var: SystemMaxUse, val: 8G }
+  - { var: SystemKeepFree, val: 20% }
+  - { var: SystemMaxFileSize, val: 10M }
+  - { var: MaxRetentionSec, val: 1month }
+  - { var: MaxFileSec, val: 1day }
+  - { var: ForwardToSyslog, val: no }
+  - { var: ForwardToWall, val: no }
+
+#Other settings
+openshift_enable_origin_repo: false
+osm_default_node_selector: "node-role.kubernetes.io/compute=true"
+openshift_hosted_infra_selector: "node-role.kubernetes.io/infra=true"
+openshift_logging_es_nodeselector:
+  node-role.kubernetes.io/infra: "true"
+openshift_logging_es_ops_nodeselector:
+  node-role.kubernetes.io/infra: "true"
+osm_controller_args:
+  enable-hostpath-provisioner:
+    - "true"
+openshift_hosted_router_create_certificate: true
+openshift_master_audit_config:
+  enabled: true
+openshift_master_identity_providers:
+  - name: "allow_all"
+    login: "true"
+    challenge: "true"
+    kind: "AllowAllPasswordIdentityProvider"
+openshift_template_service_broker_namespaces:
+  - "openshift"
+enable_excluders: "true"
+osm_cluster_network_cidr: "10.128.0.0/14"
+openshift_portal_net: "172.30.0.0/16"
+osm_host_subnet_length: 9
+openshift_check_min_host_disk_gb: 1.5
+openshift_check_min_host_memory_gb: 1.9
+openshift_disable_check: package_update,package_availability,memory_availability,disk_availability
+
+openshift_logging_use_mux: false
+openshift_logging_use_ops: true
+openshift_logging_es_log_appenders:
+  - "console"
+openshift_logging_fluentd_journal_read_from_head: false
+openshift_logging_fluentd_audit_container_engine: true
+
+openshift_logging_curator_cpu_request: "100m"
+openshift_logging_curator_memory_limit: "32Mi"
+openshift_logging_curator_ops_cpu_request: "100m"
+openshift_logging_curator_ops_memory_limit: "32Mi"
+openshift_logging_elasticsearch_proxy_cpu_request: "100m"
+openshift_logging_elasticsearch_proxy_memory_limit: "32Mi"
+openshift_logging_es_cpu_request: "400m"
+openshift_logging_es_memory_limit: "4Gi"
+openshift_logging_es_ops_cpu_request: "400m"
+openshift_logging_es_ops_memory_limit: "4Gi"
+openshift_logging_eventrouter_cpu_request: "100m"
+openshift_logging_eventrouter_memory_limit: "64Mi"
+openshift_logging_fluentd_cpu_request: "100m"
+openshift_logging_fluentd_memory_limit: "256Mi"
+openshift_logging_kibana_cpu_request: "100m"
+openshift_logging_kibana_memory_limit: "128Mi"
+openshift_logging_kibana_ops_cpu_request: "100m"
+openshift_logging_kibana_ops_memory_limit: "128Mi"
+openshift_logging_kibana_ops_proxy_cpu_request: "100m"
+openshift_logging_kibana_ops_proxy_memory_limit: "64Mi"
+openshift_logging_kibana_proxy_cpu_request: "100m"
+openshift_logging_kibana_proxy_memory_limit: "64Mi"
+openshift_logging_mux_cpu_request: "400m"
+openshift_logging_mux_memory_limit: "256Mi"
+
+openshift_master_cluster_method: native
+
+openshift_node_port_range: '30000-32000'

+ 112 - 0
test/aws/launch.yml

@@ -0,0 +1,112 @@
+---
+- hosts: localhost
+  connection: local
+  gather_facts: false
+  tasks:
+    - include_vars: "{{ item }}"
+      with_first_found:
+        - vars.yml
+        - vars.yaml
+
+    - name: list available AMIs
+      ec2_ami_facts:
+        region: "{{ aws_region }}"
+        filters: "{{ aws_ami_tags }}"
+      register: ami_facts
+      when: aws_image is not defined
+
+    - name: determine which AMI to use
+      set_fact:
+        aws_image: "{{ ami_facts.images[-1].image_id }}"
+      when: aws_image is not defined
+
+
+    - name: Create EC2 instance
+      ec2:
+        region: "{{ aws_region }}"
+        key_name: "{{ aws_key }}"
+        instance_type: "{{ item.aws_flavor }}"
+        image: "{{ item.aws_image | default(aws_image) }}"
+        wait: yes
+        group: "{{ item.aws_security_group }}"
+        count: 1
+        vpc_subnet_id: "{{ aws_subnet }}"
+        assign_public_ip: yes
+        instance_tags: "{{ aws_instance_tags }}"
+        volumes: "{{ item.aws_volumes | default(omit) }}"
+      register: ec2
+      with_items: "{{ aws_instances }}"
+      vars:
+        aws_instance_tags: |
+          {
+            "kubernetes.io/cluster/{{ aws_cluster_id }}": "true",
+            "Name": "{{ item.name }}",
+            "ansible-groups": "{{ item.ansible_groups | join(',') }}",
+            "ansible-node-group": "{{ item.node_group }}",
+            "expirationDate": "{{ item.aws_expiration_date | default(aws_expiration_date) }}"
+          }
+
+    - name: Add machine to inventory
+      add_host:
+        name: "{{ item.instances.0.tags['Name'] }}"
+        ansible_host: "{{ item.instances.0.dns_name }}"
+        ansible_user: "{{ item.instances.0.aws_user | default(aws_user)}}"
+        groups: "{{ item.instances.0.tags['ansible-groups'].split(',') }}"
+        aws_region: "{{ aws_region }}"
+        aws_ip: "{{ item.instances.0.public_ip }}"
+        aws_id: "{{ item.instances.0.id }}"
+        openshift_node_group_name: "{{ item.instances.0.tags['ansible-node-group'] }}"
+      with_items: "{{ ec2.results }}"
+
+    - name: write the inventory
+      template:
+        src: ./template-inventory.j2
+        dest: "inventory/hosts"
+
+    - name: Refresh inventory to ensure new instances exist in inventory
+      meta: refresh_inventory
+
+- hosts: all
+  gather_facts: no
+  become: true
+  tasks:
+    - wait_for_connection: {}
+    - name: Make sure hostname is set to public ansible host
+      hostname:
+        name: "{{ ansible_host }}"
+    - name: Detecting Operating System
+      shell: ls /run/ostree-booted
+      ignore_errors: yes
+      failed_when: false
+      register: ostree_output
+    - name: Update all packages
+      package:
+        name: '*'
+        state: latest
+      when: ostree_output.rc != 0
+      register: yum_update
+    - name: Update Atomic system
+      command: atomic host upgrade
+      when: ostree_output.rc == 0
+      register: ostree_update
+    - name: Reboot machines
+      shell: sleep 5 && systemctl reboot
+      async: 1
+      poll: 0
+      ignore_errors: true
+      when: yum_update | changed or ostree_update | changed
+    - name: Wait for connection
+      wait_for_connection:
+        connect_timeout: 20
+        sleep: 5
+        delay: 5
+        timeout: 300
+    - setup: {}
+
+- import_playbook: ../../playbooks/openshift-node/network_manager.yml
+- import_playbook: ../../playbooks/prerequisites.yml
+- import_playbook: ../../playbooks/deploy_cluster.yml

+ 26 - 0
test/aws/template-inventory.j2

@@ -0,0 +1,26 @@
+[OSEv3:vars]
+ansible_python_interpreter="{{ python }}"
+ansible_user="{{ aws_user }}"
+aws_region="{{ aws_region }}"
+openshift_master_default_subdomain="{{ hostvars[groups[('lb' in groups) | ternary('lb', 'masters')][0]]["aws_ip"] }}.xip.io"
+
+[OSEv3:children]
+{% for group in groups %}
+{% if group not in ["all", "ungrouped", "OSEv3"] %}
+{{group}}
+{% endif %}
+{% endfor %}
+
+{% for group in groups %}
+{% if group not in ["all", "ungrouped", "OSEv3"] %}
+[{{group}}]
+{% for entry in groups[group] %}
+{% set addon_opts = "" %}
+{% if group == "nodes" %}
+{% set addon_opts = addon_opts + " openshift_node_group_name='" + hostvars[entry]['openshift_node_group_name'] + "'" %}
+{% endif %}
+{{ entry }} ansible_host='{{ hostvars[entry]['ansible_host'] }}' aws_id='{{ hostvars[entry]['aws_id'] }}' {{ addon_opts }}
+{% endfor %}
+{% endif %}
+
+{% endfor %}

+ 46 - 0
test/aws/vars.yml.sample

@@ -0,0 +1,46 @@
+---
+vm_prefix: "ci_test"
+# aws_use_auto_terminator is set to true by default, as the rh-dev account doesn't have permission
+# to terminate instances; instances are stopped and renamed to include 'terminate' instead
+#aws_use_auto_terminator: false
+
+type: aws
+aws_user: "ec2-user"
+python: "/usr/bin/python"
+
+aws_key: "libra"
+aws_region: "us-east-1"
+aws_cluster_id: "ci"
+# us-east-1d
+aws_subnet: "subnet-cf57c596"
+
+aws_expiration_date: "{{ lookup('pipe','date -d \"4 hours\" --iso=minutes --utc') }}"
+
+aws_ami_tags:
+  "tag:operating_system": "rhel"
+  "tag:image_stage": "base"
+  "tag:ready": "yes"
+
+aws_instances:
+- name: "{{ vm_prefix }}-master"
+  ansible_groups:
+    - masters
+    - etcd
+    - nodes
+  aws_flavor: t2.large
+  aws_security_group: public
+  node_group: "node-config-all-in-one"
+  # Use custom AMI tags
+  # aws_ami_tags:
+  #   operating_system: "rhel"
+  #   image_stage: "base"
+  #   ready: "yes"
+  # Use custom AMI
+  #aws_image: "ami-70e8fd66"
+  # Attach custom volumes
+  #aws_volumes:
+  # - device_name: /dev/sdb
+  #   volume_size: 50
+  #   delete_on_termination: yes
+  #Set expiration date for instances on CI namespace
+  #aws_expiration_date: "{{ lookup('pipe','date -d \"4 hours\" --iso=minutes --utc') }}"
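
Putting `template-inventory.j2` and this sample vars file together: for the single all-in-one instance defined above, the generated `inventory/hosts` would look roughly like the sketch below. The public IP, DNS name, and instance id are made-up placeholders; exact spacing may differ from the real render.

```ini
[OSEv3:vars]
ansible_python_interpreter="/usr/bin/python"
ansible_user="ec2-user"
aws_region="us-east-1"
openshift_master_default_subdomain="198.51.100.20.xip.io"

[OSEv3:children]
masters
etcd
nodes

[masters]
ci_test-master ansible_host='ec2-198-51-100-20.compute-1.amazonaws.com' aws_id='i-0123456789abcdef0'

[etcd]
ci_test-master ansible_host='ec2-198-51-100-20.compute-1.amazonaws.com' aws_id='i-0123456789abcdef0'

[nodes]
ci_test-master ansible_host='ec2-198-51-100-20.compute-1.amazonaws.com' aws_id='i-0123456789abcdef0' openshift_node_group_name='node-config-all-in-one'
```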