Browse Source

Revert "GCE support"

Thomas Wiest 9 years ago
parent
commit
3073d1f729

+ 3 - 14
README_GCE.md

@@ -39,13 +39,6 @@ Create a gce.ini file for GCE
 * gce_service_account_pem_file_path - Full path from previous steps
 * gce_project_id - Found in "Projects", it lists all the GCE projects you are associated with.  The page lists their "Project Name" and "Project ID".  You want the "Project ID"
 
-Mandatory customization variables (check the values according to your tenant):
-* zone = europe-west1-d
-* network = default
-* gce_machine_type = n1-standard-2
-* gce_machine_image = preinstalled-slave-50g-v5
-
-
 1. vi ~/.gce/gce.ini
 1. make the contents look like this:
 ```
@@ -53,15 +46,11 @@ Mandatory customization variables (check the values according to your tenant):
 gce_service_account_email_address = long...@developer.gserviceaccount.com
 gce_service_account_pem_file_path = /full/path/to/project_id-gce_key_hash.pem
 gce_project_id = project_id
-zone = europe-west1-d
-network = default
-gce_machine_type = n1-standard-2
-gce_machine_image = preinstalled-slave-50g-v5
-
 ```
-1. Define the environment variable GCE_INI_PATH so gce.py can pick it up and bin/cluster can also read it
+1. Set up a symlink so that gce.py will pick it up (the link must be in the same dir as gce.py)
 ```
-export GCE_INI_PATH=~/.gce/gce.ini
+  cd openshift-ansible/inventory/gce
+  ln -s ~/.gce/gce.ini gce.ini
 ```
 
 

+ 4 - 8
bin/cluster

@@ -142,14 +142,10 @@ class Cluster(object):
         """
         config = ConfigParser.ConfigParser()
         if 'gce' == provider:
-            gce_ini_default_path = os.path.join(
-                'inventory/gce/hosts/gce.ini')
-            gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
-            if os.path.exists(gce_ini_path): 
-                config.readfp(open(gce_ini_path))
-
-                for key in config.options('gce'):
-                    os.environ[key] = config.get('gce', key)
+            config.readfp(open('inventory/gce/hosts/gce.ini'))
+
+            for key in config.options('gce'):
+                os.environ[key] = config.get('gce', key)
 
             inventory = '-i inventory/gce/hosts'
         elif 'aws' == provider:

+ 3 - 6
inventory/gce/hosts/gce.py

@@ -120,7 +120,6 @@ class GceInventory(object):
             os.path.dirname(os.path.realpath(__file__)), "gce.ini")
         gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
 
-
         # Create a ConfigParser.
         # This provides empty defaults to each key, so that environment
         # variable configuration (as opposed to INI configuration) is able
@@ -174,7 +173,6 @@ class GceInventory(object):
         args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
         kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
 
-        
         # Retrieve and return the GCE driver.
         gce = get_driver(Provider.GCE)(*args, **kwargs)
         gce.connection.user_agent_append(
@@ -213,8 +211,7 @@ class GceInventory(object):
             'gce_image': inst.image,
             'gce_machine_type': inst.size,
             'gce_private_ip': inst.private_ips[0],
-            # Hosts don't always have a public IP name
-            #'gce_public_ip': inst.public_ips[0],
+            'gce_public_ip': inst.public_ips[0],
             'gce_name': inst.name,
             'gce_description': inst.extra['description'],
             'gce_status': inst.extra['status'],
@@ -222,8 +219,8 @@ class GceInventory(object):
             'gce_tags': inst.extra['tags'],
             'gce_metadata': md,
             'gce_network': net,
-            # Hosts don't always have a public IP name
-            #'ansible_ssh_host': inst.public_ips[0]
+            # Hosts don't have a public name, so we add an IP
+            'ansible_ssh_host': inst.public_ips[0]
         }
 
     def get_instance(self, instance_name):

+ 1 - 1
inventory/openstack/hosts/nova.py

@@ -34,7 +34,7 @@ except ImportError:
 # executed with no parameters, return the list of
 # all groups and hosts
 
-NOVA_CONFIG_FILES = [os.path.join(os.path.dirname(os.path.realpath(__file__)), "nova.ini"),
+NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini",
                      os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")),
                      "/etc/ansible/nova.ini"]
 

+ 0 - 15
playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml

@@ -1,15 +0,0 @@
----
-- set_fact: k8s_type=infra
-- set_fact: sub_host_type="{{ type }}"
-- set_fact: number_infra="{{ count }}"
-
-- name: Generate infra  instance names(s)
-  set_fact:
-    scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
-  register: infra_names_output
-  with_sequence: count={{ number_infra }}
-
-- set_fact:
-    infra_names: "{{ infra_names_output.results | default([])
-                    | oo_collect('ansible_facts')
-                    | oo_collect('scratch_name') }}"

+ 0 - 4
playbooks/gce/openshift-cluster/config.yml

@@ -10,8 +10,6 @@
   - set_fact:
       g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
       g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
-      use_sdn: "{{ do_we_use_openshift_sdn }}"
-      sdn_plugin: "{{ sdn_network_plugin }}"
 
 - include: ../../common/openshift-cluster/config.yml
   vars:
@@ -24,5 +22,3 @@
     openshift_debug_level: 2
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ gce_private_ip }}"
-    openshift_use_openshift_sdn: "{{ hostvars.localhost.use_sdn  }}"
-    os_sdn_network_plugin_name: "{{ hostvars.localhost.sdn_plugin }}"

+ 0 - 49
playbooks/gce/openshift-cluster/join_node.yml

@@ -1,49 +0,0 @@
----
-- name: Populate oo_hosts_to_update group
-  hosts: localhost
-  gather_facts: no
-  vars_files:
-  - vars.yml
-  tasks:
-  - name: Evaluate oo_hosts_to_update
-    add_host:
-      name: "{{ node_ip }}"
-      groups: oo_hosts_to_update
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-
-- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-
-- name: Populate oo_masters_to_config host group
-  hosts: localhost
-  gather_facts: no
-  vars_files:
-  - vars.yml
-  tasks:
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ node_ip }}"
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: oo_nodes_to_config
-
-  - name: Evaluate oo_first_master
-    add_host:
-      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: oo_first_master
-    when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
-
-#- include: config.yml
-- include: ../../common/openshift-node/config.yml
-  vars:
-    openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 4
-    openshift_deployment_type: "{{ deployment_type }}"
-    openshift_hostname: "{{ ansible_default_ipv4.address }}"
-    openshift_use_openshift_sdn: true
-    openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} "
-    os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
-    osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
-    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"

+ 27 - 27
playbooks/gce/openshift-cluster/launch.yml

@@ -28,33 +28,33 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
 
-#  - include: ../../common/openshift-cluster/set_infra_launch_facts_tasks.yml
-#    vars:
-#      type: "infra"
-#      count: "{{ num_infra }}"
-#  - include: tasks/launch_instances.yml
-#    vars:
-#      instances: "{{ infra_names }}"
-#      cluster: "{{ cluster_id }}"
-#      type: "{{ k8s_type }}"
-#      g_sub_host_type: "{{ sub_host_type }}"
-#
-#  - set_fact:
-#      a_infra: "{{ infra_names[0] }}"
-#  - add_host: name={{ a_infra }} groups=service_master
-#
+  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+    vars:
+      type: "infra"
+      count: "{{ num_infra }}"
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ infra_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+      g_sub_host_type: "{{ sub_host_type }}"
+
+  - set_fact:
+      a_infra: "{{ infra_names[0] }}"
+  - add_host: name={{ a_infra }} groups=service_master
+
 - include: update.yml
-#
-#- name: Deploy OpenShift Services
-#  hosts: service_master
-#  connection: ssh
-#  gather_facts: yes
-#  roles:
-#  - openshift_registry
-#  - openshift_router
-#
-#- include: ../../common/openshift-cluster/create_services.yml
-#  vars:
-#     g_svc_master: "{{ service_master }}"
+
+- name: Deploy OpenShift Services
+  hosts: service_master
+  connection: ssh
+  gather_facts: yes
+  roles:
+  - openshift_registry
+  - openshift_router
+
+- include: ../../common/openshift-cluster/create_services.yml
+  vars:
+     g_svc_master: "{{ service_master }}"
 
 - include: list.yml

+ 2 - 2
playbooks/gce/openshift-cluster/list.yml

@@ -14,11 +14,11 @@
       groups: oo_list_hosts
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated | default([]))
+    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
 
 - name: List instance(s)
   hosts: oo_list_hosts
   gather_facts: no
   tasks:
   - debug:
-      msg: "private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
+      msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"

+ 8 - 13
playbooks/gce/openshift-cluster/tasks/launch_instances.yml

@@ -10,38 +10,33 @@
     service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
     pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
     project_id: "{{ lookup('env', 'gce_project_id') }}"
-    zone: "{{ lookup('env', 'zone') }}"
-    network: "{{ lookup('env', 'network') }}"
-# unsupported in 1.9.+
-    #service_account_permissions: "datastore,logging-write"
     tags:
       - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
       - env-{{ cluster }}
       - host-type-{{ type }}
-      - sub-host-type-{{ g_sub_host_type }}
+      - sub-host-type-{{ sub_host_type }}
       - env-host-type-{{ cluster }}-openshift-{{ type }}
-  when: instances |length > 0 
   register: gce
 
 - name: Add new instances to groups and set variables needed
   add_host:
     hostname: "{{ item.name }}"
-    ansible_ssh_host: "{{ item.name }}"
+    ansible_ssh_host: "{{ item.public_ip }}"
     ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
     ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
     groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
     gce_public_ip: "{{ item.public_ip }}"
     gce_private_ip: "{{ item.private_ip }}"
-  with_items: gce.instance_data | default([])
+  with_items: gce.instance_data
 
 - name: Wait for ssh
-  wait_for: port=22 host={{ item.name }}
-  with_items: gce.instance_data | default([])
+  wait_for: port=22 host={{ item.public_ip }}
+  with_items: gce.instance_data
 
 - name: Wait for user setup
   command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
   register: result
   until: result.rc == 0
-  retries: 30
-  delay: 5
-  with_items: gce.instance_data | default([])
+  retries: 20
+  delay: 10
+  with_items: gce.instance_data

+ 21 - 34
playbooks/gce/openshift-cluster/terminate.yml

@@ -1,18 +1,25 @@
 ---
 - name: Terminate instance(s)
   hosts: localhost
-  connection: local
   gather_facts: no
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: scratch_group=tag_env-{{ cluster_id }}
+  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
   - add_host:
       name: "{{ item }}"
-      groups: oo_hosts_to_terminate
+      groups: oo_hosts_to_terminate, oo_nodes_to_terminate
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated | default([]))
+    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+
+  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_hosts_to_terminate, oo_masters_to_terminate
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
 
 - name: Unsubscribe VMs
   hosts: oo_hosts_to_terminate
@@ -25,34 +32,14 @@
           lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
             default('no', True) | lower in ['no', 'false']
 
-- name: Terminate instances(s)
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  vars_files:
-  - vars.yml
-  tasks:
-
-    - name: Terminate instances that were previously launched
-      local_action:
-        module: gce
-        state: 'absent'
-        name: "{{ item }}"
-        service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-        pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-        project_id: "{{ lookup('env', 'gce_project_id') }}"
-        zone: "{{ lookup('env', 'zone') }}"
-      with_items: groups['oo_hosts_to_terminate'] | default([])
-      when: item is defined
+- include: ../openshift-node/terminate.yml
+  vars:
+    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
 
-#- include: ../openshift-node/terminate.yml
-#  vars:
-#    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-#    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-#    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
-#
-#- include: ../openshift-master/terminate.yml
-#  vars:
-#    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-#    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-#    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+- include: ../openshift-master/terminate.yml
+  vars:
+    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"

+ 3 - 5
playbooks/gce/openshift-cluster/vars.yml

@@ -1,11 +1,8 @@
 ---
-do_we_use_openshift_sdn: true
-sdn_network_plugin: redhat/openshift-ovs-subnet 
-# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation
 deployment_vars:
   origin:
-    image: preinstalled-slave-50g-v5
-    ssh_user: root
+    image: centos-7
+    ssh_user:
     sudo: yes
   online:
     image: libra-rhel7
@@ -15,3 +12,4 @@ deployment_vars:
     image: rhel-7
     ssh_user:
     sudo: yes
+

+ 5 - 30
playbooks/openstack/openshift-cluster/launch.yml

@@ -19,32 +19,15 @@
     changed_when: false
     failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr
 
-  - name: Create OpenStack Stack
-    command: 'heat stack-create -f {{ openstack_infra_heat_stack }}
-             -P key_pair={{ openstack_ssh_keypair }}
-             -P cluster_id={{ cluster_id }}
-             -P dns_nameservers={{ openstack_network_dns | join(",") }}
-             -P cidr={{ openstack_network_cidr }}
-             -P ssh_incoming={{ openstack_ssh_access_from }}
-             -P num_masters={{ num_masters }}
-             -P num_nodes={{ num_nodes }}
-             -P num_infra={{ num_infra }}
-             -P master_image={{ deployment_vars[deployment_type].image }}
-             -P node_image={{ deployment_vars[deployment_type].image }}
-             -P infra_image={{ deployment_vars[deployment_type].image }}
-             -P master_flavor={{ openstack_flavor["master"] }}
-             -P node_flavor={{ openstack_flavor["node"] }}
-             -P infra_flavor={{ openstack_flavor["infra"] }}
-             -P ssh_public_key="{{ openstack_ssh_public_key }}"
-             openshift-ansible-{{ cluster_id }}-stack'
+  - set_fact:
+      heat_stack_action: 'stack-create'
     when: stack_show_result.rc == 1
   - set_fact:
       heat_stack_action: 'stack-update'
     when: stack_show_result.rc == 0
 
-  - name: Update OpenStack Stack
-    command: 'heat stack-update -f {{ openstack_infra_heat_stack }}
-             -P key_pair={{ openstack_ssh_keypair }}
+  - name: Create or Update OpenStack Stack
+    command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
              -P cluster_id={{ cluster_id }}
              -P cidr={{ openstack_network_cidr }}
              -P dns_nameservers={{ openstack_network_dns | join(",") }}
@@ -67,7 +50,7 @@
     shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
     register: stack_show_status_result
     until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']
-    retries: 300
+    retries: 30
     delay: 1
     failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
 
@@ -136,12 +119,4 @@
 
 - include: update.yml
 
-# Fix icmp reject iptables rules
-# It should be solved in openshift-sdn but unfortunately it's not the case
-# Mysterious
-- name: Configuring Nodes for RBox
-  hosts: oo_nodes_to_config
-  roles:
-    - rbox-node
-
 - include: list.yml

+ 1 - 1
roles/openshift_facts/tasks/main.yml

@@ -1,5 +1,5 @@
 ---
-- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0 and not 1.9.0.1
+- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0
   assert:
     that:
     - ansible_version | version_compare('1.8.0', 'ge')

+ 1 - 1
roles/openshift_manage_node/tasks/main.yml

@@ -3,7 +3,7 @@
       {{ openshift.common.client_binary }} get node {{ item }}
   register: omd_get_node
   until: omd_get_node.rc == 0
-  retries: 20
+  retries: 10
   delay: 5
   with_items: openshift_nodes
 

+ 1 - 1
roles/openshift_node/tasks/main.yml

@@ -22,7 +22,7 @@
       deployment_type: "{{ openshift_deployment_type }}"
   - role: node
     local_facts:
-      labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default() ) }}"
+      labels: "{{ openshift_node_labels | default(none) }}"
       annotations: "{{ openshift_node_annotations | default(none) }}"
       registry_url: "{{ oreg_url | default(none) }}"
       debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"