Browse Source

Adding AWS support to openshift-ansible module
- Update documentation to say that ssh configuration need to point to the private key file
- Removing the -p argument when calling time ... because it is misinterpreted by ruby
- Making cluster.sh provider-agnostic in its help/error messages by replacing explicit references to GCE with a variable
- Fixing a bug within the playbooks that incorrectly references the minions and master fact group.
- Adding playbooks for AWS, which are almost of copy/paste for those of GCE
- Added environment variable OO_PROVIDER to allow definition of the provider. Defaults is gce
- TODO: implement the terminate.yml playbook

Akram Ben Aissi 10 years ago
parent
commit
f929f3f94c

+ 14 - 2
README_AWS.md

@@ -14,13 +14,25 @@ Create a credentials file
    export AWS_ACCESS_KEY_ID='AKIASTUFF'
    export AWS_SECRET_ACCESS_KEY='STUFF'
 ```
-
 1. source this file
 ```
   source ~/.aws_creds
 ```
+Note: You must source this file in each shell that you want to run cloud.rb
+
+
+(Optional) Setup your $HOME/.ssh/config file
+-------------------------------------------
+In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use '.ssh/config'
+to set up a private key file to allow ansible to connect to the created hosts.
+
+To do so, add the following entry to your $HOME/.ssh/config file and make it point to the private key file which allows you to log in to AWS.
+```
+Host *.compute-1.amazonaws.com
+  IdentityFile $HOME/.ssh/my_private_key.pem
+```
 
-1. Note: You must source this file in each shell that you want to run cloud.rb
+Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances.
 
 
 Install Dependencies

+ 12 - 3
cluster.sh

@@ -2,7 +2,16 @@
 
 MINIONS=3
 MASTERS=1
-PROVIDER=gce
+
+# If the environment variable OO_PROVIDER is defined, it is used as the provider
+PROVIDER=$OO_PROVIDER
+# Otherwise, default is gce (Google Compute Engine)
+if [ "x$PROVIDER" == "x" ];then
+   PROVIDER=gce
+fi
+
+UPPER_CASE_PROVIDER=$(echo $PROVIDER | tr '[:lower:]' '[:upper:]')
+
 
 # FIXME: Add options
 MASTER_PLAYBOOK=openshift-master
@@ -12,10 +21,10 @@ MINION_PLAYBOOK=openshift-minion
 # @formatter:off
 function usage {
     cat 1>&2 <<-EOT
-        ${0} : [create|terminate|update|list] {GCE environment tag}
+        ${0} : [create|terminate|update|list] { ${UPPER_CASE_PROVIDER} environment tag}
 
         Supported environment tags:
-        $(grep 'SUPPORTED_ENVS.*=' ./lib/gce_command.rb)
+        $(grep 'SUPPORTED_ENVS.*=' ./lib/${PROVIDER}_command.rb)
 EOT
 }
 # @formatter:on

+ 1 - 3
lib/ansible_helper.rb

@@ -25,7 +25,6 @@ module OpenShift
         tmpfile    = Tempfile.open('extra_vars') { |f| f.write(@extra_vars.to_json); f}
 
         cmds = []
-
         #cmds << 'set -x'
         cmds << %Q[export ANSIBLE_FILTER_PLUGINS="#{Dir.pwd}/filter_plugins"]
 
@@ -35,8 +34,7 @@ module OpenShift
 
         # We need pipelining off so that we can do sudo to enable the root account
         cmds << %Q[export ANSIBLE_SSH_PIPELINING='#{@pipelining.to_s}']
-        cmds << %Q[time -p ansible-playbook -i #{@inventory} #{@verbosity} #{playbook} --extra-vars '@#{tmpfile.path}']
-
+        cmds << %Q[time ansible-playbook  -i #{@inventory} #{@verbosity} #{playbook} --extra-vars '@#{tmpfile.path}' ]
         cmd = cmds.join(' ; ')
 
         pid = spawn(cmd, :out => $stdout, :err => $stderr, :close_others => true)

+ 40 - 0
playbooks/aws/openshift-master/config.yml

@@ -0,0 +1,40 @@
+- name: "populate oo_hosts_to_config host group if needed"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: Evaluate oo_host_group_exp if it's set
+    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    with_items: "{{ oo_host_group_exp | default('') }}"
+    when: oo_host_group_exp is defined
+
+- name: "Gather facts for minions in {{ oo_env }}"
+  hosts: "tag_env-host-type_{{ oo_env }}-openshift-minion"
+  connection: ssh
+  user: root
+
+- name: "Set Origin specific facts on localhost (for later use)"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - name: Setting oo_minion_ips fact on localhost
+      set_fact:
+        oo_minion_ips: "{{ hostvars
+            | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-minion'])
+            | oo_collect(attribute='ansible_eth0.ipv4.address') }}"
+      when: groups['tag_env-host-type_' + oo_env + '-openshift-minion'] is defined
+
+- name: "Configure instances"
+  hosts: oo_hosts_to_config
+  connection: ssh
+  user: root
+  vars_files:
+    - vars.yml
+  roles:
+    - ../../../roles/base_os
+    - ../../../roles/repos
+    - {
+        role: ../../../roles/openshift_master,
+        oo_minion_ips: "{{ hostvars['localhost'].oo_minion_ips | default(['']) }}",
+        oo_bind_ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address | default(['']) }}"
+      }
+    - ../../../roles/pods

+ 69 - 0
playbooks/aws/openshift-master/launch.yml

@@ -0,0 +1,69 @@
+---
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+
+  vars:
+    inst_region: us-east-1
+    atomic_ami: ami-86781fee
+    user_data_file: user_data.txt
+
+  vars_files:
+    - vars.yml
+
+  tasks:
+    - name: Launch instances
+      ec2:
+        state: present
+        region: "{{ inst_region }}"
+        keypair: libra
+        group: ['public']
+        instance_type: m3.large
+        image: "{{ atomic_ami }}"
+        count: "{{ oo_new_inst_names | oo_len }}"
+        user_data: "{{ lookup('file', user_data_file) }}"
+        wait: yes
+      register: ec2
+
+    - name: Add new instances public IPs to the atomic proxy host group
+      add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
+      with_items: ec2.instances
+
+    - name: Add Name and environment tags to instances
+      ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
+      with_together:
+        - oo_new_inst_names
+        - ec2.instances
+      args:
+        tags:
+          Name: "{{ item.0 }}"
+
+    - name: Add other tags to instances
+      ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+      with_items: ec2.instances
+      args:
+        tags: "{{ oo_new_inst_tags }}"
+
+    - name: Add new instances public IPs to oo_hosts_to_config
+      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+      with_together:
+        - oo_new_inst_names
+        - ec2.instances
+
+    - debug: var=ec2
+
+    - name: Wait for ssh
+      wait_for: "port=22 host={{ item.dns_name }}"
+      with_items: ec2.instances
+
+    - name: Wait for root user setup
+      command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
+      register: result
+      until: result.rc == 0
+      retries: 20
+      delay: 10
+      with_items: ec2.instances
+
+# Apply the configs, separate so that just the configs can be run by themselves
+- include: config.yml

+ 0 - 0
playbooks/aws/openshift-master/vars.yml


+ 40 - 0
playbooks/aws/openshift-minion/config.yml

@@ -0,0 +1,40 @@
+- name: "populate oo_hosts_to_config host group if needed"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: Evaluate oo_host_group_exp
+    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    with_items: "{{ oo_host_group_exp | default('') }}"
+    when: oo_host_group_exp is defined
+
+- name: "Gather facts for masters in {{ oo_env }}"
+  hosts: "tag_env-host-type_{{ oo_env }}-openshift-master"
+  connection: ssh
+  user: root
+
+- name: "Set OO specific facts on localhost (for later use)"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - name: Setting oo_master_ips fact on localhost
+      set_fact:
+        oo_master_ips: "{{ hostvars
+            | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-master'])
+            | oo_collect(attribute='ansible_eth0.ipv4.address') }}"
+      when: groups['tag_env-host-type_' + oo_env + '-openshift-master'] is defined
+
+- name: "Configure instances"
+  hosts: oo_hosts_to_config
+  connection: ssh
+  user: root
+  vars_files:
+    - vars.yml
+  roles:
+    - ../../../roles/base_os
+    - ../../../roles/repos
+    - ../../../roles/docker
+    - {
+        role: ../../../roles/openshift_minion,
+        oo_master_ips: "{{ hostvars['localhost'].oo_master_ips | default(['']) }}",
+        oo_bind_ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address | default(['']) }}"
+      }

+ 69 - 0
playbooks/aws/openshift-minion/launch.yml

@@ -0,0 +1,69 @@
+---
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+
+  vars:
+    inst_region: us-east-1
+    atomic_ami: ami-86781fee
+    user_data_file: user_data.txt
+
+  vars_files:
+    - vars.yml
+
+  tasks:
+    - name: Launch instances
+      ec2:
+        state: present
+        region: "{{ inst_region }}"
+        keypair: libra
+        group: ['public']
+        instance_type: m3.large
+        image: "{{ atomic_ami }}"
+        count: "{{ oo_new_inst_names | oo_len }}"
+        user_data: "{{ lookup('file', user_data_file) }}"
+        wait: yes
+      register: ec2
+
+    - name: Add new instances public IPs to the atomic proxy host group
+      add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
+      with_items: ec2.instances
+
+    - name: Add Name and environment tags to instances
+      ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
+      with_together:
+        - oo_new_inst_names
+        - ec2.instances
+      args:
+        tags:
+          Name: "{{ item.0 }}"
+
+    - name: Add other tags to instances
+      ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+      with_items: ec2.instances
+      args:
+        tags: "{{ oo_new_inst_tags }}"
+
+    - name: Add new instances public IPs to oo_hosts_to_config
+      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+      with_together:
+        - oo_new_inst_names
+        - ec2.instances
+
+    - debug: var=ec2
+
+    - name: Wait for ssh
+      wait_for: "port=22 host={{ item.dns_name }}"
+      with_items: ec2.instances
+
+    - name: Wait for root user setup
+      command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
+      register: result
+      until: result.rc == 0
+      retries: 20
+      delay: 10
+      with_items: ec2.instances
+
+# Apply the configs, separate so that just the configs can be run by themselves
+- include: config.yml

+ 0 - 0
playbooks/aws/openshift-minion/vars.yml