
Merge pull request #53 from twiest/atomic

Changed os2-atomic-proxy so that it can launch in both a VPC and EC2 Classic. Changed STG to default to launching in a VPC; prod still launches in Classic. Also cleaned up some cruft.
Thomas Wiest · 10 years ago · commit 04343e7588
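
The core of the change is a select-then-normalize pattern in launch.yml: the play runs one of two mutually exclusive ec2 tasks depending on whether a VPC subnet is configured, then copies whichever result actually ran into a single common fact, so downstream tasks never need to know which path fired. A standalone sketch of the pattern, with made-up variable and task names rather than code from this repo:

---
- hosts: localhost
  gather_facts: no
  vars:
    use_vpc: ""   # blank selects the classic path; any non-empty value selects vpc
  tasks:
    - name: Launch (vpc path)
      command: echo "launched in vpc"
      when: use_vpc
      register: result_vpc

    - name: Launch (classic path)
      command: echo "launched in classic"
      when: not use_vpc
      register: result_classic

    # Normalize so later tasks only ever reference `result`
    - set_fact:
        result: "{{ result_vpc }}"
      when: use_vpc

    - set_fact:
        result: "{{ result_classic }}"
      when: not use_vpc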

+ 0 - 7
lib/ansible_helper.rb

@@ -89,13 +89,6 @@ extra_vars: #{@extra_vars.to_json}
         ah.inventory = 'inventory/aws/ec2.py'
         return ah
       end
-
-
-      def ignore_bug_6407
-        puts
-        puts %q[ .----  Spurious warning "It is unnecessary to use '{{' in loops" (ansible bug 6407)  ----.]
-        puts %q[ V                                                                                        V]
-      end
     end
   end
 end

+ 0 - 2
lib/aws_command.rb

@@ -42,7 +42,6 @@ module OpenShift
 
         puts
         puts "Creating #{options[:count]} #{options[:type]} instance(s) in AWS..."
-        ah.ignore_bug_6407
 
         # Make sure we're completely up to date before launching
         clear_cache()
@@ -91,7 +90,6 @@ module OpenShift
 
         puts
         puts "Configuring #{options[:type]} instance(s) in AWS..."
-        ah.ignore_bug_6407
 
         ah.run_playbook("playbooks/aws/#{host_type}/config.yml")
       end

+ 0 - 3
lib/gce_command.rb

@@ -43,7 +43,6 @@ module OpenShift
 
         puts
         puts "Creating #{options[:count]} #{options[:type]} instance(s) in GCE..."
-        ah.ignore_bug_6407
 
         ah.run_playbook("playbooks/gce/#{options[:type]}/launch.yml")
       end
@@ -80,7 +79,6 @@ module OpenShift
 
         puts
         puts "Configuring #{options[:type]} instance(s) in GCE..."
-        ah.ignore_bug_6407
 
         ah.run_playbook("playbooks/gce/#{host_type}/config.yml")
       end
@@ -118,7 +116,6 @@ module OpenShift
 
         puts
         puts "Terminating #{options[:type]} instance(s) in GCE..."
-        ah.ignore_bug_6407
 
         ah.run_playbook("playbooks/gce/#{host_type}/terminate.yml")
       end

+ 30 - 2
playbooks/aws/os2-atomic-proxy/launch.yml

@@ -8,12 +8,35 @@
     inst_region: us-east-1
     atomic_ami: ami-8e239fe6
     user_data_file: user_data.txt
+    oo_vpc_subnet_id:    # Purposely left blank; overridden in the env vars_files
+    oo_assign_public_ip: # Purposely left blank; overridden in the env vars_files
 
   vars_files:
     - vars.yml
+    - "vars.{{ oo_env }}.yml"
 
   tasks:
-    - name: Launch instances
+    - name: Launch instances in VPC
+      ec2:
+        state: present
+        region: "{{ inst_region }}"
+        keypair: mmcgrath_libra
+        group_id: "{{ oo_security_group_ids }}"
+        instance_type: m3.large
+        image: "{{ atomic_ami }}"
+        count: "{{ oo_new_inst_names | oo_len }}"
+        user_data: "{{ lookup('file', user_data_file) }}"
+        wait: yes
+        assign_public_ip: "{{ oo_assign_public_ip }}"
+        vpc_subnet_id: "{{ oo_vpc_subnet_id }}"
+      when: oo_vpc_subnet_id
+      register: ec2_vpc
+
+    - set_fact:
+        ec2: "{{ ec2_vpc }}"
+      when: oo_vpc_subnet_id
+
+    - name: Launch instances in Classic
       ec2:
         state: present
         region: "{{ inst_region }}"
@@ -24,7 +47,12 @@
         count: "{{ oo_new_inst_names | oo_len }}"
         user_data: "{{ lookup('file', user_data_file) }}"
         wait: yes
-      register: ec2
+      when: not oo_vpc_subnet_id
+      register: ec2_classic
+
+    - set_fact:
+        ec2: "{{ ec2_classic }}"
+      when: not oo_vpc_subnet_id
 
     - name: Add new instances public IPs to the atomic proxy host group
       add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"

+ 7 - 0
playbooks/aws/os2-atomic-proxy/vars.stg.yml

@@ -1,3 +1,10 @@
 ---
 oo_env_long: staging
 oo_zabbix_hostgroups: ['STG Environment']
+oo_vpc_subnet_id: subnet-700bdd07
+oo_assign_public_ip: yes
+oo_security_group_ids:
+  - sg-02c2f267 # Libra (vpc)
+  - sg-f0bfbe95 # stg (vpc)
+  - sg-a3bfbec6 # stg_proxy (vpc)
+  - sg-d4bfbeb1 # stg_proxy_atomic (vpc)
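
For contrast with the staging file above, an environment that should stay in EC2 Classic only needs to omit the VPC keys. A hypothetical sketch of the prod counterpart (vars.prod.yml is not part of this diff; the values are illustrative):

---
oo_env_long: production                     # illustrative; the real file may differ
oo_zabbix_hostgroups: ['PROD Environment']  # illustrative
# No oo_vpc_subnet_id or oo_assign_public_ip here: the blank defaults in
# launch.yml stay in effect, so the "Launch instances in Classic" task runs.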

+ 7 - 6
roles/atomic_base/tasks/bash.yml

@@ -5,9 +5,10 @@
 - name: Link .profile to .bashrc
   file: src=/root/.bashrc dest=/root/.profile owner=root group=root state=link
 
-- name: Setup Timezone [{{ oo_timezone }}]
-  file: >
-    src=/usr/share/zoneinfo/{{ oo_timezone }}
-    dest=/etc/localtime
-    owner=root
-    group=root state=link
+- name: "Setup Timezone [{{ oo_timezone }}]"
+  file:
+    src: "/usr/share/zoneinfo/{{ oo_timezone }}"
+    dest: /etc/localtime
+    owner: root
+    group: root
+    state: link

+ 9 - 9
roles/atomic_base/tasks/ostree.yml

@@ -1,18 +1,18 @@
 ---
 - name: Copy ostree repo config
-  copy: >
-    src=ostree/repo_config
-    dest=/ostree/repo/config
-    owner=root
-    group=root
-    mode=0644
+  copy:
+    src: ostree/repo_config
+    dest: /ostree/repo/config
+    owner: root
+    group: root
+    mode: 0644
 
 - name: "WORK AROUND: Stat redhat repo file"
   stat: path=/etc/yum.repos.d/redhat.repo
   register: redhat_repo
 
 - name: "WORK AROUND: subscription manager failures"
-  file: >
-    path=/etc/yum.repos.d/redhat.repo
-    state=touch
+  file:
+    path: /etc/yum.repos.d/redhat.repo
+    state: touch
   when: redhat_repo.stat.exists == False

+ 18 - 18
roles/atomic_proxy/tasks/setup_puppet.yml

@@ -1,24 +1,24 @@
 ---
 - name: make puppet conf dir
-  file: >
-    dest={{ oo_proxy_puppet_volume_dir }}/etc/puppet
-    mode=755
-    owner=root
-    group=root
-    state=directory
+  file:
+    dest: "{{ oo_proxy_puppet_volume_dir }}/etc/puppet"
+    mode: 0755
+    owner: root
+    group: root
+    state: directory
 
 - name: upload puppet auth config
-  copy: >
-    src=puppet/auth.conf
-    dest={{ oo_proxy_puppet_volume_dir }}/etc/puppet/auth.conf
-    mode=0644
-    owner=root
-    group=root
+  copy:
+    src: puppet/auth.conf
+    dest: "{{ oo_proxy_puppet_volume_dir }}/etc/puppet/auth.conf"
+    mode: 0644
+    owner: root
+    group: root
 
 - name: upload puppet config
-  template: >
-    src=puppet/puppet.conf.j2
-    dest={{ oo_proxy_puppet_volume_dir }}/etc/puppet/puppet.conf
-    mode=0644
-    owner=root
-    group=root
+  template:
+    src: puppet/puppet.conf.j2
+    dest: "{{ oo_proxy_puppet_volume_dir }}/etc/puppet/puppet.conf"
+    mode: 0644
+    owner: root
+    group: root

+ 12 - 7
roles/base_os/tasks/main.yaml

@@ -2,25 +2,30 @@
 # basic role, configures irbrc, vimrc
 
 - name: Ensure irbrc is installed for user root
-  copy: >
-    src=irbrc
-    dest=/root/.irbrc
+  copy:
+    src: irbrc
+    dest: /root/.irbrc
 
 - name: Ensure vimrc is installed for user root
-  copy: >
-    src=vimrc
-    dest=/root/.vimrc
+  copy:
+    src: vimrc
+    dest: /root/.vimrc
 
 - name: Install firewalld
-  yum: pkg=firewalld state=installed
+  yum:
+    pkg: firewalld
+    state: installed
 
 - name: enable firewalld service
   command: /usr/bin/systemctl enable firewalld.service
 
 - name: start firewalld service
   command: /usr/bin/systemctl start firewalld.service
+
+- name: Pause after starting firewalld, which can otherwise briefly cause ssh to fail
+  pause: seconds=10
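
The fixed 10-second pause is a heuristic. A hedged alternative, assuming the goal is only to outlast the window where sshd is unreachable (my sketch, not what this commit does), would be to poll the SSH port from the control host with the stock wait_for module:

- name: Wait for sshd to come back after the firewalld start
  local_action: wait_for host={{ inventory_hostname }} port=22 delay=5 timeout=60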

+ 4 - 4
roles/kubernetes_apiserver/tasks/main.yml

@@ -4,10 +4,10 @@
   yum: pkg=kubernetes
 
 - name: Configure apiserver settings
-  lineinfile: >
-    dest=/etc/sysconfig/kubernetes
-    regexp={{ item.regex }}
-    line="{{ item.line }}"
+  lineinfile:
+    dest: /etc/sysconfig/kubernetes
+    regexp: "{{ item.regex }}"
+    line: "{{ item.line }}"
   with_items:
     - { regex: '^KUBE_API_MACHINES=', line: 'KUBE_API_MACHINES=\"{{ oo_minion_ips | join(",") }}\"' }
     - { regex: '^KUBE_API_ADDRESS=',  line: 'KUBE_API_ADDRESS=\"0.0.0.0\"' }

+ 4 - 4
roles/kubernetes_kubelet/tasks/main.yml

@@ -4,10 +4,10 @@
   yum: pkg=kubernetes state=installed
 
 - name: Configure kubelet
-  lineinfile: >
-    dest=/etc/sysconfig/kubernetes
-    regexp={{ item.regex }}
-    line="{{ item.line }}"
+  lineinfile:
+    dest: /etc/sysconfig/kubernetes
+    regexp: "{{ item.regex }}"
+    line: "{{ item.line }}"
   with_items:
     - { regex: '^KUBE_ETCD_SERVERS=', line: 'KUBE_ETCD_SERVERS=\"http://{{ oo_master_ips[0] }}:4001\"' }
     - { regex: '^KUBE_KUBELET_ADDRESS=', line: 'KUBE_KUBELET_ADDRESS=\"0.0.0.0\"' }

+ 4 - 4
roles/kubernetes_proxy/tasks/main.yml

@@ -4,10 +4,10 @@
   yum: pkg=kubernetes state=installed
 
 - name: Configure kubernetes-proxy etcd servers
-  lineinfile: >
-    dest=/etc/sysconfig/kubernetes
-    regexp={{ item.regex }}
-    line="{{ item.line }}"
+  lineinfile:
+    dest: /etc/sysconfig/kubernetes
+    regexp: "{{ item.regex }}"
+    line: "{{ item.line }}"
   with_items:
     - { regex: '^KUBE_ETCD_SERVERS=', line: 'KUBE_ETCD_SERVERS=\"http://{{ oo_master_ips[0] }}:4001\"' }
   notify:

+ 4 - 4
roles/openshift_master/tasks/main.yml

@@ -8,10 +8,10 @@
   command: /usr/bin/hostname {{ oo_bind_ip }}
 
 - name: Configure OpenShift Master settings
-  lineinfile: >
-    dest=/etc/sysconfig/openshift
-    regexp={{ item.regex }}
-    line="{{ item.line }}"
+  lineinfile:
+    dest: /etc/sysconfig/openshift
+    regexp: "{{ item.regex }}"
+    line: "{{ item.line }}"
   with_items:
     - { regex: '^ROLE=', line: 'ROLE=\"master\"' }
     - { regex: '^OPTIONS=', line: 'OPTIONS=\"--nodes={{ oo_minion_ips | join(",") }}  --loglevel=5\"' }

+ 4 - 4
roles/openshift_minion/tasks/main.yml

@@ -8,10 +8,10 @@
   command: /usr/bin/hostname {{ oo_bind_ip }}
 
 - name: Configure OpenShift Minion settings
-  lineinfile: >
-    dest=/etc/sysconfig/openshift
-    regexp={{ item.regex }}
-    line="{{ item.line }}"
+  lineinfile:
+    dest: /etc/sysconfig/openshift
+    regexp: "{{ item.regex }}"
+    line: "{{ item.line }}"
   with_items:
     - { regex: '^ROLE=', line: 'ROLE=\"node\"' }
     - { regex: '^OPTIONS=', line: 'OPTIONS=\"--master=http://{{ oo_master_ips[0] }}:8080  --loglevel=5\"' }

+ 0 - 11
roles/repos/tasks/main.yaml

@@ -1,16 +1,5 @@
 ---
 # The following role lays down the correct repository and gpg key for yum
-
-#- name: Ensure oso rhui rhel 7 server repository exists in yum.repos.d
-#  copy: >
-#    src=oso-rhui-rhel-7-server.repo
-#    dest=/etc/yum.repos.d/oso-rhui-rhel-7-server.repo
-#
-#- name: Ensure Red Hat GPG Key is in place for the previous repo
-#  copy: >
-#    src=RPM-GPG-KEY-redhat-release
-#    dest=/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-
 - name: Ensure rhel 7 libra candidate exists in yum.repos.d
   copy: src=rhel-7-libra-candidate.repo dest=/etc/yum.repos.d/rhel-7-libra-candidate.repo