Browse Source

Added atomic aws host to cloud.rb

Thomas Wiest 10 years ago
parent
commit
5f9c7eb2d2
48 changed files with 1749 additions and 13 deletions
  1. cloud.rb (+4 -0)
  2. inventory/aws/ec2.ini (+56 -0)
  3. inventory/aws/ec2.py (+610 -0)
  4. lib/ansible_helper.rb (+9 -1)
  5. lib/aws_command.rb (+144 -0)
  6. lib/aws_helper.rb (+82 -0)
  7. lib/gce_command.rb (+2 -10)
  8. lib/gce_helper.rb (+14 -2)
  9. lib/launch_helper.rb (+4 -0)
  10. playbooks/aws/os2-atomic-proxy/config.yml (+21 -0)
  11. playbooks/aws/os2-atomic-proxy/launch.yml (+69 -0)
  12. playbooks/aws/os2-atomic-proxy/user_data.txt (+6 -0)
  13. playbooks/aws/os2-atomic-proxy/vars.stg.yml (+2 -0)
  14. playbooks/aws/os2-atomic-proxy/vars.yml (+1 -0)
  15. roles/atomic_base/README.md (+56 -0)
  16. roles/atomic_base/defaults/main.yml (+2 -0)
  17. roles/atomic_base/files/bash/bashrc (+12 -0)
  18. roles/atomic_base/files/ostree/repo_config (+10 -0)
  19. roles/atomic_base/files/system/90-nofile.conf (+7 -0)
  20. roles/atomic_base/handlers/main.yml (+2 -0)
  21. roles/atomic_base/meta/main.yml (+19 -0)
  22. roles/atomic_base/tasks/bash.yml (+13 -0)
  23. roles/atomic_base/tasks/cloud_user.yml (+6 -0)
  24. roles/atomic_base/tasks/main.yml (+4 -0)
  25. roles/atomic_base/tasks/ostree.yml (+18 -0)
  26. roles/atomic_base/tasks/system.yml (+3 -0)
  27. roles/atomic_base/vars/main.yml (+2 -0)
  28. roles/atomic_proxy/README.md (+56 -0)
  29. roles/atomic_proxy/defaults/main.yml (+2 -0)
  30. roles/atomic_proxy/files/ctr-proxy-1.service (+37 -0)
  31. roles/atomic_proxy/files/ctr-proxy-monitoring-1.service (+37 -0)
  32. roles/atomic_proxy/files/ctr-proxy-puppet-1.service (+37 -0)
  33. roles/atomic_proxy/files/proxy_containers_deploy_descriptor.json (+29 -0)
  34. roles/atomic_proxy/files/puppet/auth.conf (+116 -0)
  35. roles/atomic_proxy/files/setup-proxy-containers.sh (+43 -0)
  36. roles/atomic_proxy/handlers/main.yml (+2 -0)
  37. roles/atomic_proxy/meta/main.yml (+21 -0)
  38. roles/atomic_proxy/tasks/main.yml (+21 -0)
  39. roles/atomic_proxy/tasks/setup_puppet.yml (+24 -0)
  40. roles/atomic_proxy/templates/puppet/puppet.conf.j2 (+40 -0)
  41. roles/atomic_proxy/templates/sync/sync-proxy-configs.sh.j2 (+16 -0)
  42. roles/atomic_proxy/vars/main.yml (+2 -0)
  43. roles/shutdown_nightly/README.md (+56 -0)
  44. roles/shutdown_nightly/defaults/main.yml (+2 -0)
  45. roles/shutdown_nightly/handlers/main.yml (+2 -0)
  46. roles/shutdown_nightly/meta/main.yml (+19 -0)
  47. roles/shutdown_nightly/tasks/main.yml (+7 -0)
  48. roles/shutdown_nightly/vars/main.yml (+2 -0)

+ 4 - 0
cloud.rb

@@ -2,6 +2,7 @@
 
 require 'thor'
 require_relative 'lib/gce_command'
+require_relative 'lib/aws_command'
 
 # Don't buffer output to the client
 STDOUT.sync = true
@@ -12,6 +13,9 @@ module OpenShift
     class CloudCommand < Thor
       desc 'gce', 'Manages Google Compute Engine assets'
       subcommand "gce", GceCommand
+
+      desc 'aws', 'Manages Amazon Web Services assets'
+      subcommand "aws", AwsCommand
     end
   end
 end

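With this registration, the AWS commands hang off the same Thor entry point as the existing GCE ones, so an invocation looks like, for example, `./cloud.rb aws launch --type os2-atomic-proxy -e stg -c 2` (host types are discovered from playbooks/aws/, so os2-atomic-proxy is the only type this commit provides).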
+ 56 - 0
inventory/aws/ec2.ini

@@ -0,0 +1,56 @@
+# Ansible EC2 external inventory script settings
+#
+
+[ec2]
+
+# to talk to a private eucalyptus instance uncomment these lines
+# and edit eucalyptus_host to be the host name of your cloud controller
+#eucalyptus = True
+#eucalyptus_host = clc.cloud.domain.org
+
+# AWS regions to make calls to. Set this to 'all' to make requests to all regions
+# in AWS and merge the results together. Alternatively, set this to a comma
+# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
+#regions = all
+regions = us-east-1
+regions_exclude = us-gov-west-1,cn-north-1
+
+# When generating inventory, Ansible needs to know how to address a server.
+# Each EC2 instance has a lot of variables associated with it. Here is the list:
+#   http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
+# Below are 2 variables that are used as the address of a server:
+#   - destination_variable
+#   - vpc_destination_variable
+
+# This is the normal destination variable to use. If you are running Ansible
+# from outside EC2, then 'public_dns_name' makes the most sense. If you are
+# running Ansible from within EC2, then perhaps you want to use the internal
+# address, and should set this to 'private_dns_name'.
+destination_variable = public_dns_name
+
+# For servers inside a VPC, using DNS names may not make sense. When an instance
+# has 'subnet_id' set, this variable is used. If the subnet is public, setting
+# this to 'ip_address' will return the public IP address. For instances in a
+# private subnet, this should be set to 'private_ip_address', and Ansible must
+# be run from within EC2.
+vpc_destination_variable = ip_address
+
+# To tag instances on EC2 with the resource records that point to them from
+# Route53, uncomment and set 'route53' to True.
+route53 = False
+
+# Additionally, you can specify a comma-separated list of zones to exclude
+# from lookups via 'route53_excluded_zones'.
+# route53_excluded_zones = samplezone1.com, samplezone2.com
+
+# API calls to EC2 are slow. For this reason, we cache the results of an API
+# call. Set this to the path you want cache files to be written to. Two files
+# will be written to this directory:
+#   - ansible-ec2.cache
+#   - ansible-ec2.index
+cache_path = ~/.ansible/tmp
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+# To disable the cache, set this value to 0
+cache_max_age = 300

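Note that cache_path above is the same location that AwsHelper.clear_inventory_cache (lib/aws_helper.rb, further down) deletes from, so as long as cache_path is left at this default, `cloud.rb aws clear-cache` removes exactly these two files.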
+ 610 - 0
inventory/aws/ec2.py

@@ -0,0 +1,610 @@
+#!/usr/bin/env python
+
+'''
+EC2 external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests to
+AWS EC2 using the Boto library.
+
+NOTE: This script assumes Ansible is being executed where the environment
+variables needed for Boto have already been set:
+    export AWS_ACCESS_KEY_ID='AK123'
+    export AWS_SECRET_ACCESS_KEY='abc123'
+
+This script also assumes there is an ec2.ini file alongside it.  To specify a
+different path to ec2.ini, define the EC2_INI_PATH environment variable:
+
+    export EC2_INI_PATH=/path/to/my_ec2.ini
+
+If you're using eucalyptus, you need to set the above variables and
+also define:
+
+    export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
+
+For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
+
+When run against a specific host, this script returns the following variables:
+ - ec2_ami_launch_index
+ - ec2_architecture
+ - ec2_association
+ - ec2_attachTime
+ - ec2_attachment
+ - ec2_attachmentId
+ - ec2_client_token
+ - ec2_deleteOnTermination
+ - ec2_description
+ - ec2_deviceIndex
+ - ec2_dns_name
+ - ec2_eventsSet
+ - ec2_group_name
+ - ec2_hypervisor
+ - ec2_id
+ - ec2_image_id
+ - ec2_instanceState
+ - ec2_instance_type
+ - ec2_ipOwnerId
+ - ec2_ip_address
+ - ec2_item
+ - ec2_kernel
+ - ec2_key_name
+ - ec2_launch_time
+ - ec2_monitored
+ - ec2_monitoring
+ - ec2_networkInterfaceId
+ - ec2_ownerId
+ - ec2_persistent
+ - ec2_placement
+ - ec2_platform
+ - ec2_previous_state
+ - ec2_private_dns_name
+ - ec2_private_ip_address
+ - ec2_publicIp
+ - ec2_public_dns_name
+ - ec2_ramdisk
+ - ec2_reason
+ - ec2_region
+ - ec2_requester_id
+ - ec2_root_device_name
+ - ec2_root_device_type
+ - ec2_security_group_ids
+ - ec2_security_group_names
+ - ec2_shutdown_state
+ - ec2_sourceDestCheck
+ - ec2_spot_instance_request_id
+ - ec2_state
+ - ec2_state_code
+ - ec2_state_reason
+ - ec2_status
+ - ec2_subnet_id
+ - ec2_tenancy
+ - ec2_virtualization_type
+ - ec2_vpc_id
+
+These variables are pulled out of a boto.ec2.instance object. The variable
+spellings are inconsistent (camelCase and underscores) because this script
+simply loops through every variable the object exposes. Prefer the
+underscore spellings when both forms exist.
+
+In addition, if an instance has AWS Tags associated with it, each tag is a new
+variable named:
+ - ec2_tag_[Key] = [Value]
+
+Security groups are comma-separated in 'ec2_security_group_ids' and
+'ec2_security_group_names'.
+'''
+
+# (c) 2012, Peter Sankauskas
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+######################################################################
+
+import sys
+import os
+import argparse
+import re
+from time import time
+import boto
+from boto import ec2
+from boto import rds
+from boto import route53
+import ConfigParser
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+
+class Ec2Inventory(object):
+    def _empty_inventory(self):
+        return {"_meta" : {"hostvars" : {}}}
+
+    def __init__(self):
+        ''' Main execution path '''
+
+        # Inventory grouped by instance IDs, tags, security groups, regions,
+        # and availability zones
+        self.inventory = self._empty_inventory()
+
+        # Index of hostname (address) to instance ID
+        self.index = {}
+
+        # Read settings and parse CLI arguments
+        self.read_settings()
+        self.parse_cli_args()
+
+        # Cache
+        if self.args.refresh_cache:
+            self.do_api_calls_update_cache()
+        elif not self.is_cache_valid():
+            self.do_api_calls_update_cache()
+
+        # Data to print
+        if self.args.host:
+            data_to_print = self.get_host_info()
+
+        elif self.args.list:
+            # Display list of instances for inventory
+            if self.inventory == self._empty_inventory():
+                data_to_print = self.get_inventory_from_cache()
+            else:
+                data_to_print = self.json_format_dict(self.inventory, True)
+
+        print data_to_print
+
+
+    def is_cache_valid(self):
+    ''' Determines whether the cache files have expired or are still valid '''
+
+        if os.path.isfile(self.cache_path_cache):
+            mod_time = os.path.getmtime(self.cache_path_cache)
+            current_time = time()
+            if (mod_time + self.cache_max_age) > current_time:
+                if os.path.isfile(self.cache_path_index):
+                    return True
+
+        return False
+
+
+    def read_settings(self):
+        ''' Reads the settings from the ec2.ini file '''
+
+        config = ConfigParser.SafeConfigParser()
+        ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
+        ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)
+        config.read(ec2_ini_path)
+
+        # is eucalyptus?
+        self.eucalyptus_host = None
+        self.eucalyptus = False
+        if config.has_option('ec2', 'eucalyptus'):
+            self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
+        if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
+            self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
+
+        # Regions
+        self.regions = []
+        configRegions = config.get('ec2', 'regions')
+        configRegions_exclude = config.get('ec2', 'regions_exclude')
+        if (configRegions == 'all'):
+            if self.eucalyptus_host:
+                self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
+            else:
+                for regionInfo in ec2.regions():
+                    if regionInfo.name not in configRegions_exclude:
+                        self.regions.append(regionInfo.name)
+        else:
+            self.regions = configRegions.split(",")
+
+        # Destination addresses
+        self.destination_variable = config.get('ec2', 'destination_variable')
+        self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
+
+        # Route53
+        self.route53_enabled = config.getboolean('ec2', 'route53')
+        self.route53_excluded_zones = []
+        if config.has_option('ec2', 'route53_excluded_zones'):
+            self.route53_excluded_zones.extend(
+                config.get('ec2', 'route53_excluded_zones', '').split(','))
+
+        # Cache related
+        cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
+        if not os.path.exists(cache_dir):
+            os.makedirs(cache_dir)
+
+        self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
+        self.cache_path_index = cache_dir + "/ansible-ec2.index"
+        self.cache_max_age = config.getint('ec2', 'cache_max_age')
+        
+
+
+    def parse_cli_args(self):
+        ''' Command line argument processing '''
+
+        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
+        parser.add_argument('--list', action='store_true', default=True,
+                           help='List instances (default: True)')
+        parser.add_argument('--host', action='store',
+                           help='Get all the variables about a specific instance')
+        parser.add_argument('--refresh-cache', action='store_true', default=False,
+                           help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
+        self.args = parser.parse_args()
+
+
+    def do_api_calls_update_cache(self):
+        ''' Do API calls to each region, and save data in cache files '''
+
+        if self.route53_enabled:
+            self.get_route53_records()
+
+        for region in self.regions:
+            self.get_instances_by_region(region)
+            self.get_rds_instances_by_region(region)
+
+        self.write_to_cache(self.inventory, self.cache_path_cache)
+        self.write_to_cache(self.index, self.cache_path_index)
+
+
+    def get_instances_by_region(self, region):
+        ''' Makes an AWS EC2 API call to get the list of instances in a
+        particular region '''
+
+        try:
+            if self.eucalyptus:
+                conn = boto.connect_euca(host=self.eucalyptus_host)
+                conn.APIVersion = '2010-08-31'
+            else:
+                conn = ec2.connect_to_region(region)
+
+            # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
+            if conn is None:
+                print("region name: %s likely not supported, or AWS is down.  connection to region failed." % region)
+                sys.exit(1)
+ 
+            reservations = conn.get_all_instances()
+            for reservation in reservations:
+                for instance in reservation.instances:
+                    self.add_instance(instance, region)
+        
+        except boto.exception.BotoServerError, e:
+            if  not self.eucalyptus:
+                print "Looks like AWS is down again:"
+            print e
+            sys.exit(1)
+
+    def get_rds_instances_by_region(self, region):
+        ''' Makes an AWS API call to get the list of RDS instances in a
+        particular region '''
+
+        try:
+            conn = rds.connect_to_region(region)
+            if conn:
+                instances = conn.get_all_dbinstances()
+                for instance in instances:
+                    self.add_rds_instance(instance, region)
+        except boto.exception.BotoServerError, e:
+            if not e.reason == "Forbidden":
+                print "Looks like AWS RDS is down: "
+                print e
+                sys.exit(1)
+
+    def get_instance(self, region, instance_id):
+        ''' Gets details about a specific instance '''
+        if self.eucalyptus:
+            conn = boto.connect_euca(self.eucalyptus_host)
+            conn.APIVersion = '2010-08-31'
+        else:
+            conn = ec2.connect_to_region(region)
+
+        # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
+        if conn is None:
+            print("region name: %s likely not supported, or AWS is down.  connection to region failed." % region)
+            sys.exit(1)
+
+        reservations = conn.get_all_instances([instance_id])
+        for reservation in reservations:
+            for instance in reservation.instances:
+                return instance
+
+
+    def add_instance(self, instance, region):
+        ''' Adds an instance to the inventory and index, as long as it is
+        addressable '''
+
+        # Only want running instances
+        if instance.state != 'running':
+            return
+
+        # Select the best destination address
+        if instance.subnet_id:
+            dest = getattr(instance, self.vpc_destination_variable)
+        else:
+            dest =  getattr(instance, self.destination_variable)
+
+        if not dest:
+            # Skip instances we cannot address (e.g. private VPC subnet)
+            return
+
+        # Add to index
+        self.index[dest] = [region, instance.id]
+
+        # Inventory: Group by instance ID (always a group of 1)
+        self.inventory[instance.id] = [dest]
+
+        # Inventory: Group by region
+        self.push(self.inventory, region, dest)
+
+        # Inventory: Group by availability zone
+        self.push(self.inventory, instance.placement, dest)
+
+        # Inventory: Group by instance type
+        self.push(self.inventory, self.to_safe('type_' + instance.instance_type), dest)
+
+        # Inventory: Group by key pair
+        if instance.key_name:
+            self.push(self.inventory, self.to_safe('key_' + instance.key_name), dest)
+        
+        # Inventory: Group by security group
+        try:
+            for group in instance.groups:
+                key = self.to_safe("security_group_" + group.name)
+                self.push(self.inventory, key, dest)
+        except AttributeError:
+            print 'Package boto seems to be too old.'
+            print 'Please upgrade to boto >= 2.3.0.'
+            sys.exit(1)
+
+        # Inventory: Group by tag keys
+        for k, v in instance.tags.iteritems():
+            key = self.to_safe("tag_" + k + "=" + v)
+            self.push(self.inventory, key, dest)
+
+        # Inventory: Group by Route53 domain names if enabled
+        if self.route53_enabled:
+            route53_names = self.get_instance_route53_names(instance)
+            for name in route53_names:
+                self.push(self.inventory, name, dest)
+
+        # Global Tag: tag all EC2 instances
+        self.push(self.inventory, 'ec2', dest)
+
+        self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
+
+
+    def add_rds_instance(self, instance, region):
+        ''' Adds an RDS instance to the inventory and index, as long as it is
+        addressable '''
+
+        # Only want available instances
+        if instance.status != 'available':
+            return
+
+        # Select the best destination address
+        #if instance.subnet_id:
+            #dest = getattr(instance, self.vpc_destination_variable)
+        #else:
+            #dest =  getattr(instance, self.destination_variable)
+        dest = instance.endpoint[0]
+
+        if not dest:
+            # Skip instances we cannot address (e.g. private VPC subnet)
+            return
+
+        # Add to index
+        self.index[dest] = [region, instance.id]
+
+        # Inventory: Group by instance ID (always a group of 1)
+        self.inventory[instance.id] = [dest]
+
+        # Inventory: Group by region
+        self.push(self.inventory, region, dest)
+
+        # Inventory: Group by availability zone
+        self.push(self.inventory, instance.availability_zone, dest)
+        
+        # Inventory: Group by instance type
+        self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest)
+        
+        # Inventory: Group by security group
+        try:
+            if instance.security_group:
+                key = self.to_safe("security_group_" + instance.security_group.name)
+                self.push(self.inventory, key, dest)
+        except AttributeError:
+            print 'Package boto seems to be too old.'
+            print 'Please upgrade to boto >= 2.3.0.'
+            sys.exit(1)
+
+        # Inventory: Group by engine
+        self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
+
+        # Inventory: Group by parameter group
+        self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
+
+        # Global Tag: all RDS instances
+        self.push(self.inventory, 'rds', dest)
+
+
+    def get_route53_records(self):
+        ''' Get and store the map of resource records to domain names that
+        point to them. '''
+
+        r53_conn = route53.Route53Connection()
+        all_zones = r53_conn.get_zones()
+
+        route53_zones = [ zone for zone in all_zones if zone.name[:-1]
+                          not in self.route53_excluded_zones ]
+
+        self.route53_records = {}
+
+        for zone in route53_zones:
+            rrsets = r53_conn.get_all_rrsets(zone.id)
+
+            for record_set in rrsets:
+                record_name = record_set.name
+
+                if record_name.endswith('.'):
+                    record_name = record_name[:-1]
+
+                for resource in record_set.resource_records:
+                    self.route53_records.setdefault(resource, set())
+                    self.route53_records[resource].add(record_name)
+
+
+    def get_instance_route53_names(self, instance):
+        ''' Check if an instance is referenced in the records we have from
+        Route53. If it is, return the list of domain names pointing to said
+        instance. If nothing points to it, return an empty list. '''
+
+        instance_attributes = [ 'public_dns_name', 'private_dns_name',
+                                'ip_address', 'private_ip_address' ]
+
+        name_list = set()
+
+        for attrib in instance_attributes:
+            try:
+                value = getattr(instance, attrib)
+            except AttributeError:
+                continue
+
+            if value in self.route53_records:
+                name_list.update(self.route53_records[value])
+
+        return list(name_list)
+
+
+    def get_host_info_dict_from_instance(self, instance):
+        instance_vars = {}
+        for key in vars(instance):
+            value = getattr(instance, key)
+            key = self.to_safe('ec2_' + key)
+
+            # Handle complex types
+            # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
+            if key == 'ec2__state':
+                instance_vars['ec2_state'] = instance.state or ''
+                instance_vars['ec2_state_code'] = instance.state_code
+            elif key == 'ec2__previous_state':
+                instance_vars['ec2_previous_state'] = instance.previous_state or ''
+                instance_vars['ec2_previous_state_code'] = instance.previous_state_code
+            elif type(value) in [int, bool]:
+                instance_vars[key] = value
+            elif type(value) in [str, unicode]:
+                instance_vars[key] = value.strip()
+            elif type(value) == type(None):
+                instance_vars[key] = ''
+            elif key == 'ec2_region':
+                instance_vars[key] = value.name
+            elif key == 'ec2__placement':
+                instance_vars['ec2_placement'] = value.zone
+            elif key == 'ec2_tags':
+                for k, v in value.iteritems():
+                    key = self.to_safe('ec2_tag_' + k)
+                    instance_vars[key] = v
+            elif key == 'ec2_groups':
+                group_ids = []
+                group_names = []
+                for group in value:
+                    group_ids.append(group.id)
+                    group_names.append(group.name)
+                instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
+                instance_vars["ec2_security_group_names"] = ','.join(group_names)
+            else:
+                pass
+                # TODO Product codes if someone finds them useful
+                #print key
+                #print type(value)
+                #print value
+
+        return instance_vars
+
+    def get_host_info(self):
+        ''' Get variables about a specific host '''
+
+        if len(self.index) == 0:
+            # Need to load index from cache
+            self.load_index_from_cache()
+
+        if self.args.host not in self.index:
+            # try updating the cache
+            self.do_api_calls_update_cache()
+            if self.args.host not in self.index:
+                # host might not exist anymore
+                return self.json_format_dict({}, True)
+
+        (region, instance_id) = self.index[self.args.host]
+
+        instance = self.get_instance(region, instance_id)
+        return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
+
+    def push(self, my_dict, key, element):
+        ''' Pushes an element onto an array that may not have been defined in
+        the dict '''
+
+        if key in my_dict:
+            my_dict[key].append(element)
+        else:
+            my_dict[key] = [element]
+
+
+    def get_inventory_from_cache(self):
+        ''' Reads the inventory from the cache file and returns it as a JSON
+        object '''
+
+        cache = open(self.cache_path_cache, 'r')
+        json_inventory = cache.read()
+        return json_inventory
+
+
+    def load_index_from_cache(self):
+        ''' Reads the index from the cache file and sets self.index '''
+
+        cache = open(self.cache_path_index, 'r')
+        json_index = cache.read()
+        self.index = json.loads(json_index)
+
+
+    def write_to_cache(self, data, filename):
+        ''' Writes data in JSON format to a file '''
+
+        json_data = self.json_format_dict(data, True)
+        cache = open(filename, 'w')
+        cache.write(json_data)
+        cache.close()
+
+
+    def to_safe(self, word):
+        ''' Converts 'bad' characters in a string to underscores so they can be
+        used as Ansible groups '''
+
+        return re.sub("[^A-Za-z0-9\-]", "_", word)
+
+
+    def json_format_dict(self, data, pretty=False):
+        ''' Converts a dict to a JSON object and dumps it as a formatted
+        string '''
+
+        if pretty:
+            return json.dumps(data, sort_keys=True, indent=2)
+        else:
+            return json.dumps(data)
+
+
+# Run the script
+Ec2Inventory()
+

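To make the group-naming scheme concrete, here is a minimal Python sketch (the tag value is illustrative, not from the commit) of how to_safe turns an AWS tag into an inventory group name, and how a consumer such as lib/aws_helper.rb shells out to the script and parses its JSON:

    import json
    import re
    import subprocess

    def to_safe(word):
        # same sanitization as Ec2Inventory.to_safe: anything outside
        # [A-Za-z0-9-] becomes an underscore
        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    # the tag env-host-type=stg-os2-atomic-proxy becomes the group
    # tag_env-host-type_stg-os2-atomic-proxy (the '=' is replaced),
    # matching what AwsHelper.generate_env_host_type_tag_name builds
    print(to_safe("tag_env-host-type=stg-os2-atomic-proxy"))

    # consumers run the script and parse its JSON (assumes Python 2.7+)
    output = subprocess.check_output(["inventory/aws/ec2.py", "--list"])
    hostvars = json.loads(output)["_meta"]["hostvars"]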
+ 9 - 1
lib/ansible_helper.rb

@@ -60,7 +60,7 @@ extra_vars: #{@extra_vars.to_json}
       end
 
       def self.for_gce
-        ah      = AnsibleHelper.new
+        ah = AnsibleHelper.new
 
         # GCE specific configs
         gce_ini = "#{MYDIR}/../inventory/gce/gce.ini"
@@ -85,6 +85,14 @@ extra_vars: #{@extra_vars.to_json}
         return ah
       end
 
+      def self.for_aws
+        ah = AnsibleHelper.new
+
+        ah.inventory = 'inventory/aws/ec2.py'
+        return ah
+      end
+
+
       def ignore_bug_6407
         puts
         puts %q[ .----  Spurious warning "It is unnecessary to use '{{' in loops" (ansible bug 6407)  ----.]

+ 144 - 0
lib/aws_command.rb

@@ -0,0 +1,144 @@
+require 'thor'
+
+require_relative 'aws_helper'
+require_relative 'launch_helper'
+
+module OpenShift
+  module Ops
+    class AwsCommand < Thor
+      # WARNING: we do not currently support environments with hyphens in the name
+      SUPPORTED_ENVS = %w(prod stg int tint kint test jint)
+
+      option :type, :required => true, :enum => LaunchHelper.get_aws_host_types,
+             :desc => 'The host type of the new instances.'
+      option :env, :required => true, :aliases => '-e', :enum => SUPPORTED_ENVS,
+             :desc => 'The environment of the new instances.'
+      option :count, :default => 1, :aliases => '-c', :type => :numeric,
+             :desc => 'The number of instances to create'
+      option :tag, :type => :array,
+             :desc => 'The tag(s) to add to the new instances. Allowed characters are letters, numbers, and hyphens.'
+      desc "launch", "Launches instances."
+      def launch()
+        AwsHelper.check_creds()
+
+        # Expand all of the instance names so that we have a complete array
+        names = []
+        options[:count].times { names << "#{options[:env]}-#{options[:type]}-#{SecureRandom.hex(5)}" }
+
+        ah = AnsibleHelper.for_aws()
+
+        # AWS specific configs
+        ah.extra_vars['oo_new_inst_names'] = names
+        ah.extra_vars['oo_new_inst_tags'] = options[:tag]
+        ah.extra_vars['oo_env'] = options[:env]
+
+        # Add a created by tag
+        ah.extra_vars['oo_new_inst_tags'] = {} if ah.extra_vars['oo_new_inst_tags'].nil?
+
+        ah.extra_vars['oo_new_inst_tags']["created-by"] = ENV['USER']
+        ah.extra_vars['oo_new_inst_tags'].merge!(AwsHelper.generate_env_tag(options[:env]))
+        ah.extra_vars['oo_new_inst_tags'].merge!(AwsHelper.generate_host_type_tag(options[:type]))
+        ah.extra_vars['oo_new_inst_tags'].merge!(AwsHelper.generate_env_host_type_tag(options[:env], options[:type]))
+
+        puts
+        puts 'Creating instance(s) in AWS...'
+        ah.ignore_bug_6407
+
+        # Make sure we're completely up to date before launching
+        clear_cache()
+        ah.run_playbook("playbooks/aws/#{options[:type]}/launch.yml")
+      ensure
+        # This is so that if we run a config right after a launch, the newly launched instances will be
+        # in the list.
+        clear_cache()
+      end
+
+      desc "clear-cache", 'Clear the inventory cache'
+      def clear_cache()
+        print "Clearing inventory cache... "
+        AwsHelper.clear_inventory_cache()
+        puts "Done."
+      end
+
+      option :name, :required => false, :type => :string,
+             :desc => 'The name of the instance to configure.'
+      option :env, :required => false, :aliases => '-e', :enum => SUPPORTED_ENVS,
+             :desc => 'The environment of the new instances.'
+      option :type, :required => false, :enum => LaunchHelper.get_aws_host_types,
+             :desc => 'The type of the instances to configure.'
+      desc "config", 'Configures instances.'
+      def config()
+        ah = AnsibleHelper.for_aws()
+
+        abort 'Error: you can\'t specify both --name and --type' unless options[:type].nil? || options[:name].nil?
+
+        abort 'Error: you can\'t specify both --name and --env' unless options[:env].nil? || options[:name].nil?
+
+        host_type = nil
+        if options[:name]
+          details = AwsHelper.get_host_details(options[:name])
+          ah.extra_vars['oo_host_group_exp'] = options[:name]
+          ah.extra_vars['oo_env'] = details['env']
+          host_type = details['host-type']
+        elsif options[:type] && options[:env]
+          oo_env_host_type_tag = AwsHelper.generate_env_host_type_tag_name(options[:env], options[:type])
+          ah.extra_vars['oo_host_group_exp'] = "groups['#{oo_env_host_type_tag}']"
+          ah.extra_vars['oo_env'] = options[:env]
+          host_type = options[:type]
+        else
+          abort 'Error: you need to specify either --name or (--type and --env)'
+        end
+
+        puts
+        puts "Configuring #{options[:type]} instance(s) in AWS..."
+        ah.ignore_bug_6407
+
+        ah.run_playbook("playbooks/aws/#{host_type}/config.yml")
+      end
+
+      desc "list", "Lists instances."
+      def list()
+        AwsHelper.check_creds()
+        hosts = AwsHelper.get_hosts()
+
+        puts
+        puts "Instances"
+        puts "---------"
+        hosts.each { |h| puts "  #{h.name}.#{h.env}" }
+        puts
+      end
+
+      desc "ssh", "Ssh to an instance"
+      def ssh(*ssh_ops, host)
+        if host =~ /^([\w\d_.-]+)@([\w\d_.-]+)/
+          user = $1
+          host = $2
+        end
+
+        details = AwsHelper.get_host_details(host)
+        abort "\nError: Instance [#{host}] is not RUNNING\n\n" unless details['ec2_state'] == 'running'
+
+        cmd = "ssh #{ssh_ops.join(' ')}"
+
+        if user.nil?
+          cmd += " "
+        else
+          cmd += " #{user}@"
+        end
+
+        cmd += "#{details['ec2_ip_address']}"
+
+        exec(cmd)
+      end
+
+      desc 'types', 'Displays instance types'
+      def types()
+        puts
+        puts "Available Host Types"
+        puts "--------------------"
+        LaunchHelper.get_aws_host_types.each { |t| puts "  #{t}" }
+        puts
+      end
+    end
+  end
+end

+ 82 - 0
lib/aws_helper.rb

@@ -0,0 +1,82 @@
+require 'fileutils'
+
+module OpenShift
+  module Ops
+    class AwsHelper
+      MYDIR = File.expand_path(File.dirname(__FILE__))
+
+      def self.get_list()
+        cmd = "#{MYDIR}/../inventory/aws/ec2.py --list"
+        hosts = %x[#{cmd} 2>&1]
+
+        raise "Error: failed to list hosts\n#{hosts}" unless $?.exitstatus == 0
+        return JSON.parse(hosts)
+      end
+
+      def self.get_hosts()
+        hosts = get_list()
+
+        retval = []
+        hosts['_meta']['hostvars'].each do |host, info|
+          retval << OpenStruct.new({
+            :name        => info['ec2_tag_Name'],
+            :env         => info['ec2_tag_environment'] || 'UNSET',
+            :external_ip => info['ec2_ip_address'],
+            :public_dns  => info['ec2_public_dns_name']
+          })
+        end
+
+        retval.sort_by! { |h| [h.env, h.name] }
+
+        return retval
+      end
+
+      def self.get_host_details(host)
+        hosts = get_list()
+        dns_names = hosts["tag_Name_#{host}"]
+
+        raise "Error: host not found [#{host}]" if dns_names.nil?
+
+        return hosts['_meta']['hostvars'][dns_names.first]
+      end
+
+      def self.check_creds()
+        raise "AWS_ACCESS_KEY_ID environment variable must be set" if ENV['AWS_ACCESS_KEY_ID'].nil?
+        raise "AWS_SECRET_ACCESS_KEY environment variable must be set" if ENV['AWS_SECRET_ACCESS_KEY'].nil?
+      end
+
+      def self.clear_inventory_cache()
+        path = "#{ENV['HOME']}/.ansible/tmp"
+        cache_files = ["#{path}/ansible-ec2.cache", "#{path}/ansible-ec2.index"]
+        FileUtils.rm(cache_files)
+      end
+
+      def self.generate_env_tag(env)
+        return { "environment" => env }
+      end
+
+      def self.generate_env_tag_name(env)
+        h = generate_env_tag(env)
+        return "tag_#{h.keys.first}_#{h.values.first}"
+      end
+
+      def self.generate_host_type_tag(host_type)
+        return { "host-type" => host_type }
+      end
+
+      def self.generate_host_type_tag_name(host_type)
+        h = generate_host_type_tag(host_type)
+        return "tag_#{h.keys.first}_#{h.values.first}"
+      end
+
+      def self.generate_env_host_type_tag(env, host_type)
+        return { "env-host-type" => "#{env}-#{host_type}" }
+      end
+
+      def self.generate_env_host_type_tag_name(env, host_type)
+        h = generate_env_host_type_tag(env, host_type)
+        return "tag_#{h.keys.first}_#{h.values.first}"
+      end
+    end
+  end
+end

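AwsHelper.get_host_details relies on the per-tag groups ec2.py emits: every Name tag produces a group named tag_Name_<value> whose members are host addresses, which can then be looked up in _meta.hostvars. A rough Python sketch of that lookup, with made-up sample data:

    # hypothetical, minimal slice of `ec2.py --list` output
    inventory = {
        "tag_Name_stg-os2-atomic-proxy-0a1b2c3d4e": ["203.0.113.10"],
        "_meta": {"hostvars": {"203.0.113.10": {
            "ec2_tag_Name": "stg-os2-atomic-proxy-0a1b2c3d4e",
            "ec2_tag_environment": "stg",
            "ec2_ip_address": "203.0.113.10",
            "ec2_state": "running",
        }}},
    }

    def get_host_details(host):
        # same flow as AwsHelper.get_host_details in lib/aws_helper.rb
        addresses = inventory.get("tag_Name_" + host)
        if addresses is None:
            raise KeyError("host not found [%s]" % host)
        return inventory["_meta"]["hostvars"][addresses[0]]

    print(get_host_details("stg-os2-atomic-proxy-0a1b2c3d4e")["ec2_state"])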
+ 2 - 10
lib/gce_command.rb

@@ -125,17 +125,12 @@ module OpenShift
 
       desc "list", "Lists instances."
       def list()
-        hosts = GceHelper.list_hosts()
-
-        data = {}
-        hosts.each do |key,value|
-          value.each { |h| (data[h] ||= []) << key }
-        end
+        hosts = GceHelper.get_hosts()
 
         puts
         puts "Instances"
         puts "---------"
-        data.keys.sort.each { |k| puts "  #{k}" }
+        hosts.each { |k| puts "  #{k.name}" }
         puts
       end
 
@@ -177,13 +172,10 @@ module OpenShift
 
       desc "ssh", "Ssh to an instance"
       def ssh(*ssh_ops, host)
-        puts host
         if host =~ /^([\w\d_.-]+)@([\w\d-_.]+)/
           user = $1
           host = $2
         end
-        puts "user=#{user}"
-        puts "host=#{host}"
 
         details = GceHelper.get_host_details(host)
         abort "\nError: Instance [#{host}] is not RUNNING\n\n" unless details['gce_status'] == 'RUNNING'

+ 14 - 2
lib/gce_helper.rb

@@ -1,15 +1,27 @@
+require 'ostruct'
+
 module OpenShift
   module Ops
     class GceHelper
       MYDIR = File.expand_path(File.dirname(__FILE__))
 
-      def self.list_hosts()
+      def self.get_hosts()
         cmd = "#{MYDIR}/../inventory/gce/gce.py --list"
         hosts = %x[#{cmd} 2>&1]
 
         raise "Error: failed to list hosts\n#{hosts}" unless $?.exitstatus == 0
 
-        return JSON.parse(hosts)
+        # invert the hash so that its keys are the hosts and its values are arrays of metadata
+        data = {}
+        JSON.parse(hosts).each do |key,value|
+          value.each { |h| (data[h] ||= []) << key }
+        end
+
+        # For now, we only care about the name. In the future, we may want the other metadata included.
+        retval = []
+        data.keys.sort.each { |k| retval << OpenStruct.new({ :name => k }) }
+
+        return retval
       end
 
       def self.get_host_details(host)

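The inversion comment above is easiest to see with a toy Python example (group names are illustrative): gce.py --list returns a mapping of group to hosts, and get_hosts wants the mapping of host to groups.

    # invert {group: [host, ...]} into {host: [group, ...]}
    groups = {"prod": ["host-a", "host-b"], "type_proxy": ["host-a"]}
    by_host = {}
    for group, members in groups.items():
        for host in members:
            by_host.setdefault(host, []).append(group)
    # by_host == {"host-a": ["prod", "type_proxy"], "host-b": ["prod"]}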
+ 4 - 0
lib/launch_helper.rb

@@ -21,6 +21,10 @@ module OpenShift
       def self.get_gce_host_types()
         return Dir.glob("#{MYDIR}/../playbooks/gce/*").map { |d| File.basename(d) }
       end
+
+      def self.get_aws_host_types()
+        return Dir.glob("#{MYDIR}/../playbooks/aws/*").map { |d| File.basename(d) }
+      end
     end
   end
 end

+ 21 - 0
playbooks/aws/os2-atomic-proxy/config.yml

@@ -0,0 +1,21 @@
+---
+- name: "populate oo_hosts_to_config host group if needed"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: Evaluate oo_host_group_exp if it's set
+    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    with_items: "{{ oo_host_group_exp | default(['']) }}"
+    when: oo_host_group_exp is defined
+
+- name: "Configure instances"
+  hosts: oo_hosts_to_config
+  connection: ssh
+  user: root
+  vars_files:
+    - vars.yml
+    - "vars.{{ oo_env }}.yml"
+  roles:
+    - ../../../roles/atomic_base
+    - ../../../roles/atomic_proxy
+    - ../../../roles/shutdown_nightly

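The first play here is the glue for `cloud.rb aws config`: aws_command.rb sets oo_host_group_exp either to a single instance name or to an expression such as groups['tag_env-host-type_stg-os2-atomic-proxy'], and the add_host loop templates that value, so either the one host or the whole group's members end up in oo_hosts_to_config for the second play.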
+ 69 - 0
playbooks/aws/os2-atomic-proxy/launch.yml

@@ -0,0 +1,69 @@
+---
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+
+  vars:
+    inst_region: us-east-1
+    atomic_ami: ami-8e239fe6
+    user_data_file: user_data.txt
+
+  vars_files:
+    - vars.yml
+
+  tasks:
+    - name: Launch instances
+      ec2:
+        state: present
+        region: "{{ inst_region }}"
+        keypair: mmcgrath_libra
+        group: ['Libra', '{{ oo_env }}', '{{ oo_env }}_proxy', '{{ oo_env }}_proxy_atomic']
+        instance_type: m3.large
+        image: "{{ atomic_ami }}"
+        count: "{{ oo_new_inst_names | oo_len }}"
+        user_data: "{{ lookup('file', user_data_file) }}"
+        wait: yes
+      register: ec2
+
+    - name: Add new instances public IPs to the atomic proxy host group
+      add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
+      with_items: ec2.instances
+
+    - name: Add Name and environment tags to instances
+      ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
+      with_together:
+        - oo_new_inst_names
+        - ec2.instances
+      args:
+        tags:
+          Name: "{{ item.0 }}"
+
+    - name: Add other tags to instances
+      ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+      with_items: ec2.instances
+      args:
+        tags: "{{ oo_new_inst_tags }}"
+
+    - name: Add new instances public IPs to oo_hosts_to_config
+      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.public_ip }} groupname=oo_hosts_to_config"
+      with_together:
+        - oo_new_inst_names
+        - ec2.instances
+
+    - debug: var=ec2
+
+    - name: Wait for ssh
+      wait_for: "port=22 host={{ item.public_ip }}"
+      with_items: ec2.instances
+
+    - name: Wait for root user setup
+      command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
+      register: result
+      until: result.rc == 0
+      retries: 20
+      delay: 10
+      with_items: ec2.instances
+
+# Apply the configs, separate so that just the configs can be run by themselves
+- include: config.yml

+ 6 - 0
playbooks/aws/os2-atomic-proxy/user_data.txt

@@ -0,0 +1,6 @@
+#cloud-config
+disable_root: 0
+
+system_info:
+  default_user:
+    name: root

+ 2 - 0
playbooks/aws/os2-atomic-proxy/vars.stg.yml

@@ -0,0 +1,2 @@
+---
+oo_env_long: staging

+ 1 - 0
playbooks/aws/os2-atomic-proxy/vars.yml

@@ -0,0 +1 @@
+---

+ 56 - 0
roles/atomic_base/README.md

@@ -0,0 +1,56 @@
+atomic_base
+===========
+
+The purpose of this role is to do common configurations for all RHEL atomic hosts.
+
+
+Requirements
+------------
+
+None
+
+
+Role Variables
+--------------
+
+None
+
+
+Dependencies
+------------
+
+None
+
+
+Example Playbook
+-------------------------
+
+From a group playbook:
+
+  hosts: servers
+  roles:
+    - ../../roles/atomic_base
+
+
+License
+-------
+
+Copyright 2012-2014 Red Hat, Inc., All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
+Author Information
+------------------
+
+Thomas Wiest <twiest@redhat.com>

+ 2 - 0
roles/atomic_base/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+# defaults file for atomic_base

+ 12 - 0
roles/atomic_base/files/bash/bashrc

@@ -0,0 +1,12 @@
+# .bashrc
+
+# User specific aliases and functions
+
+alias rm='rm -i'
+alias cp='cp -i'
+alias mv='mv -i'
+
+# Source global definitions
+if [ -f /etc/bashrc ]; then
+    . /etc/bashrc
+fi

+ 10 - 0
roles/atomic_base/files/ostree/repo_config

@@ -0,0 +1,10 @@
+[core]
+repo_version=1
+mode=bare
+
+[remote "rh-atomic-controller"]
+url=https://mirror.openshift.com/libra/ostree/rhel-7-atomic-host
+branches=rh-atomic-controller/el7/x86_64/buildmaster/controller/docker;
+tls-client-cert-path=/var/lib/yum/client-cert.pem
+tls-client-key-path=/var/lib/yum/client-key.pem
+gpg-verify=false

+ 7 - 0
roles/atomic_base/files/system/90-nofile.conf

@@ -0,0 +1,7 @@
+# PAM process file descriptor limits
+# see limits.conf(5) for details.
+#Each line describes a limit for a user in the form:
+#
+#<domain> <type> <item> <value>
+*       hard    nofile  16384
+root	soft	nofile	16384

+ 2 - 0
roles/atomic_base/handlers/main.yml

@@ -0,0 +1,2 @@
+---
+# handlers file for atomic_base

+ 19 - 0
roles/atomic_base/meta/main.yml

@@ -0,0 +1,19 @@
+---
+galaxy_info:
+  author: Thomas Wiest
+  description: Common base RHEL atomic configurations
+  company: Red Hat
+  # Some suggested licenses:
+  # - BSD (default)
+  # - MIT
+  # - GPLv2
+  # - GPLv3
+  # - Apache
+  # - CC-BY
+  license: Apache
+  min_ansible_version: 1.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+dependencies: []

+ 13 - 0
roles/atomic_base/tasks/bash.yml

@@ -0,0 +1,13 @@
+---
+- name: Copy .bashrc
+  copy: src=bash/bashrc dest=/root/.bashrc owner=root group=root mode=0644
+
+- name: Link .profile to .bashrc
+  file: src=/root/.bashrc dest=/root/.profile owner=root group=root state=link
+
+- name: Setup Timezone [{{ oo_timezone }}]
+  file: >
+    src=/usr/share/zoneinfo/{{ oo_timezone }}
+    dest=/etc/localtime
+    owner=root
+    group=root state=link

+ 6 - 0
roles/atomic_base/tasks/cloud_user.yml

@@ -0,0 +1,6 @@
+---
+- name: Remove cloud-user account
+  user: name=cloud-user state=absent remove=yes force=yes
+
+- name: Remove cloud-user sudo
+  file: path=/etc/sudoers.d/90-cloud-init-users state=absent

+ 4 - 0
roles/atomic_base/tasks/main.yml

@@ -0,0 +1,4 @@
+---
+- include: system.yml
+- include: bash.yml
+- include: ostree.yml

+ 18 - 0
roles/atomic_base/tasks/ostree.yml

@@ -0,0 +1,18 @@
+---
+- name: Copy ostree repo config
+  copy: >
+    src=ostree/repo_config
+    dest=/ostree/repo/config
+    owner=root
+    group=root
+    mode=0644
+
+- name: "WORK AROUND: Stat redhat repo file"
+  stat: path=/etc/yum.repos.d/redhat.repo
+  register: redhat_repo
+
+- name: "WORK AROUND: subscription manager failures"
+  file: >
+    path=/etc/yum.repos.d/redhat.repo
+    state=touch
+  when: redhat_repo.stat.exists == False

+ 3 - 0
roles/atomic_base/tasks/system.yml

@@ -0,0 +1,3 @@
+---
+- name: Upload nofile limits.d file
+  copy: src=system/90-nofile.conf dest=/etc/security/limits.d/90-nofile.conf owner=root group=root mode=0644

+ 2 - 0
roles/atomic_base/vars/main.yml

@@ -0,0 +1,2 @@
+---
+oo_timezone: US/Eastern

+ 56 - 0
roles/atomic_proxy/README.md

@@ -0,0 +1,56 @@
+atomic_proxy
+============
+
+The purpose of this role is to deploy and configure the atomic proxy containers (proxy, puppet, and monitoring).
+
+
+Requirements
+------------
+
+None
+
+
+Role Variables
+--------------
+
+None
+
+
+Dependencies
+------------
+
+None
+
+
+Example Playbook
+-------------------------
+
+From a group playbook:
+
+  hosts: servers
+  roles:
+    - ../../roles/atomic_proxy
+
+
+License
+-------
+
+Copyright 2012-2014 Red Hat, Inc., All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
+Author Information
+------------------
+
+Thomas Wiest <twiest@redhat.com>

+ 2 - 0
roles/atomic_proxy/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+# defaults file for atomic_proxy

+ 37 - 0
roles/atomic_proxy/files/ctr-proxy-1.service

@@ -0,0 +1,37 @@
+
+
+[Unit]
+Description=Container proxy-1
+
+
+[Service]
+Type=simple
+TimeoutStartSec=5m
+Slice=container-small.slice
+
+ExecStartPre=-/usr/bin/docker rm "proxy-1"
+
+ExecStart=/usr/bin/docker run --rm --name "proxy-1"                           \
+          --volumes-from proxy-shared-data-1                                  \
+          -a stdout -a stderr -p 80:80 -p 443:443 -p 4999:4999                \
+          "proxy:latest"
+
+ExecStartPost=-/usr/bin/gear init --post "proxy-1" "proxy:latest"
+ExecReload=-/usr/bin/docker stop "proxy-1"
+ExecReload=-/usr/bin/docker rm "proxy-1"
+ExecStop=-/usr/bin/docker stop "proxy-1"
+
+[Install]
+WantedBy=container.target
+
+# Container information
+X-ContainerId=proxy-1
+X-ContainerImage=proxy:latest
+X-ContainerUserId=
+X-ContainerRequestId=LwiWtYWaAvSavH6Ze53QJg
+X-ContainerType=simple
+X-PortMapping=80:80
+X-PortMapping=443:443
+X-PortMapping=4999:4999
+
+

+ 37 - 0
roles/atomic_proxy/files/ctr-proxy-monitoring-1.service

@@ -0,0 +1,37 @@
+
+
+[Unit]
+Description=Container proxy-monitoring-1
+
+
+[Service]
+Type=simple
+TimeoutStartSec=5m
+Slice=container-small.slice
+
+ExecStartPre=-/usr/bin/docker rm "proxy-monitoring-1"
+
+ExecStart=/usr/bin/docker run --rm --name "proxy-monitoring-1"                \
+          --volumes-from proxy-shared-data-1                                  \
+          -a stdout -a stderr                                                 \
+          "monitoring:latest"
+
+ExecStartPost=-/usr/bin/gear init --post "proxy-monitoring-1" "monitoring:latest"
+ExecReload=-/usr/bin/docker stop "proxy-monitoring-1"
+ExecReload=-/usr/bin/docker rm "proxy-monitoring-1"
+ExecStop=-/usr/bin/docker stop "proxy-monitoring-1"
+
+[Install]
+WantedBy=container.target
+
+# Container information
+X-ContainerId=proxy-monitoring-1
+X-ContainerImage=monitoring:latest
+X-ContainerUserId=
+X-ContainerRequestId=LwiWtYWaAvSavH6Ze53QJg
+X-ContainerType=simple
+X-PortMapping=80:80
+X-PortMapping=443:443
+X-PortMapping=4999:4999
+
+

+ 37 - 0
roles/atomic_proxy/files/ctr-proxy-puppet-1.service

@@ -0,0 +1,37 @@
+
+
+[Unit]
+Description=Container proxy-puppet-1
+
+
+[Service]
+Type=simple
+TimeoutStartSec=5m
+Slice=container-small.slice
+
+
+ExecStartPre=-/usr/bin/docker rm "proxy-puppet-1"
+
+ExecStart=/usr/bin/docker run --rm --name "proxy-puppet-1"                                    \
+          --volumes-from proxy-shared-data-1                                                  \
+          -v /var/lib/docker/volumes/proxy_puppet/var/lib/puppet/ssl:/var/lib/puppet/ssl      \
+          -v /var/lib/docker/volumes/proxy_puppet/etc/puppet:/etc/puppet                      \
+          -a stdout -a stderr                                                                 \
+          "puppet:latest"
+# Set links (requires container have a name)
+ExecStartPost=-/usr/bin/gear init --post "proxy-puppet-1" "puppet:latest"
+ExecReload=-/usr/bin/docker stop "proxy-puppet-1"
+ExecReload=-/usr/bin/docker rm "proxy-puppet-1"
+ExecStop=-/usr/bin/docker stop "proxy-puppet-1"
+
+[Install]
+WantedBy=container.target
+
+# Container information
+X-ContainerId=proxy-puppet-1
+X-ContainerImage=puppet:latest
+X-ContainerUserId=
+X-ContainerRequestId=Ky0lhw0onwoSDJR4GK6t3g
+X-ContainerType=simple
+
+

+ 29 - 0
roles/atomic_proxy/files/proxy_containers_deploy_descriptor.json

@@ -0,0 +1,29 @@
+{
+  "Containers":[
+    {
+      "Name":"proxy-puppet",
+      "Count":1,
+      "Image":"puppet:latest",
+      "PublicPorts":[
+      ]
+    },
+    {
+      "Name":"proxy",
+      "Count":1,
+      "Image":"proxy:latest",
+      "PublicPorts":[
+        {"Internal":80,"External":80},
+        {"Internal":443,"External":443},
+        {"Internal":4999,"External":4999}
+      ]
+    },
+    {
+      "Name":"proxy-monitoring",
+      "Count":1,
+      "Image":"monitoring:latest",
+      "PublicPorts":[
+      ]
+    }
+  ],
+  "RandomizeIds": false
+}

+ 116 - 0
roles/atomic_proxy/files/puppet/auth.conf

@@ -0,0 +1,116 @@
+# This is the default auth.conf file, which implements the default rules
+# used by the puppet master. (That is, the rules below will still apply
+# even if this file is deleted.)
+#
+# The ACLs are evaluated in top-down order. More specific stanzas should
+# be towards the top of the file and more general ones at the bottom;
+# otherwise, the general rules may "steal" requests that should be
+# governed by the specific rules.
+#
+# See http://docs.puppetlabs.com/guides/rest_auth_conf.html for a more complete
+# description of auth.conf's behavior.
+#
+# Supported syntax:
+# Each stanza in auth.conf starts with a path to match, followed
+# by optional modifiers, and finally, a series of allow or deny
+# directives.
+#
+# Example Stanza
+# ---------------------------------
+# path /path/to/resource     # simple prefix match
+# # path ~ regex             # alternately, regex match
+# [environment envlist]
+# [method methodlist]
+# [auth[enticated] {yes|no|on|off|any}]
+# allow [host|backreference|*|regex]
+# deny [host|backreference|*|regex]
+# allow_ip [ip|cidr|ip_wildcard|*]
+# deny_ip [ip|cidr|ip_wildcard|*]
+#
+# The path match can either be a simple prefix match or a regular
+# expression. `path /file` would match both `/file_metadata` and
+# `/file_content`. Regex matches allow the use of backreferences
+# in the allow/deny directives.
+#
+# The regex syntax is the same as for Ruby regex, and captures backreferences
+# for use in the `allow` and `deny` lines of that stanza
+#
+# Examples:
+#
+# path ~ ^/path/to/resource    # Equivalent to `path /path/to/resource`.
+# allow *                      # Allow all authenticated nodes (since auth
+#                              # defaults to `yes`).
+#
+# path ~ ^/catalog/([^/]+)$    # Permit nodes to access their own catalog (by
+# allow $1                     # certname), but not any other node's catalog.
+#
+# path ~ ^/file_(metadata|content)/extra_files/  # Only allow certain nodes to
+# auth yes                                       # access the "extra_files"
+# allow /^(.+)\.example\.com$/                   # mount point; note this must
+# allow_ip 192.168.100.0/24                      # go ABOVE the "/file" rule,
+#                                                # since it is more specific.
+#
+# environment:: restrict an ACL to a comma-separated list of environments
+# method:: restrict an ACL to a comma-separated list of HTTP methods
+# auth:: restrict an ACL to an authenticated or unauthenticated request
+# the default when unspecified is to restrict the ACL to authenticated requests
+# (ie exactly as if auth yes was present).
+#
+
+### Authenticated ACLs - these rules apply only when the client
+### has a valid certificate and is thus authenticated
+
+# allow nodes to retrieve their own catalog
+path ~ ^/catalog/([^/]+)$
+method find
+allow $1
+
+# allow nodes to retrieve their own node definition
+path ~ ^/node/([^/]+)$
+method find
+allow $1
+
+# allow all nodes to access the certificates services
+path /certificate_revocation_list/ca
+method find
+allow *
+
+# allow all nodes to store their own reports
+path ~ ^/report/([^/]+)$
+method save
+allow $1
+
+# Allow all nodes to access all file services; this is necessary for
+# pluginsync, file serving from modules, and file serving from custom
+# mount points (see fileserver.conf). Note that the `/file` prefix matches
+# requests to both the file_metadata and file_content paths. See "Examples"
+# above if you need more granular access control for custom mount points.
+path /file
+allow *
+
+### Unauthenticated ACLs, for clients without valid certificates; authenticated
+### clients can also access these paths, though they rarely need to.
+
+# allow access to the CA certificate; unauthenticated nodes need this
+# in order to validate the puppet master's certificate
+path /certificate/ca
+auth any
+method find
+allow *
+
+# allow nodes to retrieve the certificate they requested earlier
+path /certificate/
+auth any
+method find
+allow *
+
+# allow nodes to request a new certificate
+path /certificate_request
+auth any
+method find, save
+allow *
+
+# deny everything else; this ACL is not strictly necessary, but
+# illustrates the default policy.
+path /
+auth any

+ 43 - 0
roles/atomic_proxy/files/setup-proxy-containers.sh

@@ -0,0 +1,43 @@
+#!/bin/bash
+
+function fail {
+  msg=$1
+  echo
+  echo $msg
+  echo
+  exit 5
+}
+
+
+NUM_DATA_CTR=$(docker ps -a | grep -c proxy-shared-data-1)
+[ "$NUM_DATA_CTR" -ne 0 ] && fail "ERROR: proxy-shared-data-1 exists"
+
+
+# pre-cache the container images
+echo
+timeout --signal TERM --kill-after 30 600  docker pull busybox:latest  || fail "ERROR: docker pull of busybox failed"
+
+echo
+# WORKAROUND: Setup the shared data container
+/usr/bin/docker run --name "proxy-shared-data-1"  \
+          -v /shared/etc/haproxy                  \
+          -v /shared/etc/httpd                    \
+          -v /shared/etc/openshift                \
+          -v /shared/etc/pki                      \
+          -v /shared/var/run/ctr-ipc              \
+          -v /shared/var/lib/haproxy              \
+          -v /shared/usr/local                    \
+          "busybox:latest" true
+
+# WORKAROUND: These are because we're not using a pod yet
+cp /usr/local/etc/ctr-proxy-1.service /usr/local/etc/ctr-proxy-puppet-1.service /usr/local/etc/ctr-proxy-monitoring-1.service /etc/systemd/system/
+
+systemctl daemon-reload
+
+echo
+echo -n "sleeping 10 seconds for systemd reload to take affect..."
+sleep 10
+echo " Done."
+
+# Start the services
+systemctl start ctr-proxy-puppet-1 ctr-proxy-1 ctr-proxy-monitoring-1

+ 2 - 0
roles/atomic_proxy/handlers/main.yml

@@ -0,0 +1,2 @@
+---
+# handlers file for atomic_proxy

+ 21 - 0
roles/atomic_proxy/meta/main.yml

@@ -0,0 +1,21 @@
+---
+galaxy_info:
+  author: Thomas Wiest
+  description: Atomic proxy container configurations
+  company: Red Hat
+  # Some suggested licenses:
+  # - BSD (default)
+  # - MIT
+  # - GPLv2
+  # - GPLv3
+  # - Apache
+  # - CC-BY
+  license: Apache
+  min_ansible_version: 1.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+dependencies:
+  # Pull in this role's PRIVATE counterpart from the atomic_private repository.
+  - ../../../../../atomic_private/ansible/roles/atomic_proxy

+ 21 - 0
roles/atomic_proxy/tasks/main.yml

@@ -0,0 +1,21 @@
+---
+- name: upload sbin scripts
+  copy: >
+    src={{ item }}
+    dest=/usr/local/sbin/{{ item }}
+    mode=0750
+  with_items:
+    - setup-proxy-containers.sh
+
+- name: upload /usr/local/etc files
+  copy: >
+    src={{ item }}
+    dest=/usr/local/etc/{{ item }}
+    mode=0640
+  with_items:
+    - proxy_containers_deploy_descriptor.json
+    - ctr-proxy-1.service
+    - ctr-proxy-puppet-1.service
+    - ctr-proxy-monitoring-1.service
+
+- include: setup_puppet.yml

+ 24 - 0
roles/atomic_proxy/tasks/setup_puppet.yml

@@ -0,0 +1,24 @@
+---
+- name: make puppet conf dir
+  file: >
+    dest={{ oo_proxy_puppet_volume_dir }}/etc/puppet
+    mode=755
+    owner=root
+    group=root
+    state=directory
+
+- name: upload puppet auth config
+  copy: >
+    src=puppet/auth.conf
+    dest={{ oo_proxy_puppet_volume_dir }}/etc/puppet/auth.conf
+    mode=0644
+    owner=root
+    group=root
+
+- name: upload puppet config
+  template: >
+    src=puppet/puppet.conf.j2
+    dest={{ oo_proxy_puppet_volume_dir }}/etc/puppet/puppet.conf
+    mode=0644
+    owner=root
+    group=root

+ 40 - 0
roles/atomic_proxy/templates/puppet/puppet.conf.j2

@@ -0,0 +1,40 @@
+[main]
+    # we need to override the host name of the container
+    certname = ctr-proxy.stg.rhcloud.com
+
+    # The Puppet log directory.
+    # The default value is '$vardir/log'.
+    logdir = /var/log/puppet
+
+    # Where Puppet PID files are kept.
+    # The default value is '$vardir/run'.
+    rundir = /var/run/puppet
+
+    # Where SSL certificates are kept.
+    # The default value is '$confdir/ssl'.
+    ssldir = $vardir/ssl
+    manifest = $manifestdir/site.pp
+    manifestdir = /var/lib/puppet/environments/pub/$environment/manifests
+    environment = {{ oo_env_long }}
+    modulepath = /var/lib/puppet/environments/pub/$environment/modules:/var/lib/puppet/environments/pri/$environment/modules:/var/lib/puppet/environments/pri/production/modules:$confdir/modules:/usr/share/puppet/modules
+
+[agent]
+    # The file in which puppetd stores a list of the classes
+    # associated with the retrieved configuration.  Can be loaded in
+    # the separate ``puppet`` executable using the ``--loadclasses``
+    # option.
+    # The default value is '$confdir/classes.txt'.
+    classfile = $vardir/classes.txt
+
+    # Where puppetd caches the local configuration.  An
+    # extension indicating the cache format is added automatically.
+    # The default value is '$confdir/localconfig'.
+    localconfig = $vardir/localconfig
+    server = puppet.ops.rhcloud.com
+    environment = {{ oo_env_long }}
+    pluginsync = true
+    graph = true
+    configtimeout = 600
+    report = true
+    runinterval = 3600
+    splay = true

+ 16 - 0
roles/atomic_proxy/templates/sync/sync-proxy-configs.sh.j2

@@ -0,0 +1,16 @@
+#!/bin/bash
+
+VOL_DIR=/var/lib/docker/volumes/proxy
+SSH_CMD="ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null"
+
+mkdir -p ${VOL_DIR}/etc/haproxy/
+rsync -e "${SSH_CMD}" -va --progress root@proxy1.{{ oo_env }}.rhcloud.com:/etc/haproxy/ ${VOL_DIR}/etc/haproxy/
+
+mkdir -p ${VOL_DIR}/etc/httpd/
+rsync -e "${SSH_CMD}" -va --progress root@proxy1.{{ oo_env }}.rhcloud.com:/etc/httpd/ ${VOL_DIR}/etc/httpd/
+
+mkdir -p ${VOL_DIR}/etc/pki/tls/
+rsync -e "${SSH_CMD}" -va --progress root@proxy1.{{ oo_env }}.rhcloud.com:/etc/pki/tls/ ${VOL_DIR}/etc/pki/tls/
+
+# We need to disable the haproxy chroot
+sed -i -re 's/^(\s+)chroot/\1#chroot/' /var/lib/docker/volumes/proxy/etc/haproxy/haproxy.cfg

+ 2 - 0
roles/atomic_proxy/vars/main.yml

@@ -0,0 +1,2 @@
+---
+oo_proxy_puppet_volume_dir: /var/lib/docker/volumes/proxy_puppet

+ 56 - 0
roles/shutdown_nightly/README.md

@@ -0,0 +1,56 @@
+shutdown_nightly
+================
+
+The purpose of this role is to shut hosts down nightly to save money.
+
+
+Requirements
+------------
+
+None
+
+
+Role Variables
+--------------
+
+None
+
+
+Dependencies
+------------
+
+None
+
+
+Example Playbook
+-------------------------
+
+From a group playbook:
+
+  hosts: servers
+  roles:
+    - ../../roles/shutdown_nightly
+
+
+License
+-------
+
+Copyright 2012-2014 Red Hat, Inc., All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
+Author Information
+------------------
+
+Thomas Wiest <twiest@redhat.com>

+ 2 - 0
roles/shutdown_nightly/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+# defaults file for role

+ 2 - 0
roles/shutdown_nightly/handlers/main.yml

@@ -0,0 +1,2 @@
+---
+# handlers file for role

+ 19 - 0
roles/shutdown_nightly/meta/main.yml

@@ -0,0 +1,19 @@
+---
+galaxy_info:
+  author: Thomas Wiest
+  description: Nightly shutdown configurations
+  company: Red Hat
+  # Some suggested licenses:
+  # - BSD (default)
+  # - MIT
+  # - GPLv2
+  # - GPLv3
+  # - Apache
+  # - CC-BY
+  license: Apache
+  min_ansible_version: 1.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+dependencies: []

+ 7 - 0
roles/shutdown_nightly/tasks/main.yml

@@ -0,0 +1,7 @@
+---
+- name: Setup nightly shutdown command to save money
+  cron: >
+    name="shutdown system at night to save money"
+    hour="18"
+    minute="0"
+    job="/usr/sbin/shutdown --halt"

+ 2 - 0
roles/shutdown_nightly/vars/main.yml

@@ -0,0 +1,2 @@
+---
+# vars file for role