Browse Source

Merge pull request #245 from twiest/remove_atomic_stuff

removed references to atomic proxy.
Thomas Wiest 10 years ago
parent
commit
95d94334bb
37 changed files with 8 additions and 812 deletions
  1. 4 4
      playbooks/aws/openshift-master/launch.yml
  2. 4 4
      playbooks/aws/openshift-node/launch.yml
  3. 0 20
      playbooks/aws/os2-atomic-proxy/config.yml
  4. 0 1
      playbooks/aws/os2-atomic-proxy/filter_plugins
  5. 0 97
      playbooks/aws/os2-atomic-proxy/launch.yml
  6. 0 1
      playbooks/aws/os2-atomic-proxy/roles
  7. 0 6
      playbooks/aws/os2-atomic-proxy/user_data.txt
  8. 0 3
      playbooks/aws/os2-atomic-proxy/vars.int.yml
  9. 0 3
      playbooks/aws/os2-atomic-proxy/vars.prod.yml
  10. 0 10
      playbooks/aws/os2-atomic-proxy/vars.stg.yml
  11. 0 1
      playbooks/aws/os2-atomic-proxy/vars.yml
  12. 0 56
      roles/atomic_base/README.md
  13. 0 12
      roles/atomic_base/files/bash/bashrc
  14. 0 10
      roles/atomic_base/files/ostree/repo_config
  15. 0 7
      roles/atomic_base/files/system/90-nofile.conf
  16. 0 19
      roles/atomic_base/meta/main.yml
  17. 0 14
      roles/atomic_base/tasks/bash.yml
  18. 0 6
      roles/atomic_base/tasks/cloud_user.yml
  19. 0 4
      roles/atomic_base/tasks/main.yml
  20. 0 18
      roles/atomic_base/tasks/ostree.yml
  21. 0 3
      roles/atomic_base/tasks/system.yml
  22. 0 2
      roles/atomic_base/vars/main.yml
  23. 0 56
      roles/atomic_proxy/README.md
  24. 0 29
      roles/atomic_proxy/files/proxy_containers_deploy_descriptor.json
  25. 0 116
      roles/atomic_proxy/files/puppet/auth.conf
  26. 0 43
      roles/atomic_proxy/files/setup-proxy-containers.sh
  27. 0 3
      roles/atomic_proxy/handlers/main.yml
  28. 0 21
      roles/atomic_proxy/meta/main.yml
  29. 0 3
      roles/atomic_proxy/tasks/main.yml
  30. 0 57
      roles/atomic_proxy/tasks/setup_containers.yml
  31. 0 24
      roles/atomic_proxy/tasks/setup_puppet.yml
  32. 0 40
      roles/atomic_proxy/templates/puppet/puppet.conf.j2
  33. 0 16
      roles/atomic_proxy/templates/sync/sync-proxy-configs.sh.j2
  34. 0 32
      roles/atomic_proxy/templates/systemd/ctr-proxy-1.service.j2
  35. 0 36
      roles/atomic_proxy/templates/systemd/ctr-proxy-monitoring-1.service.j2
  36. 0 33
      roles/atomic_proxy/templates/systemd/ctr-proxy-puppet-1.service.j2
  37. 0 2
      roles/atomic_proxy/vars/main.yml

+ 4 - 4
playbooks/aws/openshift-master/launch.yml

@@ -4,10 +4,10 @@
   connection: local
   gather_facts: no
 
-# TODO: modify atomic_ami based on deployment_type
+# TODO: modify g_ami based on deployment_type
   vars:
     inst_region: us-east-1
-    atomic_ami: ami-86781fee
+    g_ami: ami-86781fee
     user_data_file: user_data.txt
 
   tasks:
@@ -18,13 +18,13 @@
         keypair: libra
         group: ['public']
         instance_type: m3.large
-        image: "{{ atomic_ami }}"
+        image: "{{ g_ami }}"
         count: "{{ oo_new_inst_names | oo_len }}"
         user_data: "{{ lookup('file', user_data_file) }}"
         wait: yes
       register: ec2
 
-    - name: Add new instances public IPs to the atomic proxy host group
+    - name: Add new instances public IPs to the host group
       add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
       with_items: ec2.instances
 

+ 4 - 4
playbooks/aws/openshift-node/launch.yml

@@ -4,10 +4,10 @@
   connection: local
   gather_facts: no
 
-# TODO: modify atomic_ami based on deployment_type
+# TODO: modify g_ami based on deployment_type
   vars:
     inst_region: us-east-1
-    atomic_ami: ami-86781fee
+    g_ami: ami-86781fee
     user_data_file: user_data.txt
 
   tasks:
@@ -18,13 +18,13 @@
         keypair: libra
         group: ['public']
         instance_type: m3.large
-        image: "{{ atomic_ami }}"
+        image: "{{ g_ami }}"
         count: "{{ oo_new_inst_names | oo_len }}"
         user_data: "{{ lookup('file', user_data_file) }}"
         wait: yes
       register: ec2
 
-    - name: Add new instances public IPs to the atomic proxy host group
+    - name: Add new instances public IPs to the host group
       add_host:
         hostname: "{{ item.public_ip }}"
        groupname: new_ec2_instances

+ 0 - 20
playbooks/aws/os2-atomic-proxy/config.yml

@@ -1,20 +0,0 @@
----
-- name: "populate oo_hosts_to_config host group if needed"
-  hosts: localhost
-  gather_facts: no
-  tasks:
-  - name: Evaluate oo_host_group_exp if it's set
-    add_host: "name={{ item }} groups=oo_hosts_to_config"
-    with_items: "{{ oo_host_group_exp | default(['']) }}"
-    when: oo_host_group_exp is defined
-
-- name: "Configure instances"
-  hosts: oo_hosts_to_config
-  connection: ssh
-  user: root
-  vars_files:
-    - vars.yml
-    - "vars.{{ oo_env }}.yml"
-  roles:
-    - atomic_base
-    - atomic_proxy

+ 0 - 1
playbooks/aws/os2-atomic-proxy/filter_plugins

@@ -1 +0,0 @@
-../../../filter_plugins

+ 0 - 97
playbooks/aws/os2-atomic-proxy/launch.yml

@@ -1,97 +0,0 @@
----
-- name: Launch instance(s)
-  hosts: localhost
-  connection: local
-  gather_facts: no
-
-  vars:
-    inst_region: us-east-1
-    atomic_ami: ami-8e239fe6
-    user_data_file: user_data.txt
-    oo_vpc_subnet_id:    # Purposely left blank, these are here to be overridden in env vars_files
-    oo_assign_public_ip: # Purposely left blank, these are here to be overridden in env vars_files
-
-  vars_files:
-    - vars.yml
-    - "vars.{{ oo_env }}.yml"
-
-  tasks:
-    - name: Launch instances in VPC
-      ec2:
-        state: present
-        region: "{{ inst_region }}"
-        keypair: mmcgrath_libra
-        group_id: "{{ oo_security_group_ids }}"
-        instance_type: m3.large
-        image: "{{ atomic_ami }}"
-        count: "{{ oo_new_inst_names | oo_len }}"
-        user_data: "{{ lookup('file', user_data_file) }}"
-        wait: yes
-        assign_public_ip: "{{ oo_assign_public_ip }}"
-        vpc_subnet_id: "{{ oo_vpc_subnet_id }}"
-      when: oo_vpc_subnet_id
-      register: ec2_vpc
-
-    - set_fact:
-        ec2: "{{ ec2_vpc }}"
-      when: oo_vpc_subnet_id
-
-    - name: Launch instances in Classic
-      ec2:
-        state: present
-        region: "{{ inst_region }}"
-        keypair: mmcgrath_libra
-        group: ['Libra', '{{ oo_env }}', '{{ oo_env }}_proxy', '{{ oo_env }}_proxy_atomic']
-        instance_type: m3.large
-        image: "{{ atomic_ami }}"
-        count: "{{ oo_new_inst_names | oo_len }}"
-        user_data: "{{ lookup('file', user_data_file) }}"
-        wait: yes
-      when: not oo_vpc_subnet_id
-      register: ec2_classic
-
-    - set_fact:
-        ec2: "{{ ec2_classic }}"
-      when: not oo_vpc_subnet_id
-
-    - name: Add new instances public IPs to the atomic proxy host group
-      add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
-      with_items: ec2.instances
-
-    - name: Add Name and environment tags to instances
-      ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
-      with_together:
-        - oo_new_inst_names
-        - ec2.instances
-      args:
-        tags:
-          Name: "{{ item.0 }}"
-
-    - name: Add other tags to instances
-      ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
-      with_items: ec2.instances
-      args:
-        tags: "{{ oo_new_inst_tags }}"
-
-    - name: Add new instances public IPs to oo_hosts_to_config
-      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.public_ip }} groupname=oo_hosts_to_config"
-      with_together:
-        - oo_new_inst_names
-        - ec2.instances
-
-    - debug: var=ec2
-
-    - name: Wait for ssh
-      wait_for: "port=22 host={{ item.public_ip }}"
-      with_items: ec2.instances
-
-    - name: Wait for root user setup
-      command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
-      register: result
-      until: result.rc == 0
-      retries: 20
-      delay: 10
-      with_items: ec2.instances
-
-# Apply the configs, separate so that just the configs can be run by themselves
-- include: config.yml

+ 0 - 1
playbooks/aws/os2-atomic-proxy/roles

@@ -1 +0,0 @@
-../../../roles

+ 0 - 6
playbooks/aws/os2-atomic-proxy/user_data.txt

@@ -1,6 +0,0 @@
-#cloud-config
-disable_root: 0
-
-system_info:
-  default_user:
-    name: root

+ 0 - 3
playbooks/aws/os2-atomic-proxy/vars.int.yml

@@ -1,3 +0,0 @@
----
-oo_env_long: integration
-oo_zabbix_hostgroups: ['INT Environment']

+ 0 - 3
playbooks/aws/os2-atomic-proxy/vars.prod.yml

@@ -1,3 +0,0 @@
----
-oo_env_long: production
-oo_zabbix_hostgroups: ['PROD Environment']

+ 0 - 10
playbooks/aws/os2-atomic-proxy/vars.stg.yml

@@ -1,10 +0,0 @@
----
-oo_env_long: staging
-oo_zabbix_hostgroups: ['STG Environment']
-oo_vpc_subnet_id: subnet-700bdd07
-oo_assign_public_ip: yes
-oo_security_group_ids:
-  - sg-02c2f267 # Libra (vpc)
-  - sg-f0bfbe95 # stg (vpc)
-  - sg-a3bfbec6 # stg_proxy (vpc)
-  - sg-d4bfbeb1 # stg_proxy_atomic (vpc)

+ 0 - 1
playbooks/aws/os2-atomic-proxy/vars.yml

@@ -1 +0,0 @@
----

+ 0 - 56
roles/atomic_base/README.md

@@ -1,56 +0,0 @@
-Role Name
-========
-
-The purpose of this role is to do common configurations for all RHEL atomic hosts.
-
-
-Requirements
-------------
-
-None
-
-
-Role Variables
---------------
-
-None
-
-
-Dependencies
-------------
-
-None
-
-
-Example Playbook
--------------------------
-
-From a group playbook:
-
-  hosts: servers
-  roles:
-    - ../../roles/atomic_base
-
-
-License
--------
-
-Copyright 2012-2014 Red Hat, Inc., All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-Author Information
-------------------
-
-Thomas Wiest <twiest@redhat.com>

+ 0 - 12
roles/atomic_base/files/bash/bashrc

@@ -1,12 +0,0 @@
-# .bashrc
-
-# User specific aliases and functions
-
-alias rm='rm -i'
-alias cp='cp -i'
-alias mv='mv -i'
-
-# Source global definitions
-if [ -f /etc/bashrc ]; then
-    . /etc/bashrc
-fi

+ 0 - 10
roles/atomic_base/files/ostree/repo_config

@@ -1,10 +0,0 @@
-[core]
-repo_version=1
-mode=bare
-
-[remote "rh-atomic-controller"]
-url=https://mirror.openshift.com/libra/ostree/rhel-7-atomic-host
-branches=rh-atomic-controller/el7/x86_64/buildmaster/controller/docker;
-tls-client-cert-path=/var/lib/yum/client-cert.pem
-tls-client-key-path=/var/lib/yum/client-key.pem
-gpg-verify=false

+ 0 - 7
roles/atomic_base/files/system/90-nofile.conf

@@ -1,7 +0,0 @@
-# PAM process file descriptor limits
-# see limits.conf(5) for details.
-#Each line describes a limit for a user in the form:
-#
-#<domain> <type> <item> <value>
-*       hard    nofile  16384
-root	soft	nofile	16384

+ 0 - 19
roles/atomic_base/meta/main.yml

@@ -1,19 +0,0 @@
----
-galaxy_info:
-  author: Thomas Wiest
-  description: Common base RHEL atomic configurations
-  company: Red Hat
-  # Some suggested licenses:
-  # - BSD (default)
-  # - MIT
-  # - GPLv2
-  # - GPLv3
-  # - Apache
-  # - CC-BY
-  license: Apache
-  min_ansible_version: 1.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-dependencies: []

+ 0 - 14
roles/atomic_base/tasks/bash.yml

@@ -1,14 +0,0 @@
----
-- name: Copy .bashrc
-  copy: src=bash/bashrc dest=/root/.bashrc owner=root group=root mode=0644
-
-- name: Link to .profile to .bashrc
-  file: src=/root/.bashrc dest=/root/.profile owner=root group=root state=link
-
-- name: "Setup Timezone [{{ oo_timezone }}]"
-  file:
-    src: "/usr/share/zoneinfo/{{ oo_timezone }}"
-    dest: /etc/localtime
-    owner: root
-    group: root
-    state: link

+ 0 - 6
roles/atomic_base/tasks/cloud_user.yml

@@ -1,6 +0,0 @@
----
-- name: Remove cloud-user account
-  user: name=cloud-user state=absent remove=yes force=yes
-
-- name: Remove cloud-user sudo
-  file: path=/etc/sudoers.d/90-cloud-init-users state=absent

+ 0 - 4
roles/atomic_base/tasks/main.yml

@@ -1,4 +0,0 @@
----
-- include: system.yml
-- include: bash.yml
-- include: ostree.yml

+ 0 - 18
roles/atomic_base/tasks/ostree.yml

@@ -1,18 +0,0 @@
----
-- name: Copy ostree repo config
-  copy:
-    src: ostree/repo_config
-    dest: /ostree/repo/config
-    owner: root
-    group: root
-    mode: 0644
-
-- name: "WORK AROUND: Stat redhat repo file"
-  stat: path=/etc/yum.repos.d/redhat.repo
-  register: redhat_repo
-
-- name: "WORK AROUND: subscription manager failures"
-  file:
-    path: /etc/yum.repos.d/redhat.repo
-    state: touch
-  when: redhat_repo.stat.exists == False

+ 0 - 3
roles/atomic_base/tasks/system.yml

@@ -1,3 +0,0 @@
----
-- name: Upload nofile limits.d file
-  copy: src=system/90-nofile.conf dest=/etc/security/limits.d/90-nofile.conf owner=root group=root mode=0644

+ 0 - 2
roles/atomic_base/vars/main.yml

@@ -1,2 +0,0 @@
----
-oo_timezone: US/Eastern

+ 0 - 56
roles/atomic_proxy/README.md

@@ -1,56 +0,0 @@
-Role Name
-========
-
-The purpose of this role is to do common configurations for all RHEL atomic hosts.
-
-
-Requirements
-------------
-
-None
-
-
-Role Variables
---------------
-
-None
-
-
-Dependencies
-------------
-
-None
-
-
-Example Playbook
--------------------------
-
-From a group playbook:
-
-  hosts: servers
-  roles:
-    - ../../roles/atomic_proxy
-
-
-License
--------
-
-Copyright 2012-2014 Red Hat, Inc., All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-Author Information
-------------------
-
-Thomas Wiest <twiest@redhat.com>

+ 0 - 29
roles/atomic_proxy/files/proxy_containers_deploy_descriptor.json

@@ -1,29 +0,0 @@
-{
-  "Containers":[
-    {
-      "Name":"proxy-puppet",
-      "Count":1,
-      "Image":"puppet:latest",
-      "PublicPorts":[
-      ]
-    },
-    {
-      "Name":"proxy",
-      "Count":1,
-      "Image":"proxy:latest",
-      "PublicPorts":[
-        {"Internal":80,"External":80},
-        {"Internal":443,"External":443},
-        {"Internal":4999,"External":4999}
-      ]
-    },
-    {
-      "Name":"proxy-monitoring",
-      "Count":1,
-      "Image":"monitoring:latest",
-      "PublicPorts":[
-      ]
-    }
-  ],
-  "RandomizeIds": false
-}

+ 0 - 116
roles/atomic_proxy/files/puppet/auth.conf

@@ -1,116 +0,0 @@
-# This is the default auth.conf file, which implements the default rules
-# used by the puppet master. (That is, the rules below will still apply
-# even if this file is deleted.)
-#
-# The ACLs are evaluated in top-down order. More specific stanzas should
-# be towards the top of the file and more general ones at the bottom;
-# otherwise, the general rules may "steal" requests that should be
-# governed by the specific rules.
-#
-# See http://docs.puppetlabs.com/guides/rest_auth_conf.html for a more complete
-# description of auth.conf's behavior.
-#
-# Supported syntax:
-# Each stanza in auth.conf starts with a path to match, followed
-# by optional modifiers, and finally, a series of allow or deny
-# directives.
-#
-# Example Stanza
-# ---------------------------------
-# path /path/to/resource     # simple prefix match
-# # path ~ regex             # alternately, regex match
-# [environment envlist]
-# [method methodlist]
-# [auth[enthicated] {yes|no|on|off|any}]
-# allow [host|backreference|*|regex]
-# deny [host|backreference|*|regex]
-# allow_ip [ip|cidr|ip_wildcard|*]
-# deny_ip [ip|cidr|ip_wildcard|*]
-#
-# The path match can either be a simple prefix match or a regular
-# expression. `path /file` would match both `/file_metadata` and
-# `/file_content`. Regex matches allow the use of backreferences
-# in the allow/deny directives.
-#
-# The regex syntax is the same as for Ruby regex, and captures backreferences
-# for use in the `allow` and `deny` lines of that stanza
-#
-# Examples:
-#
-# path ~ ^/path/to/resource    # Equivalent to `path /path/to/resource`.
-# allow *                      # Allow all authenticated nodes (since auth
-#                              # defaults to `yes`).
-#
-# path ~ ^/catalog/([^/]+)$    # Permit nodes to access their own catalog (by
-# allow $1                     # certname), but not any other node's catalog.
-#
-# path ~ ^/file_(metadata|content)/extra_files/  # Only allow certain nodes to
-# auth yes                                       # access the "extra_files"
-# allow /^(.+)\.example\.com$/                   # mount point; note this must
-# allow_ip 192.168.100.0/24                      # go ABOVE the "/file" rule,
-#                                                # since it is more specific.
-#
-# environment:: restrict an ACL to a comma-separated list of environments
-# method:: restrict an ACL to a comma-separated list of HTTP methods
-# auth:: restrict an ACL to an authenticated or unauthenticated request
-# the default when unspecified is to restrict the ACL to authenticated requests
-# (ie exactly as if auth yes was present).
-#
-
-### Authenticated ACLs - these rules apply only when the client
-### has a valid certificate and is thus authenticated
-
-# allow nodes to retrieve their own catalog
-path ~ ^/catalog/([^/]+)$
-method find
-allow $1
-
-# allow nodes to retrieve their own node definition
-path ~ ^/node/([^/]+)$
-method find
-allow $1
-
-# allow all nodes to access the certificates services
-path /certificate_revocation_list/ca
-method find
-allow *
-
-# allow all nodes to store their own reports
-path ~ ^/report/([^/]+)$
-method save
-allow $1
-
-# Allow all nodes to access all file services; this is necessary for
-# pluginsync, file serving from modules, and file serving from custom
-# mount points (see fileserver.conf). Note that the `/file` prefix matches
-# requests to both the file_metadata and file_content paths. See "Examples"
-# above if you need more granular access control for custom mount points.
-path /file
-allow *
-
-### Unauthenticated ACLs, for clients without valid certificates; authenticated
-### clients can also access these paths, though they rarely need to.
-
-# allow access to the CA certificate; unauthenticated nodes need this
-# in order to validate the puppet master's certificate
-path /certificate/ca
-auth any
-method find
-allow *
-
-# allow nodes to retrieve the certificate they requested earlier
-path /certificate/
-auth any
-method find
-allow *
-
-# allow nodes to request a new certificate
-path /certificate_request
-auth any
-method find, save
-allow *
-
-# deny everything else; this ACL is not strictly necessary, but
-# illustrates the default policy.
-path /
-auth any

+ 0 - 43
roles/atomic_proxy/files/setup-proxy-containers.sh

@@ -1,43 +0,0 @@
-#!/bin/bash
-
-function fail {
-  msg=$1
-  echo
-  echo $msg
-  echo
-  exit 5
-}
-
-
-NUM_DATA_CTR=$(docker ps -a | grep -c proxy-shared-data-1)
-[ "$NUM_DATA_CTR" -ne 0 ] && fail "ERROR: proxy-shared-data-1 exists"
-
-
-# pre-cache the container images
-echo
-timeout --signal TERM --kill-after 30 600  docker pull busybox:latest  || fail "ERROR: docker pull of busybox failed"
-
-echo
-# WORKAROUND: Setup the shared data container
-/usr/bin/docker run --name "proxy-shared-data-1"  \
-          -v /shared/etc/haproxy                  \
-          -v /shared/etc/httpd                    \
-          -v /shared/etc/openshift                \
-          -v /shared/etc/pki                      \
-          -v /shared/var/run/ctr-ipc              \
-          -v /shared/var/lib/haproxy              \
-          -v /shared/usr/local                    \
-          "busybox:latest" true
-
-# WORKAROUND: These are because we're not using a pod yet
-cp /usr/local/etc/ctr-proxy-1.service /usr/local/etc/ctr-proxy-puppet-1.service /usr/local/etc/ctr-proxy-monitoring-1.service /etc/systemd/system/
-
-systemctl daemon-reload
-
-echo
-echo -n "sleeping 10 seconds for systemd reload to take effect..."
-sleep 10
-echo " Done."
-
-# Start the services
-systemctl start ctr-proxy-puppet-1 ctr-proxy-1 ctr-proxy-monitoring-1

+ 0 - 3
roles/atomic_proxy/handlers/main.yml

@@ -1,3 +0,0 @@
----
-- name: reload systemd
-  command: systemctl daemon-reload

+ 0 - 21
roles/atomic_proxy/meta/main.yml

@@ -1,21 +0,0 @@
----
-galaxy_info:
-  author: Thomas Wiest
-  description: Common base RHEL atomic configurations
-  company: Red Hat
-  # Some suggested licenses:
-  # - BSD (default)
-  # - MIT
-  # - GPLv2
-  # - GPLv3
-  # - Apache
-  # - CC-BY
-  license: Apache
-  min_ansible_version: 1.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-dependencies:
-  # This is the role's PRIVATE counterpart, which is used.
-  - ../../../../../atomic_private/ansible/roles/atomic_proxy

+ 0 - 3
roles/atomic_proxy/tasks/main.yml

@@ -1,3 +0,0 @@
----
-- include: setup_puppet.yml
-- include: setup_containers.yml

+ 0 - 57
roles/atomic_proxy/tasks/setup_containers.yml

@@ -1,57 +0,0 @@
----
-- name: "get output of: docker images"
-  command: docker images
-  changed_when: False # don't report as changed
-  register: docker_images
-
-- name: docker pull busybox ONLY if it's not present
-  command: "docker pull busybox:latest"
-  when: "not docker_images.stdout | search('busybox.*latest')"
-
-- name: docker pull containers ONLY if they're not present (needed otherwise systemd will timeout pulling the containers)
-  command: "docker pull docker-registry.ops.rhcloud.com/{{ item }}:{{ oo_env }}"
-  with_items:
-    - oso-v2-proxy
-    - oso-v2-puppet
-    - oso-v2-monitoring
-  when: "not docker_images.stdout | search('docker-registry.ops.rhcloud.com/{{ item }}.*{{ oo_env }}')"
-
-- name: "get output of: docker ps -a"
-  command: docker ps -a
-  changed_when: False # don't report as changed
-  register: docker_ps
-
-- name: run proxy-shared-data-1
-  command: /usr/bin/docker run --name "proxy-shared-data-1"  \
-                     -v /shared/etc/haproxy                  \
-                     -v /shared/etc/httpd                    \
-                     -v /shared/etc/openshift                \
-                     -v /shared/etc/pki                      \
-                     -v /shared/var/run/ctr-ipc              \
-                     -v /shared/var/lib/haproxy              \
-                     -v /shared/usr/local                    \
-                     "busybox:latest" true
-  when: "not docker_ps.stdout | search('proxy-shared-data-1')"
-
-- name: Deploy systemd files for containers
-  template:
-    src: "systemd/{{ item }}.j2"
-    dest: "/etc/systemd/system/{{ item }}"
-    mode: 0640
-    owner: root
-    group: root
-  with_items:
-    - ctr-proxy-1.service
-    - ctr-proxy-monitoring-1.service
-    - ctr-proxy-puppet-1.service
-  notify: reload systemd
-
-- name: start containers
-  service:
-    name: "{{ item }}"
-    state: started
-    enabled: yes
-  with_items:
-    - ctr-proxy-puppet-1
-    - ctr-proxy-1
-    - ctr-proxy-monitoring-1

+ 0 - 24
roles/atomic_proxy/tasks/setup_puppet.yml

@@ -1,24 +0,0 @@
----
-- name: make puppet conf dir
-  file:
-    dest: "{{ oo_proxy_puppet_volume_dir }}/etc/puppet"
-    mode: 755
-    owner: root
-    group: root
-    state: directory
-
-- name: upload puppet auth config
-  copy:
-    src: puppet/auth.conf
-    dest: "{{ oo_proxy_puppet_volume_dir }}/etc/puppet/auth.conf"
-    mode: 0644
-    owner: root
-    group: root
-
-- name: upload puppet config
-  template:
-    src: puppet/puppet.conf.j2
-    dest: "{{ oo_proxy_puppet_volume_dir }}/etc/puppet/puppet.conf"
-    mode: 0644
-    owner: root
-    group: root

+ 0 - 40
roles/atomic_proxy/templates/puppet/puppet.conf.j2

@@ -1,40 +0,0 @@
-[main]
-    # we need to override the host name of the container
-    certname = ctr-proxy.{{ oo_env }}.rhcloud.com
-
-    # The Puppet log directory.
-    # The default value is '$vardir/log'.
-    logdir = /var/log/puppet
-
-    # Where Puppet PID files are kept.
-    # The default value is '$vardir/run'.
-    rundir = /var/run/puppet
-
-    # Where SSL certificates are kept.
-    # The default value is '$confdir/ssl'.
-    ssldir = $vardir/ssl
-    manifest = $manifestdir/site.pp
-    manifestdir = /var/lib/puppet/environments/pub/$environment/manifests
-    environment = {{ oo_env_long }}
-    modulepath = /var/lib/puppet/environments/pub/$environment/modules:/var/lib/puppet/environments/pri/$environment/modules:/var/lib/puppet/environments/pri/production/modules:$confdir/modules:/usr/share/puppet/modules
-
-[agent]
-    # The file in which puppetd stores a list of the classes
-    # associated with the retrieved configuratiion.  Can be loaded in
-    # the separate ``puppet`` executable using the ``--loadclasses``
-    # option.
-    # The default value is '$confdir/classes.txt'.
-    classfile = $vardir/classes.txt
-
-    # Where puppetd caches the local configuration.  An
-    # extension indicating the cache format is added automatically.
-    # The default value is '$confdir/localconfig'.
-    localconfig = $vardir/localconfig
-    server = puppet.ops.rhcloud.com
-    environment = {{ oo_env_long }}
-    pluginsync = true
-    graph = true
-    configtimeout = 600
-    report = true
-    runinterval = 3600
-    splay = true

+ 0 - 16
roles/atomic_proxy/templates/sync/sync-proxy-configs.sh.j2

@@ -1,16 +0,0 @@
-#!/bin/bash
-
-VOL_DIR=/var/lib/docker/volumes/proxy
-SSH_CMD="ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null"
-
-mkdir -p ${VOL_DIR}/etc/haproxy/
-rsync -e "${SSH_CMD}" -va --progress root@proxy1.{{ oo_env }}.rhcloud.com:/etc/haproxy/ ${VOL_DIR}/etc/haproxy/
-
-mkdir -p ${VOL_DIR}/etc/httpd/
-rsync -e "${SSH_CMD}" -va --progress root@proxy1.{{ oo_env }}.rhcloud.com:/etc/httpd/ ${VOL_DIR}/etc/httpd/
-
-mkdir -p ${VOL_DIR}/etc/pki/tls/
-rsync -e "${SSH_CMD}" -va --progress root@proxy1.{{ oo_env }}.rhcloud.com:/etc/pki/tls/ ${VOL_DIR}/etc/pki/tls/
-
-# We need to disable the haproxy chroot
-sed -i -re 's/^(\s+)chroot/\1#chroot/' /var/lib/docker/volumes/proxy/etc/haproxy/haproxy.cfg

+ 0 - 32
roles/atomic_proxy/templates/systemd/ctr-proxy-1.service.j2

@@ -1,32 +0,0 @@
-[Unit]
-Description=Container proxy-1
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-Slice=container-small.slice
-
-ExecStartPre=-/usr/bin/docker rm "proxy-1"
-
-ExecStart=/usr/bin/docker run --rm --name "proxy-1"                           \
-          --volumes-from proxy-shared-data-1                                  \
-          -a stdout -a stderr -p 80:80 -p 443:443 -p 4999:4999                \
-          "docker-registry.ops.rhcloud.com/oso-v2-proxy:{{ oo_env }}"
-
-ExecReload=-/usr/bin/docker stop "proxy-1"
-ExecReload=-/usr/bin/docker rm "proxy-1"
-ExecStop=-/usr/bin/docker stop "proxy-1"
-
-[Install]
-WantedBy=container.target
-
-# Container information
-X-ContainerId=proxy-1
-X-ContainerImage=docker-registry.ops.rhcloud.com/oso-v2-proxy:{{ oo_env }}
-X-ContainerUserId=
-X-ContainerRequestId=LwiWtYWaAvSavH6Ze53QJg
-X-ContainerType=simple
-X-PortMapping=80:80
-X-PortMapping=443:443
-X-PortMapping=4999:4999

+ 0 - 36
roles/atomic_proxy/templates/systemd/ctr-proxy-monitoring-1.service.j2

@@ -1,36 +0,0 @@
-[Unit]
-Description=Container proxy-monitoring-1
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-Slice=container-small.slice
-
-ExecStartPre=-/usr/bin/docker rm "proxy-monitoring-1"
-
-ExecStart=/usr/bin/docker run --rm --name "proxy-monitoring-1"                \
-          --volumes-from proxy-shared-data-1                                  \
-          -a stdout -a stderr                                                 \
-          -e "OO_ENV={{ oo_env }}"                                            \
-          -e "OO_CTR_TYPE=proxy"                                              \
-          -e "OO_ZABBIX_HOSTGROUPS={{ oo_zabbix_hostgroups | join(',') }}"    \
-          -e "OO_ZABBIX_TEMPLATES=Template OpenShift Proxy Ctr"               \
-          "docker-registry.ops.rhcloud.com/oso-v2-monitoring:{{ oo_env }}"
-
-ExecReload=-/usr/bin/docker stop "proxy-monitoring-1"
-ExecReload=-/usr/bin/docker rm "proxy-monitoring-1"
-ExecStop=-/usr/bin/docker stop "proxy-monitoring-1"
-
-[Install]
-WantedBy=container.target
-
-# Container information
-X-ContainerId=proxy-monitoring-1
-X-ContainerImage=docker-registry.ops.rhcloud.com/oso-v2-monitoring:{{ oo_env }}
-X-ContainerUserId=
-X-ContainerRequestId=LwiWtYWaAvSavH6Ze53QJg
-X-ContainerType=simple
-X-PortMapping=80:80
-X-PortMapping=443:443
-X-PortMapping=4999:4999

+ 0 - 33
roles/atomic_proxy/templates/systemd/ctr-proxy-puppet-1.service.j2

@@ -1,33 +0,0 @@
-[Unit]
-Description=Container proxy-puppet-1
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-Slice=container-small.slice
-
-
-ExecStartPre=-/usr/bin/docker rm "proxy-puppet-1"
-
-ExecStart=/usr/bin/docker run --rm --name "proxy-puppet-1"                                    \
-          --volumes-from proxy-shared-data-1                                                  \
-          -v /var/lib/docker/volumes/proxy_puppet/var/lib/puppet/ssl:/var/lib/puppet/ssl      \
-          -v /var/lib/docker/volumes/proxy_puppet/etc/puppet:/etc/puppet                      \
-          -a stdout -a stderr                                                                 \
-          "docker-registry.ops.rhcloud.com/oso-v2-puppet:{{ oo_env }}"
-
-# Set links (requires container have a name)
-ExecReload=-/usr/bin/docker stop "proxy-puppet-1"
-ExecReload=-/usr/bin/docker rm "proxy-puppet-1"
-ExecStop=-/usr/bin/docker stop "proxy-puppet-1"
-
-[Install]
-WantedBy=container.target
-
-# Container information
-X-ContainerId=proxy-puppet-1
-X-ContainerImage=docker-registry.ops.rhcloud.com/oso-v2-puppet:{{ oo_env }}
-X-ContainerUserId=
-X-ContainerRequestId=Ky0lhw0onwoSDJR4GK6t3g
-X-ContainerType=simple

+ 0 - 2
roles/atomic_proxy/vars/main.yml

@@ -1,2 +0,0 @@
----
-oo_proxy_puppet_volume_dir: /var/lib/docker/volumes/proxy_puppet