Browse Source

WIP Infra - Add playbooks for using openshift binary

* Fixed terminate so that it properly removes the attached OS disk.
Jhon Honce 10 years ago
parent
commit
346da608fb

+ 15 - 8
cluster.sh

@@ -1,9 +1,16 @@
 #!/bin/bash -eu
 
-MINIONS=3
+MINIONS=1
 MASTERS=1
 PROVIDER=gce
 
+# FIXME: Add option
+#MASTER_PLAYBOOK=os3-master
+MASTER_PLAYBOOK=openshift-master
+#MINION_PLAYBOOK=os3-minion
+MINION_PLAYBOOK=openshift-minion
+
+
 # @formatter:off
 function usage {
     cat 1>&2 <<-EOT
@@ -17,29 +24,29 @@ EOT
 
 function create_cluser {
     for (( i = 0; i < $MINIONS; i ++ )); do
-        ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=os3-minion
+        ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$MINION_PLAYBOOK
     done
 
     for (( i = 0; i < $MASTERS; i ++ )); do
-        ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=os3-master
+        ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$MASTER_PLAYBOOK
     done
     update_cluster
-    echo -e "\nCreated ${MASTERS} masters and ${MINIONS} minions using ${PROVIDER} provider\n"
+    echo -e "\nCreated ${MASTERS} ${MASTER_PLAYBOOK} masters and ${MINIONS} ${MINION_PLAYBOOK} minions using ${PROVIDER} provider\n"
 }
 
 function update_cluster {
     for (( i = 0; i < $MINIONS; i ++ )); do
-        ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=os3-minion
+        ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$MINION_PLAYBOOK
     done
 
     for (( i = 0; i < $MASTERS; i ++ )); do
-        ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=os3-master
+        ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$MASTER_PLAYBOOK
     done
 }
 
 function terminate_cluster {
-    #./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=os3-master
-    ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=os3-minion
+    ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$MASTER_PLAYBOOK
+    ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$MINION_PLAYBOOK
 }
 
 [ -f ./cloud.rb ] || (echo 1>&2 'Cannot find ./cloud.rb' && exit 1)

+ 39 - 0
playbooks/gce/origin-master/config.yml

@@ -0,0 +1,39 @@
+- name: "populate oo_hosts_to_config host group if needed"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: Evaluate oo_host_group_exp if it's set
+    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    with_items: "{{ oo_host_group_exp | default('') }}"
+    when: oo_host_group_exp is defined
+
+- name: "Gather facts for minions in {{ oo_env }}"
+  hosts: "tag_env-host-type-{{ oo_env }}-openshift-minion"
+  connection: ssh
+  user: root
+
+- name: "Set Origin specific facts on localhost (for later use)"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - name: Setting oo_minion_ips fact on localhost
+      set_fact:
+        oo_minion_ips: "{{ hostvars
+            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-minion'])
+            | oo_collect(attribute='ansible_eth0.ipv4.address') }}"
+      when: groups['tag_env-host-type-' + oo_env + '-openshift-minion'] is defined
+
+- name: "Configure instances"
+  hosts: oo_hosts_to_config
+  connection: ssh
+  user: root
+  vars_files:
+    - vars.yml
+  roles:
+    - ../../../roles/base_os
+    - ../../../roles/repos
+    - {
+        role: ../../../roles/openshift_master,
+        oo_minion_ips: "{{ hostvars['localhost'].oo_minion_ips | default(['']) }}"
+      }
+    - ../../../roles/pods

+ 38 - 0
playbooks/gce/origin-master/launch.yml

@@ -0,0 +1,38 @@
+---
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+
+  vars:
+    inst_names: "{{ oo_new_inst_names }}"
+    machine_type: n1-standard-1
+    image: libra-rhel7
+
+  vars_files:
+      - vars.yml
+
+  tasks:
+    - name: Launch instances
+      gce:
+        instance_names: "{{ inst_names }}"
+        machine_type: "{{ machine_type }}"
+        image: "{{ image }}"
+        service_account_email: "{{ gce_service_account_email }}"
+        pem_file: "{{ gce_pem_file }}"
+        project_id: "{{ gce_project_id }}"
+        tags: "{{ oo_new_inst_tags }}"
+      register: gce
+
+    - name: Add new instances public IPs to oo_hosts_to_config
+      add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config"
+      with_items: gce.instance_data
+
+    - name: Wait for ssh
+      wait_for: "port=22 host={{ item.public_ip }}"
+      with_items: gce.instance_data
+
+    - debug: var=gce
+
+# Apply the configs, separate so that just the configs can be run by themselves
+- include: config.yml

+ 39 - 0
playbooks/gce/origin-master/terminate.yml

@@ -0,0 +1,39 @@
+- name: "populate oo_hosts_to_terminate host group if needed"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - debug: var=oo_host_group_exp
+
+    - name: Evaluate oo_host_group_exp if it's set
+      add_host: "name={{ item }} groups=oo_hosts_to_terminate"
+      with_items: "{{ oo_host_group_exp | default('') }}"
+      when: oo_host_group_exp is defined
+
+    - debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
+
+
+- name: Terminate instances
+  hosts: localhost
+  connection: local
+  tasks:
+    - name: Terminate master instances
+      gce:
+        service_account_email: "{{ gce_service_account_email }}"
+        pem_file: "{{ gce_pem_file }}"
+        project_id: "{{ gce_project_id }}"
+        state: 'absent'
+        instance_names: "{{ groups['oo_hosts_to_terminate'] }}"
+        disks: "{{ groups['oo_hosts_to_terminate'] }}"
+      register: gce
+    
+    - debug: var=gce
+
+#    - name: Remove disks of instances
+#      gce_pd:
+#        service_account_email: "{{ gce_service_account_email }}"
+#        pem_file: "{{ gce_pem_file }}"
+#        project_id: "{{ gce_project_id }}"
+#        name: "{{ item }}"
+#        state: deleted
+#      with_items: gce.instance_names
+

+ 0 - 0
playbooks/gce/origin-master/vars.yml


+ 39 - 0
playbooks/gce/origin-minion/config.yml

@@ -0,0 +1,39 @@
+- name: "populate oo_hosts_to_config host group if needed"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: Evaluate oo_host_group_exp
+    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    with_items: "{{ oo_host_group_exp | default('') }}"
+    when: oo_host_group_exp is defined
+
+- name: "Gather facts for masters in {{ oo_env }}"
+  hosts: "tag_env-host-type-{{ oo_env }}-openshift-master"
+  connection: ssh
+  user: root
+
+- name: "Set OO specific facts on localhost (for later use)"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - name: Setting oo_master_ips fact on localhost
+      set_fact:
+        oo_master_ips: "{{ hostvars
+            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
+            | oo_collect(attribute='ansible_eth0.ipv4.address') }}"
+      when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
+
+- name: "Configure instances"
+  hosts: oo_hosts_to_config
+  connection: ssh
+  user: root
+  vars_files:
+    - vars.yml
+  roles:
+    - ../../../roles/base_os
+    - ../../../roles/repos
+    - ../../../roles/docker
+    - {
+        role: ../../../roles/openshift_minion,
+        oo_master_ips: "{{ hostvars['localhost'].oo_master_ips | default(['']) }}"
+      }

+ 38 - 0
playbooks/gce/origin-minion/launch.yml

@@ -0,0 +1,38 @@
+---
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+
+  vars:
+    inst_names: "{{ oo_new_inst_names }}"
+    machine_type: n1-standard-1
+    image: libra-rhel7
+
+  vars_files:
+      - vars.yml
+
+  tasks:
+    - name: Launch instances
+      gce:
+        instance_names: "{{ inst_names }}"
+        machine_type: "{{ machine_type }}"
+        image: "{{ image }}"
+        service_account_email: "{{ gce_service_account_email }}"
+        pem_file: "{{ gce_pem_file }}"
+        project_id: "{{ gce_project_id }}"
+        tags: "{{ oo_new_inst_tags }}"
+      register: gce
+
+    - name: Add new instances public IPs to oo_hosts_to_config
+      add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config"
+      with_items: gce.instance_data
+
+    - name: Wait for ssh
+      wait_for: "port=22 host={{ item.public_ip }}"
+      with_items: gce.instance_data
+
+    - debug: var=gce
+
+# Apply the configs, separate so that just the configs can be run by themselves
+- include: config.yml

+ 39 - 0
playbooks/gce/origin-minion/terminate.yml

@@ -0,0 +1,39 @@
+- name: "populate oo_hosts_to_terminate host group if needed"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - debug: var=oo_host_group_exp
+
+    - name: Evaluate oo_host_group_exp if it's set
+      add_host: "name={{ item }} groups=oo_hosts_to_terminate"
+      with_items: "{{ oo_host_group_exp | default('') }}"
+      when: oo_host_group_exp is defined
+
+    - debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
+
+
+- name: Terminate instances
+  hosts: localhost
+  connection: local
+  tasks:
+    - name: Terminate minion instances
+      gce:
+        service_account_email: "{{ gce_service_account_email }}"
+        pem_file: "{{ gce_pem_file }}"
+        project_id: "{{ gce_project_id }}"
+        state: 'absent'
+        instance_names: "{{ groups['oo_hosts_to_terminate'] }}"
+        disks: "{{ groups['oo_hosts_to_terminate'] }}"
+      register: gce
+
+    - debug: var=gce
+
+#    - name: Remove disks of instances
+#      gce_pd:
+#        service_account_email: "{{ gce_service_account_email }}"
+#        pem_file: "{{ gce_pem_file }}"
+#        project_id: "{{ gce_project_id }}"
+#        name: "{{ item }}"
+#        state: deleted
+#      with_items: gce.instance_names
+

+ 0 - 0
playbooks/gce/origin-minion/vars.yml


+ 10 - 11
playbooks/gce/os3-master/terminate.yml

@@ -11,7 +11,6 @@
 
     - debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
 
-
 - name: Terminate instances
   hosts: localhost
   connection: local
@@ -25,15 +24,15 @@
         instance_names: "{{ groups['oo_hosts_to_terminate'] }}"
         disks: "{{ groups['oo_hosts_to_terminate'] }}"
       register: gce
-    
-    - debug: var=gce
 
-#    - name: Remove disks of instances
-#      gce_pd:
-#        service_account_email: "{{ gce_service_account_email }}"
-#        pem_file: "{{ gce_pem_file }}"
-#        project_id: "{{ gce_project_id }}"
-#        name: "{{ item }}"
-#        state: deleted
-#      with_items: gce.instance_names
+    - debug: var=gce
 
+    - name: Remove disks of instances
+      gce_pd:
+        service_account_email: "{{ gce_service_account_email }}"
+        pem_file: "{{ gce_pem_file }}"
+        project_id: "{{ gce_project_id }}"
+        name: "{{ item }}"
+        zone: "{{ gce.zone }}"
+        state: absent
+      with_items: gce.instance_names

+ 38 - 0
roles/origin_master/README.md

@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+    - hosts: servers
+      roles:
+         - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).

+ 2 - 0
roles/origin_master/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+# defaults file for openshift_master

+ 4 - 0
roles/origin_master/handlers/main.yml

@@ -0,0 +1,4 @@
+---
+# handlers file for openshift_master
+- name: restart openshift-master
+  service: name=openshift state=restarted

+ 124 - 0
roles/origin_master/meta/main.yml

@@ -0,0 +1,124 @@
+---
+galaxy_info:
+  author: your name
+  description: 
+  company: your company (optional)
+  # Some suggested licenses:
+  # - BSD (default)
+  # - MIT
+  # - GPLv2
+  # - GPLv3
+  # - Apache
+  # - CC-BY
+  license: license (GPLv2, CC-BY, etc)
+  min_ansible_version: 1.2
+  #
+  # Below are all platforms currently available. Just uncomment
+  # the ones that apply to your role. If you don't see your 
+  # platform on this list, let us know and we'll get it added!
+  #
+  #platforms:
+  #- name: EL
+  #  versions:
+  #  - all
+  #  - 5
+  #  - 6
+  #  - 7
+  #- name: GenericUNIX
+  #  versions:
+  #  - all
+  #  - any
+  #- name: Fedora
+  #  versions:
+  #  - all
+  #  - 16
+  #  - 17
+  #  - 18
+  #  - 19
+  #  - 20
+  #- name: opensuse
+  #  versions:
+  #  - all
+  #  - 12.1
+  #  - 12.2
+  #  - 12.3
+  #  - 13.1
+  #  - 13.2
+  #- name: Amazon
+  #  versions:
+  #  - all
+  #  - 2013.03
+  #  - 2013.09
+  #- name: GenericBSD
+  #  versions:
+  #  - all
+  #  - any
+  #- name: FreeBSD
+  #  versions:
+  #  - all
+  #  - 8.0
+  #  - 8.1
+  #  - 8.2
+  #  - 8.3
+  #  - 8.4
+  #  - 9.0
+  #  - 9.1
+  #  - 9.1
+  #  - 9.2
+  #- name: Ubuntu
+  #  versions:
+  #  - all
+  #  - lucid
+  #  - maverick
+  #  - natty
+  #  - oneiric
+  #  - precise
+  #  - quantal
+  #  - raring
+  #  - saucy
+  #  - trusty
+  #- name: SLES
+  #  versions:
+  #  - all
+  #  - 10SP3
+  #  - 10SP4
+  #  - 11
+  #  - 11SP1
+  #  - 11SP2
+  #  - 11SP3
+  #- name: GenericLinux
+  #  versions:
+  #  - all
+  #  - any
+  #- name: Debian
+  #  versions:
+  #  - all
+  #  - etch
+  #  - lenny
+  #  - squeeze
+  #  - wheezy
+  #
+  # Below are all categories currently available. Just as with
+  # the platforms above, uncomment those that apply to your role.
+  #
+  #categories:
+  #- cloud
+  #- cloud:ec2
+  #- cloud:gce
+  #- cloud:rax
+  #- clustering
+  #- database
+  #- database:nosql
+  #- database:sql
+  #- development
+  #- monitoring
+  #- networking
+  #- packaging
+  #- system
+  #- web
+dependencies: []
+  # List your role dependencies here, one per line. Only
+  # dependencies available via galaxy should be listed here.
+  # Be sure to remove the '[]' above if you add dependencies
+  # to this list.
+  

+ 25 - 0
roles/origin_master/tasks/main.yml

@@ -0,0 +1,25 @@
+---
+# tasks file for openshift_master
+- name: Install Origin
+  yum: pkg=openshift state=installed
+
+- name: Configure Origin settings
+  lineinfile: >
+    dest=/etc/sysconfig/openshift
+    regexp={{ item.regex }}
+    line="{{ item.line }}"
+  with_items:
+    - { regex: '^OPENSHIFT_MASTER',  line: 'OPENSHIFT_MASTER=\"{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address }}\"' }
+    - { regex: '^OPENSHIFT_BIND_ADDR', line: 'OPENSHIFT_BIND_ADDR=\"{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address }}\"'}
+  notify:
+    - restart openshift-master
+
+- name: Enable OpenShift
+  service: name=openshift enabled=yes state=started
+
+- name: Open firewalld port for Origin
+  firewalld: port=8080/tcp permanent=false state=enabled
+
+- name: Save firewalld port for Origin
+  firewalld: port=8080/tcp permanent=true state=enabled
+

+ 2 - 0
roles/origin_master/vars/main.yml

@@ -0,0 +1,2 @@
+---
+# vars file for openshift_master

+ 38 - 0
roles/origin_minion/README.md

@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+    - hosts: servers
+      roles:
+         - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).

+ 2 - 0
roles/origin_minion/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+# defaults file for openshift_minion

+ 4 - 0
roles/origin_minion/handlers/main.yml

@@ -0,0 +1,4 @@
+---
+# handlers file for openshift_minion
+- name: restart openshift-minion
+  service: name=openshift state=restarted

+ 124 - 0
roles/origin_minion/meta/main.yml

@@ -0,0 +1,124 @@
+---
+galaxy_info:
+  author: your name
+  description: 
+  company: your company (optional)
+  # Some suggested licenses:
+  # - BSD (default)
+  # - MIT
+  # - GPLv2
+  # - GPLv3
+  # - Apache
+  # - CC-BY
+  license: license (GPLv2, CC-BY, etc)
+  min_ansible_version: 1.2
+  #
+  # Below are all platforms currently available. Just uncomment
+  # the ones that apply to your role. If you don't see your 
+  # platform on this list, let us know and we'll get it added!
+  #
+  #platforms:
+  #- name: EL
+  #  versions:
+  #  - all
+  #  - 5
+  #  - 6
+  #  - 7
+  #- name: GenericUNIX
+  #  versions:
+  #  - all
+  #  - any
+  #- name: Fedora
+  #  versions:
+  #  - all
+  #  - 16
+  #  - 17
+  #  - 18
+  #  - 19
+  #  - 20
+  #- name: opensuse
+  #  versions:
+  #  - all
+  #  - 12.1
+  #  - 12.2
+  #  - 12.3
+  #  - 13.1
+  #  - 13.2
+  #- name: Amazon
+  #  versions:
+  #  - all
+  #  - 2013.03
+  #  - 2013.09
+  #- name: GenericBSD
+  #  versions:
+  #  - all
+  #  - any
+  #- name: FreeBSD
+  #  versions:
+  #  - all
+  #  - 8.0
+  #  - 8.1
+  #  - 8.2
+  #  - 8.3
+  #  - 8.4
+  #  - 9.0
+  #  - 9.1
+  #  - 9.1
+  #  - 9.2
+  #- name: Ubuntu
+  #  versions:
+  #  - all
+  #  - lucid
+  #  - maverick
+  #  - natty
+  #  - oneiric
+  #  - precise
+  #  - quantal
+  #  - raring
+  #  - saucy
+  #  - trusty
+  #- name: SLES
+  #  versions:
+  #  - all
+  #  - 10SP3
+  #  - 10SP4
+  #  - 11
+  #  - 11SP1
+  #  - 11SP2
+  #  - 11SP3
+  #- name: GenericLinux
+  #  versions:
+  #  - all
+  #  - any
+  #- name: Debian
+  #  versions:
+  #  - all
+  #  - etch
+  #  - lenny
+  #  - squeeze
+  #  - wheezy
+  #
+  # Below are all categories currently available. Just as with
+  # the platforms above, uncomment those that apply to your role.
+  #
+  #categories:
+  #- cloud
+  #- cloud:ec2
+  #- cloud:gce
+  #- cloud:rax
+  #- clustering
+  #- database
+  #- database:nosql
+  #- database:sql
+  #- development
+  #- monitoring
+  #- networking
+  #- packaging
+  #- system
+  #- web
+dependencies: []
+  # List your role dependencies here, one per line. Only
+  # dependencies available via galaxy should be listed here.
+  # Be sure to remove the '[]' above if you add dependencies
+  # to this list.
+  

+ 24 - 0
roles/origin_minion/tasks/main.yml

@@ -0,0 +1,24 @@
+---
+# tasks file for openshift_minion
+- name: Install OpenShift
+  yum: pkg=openshift state=installed
+
+- name: Configure OpenShift settings
+  lineinfile: >
+    dest=/etc/sysconfig/openshift
+    regexp={{ item.regex }}
+    line="{{ item.line }}"
+  with_items:
+    - { regex: '^OPENSHIFT_MASTER',  line: 'OPENSHIFT_MASTER=\"{{ oo_master_ips[0] }}\"' }
+    - { regex: '^OPENSHIFT_BIND_ADDR', line: 'OPENSHIFT_BIND_ADDR=\"{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address }}\"'}
+  notify:
+    - restart openshift-minion
+
+- name: Enable OpenShift
+  service: name=openshift enabled=yes state=started
+
+- name: Open firewalld port for OpenShift
+  firewalld: port=10250/tcp permanent=false state=enabled
+
+- name: Save firewalld port for OpenShift
+  firewalld: port=10250/tcp permanent=true state=enabled

+ 2 - 0
roles/origin_minion/vars/main.yml

@@ -0,0 +1,2 @@
+---
+# vars file for openshift_minion

+ 1 - 1
roles/repos/files/docker.repo

@@ -2,4 +2,4 @@
 name= Temporary Docker rpm
 baseurl=http://10.240.169.148/mirror/docker
 gpgcheck=0
-enabled=1
+enabled=0

+ 6 - 0
roles/repos/files/epel7-origin.repo

@@ -0,0 +1,6 @@
+[maxamillion-origin-next]
+name=Copr repo for origin-next owned by maxamillion
+baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/
+skip_if_unavailable=False
+gpgcheck=0
+enabled=1

+ 3 - 0
roles/repos/tasks/main.yaml

@@ -25,3 +25,6 @@
 
 - name: Ensure the kubernetes repo is available
   copy: src=epel7-kubernetes.repo dest=/etc/yum.repos.d/epel7-kubernetes.repo
+
+- name: Ensure the origin repo is available
+  copy: src=epel7-origin.repo dest=/etc/yum.repos.d/epel7-origin.repo