Remove unused roles in 4.x

Jeremiah Stuever 6 years ago
commit d165db666a
100 changed files with 0 additions and 4760 deletions
  1. 0 18
      roles/ansible_service_broker/OWNERS
  2. 0 39
      roles/ansible_service_broker/defaults/main.yml
  3. 0 13
      roles/ansible_service_broker/files/bundlebindings.automationbroker.io.yaml
  4. 0 14
      roles/ansible_service_broker/files/bundleinstances.automationbroker.io.yaml
  5. 0 14
      roles/ansible_service_broker/files/bundles.automationbroker.io.yaml
  6. 0 17
      roles/ansible_service_broker/meta/main.yml
  7. 0 21
      roles/ansible_service_broker/tasks/facts.yml
  8. 0 44
      roles/ansible_service_broker/tasks/generate_certs.yml
  9. 0 246
      roles/ansible_service_broker/tasks/install.yml
  10. 0 8
      roles/ansible_service_broker/tasks/main.yml
  11. 0 203
      roles/ansible_service_broker/tasks/migrate.yml
  12. 0 131
      roles/ansible_service_broker/tasks/remove.yml
  13. 0 15
      roles/ansible_service_broker/tasks/upgrade.yml
  14. 0 13
      roles/ansible_service_broker/tasks/validate_facts.yml
  15. 0 82
      roles/ansible_service_broker/templates/asb_dc.yaml.j2
  16. 0 15
      roles/ansible_service_broker/templates/broker-user-auth.clusterrole.yaml.j2
  17. 0 56
      roles/ansible_service_broker/templates/configmap.yaml.j2
  18. 0 11
      roles/ansible_service_broker/vars/default_images.yml
  19. 0 12
      roles/ansible_service_broker/vars/openshift-enterprise.yml
  20. 0 48
      roles/calico/README.md
  21. 0 10
      roles/calico/defaults/main.yaml
  22. 0 17
      roles/calico/meta/main.yml
  23. 0 77
      roles/calico/tasks/certs.yml
  24. 0 129
      roles/calico/tasks/main.yml
  25. 0 88
      roles/calico/templates/calico-etcd.yml.j2
  26. 0 8
      roles/calico/templates/calico-pull-secret.yml.j2
  27. 0 398
      roles/calico/templates/calico.yml.j2
  28. 0 692
      roles/calico/templates/calicov3.yml.j2
  29. 0 3
      roles/calico_node/README.md
  30. 0 2
      roles/calico_node/files/calico.conf
  31. 0 16
      roles/calico_node/meta/main.yml
  32. 0 13
      roles/calico_node/tasks/main.yml
  33. 0 20
      roles/cockpit-ui/defaults/main.yml
  34. 0 110
      roles/cockpit-ui/files/registry-console.yaml
  35. 0 17
      roles/cockpit-ui/meta/main.yml
  36. 0 70
      roles/cockpit-ui/tasks/install.yml
  37. 0 2
      roles/cockpit-ui/tasks/main.yml
  38. 0 8
      roles/cockpit/defaults/main.yml
  39. 0 15
      roles/cockpit/meta/main.yml
  40. 0 40
      roles/cockpit/tasks/firewall.yml
  41. 0 23
      roles/cockpit/tasks/main.yml
  42. 0 39
      roles/contiv/README.md
  43. BIN
      roles/contiv/contiv-openshift-vlan-network.png
  44. 0 176
      roles/contiv/defaults/main.yml
  45. 0 5
      roles/contiv/files/contiv_cni.conf
  46. 0 16
      roles/contiv/handlers/main.yml
  47. 0 17
      roles/contiv/meta/main.yml
  48. 0 31
      roles/contiv/tasks/aci.yml
  49. 0 120
      roles/contiv/tasks/api_proxy.yml
  50. 0 71
      roles/contiv/tasks/default_network.yml
  51. 0 48
      roles/contiv/tasks/download_bins.yml
  52. 0 114
      roles/contiv/tasks/etcd.yml
  53. 0 15
      roles/contiv/tasks/main.yml
  54. 0 64
      roles/contiv/tasks/netmaster.yml
  55. 0 17
      roles/contiv/tasks/netmaster_firewalld.yml
  56. 0 32
      roles/contiv/tasks/netmaster_iptables.yml
  57. 0 129
      roles/contiv/tasks/netplugin.yml
  58. 0 17
      roles/contiv/tasks/netplugin_firewalld.yml
  59. 0 52
      roles/contiv/tasks/netplugin_iptables.yml
  60. 0 49
      roles/contiv/tasks/old_version_cleanup.yml
  61. 0 11
      roles/contiv/tasks/old_version_cleanup_firewalld.yml
  62. 0 44
      roles/contiv/tasks/old_version_cleanup_iptables.yml
  63. 0 28
      roles/contiv/tasks/ovs.yml
  64. 0 12
      roles/contiv/tasks/packageManagerInstall.yml
  65. 0 39
      roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
  66. 0 13
      roles/contiv/templates/aci-gw.service
  67. 0 35
      roles/contiv/templates/aci_gw.j2
  68. 0 57
      roles/contiv/templates/api-proxy-daemonset.yml.j2
  69. 0 12
      roles/contiv/templates/api-proxy-secrets.yml.j2
  70. 0 7
      roles/contiv/templates/contiv.cfg.j2
  71. 0 7
      roles/contiv/templates/contiv.cfg.master.j2
  72. 0 83
      roles/contiv/templates/etcd-daemonset.yml.j2
  73. 0 55
      roles/contiv/templates/etcd-proxy-daemonset.yml.j2
  74. 0 42
      roles/contiv/templates/etcd-scc.yml.j2
  75. 0 1
      roles/contiv/templates/netmaster.j2
  76. 0 13
      roles/contiv/templates/netmaster.service
  77. 0 6
      roles/contiv/templates/netplugin.j2
  78. 0 13
      roles/contiv/templates/netplugin.service
  79. 0 10
      roles/contiv_facts/defaults/main.yaml
  80. 0 3
      roles/contiv_facts/handlers/main.yml
  81. 0 26
      roles/contiv_facts/tasks/fedora-install.yml
  82. 0 63
      roles/contiv_facts/tasks/main.yml
  83. 0 30
      roles/contiv_facts/tasks/rpm.yml
  84. 0 40
      roles/etcd/README.md
  85. 0 114
      roles/etcd/defaults/main.yaml
  86. 0 43
      roles/etcd/files/etcd.yaml
  87. 0 3
      roles/etcd/handlers/main.yml
  88. 0 21
      roles/etcd/meta/main.yml
  89. 0 11
      roles/etcd/tasks/add_new_member.yml
  90. 0 2
      roles/etcd/tasks/backup.yml
  91. 0 5
      roles/etcd/tasks/backup/archive.yml
  92. 0 73
      roles/etcd/tasks/backup/backup.yml
  93. 0 5
      roles/etcd/tasks/backup/copy.yml
  94. 0 8
      roles/etcd/tasks/backup/fetch.yml
  95. 0 14
      roles/etcd/tasks/backup/unarchive.yml
  96. 0 15
      roles/etcd/tasks/backup/vars.yml
  97. 0 2
      roles/etcd/tasks/backup_ca_certificates.yml
  98. 0 2
      roles/etcd/tasks/backup_generated_certificates.yml
  99. 0 2
      roles/etcd/tasks/backup_server_certificates.yml
  100. 0 0
      roles/etcd/tasks/ca.yml

+ 0 - 18
roles/ansible_service_broker/OWNERS

@@ -1,18 +0,0 @@
-# approval == this is a good idea /approve
-approvers:
-  - fabianvf
-  - dymurray
-  - shawn-hurley
-  - michaelgugino
-  - mtnbikenc
-  - sdodson
-  - vrutkovs
-# review == this code is good /lgtm
-reviewers:
-  - fabianvf
-  - dymurray
-  - shawn-hurley
-  - michaelgugino
-  - mtnbikenc
-  - sdodson
-  - vrutkovs

+ 0 - 39
roles/ansible_service_broker/defaults/main.yml

@@ -1,39 +0,0 @@
----
-
-ansible_service_broker_node_selector: "{{ openshift_hosted_infra_selector | default('node-role.kubernetes.io/infra=true') | map_from_pairs }}"
-ansible_service_broker_enable_dashboard_redirector: false
-ansible_service_broker_dashboard_redirector_route: "dr-1337-openshift-ansible-service-broker.{{ openshift_master_default_subdomain }}"
-
-ansible_service_broker_remove: false
-ansible_service_broker_install: true
-ansible_service_broker_log_level: info
-ansible_service_broker_output_request: false
-ansible_service_broker_recovery: true
-ansible_service_broker_bootstrap_on_startup: true
-ansible_service_broker_dev_broker: false
-ansible_service_broker_refresh_interval: 600s
-# Recommended you do not enable this for now
-ansible_service_broker_launch_apb_on_bind: false
-ansible_service_broker_keep_namespace_on_error: false
-ansible_service_broker_keep_namespace: false
-
-ansible_service_broker_image_pull_policy: Always
-ansible_service_broker_sandbox_role: edit
-ansible_service_broker_auto_escalate: false
-ansible_service_broker_local_registry_whitelist: []
-ansible_service_broker_local_registry_namespaces: ["openshift"]
-
-l_asb_default_images_dict:
-  origin: 'docker.io/ansibleplaybookbundle/origin-ansible-service-broker:latest'
-  openshift-enterprise: 'registry.redhat.io/openshift3/ose-ansible-service-broker:${version}'
-
-l_asb_default_images_default: "{{ l_asb_default_images_dict[openshift_deployment_type] }}"
-l_asb_image_url: "{{ oreg_url | default(l_asb_default_images_default) | regex_replace('${version}' | regex_escape, openshift_image_tag) }}"
-
-ansible_service_broker_image: "{{ l_asb_image_url | regex_replace('${component}' | regex_escape, 'ansible-service-broker') }}"
-# Secrets to be mounted for APBs. Format:
-# - title: Database credentials
-#   secret: db_creds
-#   apb_name: dh-rhscl-postgresql-apb
-# https://github.com/openshift/ansible-service-broker/blob/master/docs/config.md#secrets-configuration
-ansible_service_broker_secrets: []

+ 0 - 13
roles/ansible_service_broker/files/bundlebindings.automationbroker.io.yaml

@@ -1,13 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: bundlebindings.automationbroker.io
-spec:
-  group: automationbroker.io
-  version: v1alpha1
-  scope: Namespaced
-  names:
-    plural: bundlebindings
-    singular: bundlebinding
-    kind: BundleBinding

+ 0 - 14
roles/ansible_service_broker/files/bundleinstances.automationbroker.io.yaml

@@ -1,14 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: bundleinstances.automationbroker.io
-spec:
-  group: automationbroker.io
-  version: v1alpha1
-  scope: Namespaced
-  names:
-    plural: bundleinstances
-    singular: bundleinstance
-    kind: BundleInstance
-

+ 0 - 14
roles/ansible_service_broker/files/bundles.automationbroker.io.yaml

@@ -1,14 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: bundles.automationbroker.io
-spec:
-  group: automationbroker.io
-  version: v1alpha1
-  scope: Namespaced
-  names:
-    plural: bundles
-    singular: bundle
-    kind: Bundle
-

+ 0 - 17
roles/ansible_service_broker/meta/main.yml

@@ -1,17 +0,0 @@
----
-galaxy_info:
-  author: Fabian von Feilitzsch
-  description: OpenShift Ansible Service Broker
-  company: Red Hat, Inc.
-  license: Apache License, Version 2.0
-  min_ansible_version: 2.1
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-dependencies:
-- role: openshift_facts
-- role: lib_utils
-- role: lib_openshift

+ 0 - 21
roles/ansible_service_broker/tasks/facts.yml

@@ -1,21 +0,0 @@
----
-# Fact setting and validations
-- name: Set default image variables based on deployment type
-  include_vars: "{{ item }}"
-  with_first_found:
-    - "{{ openshift_deployment_type }}.yml"
-    - "default_images.yml"
-
-- name: set ansible_service_broker facts
-  set_fact:
-    ansible_service_broker_registry_type: "{{ ansible_service_broker_registry_type | default(__ansible_service_broker_registry_type) }}"
-    ansible_service_broker_registry_name: "{{ ansible_service_broker_registry_name | default(__ansible_service_broker_registry_name) }}"
-    ansible_service_broker_registry_url: "{{ ansible_service_broker_registry_url | default(__ansible_service_broker_registry_url) }}"
-    ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | default(__ansible_service_broker_registry_user) }}"
-    ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}"
-    ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}"
-    ansible_service_broker_registry_tag: "{{ ansible_service_broker_registry_tag | default(__ansible_service_broker_registry_tag) }}"
-    ansible_service_broker_registry_whitelist: "{{ ansible_service_broker_registry_whitelist | default(__ansible_service_broker_registry_whitelist) }}"
-    ansible_service_broker_registry_blacklist: "{{ ansible_service_broker_registry_blacklist | default(__ansible_service_broker_registry_blacklist) }}"
-
-- include_tasks: validate_facts.yml

+ 0 - 44
roles/ansible_service_broker/tasks/generate_certs.yml

@@ -1,44 +0,0 @@
----
-
-- when: ansible_service_broker_certs_dir is undefined
-  block:
-  - name: Create ansible-service-broker cert directory
-    file:
-      path: "{{ openshift.common.config_base }}/ansible-service-broker"
-      state: directory
-      mode: 0755
-    check_mode: no
-
-  - name: Create self signing ca cert
-    command: 'openssl req -nodes -x509 -newkey rsa:4096 -keyout {{ openshift.common.config_base }}/ansible-service-broker/key.pem -out {{ openshift.common.config_base }}/ansible-service-broker/cert.pem -days 365 -subj "/CN=asb-etcd.openshift-ansible-service-broker.svc"'
-    args:
-      creates: '{{ openshift.common.config_base }}/ansible-service-broker/cert.pem'
-
-  - name: Create self signed client cert
-    command: '{{ item.cmd }}'
-    args:
-      creates: '{{ item.creates }}'
-    with_items:
-    - cmd: openssl genrsa -out {{ openshift.common.config_base }}/ansible-service-broker/client.key 2048
-      creates: '{{ openshift.common.config_base }}/ansible-service-broker/client.key'
-    - cmd: 'openssl req -new -key {{ openshift.common.config_base }}/ansible-service-broker/client.key -out {{ openshift.common.config_base }}/ansible-service-broker/client.csr -subj "/CN=client"'
-      creates: '{{ openshift.common.config_base }}/ansible-service-broker/client.csr'
-    - cmd: openssl x509 -req -in {{ openshift.common.config_base }}/ansible-service-broker/client.csr -CA {{ openshift.common.config_base }}/ansible-service-broker/cert.pem -CAkey {{ openshift.common.config_base }}/ansible-service-broker/key.pem -CAcreateserial -out {{ openshift.common.config_base }}/ansible-service-broker/client.pem -days 1024
-      creates: '{{ openshift.common.config_base }}/ansible-service-broker/client.pem'
-
-  - set_fact:
-      ansible_service_broker_certs_dir: "{{ openshift.common.config_base }}/ansible-service-broker"
-
-- name: Read in certs for etcd
-  slurp:
-    src: '{{ ansible_service_broker_certs_dir }}/{{ item }}'
-  register: asb_etcd_certs
-  with_items:
-  - cert.pem
-  - client.pem
-  - client.key
-
-- set_fact:
-    etcd_ca_cert: "{{ asb_etcd_certs.results.0.content | b64decode }}"
-    etcd_client_cert: "{{ asb_etcd_certs.results.1.content | b64decode }}"
-    etcd_client_key: "{{ asb_etcd_certs.results.2.content | b64decode }}"

+ 0 - 246
roles/ansible_service_broker/tasks/install.yml

@@ -1,246 +0,0 @@
----
-
-- import_tasks: facts.yml
-
-- import_tasks: upgrade.yml
-  when: openshift_upgrade_target is defined
-
-- include_tasks: generate_certs.yml
-
-# Deployment of ansible-service-broker starts here
-- name: create openshift-ansible-service-broker project
-  oc_project:
-    name: openshift-ansible-service-broker
-    state: present
-    node_selector:
-      - ""
-
-- name: create ansible-service-broker serviceaccount
-  oc_serviceaccount:
-    name: asb
-    namespace: openshift-ansible-service-broker
-    state: present
-
-- name: create ansible-service-broker client serviceaccount
-  oc_serviceaccount:
-    name: asb-client
-    namespace: openshift-ansible-service-broker
-    state: present
-
-- name: Create asb-auth cluster role
-  oc_clusterrole:
-    state: present
-    name: asb-auth
-    rules:
-      - apiGroups: [""]
-        resources: ["namespaces"]
-        verbs: ["create", "delete"]
-      - apiGroups: ["authorization.openshift.io"]
-        resources: ["subjectrulesreview"]
-        verbs: ["create"]
-      - apiGroups: ["authorization.k8s.io"]
-        resources: ["subjectaccessreviews"]
-        verbs: ["create"]
-      - apiGroups: ["authentication.k8s.io"]
-        resources: ["tokenreviews"]
-        verbs: ["create"]
-      - apiGroups: ["image.openshift.io", ""]
-        resources: ["images"]
-        verbs: ["get", "list"]
-      - apiGroups: ["network.openshift.io"]
-        resources: ["clusternetworks", "netnamespaces"]
-        verbs: ["get"]
-      - apiGroups: ["network.openshift.io"]
-        resources: ["netnamespaces"]
-        verbs: ["update"]
-      - apiGroups: ["networking.k8s.io"]
-        resources: ["networkpolicies"]
-        verbs: ["create", "delete"]
-      - apiGroups: ["automationbroker.io"]
-        resources: ["bundles", "bundlebindings", "bundleinstances"]
-        verbs: ["*"]
-
-- name: Create aggregate rule for user authorization
-  oc_obj:
-    name: asb-user-access
-    state: present
-    kind: ClusterRole
-    content:
-      path: /tmp/useraccessout
-      data: "{{ lookup('template', 'broker-user-auth.clusterrole.yaml.j2') | from_yaml }}"
-
-- name: Create asb-access cluster role
-  oc_clusterrole:
-    state: present
-    name: asb-access
-    rules:
-      - nonResourceURLs: ["/osb", "/osb/*"]
-        verbs: ["get", "post", "put", "patch", "delete"]
-
-- name: Bind admin cluster-role to asb serviceaccount
-  oc_adm_policy_user:
-    state: present
-    resource_kind: cluster-role
-    resource_name: admin
-    user: "system:serviceaccount:openshift-ansible-service-broker:asb"
-
-- name: Bind auth cluster role to asb service account
-  oc_adm_policy_user:
-    state: present
-    resource_kind: cluster-role
-    resource_name: asb-auth
-    user: "system:serviceaccount:openshift-ansible-service-broker:asb"
-
-- name: Bind asb-access role to asb-client service account
-  oc_adm_policy_user:
-    state: present
-    resource_kind: cluster-role
-    resource_name: asb-access
-    user: "system:serviceaccount:openshift-ansible-service-broker:asb-client"
-
-- name: create asb-client token secret
-  oc_obj:
-    name: asb-client
-    namespace: openshift-ansible-service-broker
-    state: present
-    kind: Secret
-    content:
-      path: /tmp/asbclientsecretout
-      data:
-        apiVersion: v1
-        kind: Secret
-        metadata:
-          name: asb-client
-          namespace: openshift-ansible-service-broker
-          annotations:
-            kubernetes.io/service-account.name: asb-client
-        type: kubernetes.io/service-account-token
-
-- oc_secret:
-    state: list
-    namespace: openshift-ansible-service-broker
-    name: asb-client
-  register: asb_client_secret
-
-- set_fact:
-    service_ca_crt: "{{ asb_client_secret.results.results.0.data['service-ca.crt'] }}"
-
-- name: Create custom resource definitions for asb
-  oc_obj:
-    name: '{{ asb_crd.metadata.name }}'
-    kind: CustomResourceDefinition
-    state: present
-    content:
-      path: /tmp/{{ asb_crd.metadata.name }}
-      data: '{{ asb_crd }}'
-  vars:
-    asb_crd: "{{ lookup('file', item) | from_yaml }}"
-  with_fileglob:
-    - 'files/*.automationbroker.io.yaml'
-
-- name: create ansible-service-broker service
-  oc_service:
-    name: asb
-    namespace: openshift-ansible-service-broker
-    labels:
-      app: openshift-ansible-service-broker
-      service: asb
-    annotations:
-      service.alpha.openshift.io/serving-cert-secret-name: asb-tls
-    ports:
-      - name: port-1338
-        port: 1338
-        targetPort: 1338
-        protocol: TCP
-      - name: port-1337
-        port: 1337
-        targetPort: 1337
-        protocol: TCP
-    selector:
-      app: openshift-ansible-service-broker
-      service: asb
-
-- name: create route for ansible-service-broker service
-  oc_route:
-    name: asb-1338
-    namespace: openshift-ansible-service-broker
-    state: present
-    labels:
-      app: openshift-ansible-service-broker
-      service: asb
-    service_name: asb
-    port: 1338
-    tls_termination: Reencrypt
-
-- name: create route for dashboard-redirector service
-  oc_route:
-    name: dr-1337
-    namespace: openshift-ansible-service-broker
-    state: present
-    labels:
-      app: openshift-ansible-service-broker
-      service: asb
-    service_name: asb
-    port: 1337
-  when: ansible_service_broker_enable_dashboard_redirector
-
-- name: Set Ansible Service Broker deployment config
-  oc_obj:
-    force: yes
-    name: asb
-    namespace: openshift-ansible-service-broker
-    state: present
-    kind: DeploymentConfig
-    content:
-      path: /tmp/dcout
-      data: "{{ lookup('template', 'asb_dc.yaml.j2') | from_yaml }}"
-
-- name: set auth name and type facts if needed
-  set_fact:
-    ansible_service_broker_registry_auth_type: "secret"
-    ansible_service_broker_registry_auth_name: "asb-registry-auth"
-  when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != ""
-
-# TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
-- name: Create config map for ansible-service-broker
-  oc_obj:
-    name: broker-config
-    namespace: openshift-ansible-service-broker
-    state: present
-    kind: ConfigMap
-    content:
-      path: /tmp/cmout
-      data: "{{ ansible_service_broker_full_broker_config_map | default(lookup('template', 'configmap.yaml.j2') | from_yaml) }}"
-
-- oc_secret:
-    name: asb-registry-auth
-    namespace: openshift-ansible-service-broker
-    state: present
-    contents:
-      - path: username
-        data: "{{ ansible_service_broker_registry_user }}"
-      - path: password
-        data: "{{ ansible_service_broker_registry_password }}"
-  when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != ""
-
-- name: Create the Broker resource in the catalog
-  oc_obj:
-    name: ansible-service-broker
-    state: present
-    kind: ClusterServiceBroker
-    content:
-      path: /tmp/brokerout
-      data:
-        apiVersion: servicecatalog.k8s.io/v1beta1
-        kind: ClusterServiceBroker
-        metadata:
-          name: ansible-service-broker
-        spec:
-          url: https://asb.openshift-ansible-service-broker.svc:1338/osb
-          authInfo:
-            bearer:
-              secretRef:
-                name: asb-client
-                namespace: openshift-ansible-service-broker
-                kind: Secret
-          caBundle: "{{ service_ca_crt }}"

+ 0 - 8
roles/ansible_service_broker/tasks/main.yml

@@ -1,8 +0,0 @@
----
-# do any asserts here
-
-- include_tasks: install.yml
-  when: ansible_service_broker_install | bool
-
-- include_tasks: remove.yml
-  when: ansible_service_broker_remove | bool

+ 0 - 203
roles/ansible_service_broker/tasks/migrate.yml

@@ -1,203 +0,0 @@
----
-
-- block:
-    - name: scale down asb deploymentconfig
-      oc_scale:
-        name: asb
-        namespace: openshift-ansible-service-broker
-        kind: dc
-        replicas: 0
-
-    - name: Add required permissions to asb-auth clusterrole
-      oc_clusterrole:
-        state: present
-        name: asb-auth
-        rules:
-          - apiGroups: [""]
-            resources: ["namespaces"]
-            verbs: ["create", "delete"]
-          - apiGroups: ["authorization.openshift.io"]
-            resources: ["subjectrulesreview"]
-            verbs: ["create"]
-          - apiGroups: ["authorization.k8s.io"]
-            resources: ["subjectaccessreviews"]
-            verbs: ["create"]
-          - apiGroups: ["authentication.k8s.io"]
-            resources: ["tokenreviews"]
-            verbs: ["create"]
-          - apiGroups: ["image.openshift.io", ""]
-            resources: ["images"]
-            verbs: ["get", "list"]
-          - apiGroups: ["network.openshift.io"]
-            resources: ["clusternetworks", "netnamespaces"]
-            verbs: ["get"]
-          - apiGroups: ["network.openshift.io"]
-            resources: ["netnamespaces"]
-            verbs: ["update"]
-          - apiGroups: ["networking.k8s.io"]
-            resources: ["networkpolicies"]
-            verbs: ["create", "delete"]
-          - apiGroups: ["automationbroker.io"]
-            resources: ["bundles", "bundlebindings", "bundleinstances"]
-            verbs: ["*"]
-
-    - name: Create custom resource definitions for asb
-      oc_obj:
-        name: '{{ asb_crd.metadata.name }}'
-        kind: CustomResourceDefinition
-        state: present
-        content:
-          path: /tmp/{{ asb_crd.metadata.name }}
-          data: '{{ asb_crd }}'
-      vars:
-        asb_crd: "{{ lookup('file', item) | from_yaml }}"
-      with_fileglob:
-        - 'files/*.automationbroker.io.yaml'
-
-
-    - name: Migrate from etcd to CustomResources
-      oc_obj:
-        force: yes
-        name: asb-etcd-migration
-        namespace: openshift-ansible-service-broker
-        kind: Job
-        state: present
-        content:
-          path: /tmp/asb_migrate_out
-          data:
-            apiVersion: batch/v1
-            kind: Job
-            metadata:
-              name: asb-etcd-migration
-            spec:
-              parallelism: 1
-              completions: 1
-              backoffLimit: 3
-              activeDeadlineSeconds: "{{ asb_migration_timeout | default(600) | int }}"
-              template:
-                metadata:
-                  name: asb-etcd-migration
-                spec:
-                  containers:
-                    - name: asb
-                      image: '{{ ansible_service_broker_image }}'
-                      imagePullPolicy: IfNotPresent
-                      command:
-                        - '/usr/bin/migration'
-                      args:
-                        - '-host=asb-etcd.openshift-ansible-service-broker.svc'
-                        - '-ca-file=/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt'
-                        - '-client-cert=/var/run/asb-etcd-auth/client.crt'
-                        - '-client-key=/var/run/asb-etcd-auth/client.key'
-                        - '-namespace=openshift-ansible-service-broker'
-                      volumeMounts:
-                        - name: config-volume
-                          mountPath: /etc/ansible-service-broker
-                        - name: asb-tls
-                          mountPath: /etc/tls/private
-                        - name: asb-etcd-auth
-                          mountPath: /var/run/asb-etcd-auth
-                      env:
-                        - name: BROKER_CONFIG
-                          value: /etc/ansible-service-broker/config.yaml
-                        - name: HTTP_PROXY
-                          value: "{{ openshift.common.http_proxy  | default('') }}"
-                        - name: HTTPS_PROXY
-                          value: "{{ openshift.common.https_proxy  | default('') }}"
-                        - name: NO_PROXY
-                          value: "{{ ([openshift.common.no_proxy, '.default'] | join(',')) if openshift.get('common', {}).get('no_proxy') else '' }}"
-                  volumes:
-                    - name: config-volume
-                      configMap:
-                        name: broker-config
-                        items:
-                          - key: broker-config
-                            path: config.yaml
-                    - name: asb-tls
-                      secret:
-                        secretName: asb-tls
-                    - name: asb-etcd-auth
-                      secret:
-                        secretName: broker-etcd-auth-secret
-                  restartPolicy: Never
-                  serviceAccount: asb
-                  serviceAccountName: asb
-
-    - name: wait for migration to complete
-      oc_obj:
-        namespace: openshift-ansible-service-broker
-        kind: Job
-        state: list
-        name: asb-etcd-migration
-      register: migration_status
-      ignore_errors: true
-      until:
-        - "'results' in migration_status"
-        - "'results' in migration_status.results"
-        - "migration_status.results.results | count > 0"
-        # Pod's 'Complete' status must be True
-        - "migration_status.results.results | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
-      delay: 10
-      retries: "{{ (asb_migration_timeout|default(600) | int / 10) | int }}"
-      failed_when:
-        - "'results' in migration_status.results"
-        - "migration_status.results.results | count > 0"
-        # Fail when pod's 'Failed' status is True
-        - "migration_status.results.results | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
-
-    - when: not (migration_status is failed)
-      block:
-        - name: Update broker configmap to use CRD backend
-          oc_obj:
-            name: broker-config
-            namespace: openshift-ansible-service-broker
-            state: present
-            kind: ConfigMap
-            content:
-              path: /tmp/cmout
-              data: "{{ lookup('template', 'configmap.yaml.j2') | from_yaml }}"
-          register: updated_configmap
-
-        - name: Update broker deploymentconfig
-          oc_obj:
-            force: yes
-            name: asb
-            namespace: openshift-ansible-service-broker
-            state: present
-            kind: DeploymentConfig
-            content:
-              path: /tmp/dcout
-              data: "{{ lookup('template', 'asb_dc.yaml.j2') | from_yaml }}"
-
-        - name: delete etcd service
-          oc_service:
-            name: asb-etcd
-            namespace: openshift-ansible-service-broker
-            state: absent
-
-        - name: delete etcd deploymentconfig
-          oc_obj:
-            name: asb-etcd
-            namespace: openshift-ansible-service-broker
-            kind: DeploymentConfig
-            state: absent
-
-        - name: delete broker etcd secret
-          oc_secret:
-            name: broker-etcd-auth-secret
-            namespace: openshift_ansible_service_broker
-            state: absent
-  always:
-    - name: scale up asb deploymentconfig
-      oc_scale:
-        name: asb
-        namespace: openshift-ansible-service-broker
-        kind: dc
-        replicas: 1
-
-- name: Fail out because the ASB etcd to CRD migration was unsuccessful
-  fail:
-    msg: >
-      The migration from etcd to CustomResourceDefinitions was not
-      successful, aborting upgrade of the ansible service broker.
-  when: migration_status is not defined or migration_status is failed or updated_configmap is not defined or updated_configmap is failed

+ 0 - 131
roles/ansible_service_broker/tasks/remove.yml

@@ -1,131 +0,0 @@
----
-
-- name: Unbind admin cluster-role to asb serviceaccount
-  oc_adm_policy_user:
-    state: absent
-    namespace: openshift-ansible-service-broker
-    resource_kind: cluster-role
-    resource_name: admin
-    user: "system:serviceaccount:openshift-ansible-service-broker:asb"
-
-- name: Unbind auth cluster role to asb service account
-  oc_adm_policy_user:
-    state: absent
-    namespace: openshift-ansible-service-broker
-    resource_kind: cluster-role
-    resource_name: asb-auth
-    user: "system:serviceaccount:openshift-ansible-service-broker:asb"
-
-- name: Unbind asb-access role to asb-client service account
-  oc_adm_policy_user:
-    state: absent
-    namespace: openshift-ansible-service-broker
-    resource_kind: cluster-role
-    resource_name: asb-access
-    user: "system:serviceaccount:openshift-ansible-service-broker:asb-client"
-
-- name: remove ansible-service-broker serviceaccount
-  oc_serviceaccount:
-    name: asb
-    namespace: openshift-ansible-service-broker
-    state: absent
-
-- name: remove ansible-service-broker client serviceaccount
-  oc_serviceaccount:
-    name: asb-client
-    namespace: openshift-ansible-service-broker
-    state: absent
-
-- name: remove asb-auth cluster role
-  oc_clusterrole:
-    state: absent
-    name: asb-auth
-
-- name: remove asb-access cluster role
-  oc_clusterrole:
-    state: absent
-    name: asb-access
-
-- name: remove asb-user-access cluster role
-  oc_clusterrole:
-    state: absent
-    name: asb-user-access
-
-- name: remove asb-registry auth secret
-  oc_secret:
-    state: absent
-    name: asb-registry-auth
-    namespace: openshift-ansible-service-broker
-
-- name: remove asb-client token secret
-  oc_secret:
-    state: absent
-    name: asb-client
-    namespace: openshift-ansible-service-broker
-
-- name: remove ansible-service-broker service
-  oc_service:
-    name: asb
-    namespace: openshift-ansible-service-broker
-    state: absent
-
-- name: remove route for ansible-service-broker service
-  oc_route:
-    name: asb-1338
-    namespace: openshift-ansible-service-broker
-    state: absent
-
-- name: remove route for dashboard-redirector service
-  oc_route:
-    name: dr-1337
-    namespace: openshift-ansible-service-broker
-    state: absent
-  when: ansible_service_broker_enable_dashboard_redirector
-
-- name: remove Ansible Service Broker deployment config
-  oc_obj:
-    name: asb
-    namespace: openshift-ansible-service-broker
-    kind: DeploymentConfig
-    state: absent
-
-- name: remove secret for broker auth
-  oc_obj:
-    name: asb-client
-    namespace: openshift-ansible-service-broker
-    kind: Secret
-    state: absent
-
-- name: remove config map for ansible-service-broker
-  oc_configmap:
-    name: broker-config
-    namespace: openshift-ansible-service-broker
-    state: absent
-
-- name: remove custom resource definitions for asb
-  oc_obj:
-    name: '{{ asb_crd.metadata.name }}'
-    kind: CustomResourceDefinition
-    state: absent
-  vars:
-    asb_crd: "{{ lookup('file', item) | from_yaml }}"
-  with_fileglob:
-    - 'files/*.automationbroker.io.yaml'
-
-# TODO: Is this going to work?
-- shell: >
-    oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
-  register: get_apiservices
-  changed_when: no
-
-- name: remove broker object from the catalog
-  oc_obj:
-    name: ansible-service-broker
-    state: absent
-    kind: ClusterServiceBroker
-  when: not('not found' in get_apiservices.stdout)
-
-- name: remove openshift-ansible-service-broker project
-  oc_project:
-    name: openshift-ansible-service-broker
-    state: absent

+ 0 - 15
roles/ansible_service_broker/tasks/upgrade.yml

@@ -1,15 +0,0 @@
----
-- name: retrieve broker configmap
-  oc_configmap:
-    state: list
-    name: broker-config
-    namespace: openshift-ansible-service-broker
-  register: broker_configmap_raw
-
-- set_fact:
-    broker_configmap: '{{ (broker_configmap_raw.results.results.0.data | from_yaml)["broker-config"] | from_yaml }}'
-  when: broker_configmap_raw.results.results.0.data is defined
-
-- name: Migrate from etcd to CRDs
-  import_tasks: migrate.yml
-  when: broker_configmap is defined and broker_configmap.dao.get('type') != 'crd'

+ 0 - 13
roles/ansible_service_broker/tasks/validate_facts.yml

@@ -1,13 +0,0 @@
----
-- name: validate Dockerhub registry settings
-  fail: msg="To use the dockerhub registry, you must provide the ansible_service_broker_registry_organization"
-  when:
-    - ansible_service_broker_registry_type == 'dockerhub'
-    - not ansible_service_broker_registry_organization
-
-
-- name: validate RHCC registry settings
-  fail: msg="To use the Red Hat Container Catalog registry, you must provide the ansible_service_broker_registry_url"
-  when:
-    - ansible_service_broker_registry_type == 'rhcc'
-    - not ansible_service_broker_registry_url

+ 0 - 82
roles/ansible_service_broker/templates/asb_dc.yaml.j2

@@ -1,82 +0,0 @@
----
-apiVersion: v1
-kind: DeploymentConfig
-metadata:
-  name: asb
-  labels:
-    app: openshift-ansible-service-broker
-    service: asb
-spec:
-  replicas: 1
-  selector:
-    app: openshift-ansible-service-broker
-  strategy:
-    type: Rolling
-  template:
-    metadata:
-      labels:
-        app: openshift-ansible-service-broker
-        service: asb
-    spec:
-      nodeSelector: {{ ansible_service_broker_node_selector | to_json }}
-      serviceAccount: asb
-      containers:
-{% if ansible_service_broker_enable_dashboard_redirector %}
-        - image: {{ ansible_service_broker_image }}
-          name: dashboard-redirector
-          imagePullPolicy: IfNotPresent
-          ports:
-            - containerPort: 1337
-              protocol: TCP
-          command:
-            - dashboard-redirector
-            - --namespace
-            - "openshift-ansible-service-broker"
-          imagePullPolicy: IfNotPresent
-{% endif %}
-        - image: {{ ansible_service_broker_image }}
-          name: asb
-          imagePullPolicy: IfNotPresent
-          volumeMounts:
-            - name: config-volume
-              mountPath: /etc/ansible-service-broker
-            - name: asb-tls
-              mountPath: /etc/tls/private
-          ports:
-            - containerPort: 1338
-              protocol: TCP
-          env:
-            - name: BROKER_CONFIG
-              value: /etc/ansible-service-broker/config.yaml
-            - name: HTTP_PROXY
-              value: {{ openshift.common.http_proxy  | default('') }}
-            - name: HTTPS_PROXY
-              value: {{ openshift.common.https_proxy  | default('') }}
-            - name: NO_PROXY
-              value: {{ ([openshift.common.no_proxy, '.default'] | join(',')) if openshift.get('common', {}).get('no_proxy') else '' }}
-          resources: {}
-          terminationMessagePath: /tmp/termination-log
-          readinessProbe:
-            httpGet:
-              port: 1338
-              path: /healthz
-              scheme: HTTPS
-            initialDelaySeconds: 15
-            timeoutSeconds: 1
-          livenessProbe:
-            httpGet:
-              port: 1338
-              path: /healthz
-              scheme: HTTPS
-            initialDelaySeconds: 15
-            timeoutSeconds: 1
-      volumes:
-        - name: config-volume
-          configMap:
-            name: broker-config
-            items:
-              - key: broker-config
-                path: config.yaml
-        - name: asb-tls
-          secret:
-            secretName: asb-tls

+ 0 - 15
roles/ansible_service_broker/templates/broker-user-auth.clusterrole.yaml.j2

@@ -1,15 +0,0 @@
----
-
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: asb-user-access
-  labels:
-    rbac.authorization.k8s.io/aggregate-to-admin: "true"
-{% if ansible_service_broker_sandbox_role != 'admin' %}
-    rbac.authorization.k8s.io/aggregate-to-{{ ansible_service_broker_sandbox_role }}: "true"
-{% endif %}
-rules:
-- apiGroups: ["automationbroker.io"]
-  resources: ["access"]
-  verbs: ["create"]

+ 0 - 56
roles/ansible_service_broker/templates/configmap.yaml.j2

@@ -1,56 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: broker-config
-  namespace: openshift-ansible-service-broker
-  labels:
-    app: openshift-ansible-service-broker
-data:
-  broker-config: |
-    registry:
-      - type: {{ ansible_service_broker_registry_type }}
-        name: {{ ansible_service_broker_registry_name }}
-        url:  {{ ansible_service_broker_registry_url }}
-        org:  {{ ansible_service_broker_registry_organization }}
-        tag:  {{ ansible_service_broker_registry_tag }}
-        white_list: {{  ansible_service_broker_registry_whitelist | to_yaml }}
-        black_list: {{  ansible_service_broker_registry_blacklist | to_yaml }}
-        auth_type: "{{ ansible_service_broker_registry_auth_type | default("") }}"
-        auth_name: "{{ ansible_service_broker_registry_auth_name | default("") }}"
-      - type: local_openshift
-        name: localregistry
-        white_list: {{ ansible_service_broker_local_registry_whitelist | to_yaml }}
-        namespaces: {{ ansible_service_broker_local_registry_namespaces | to_yaml }}
-    dao:
-      type: crd
-    log:
-      stdout: true
-      level: {{ ansible_service_broker_log_level }}
-      color: true
-    openshift:
-      host: ""
-      ca_file: ""
-      bearer_token_file: ""
-      namespace: openshift-ansible-service-broker
-      sandbox_role: {{ ansible_service_broker_sandbox_role }}
-      image_pull_policy: {{ ansible_service_broker_image_pull_policy }}
-      keep_namespace: {{ ansible_service_broker_keep_namespace | bool | lower }}
-      keep_namespace_on_error: {{ ansible_service_broker_keep_namespace_on_error | bool | lower }}
-    broker:
-      dev_broker: {{ ansible_service_broker_dev_broker | bool | lower }}
-{% if ansible_service_broker_enable_dashboard_redirector %}
-      dashboard_redirector: {{ ansible_service_broker_dashboard_redirector_route }}
-{% endif %}
-      bootstrap_on_startup: {{ ansible_service_broker_bootstrap_on_startup | bool | lower }}
-      refresh_interval: {{ ansible_service_broker_refresh_interval }}
-      launch_apb_on_bind: {{ ansible_service_broker_launch_apb_on_bind | bool | lower }}
-      output_request: {{ ansible_service_broker_output_request | bool | lower }}
-      recovery: {{ ansible_service_broker_recovery | bool | lower }}
-      ssl_cert_key: /etc/tls/private/tls.key
-      ssl_cert: /etc/tls/private/tls.crt
-      auto_escalate: {{ ansible_service_broker_auto_escalate }}
-      auth:
-        - type: basic
-          enabled: false
-    secrets: {{ ansible_service_broker_secrets | to_yaml }}

+ 0 - 11
roles/ansible_service_broker/vars/default_images.yml

@@ -1,11 +0,0 @@
----
-__ansible_service_broker_registry_type: dockerhub
-__ansible_service_broker_registry_name: dh
-__ansible_service_broker_registry_url: null
-__ansible_service_broker_registry_user: null
-__ansible_service_broker_registry_password: null
-__ansible_service_broker_registry_organization: ansibleplaybookbundle
-__ansible_service_broker_registry_tag: latest
-__ansible_service_broker_registry_whitelist: []
-__ansible_service_broker_registry_blacklist:
-  - '.*automation-broker-apb$'

+ 0 - 12
roles/ansible_service_broker/vars/openshift-enterprise.yml

@@ -1,12 +0,0 @@
----
-__ansible_service_broker_registry_type: rhcc
-__ansible_service_broker_registry_name: rh
-__ansible_service_broker_registry_url: "https://registry.redhat.io"
-__ansible_service_broker_registry_user: "{{ oreg_auth_user | default(None) }}"
-__ansible_service_broker_registry_password: "{{ oreg_auth_password | default(None) }}"
-__ansible_service_broker_registry_organization: null
-__ansible_service_broker_registry_tag: "{{ openshift_image_tag }}"
-__ansible_service_broker_registry_whitelist:
-  - '.*-apb$'
-__ansible_service_broker_registry_blacklist:
-  - '.*automation-broker-apb$'

+ 0 - 48
roles/calico/README.md

@@ -1,48 +0,0 @@
-# Calico
-
-Configure Calico components for the Master host.
-
-## Requirements
-
-* Ansible 2.2
-
-## Installation
-
-To install, set the following inventory configuration parameters:
-
-* `openshift_use_calico=True`
-* `openshift_use_openshift_sdn=False`
-* `os_sdn_network_plugin_name='cni'`
-
-By default, Calico will share the etcd used by OpenShift.
-To configure Calico to use a separate instance of etcd, place etcd SSL client certs on your master,
-then set the following variables in your inventory.ini:
-
-* `calico_etcd_ca_cert_file=/path/to/etcd-ca.crt`
-* `calico_etcd_cert_file=/path/to/etcd-client.crt`
-* `calico_etcd_key_file=/path/to/etcd-client.key`
-* `calico_etcd_endpoints=https://etcd:2379`
-
-## Upgrading
-
-OpenShift-Ansible installs Calico as a self-hosted install. Previously, Calico ran as a systemd service. Running Calico
-in this manner is now deprecated, and must be upgraded to a hosted cluster. Please run the Legacy Upgrade playbook to
-upgrade your existing Calico deployment to a hosted deployment:
-
-        ansible-playbook -i inventory.ini playbooks/byo/calico/legacy_upgrade.yml
-
-## Additional Calico/Node and Felix Configuration Options
-
-Additional parameters that can be defined in the inventory are:
-
-
-| Environment | Description | Schema | Default |   
-|---------|----------------------|---------|---------|
-| CALICO_IPV4POOL_IPIP | IPIP Mode to use for the IPv4 POOL created at start up.	| off, always, cross-subnet	| always |
-| CALICO_LOG_DIR | Directory on the host machine where Calico Logs are written.| String	| /var/log/calico |
-
-### Contact Information
-
-Author: Dan Osborne <dan@projectcalico.org>
-
-For support, join the `#openshift` channel on the [calico users slack](calicousers.slack.com).

+ 0 - 10
roles/calico/defaults/main.yaml

@@ -1,10 +0,0 @@
----
-cni_conf_dir: "/etc/cni/net.d/"
-cni_bin_dir: "/opt/cni/bin/"
-
-calico_url_policy_controller: "quay.io/calico/kube-controllers:v3.1.3"
-calico_node_image: "quay.io/calico/node:v3.1.3"
-calico_cni_image: "quay.io/calico/cni:v3.1.3"
-calico_upgrade_image: "quay.io/calico/upgrade:v1.0.5"
-calico_ipv4pool_ipip: "always"
-use_calico_etcd: False

+ 0 - 17
roles/calico/meta/main.yml

@@ -1,17 +0,0 @@
----
-galaxy_info:
-  author: Dan Osborne
-  description: Calico networking
-  company: Tigera, Inc.
-  license: Apache License, Version 2.0
-  min_ansible_version: 2.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-  - system
-dependencies:
-- role: lib_utils
-- role: openshift_facts

+ 0 - 77
roles/calico/tasks/certs.yml

@@ -1,77 +0,0 @@
----
-- name: Calico Node | Set cert flag
-  set_fact:
-    calico_certs_provided: "{{ calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined | bool }}"
-
-- name: Calico Node | Error if invalid cert arguments
-  fail:
-    msg: "Must provide all or none for the following etcd params: calico_etcd_ca_cert_file, calico_etcd_cert_file, calico_etcd_key_file, calico_etcd_endpoints"
-  when:
-  - calico_certs_provided
-  - not (calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)
-
-- name: Calico Node | Set separate Calico etcd flag
-  set_fact:
-    use_calico_etcd: "{{ calico_etcd_initial_cluster is defined or calico_etcd_generate_certs is defined or calico_etcd_service_ip is defined or calico_etcd_clients_port is defined or calico_etcd_peers_port is defined or calico_etcd_cert_dir is defined or calico_etcd_mount is defined | bool }}"
-
-- name: Calico Node | Error if using separate etcd with invalid arguments
-  fail:
-    msg: "Must provide all or none of the following etcd params: calico_etcd_initial_cluster, calico_etcd_generate_certs, calico_etcd_service_ip, calico_etcd_clients_port, calico_etcd_peers_port, calico_etcd_cert_dir, and calico_etcd_mount"
-  when:
-  - use_calico_etcd
-  - not (calico_certs_provided and calico_etcd_initial_cluster is defined and calico_etcd_generate_certs is defined and calico_etcd_service_ip is defined and calico_etcd_clients_port is defined and calico_etcd_peers_port is defined and calico_etcd_cert_dir is defined and calico_etcd_mount is defined)
-
-- name: Calico Node | Configure separate Calico etcd and certs
-  when: use_calico_etcd
-  become: yes
-  include_role:
-    name: etcd
-    tasks_from: server_certificates
-  vars:
-    etcd_cert_prefix: calico-etcd-
-    etcd_cert_config_dir: "{{ calico_etcd_cert_dir }}"
-    etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
-    etcd_cert_subdir: "calico-etcd-{{ openshift.common.hostname }}"
-
-- name: Calico Node | Set etcd cert location facts
-  when: not calico_certs_provided
-  set_fact:
-    calico_etcd_ca_cert_file: "/etc/origin/master/master.etcd-ca.crt"
-    calico_etcd_cert_file: "/etc/origin/master/master.etcd-client.crt"
-    calico_etcd_key_file: "/etc/origin/master/master.etcd-client.key"
-    calico_etcd_endpoints: "{{ hostvars[groups.oo_first_master.0].openshift_master_etcd_urls | join(',') }}"
-
-- name: Calico Node | Error if no certs set.
-  fail:
-    msg: "Invalid etcd configuration for calico."
-  when: item is not defined or item == ''
-  with_items:
-  - calico_etcd_ca_cert_file
-  - calico_etcd_cert_file
-  - calico_etcd_key_file
-  - calico_etcd_endpoints
-
-- name: Calico Node | Assure the calico certs are present
-  stat:
-    path: "{{ item }}"
-    get_checksum: false
-    get_attributes: false
-    get_mime: false
-  with_items:
-  - "{{ calico_etcd_ca_cert_file }}"
-  - "{{ calico_etcd_cert_file }}"
-  - "{{ calico_etcd_key_file }}"
-
-- name: Create secret
-  oc_secret:
-    name: calico-etcd-secrets
-    state: present
-    namespace: kube-system
-    files:
-    - name: etcd-key
-      path: "{{ calico_etcd_key_file }}"
-    - name: etcd-cert
-      path: "{{ calico_etcd_cert_file }}"
-    - name: etcd-ca
-      path: "{{ calico_etcd_ca_cert_file }}"
-  run_once: true

+ 0 - 129
roles/calico/tasks/main.yml

@@ -1,129 +0,0 @@
----
-- name: Calico | Run kube proxy
-  run_once: true
-  import_role:
-    name: kube_proxy_and_dns
-
-- include_tasks: certs.yml
-
-- name: Calico | Clean Calico etcd data
-  when: calico_cleanup_path is defined and calico_cleanup_path != ""
-  file:
-    state: absent
-    path: "{{ calico_cleanup_path }}"
-
-- name: Calico | oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:calico-node
-  oc_adm_policy_user:
-    user: system:serviceaccount:kube-system:calico-node
-    resource_kind: scc
-    resource_name: privileged
-    state: present
-
-- name: Calico | oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:calico-kube-controllers
-  oc_adm_policy_user:
-    user: system:serviceaccount:kube-system:calico-kube-controllers
-    resource_kind: scc
-    resource_name: privileged
-    state: present
-
-- name: Calico | oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:calico-upgrade-job
-  oc_adm_policy_user:
-    user: system:serviceaccount:kube-system:calico-upgrade-job
-    resource_kind: scc
-    resource_name: privileged
-    state: present
-
-- name: Calico | Set default selector for kube-system
-  command: >
-    {{ openshift_client_binary }}
-    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-    annotate  ns kube-system openshift.io/node-selector="" --overwrite
-
-- name: Calico | Create temp directory
-  command: mktemp -d /tmp/openshift-ansible-XXXXXXX
-  register: mktemp
-  changed_when: False
-
-- name: Calico | Write separate Calico etcd manifest
-  when: use_calico_etcd
-  template:
-    dest: "{{ mktemp.stdout }}/calico-etcd.yml"
-    src: calico-etcd.yml.j2
-
-- name: Calico | Launch separate Calico etcd
-  when: use_calico_etcd
-  command: >
-    {{ openshift_client_binary }} apply
-    -f {{ mktemp.stdout }}/calico-etcd.yml
-    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-  register: calico_etcd_create_output
-  failed_when: "calico_etcd_create_output.rc != 0"
-  changed_when: "('created' in calico_etcd_create_output.stdout) or ('configured' in calico_etcd_create_output.stdout)"
-
-- name: Calico | Parse node version
-  set_fact:
-    node_version: "{{ calico_node_image | regex_replace('^.*node:v?(.*)$', '\\1') }}"
-    cnx: "{{ calico_node_image | regex_replace('^.*/(.*)-node:.*$', '\\1') }}"
-    use_calico_credentials: "{{ calico_image_credentials is defined | bool }}"
-
-- name: Calico | Encode Docker Credentials
-  shell: >
-    cat {{ calico_image_credentials }} | openssl base64 -A
-  register: calico_encoded_credentials_output
-  failed_when: "calico_encoded_credentials_output.rc != 0 or calico_encoded_credentials_output.stdout == ''"
-  when: use_calico_credentials
-
-- name: Calico | Set Encoded Docker Credentials Fact
-  set_fact:
-    calico_encoded_credentials: "{{ calico_encoded_credentials_output.stdout }}"
-  when: use_calico_credentials
-
-- name: Calico | Write Calico Pull Secret
-  template:
-    dest: "{{ mktemp.stdout }}/calico-pull-secret.yml"
-    src: calico-pull-secret.yml.j2
-  when: use_calico_credentials
-
-- name: Calico | Create Calico Pull Secret
-  when: use_calico_credentials
-  command: >
-    {{ openshift_client_binary }} apply
-    -f {{ mktemp.stdout }}/calico-pull-secret.yml
-    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-  register: calico_pull_secret_create_output
-  failed_when: "calico_pull_secret_create_output.rc != 0"
-  changed_when: "('created' in calico_pull_secret_create_output.stdout) or ('configured' in calico_pull_secret_create_output.stdout)"
-
-- name: Calico | Set the correct liveness and readiness checks
-  set_fact:
-    calico_binary_checks: "{{ (node_version >= '3.2.0' and cnx != 'cnx') or (node_version >= '2.2.0' and cnx == 'cnx') | bool }}"
-
-- name: Calico | Write Calico v2
-  template:
-    dest: "{{ mktemp.stdout }}/calico.yml"
-    src: calico.yml.j2
-  when:
-    - node_version | regex_search('^[0-9]\.[0-9]\.[0-9]') and node_version < '3.0.0'
-    - cnx != "cnx"
-
-- name: Calico | Write Calico v3
-  template:
-    dest: "{{ mktemp.stdout }}/calico.yml"
-    src: calicov3.yml.j2
-  when: (node_version | regex_search('^[0-9]\.[0-9]\.[0-9]') and node_version >= '3.0.0') or (node_version == 'master') or (cnx == "cnx" and node_version >= '2.0.0')
-
-- name: Calico | Launch Calico
-  run_once: true
-  command: >
-    {{ openshift_client_binary }} apply
-    -f {{ mktemp.stdout }}/calico.yml
-    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-  register: calico_create_output
-  failed_when: "calico_create_output.rc != 0"
-  changed_when: "('created' in calico_create_output.stdout) or ('configured' in calico_create_output.stdout)"
-
-- name: Calico | Delete temp directory
-  file:
-    name: "{{ mktemp.stdout }}"
-    state: absent
-  changed_when: False

File diff suppressed because it is too large
+ 0 - 88
roles/calico/templates/calico-etcd.yml.j2


+ 0 - 8
roles/calico/templates/calico-pull-secret.yml.j2

@@ -1,8 +0,0 @@
-apiVersion: v1
-kind: Secret
-metadata:
-  name: calico-pull-secret
-  namespace: kube-system
-data:
-  .dockerconfigjson: {{ calico_encoded_credentials }}
-type: kubernetes.io/dockerconfigjson

+ 0 - 398
roles/calico/templates/calico.yml.j2

@@ -1,398 +0,0 @@
----
-kind: ClusterRole
-apiVersion: v1
-metadata:
-  name: calico-kube-controllers
-  namespace: kube-system
-rules:
-  - apiGroups:
-    - ""
-    - extensions
-    resources:
-      - pods
-      - namespaces
-      - networkpolicies
-      - nodes
-    verbs:
-      - watch
-      - list
----
-kind: ClusterRoleBinding
-apiVersion: v1
-metadata:
-  name: calico-kube-controllers
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: calico-kube-controllers
-subjects:
-- kind: ServiceAccount
-  name: calico-kube-controllers
-  namespace: kube-system
----
-kind: ClusterRole
-apiVersion: v1
-metadata:
-  name: calico-node
-  namespace: kube-system
-rules:
-  - apiGroups: [""]
-    resources:
-      - pods
-      - nodes
-    verbs:
-      - get
----
-apiVersion: v1
-kind: ClusterRoleBinding
-metadata:
-  name: calico-node
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: calico-node
-subjects:
-- kind: ServiceAccount
-  name: calico-node
-  namespace: kube-system
-
----
-# This ConfigMap is used to configure a self-hosted Calico installation.
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: calico-config
-  namespace: kube-system
-data:
-  # Configure this with the location of your etcd cluster.
-  etcd_endpoints: "{{ calico_etcd_endpoints }}"
-  
-  # Configure the Calico backend to use.
-  calico_backend: "bird"
-
-  # The CNI network configuration to install on each node.
-  cni_network_config: |-
-    {
-        "name": "k8s-pod-network",
-        "cniVersion": "0.1.0",
-        "type": "calico",
-        "etcd_endpoints": "__ETCD_ENDPOINTS__",
-        "etcd_key_file": "__ETCD_KEY_FILE__",
-        "etcd_cert_file": "__ETCD_CERT_FILE__",
-        "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
-        "log_level": "info",
-        "mtu": 1500,
-        "ipam": {
-            "type": "calico-ipam"
-        },
-        "policy": {
-            "type": "k8s",
-            "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
-            "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
-        },
-        "kubernetes": {
-            "kubeconfig": "__KUBECONFIG_FILEPATH__"
-        }
-    }
-
-  etcd_ca: "/calico-secrets/etcd-ca"
-  etcd_cert: "/calico-secrets/etcd-cert"
-  etcd_key: "/calico-secrets/etcd-key"
-
----
-
-# This manifest installs the calico/node container, as well
-# as the Calico CNI plugins and network config on
-# each master and worker node in a Kubernetes cluster.
-kind: DaemonSet
-apiVersion: extensions/v1beta1
-metadata:
-  name: calico-node
-  namespace: kube-system
-  labels:
-    k8s-app: calico-node
-spec:
-  updateStrategy:
-    type: RollingUpdate
-    rollingUpdate:
-      maxUnavailable: 1
-  selector:
-    matchLabels:
-      k8s-app: calico-node
-  template:
-    metadata:
-      labels:
-        k8s-app: calico-node
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
-    spec:
-      hostNetwork: true
-      tolerations:
-        # Make sure calico/node gets scheduled on all nodes.
-        - effect: NoSchedule
-          operator: Exists
-        # Mark the pod as a critical add-on for rescheduling.
-        - key: CriticalAddonsOnly
-          operator: Exists
-        - effect: NoExecute
-          operator: Exists
-      serviceAccountName: calico-node
-      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
-      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
-      terminationGracePeriodSeconds: 0
-      containers:
-        # Runs calico/node container on each Kubernetes node.  This
-        # container programs network policy and routes on each
-        # host.
-        - name: calico-node
-          image: {{ calico_node_image }}
-          env:
-            # The location of the Calico etcd cluster.
-            - name: ETCD_ENDPOINTS
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_endpoints
-            # Choose the backend to use.
-            - name: CALICO_NETWORKING_BACKEND
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: calico_backend
-            # Cluster type to identify the deployment type
-            - name: CLUSTER_TYPE
-              value: "origin,bgp"
-            # Disable file logging so `kubectl logs` works.
-            - name: CALICO_DISABLE_FILE_LOGGING
-              value: "true"
-            # Set noderef for node controller.
-            - name: CALICO_K8S_NODE_REF
-              valueFrom:
-                fieldRef:
-                  fieldPath: spec.nodeName
-            # Set Felix endpoint to host default action to ACCEPT.
-            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
-              value: "ACCEPT"
-            # Configure the IP Pool from which Pod IPs will be chosen.
-            - name: CALICO_IPV4POOL_CIDR
-              value: "{{ openshift_cluster_network_cidr }}"
-            - name: CALICO_IPV4POOL_IPIP
-              value: "{{ calico_ipv4pool_ipip }}"
-            # Disable IPv6 on Kubernetes.
-            - name: FELIX_IPV6SUPPORT
-              value: "false"
-            # Set Felix logging to "info"
-            - name: FELIX_LOGSEVERITYSCREEN
-              value: "info"
-            # Set MTU for tunnel device used if ipip is enabled
-            - name: FELIX_IPINIPMTU
-              value: "1440"
-            - name: ETCD_ENDPOINTS
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_endpoints
-            # Location of the CA certificate for etcd.
-            - name: ETCD_CA_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_ca
-            # Location of the client key for etcd.
-            - name: ETCD_KEY_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_key
-            # Location of the client certificate for etcd.
-            - name: ETCD_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_cert
-            # Auto-detect the BGP IP address.
-            - name: IP
-              value: ""
-            - name: FELIX_HEALTHENABLED
-              value: "true"
-          securityContext:
-            privileged: true
-          livenessProbe:
-            httpGet:
-              path: /liveness
-              port: 9099
-            periodSeconds: 10
-            initialDelaySeconds: 10
-            failureThreshold: 6
-          readinessProbe:
-            httpGet:
-              path: /readiness
-              port: 9099
-            periodSeconds: 10
-          volumeMounts:
-            - mountPath: /lib/modules
-              name: lib-modules
-              readOnly: true
-            - mountPath: /var/run/calico
-              name: var-run-calico
-              readOnly: false
-            - mountPath: /calico-secrets
-              name: etcd-certs
-        # This container installs the Calico CNI binaries
-        # and CNI network config file on each node.
-        - name: install-cni
-          securityContext:
-            privileged: true
-          image: {{ calico_cni_image }}
-          command: ["/install-cni.sh"]
-          env:
-            # The location of the Calico etcd cluster.
-            - name: ETCD_ENDPOINTS
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_endpoints
-            # The CNI network config to install on each node.
-            - name: CNI_NETWORK_CONFIG
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: cni_network_config
-            # Location of the CA certificate for etcd.
-            - name: CNI_CONF_ETCD_CA
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_ca
-            # Location of the client key for etcd.
-            - name: CNI_CONF_ETCD_KEY
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_key
-            # Location of the client certificate for etcd.
-            - name: CNI_CONF_ETCD_CERT
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_cert
-          volumeMounts:
-            - mountPath: /host/opt/cni/bin
-              name: cni-bin-dir
-            - mountPath: /host/etc/cni/net.d
-              name: cni-net-dir
-            - mountPath: /calico-secrets
-              name: etcd-certs
-      volumes:
-        # Used by calico/node.
-        - name: lib-modules
-          hostPath:
-            path: /lib/modules
-        - name: var-run-calico
-          hostPath:
-            path: /var/run/calico
-        # Used to install CNI.
-        - name: cni-bin-dir
-          hostPath:
-            path: {{ cni_bin_dir }}
-        - name: cni-net-dir
-          hostPath:
-            path: {{ cni_conf_dir }}
-        # Mount in the etcd TLS secrets.
-        - name: etcd-certs
-          secret:
-            secretName: calico-etcd-secrets
-
----
-
-# This manifest deploys the Calico Kubernetes controllers.
-# See https://github.com/projectcalico/kube-controllers
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: calico-kube-controllers
-  namespace: kube-system
-  labels:
-    k8s-app: calico-kube-controllers
-  annotations:
-    scheduler.alpha.kubernetes.io/critical-pod: ''
-spec:
-  # The controllers can only have a single active instance.
-  replicas: 1
-  strategy:
-    type: Recreate
-  template:
-    metadata:
-      name: calico-kube-controllers
-      namespace: kube-system
-      labels:
-        k8s-app: calico-kube-controllers
-    spec:
-      # The controllers must run in the host network namespace so that
-      # it isn't governed by policy that would prevent it from working.
-      hostNetwork: true
-      tolerations:
-        # Mark the pod as a critical add-on for rescheduling.
-        - key: CriticalAddonsOnly
-          operator: Exists
-        - key: node-role.kubernetes.io/master
-          effect: NoSchedule
-      serviceAccountName: calico-kube-controllers
-      containers:
-        - name: calico-kube-controllers
-          securityContext:
-            privileged: true
-          image: {{ calico_url_policy_controller }}
-          env:
-            # The location of the Calico etcd cluster.
-            - name: ETCD_ENDPOINTS
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_endpoints
-            # Location of the CA certificate for etcd.
-            - name: ETCD_CA_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_ca
-            # Location of the client key for etcd.
-            - name: ETCD_KEY_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_key
-            # Location of the client certificate for etcd.
-            - name: ETCD_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_cert
-            # Choose which controllers to run.
-            - name: ENABLED_CONTROLLERS
-              value: policy,profile,workloadendpoint,node
-          volumeMounts:
-            # Mount in the etcd TLS secrets.
-            - mountPath: /calico-secrets
-              name: etcd-certs
-      volumes:
-        # Mount in the etcd TLS secrets.
-        - name: etcd-certs
-          secret:
-            secretName: calico-etcd-secrets
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: calico-kube-controllers
-  namespace: kube-system
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: calico-node
-  namespace: kube-system

+ 0 - 692
roles/calico/templates/calicov3.yml.j2

@@ -1,692 +0,0 @@
----
-
-kind: ClusterRole
-apiVersion: v1
-metadata:
-  name: calico-kube-controllers
-  namespace: kube-system
-rules:
-  - apiGroups:
-    - ""
-    - extensions
-    resources:
-      - pods
-      - namespaces
-      - networkpolicies
-      - nodes
-      - serviceaccounts
-    verbs:
-      - watch
-      - list
-  - apiGroups:
-    - networking.k8s.io
-    resources:
-      - networkpolicies
-    verbs:
-      - watch
-      - list
----
-kind: ClusterRoleBinding
-apiVersion: v1
-metadata:
-  name: calico-kube-controllers
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: calico-kube-controllers
-subjects:
-- kind: ServiceAccount
-  name: calico-kube-controllers
-  namespace: kube-system
-
----
-
-kind: ClusterRole
-apiVersion: v1
-metadata:
-  name: calico-node
-  namespace: kube-system
-rules:
-  - apiGroups: [""]
-    resources:
-      - pods
-      - namespaces
-      - nodes
-    verbs:
-      - get
-
----
-
-apiVersion: v1
-kind: ClusterRoleBinding
-metadata:
-  name: calico-node
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: calico-node
-subjects:
-- kind: ServiceAccount
-  name: calico-node
-  namespace: kube-system
-
----
-
-kind: ClusterRole
-apiVersion: v1
-metadata:
-  name: calico-upgrade-job
-  namespace: kube-system
-rules:
-  - apiGroups:
-    - extensions
-    resources:
-      - daemonsets
-    verbs:
-      - get
-      - list
-      - watch
----
-
-apiVersion: v1
-kind: ClusterRoleBinding
-metadata:
-  name: calico-upgrade-job
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: calico-upgrade-job
-subjects:
-- kind: ServiceAccount
-  name: calico-upgrade-job
-  namespace: kube-system
-
----
-
-# This ConfigMap is used to configure a self-hosted Calico installation.
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: calico-config
-  namespace: kube-system
-data:
-  # Configure this with the location of your etcd cluster.
-  etcd_endpoints: "{{ calico_etcd_endpoints }}"
-
-  node_image: "{{ calico_node_image }}"
-
-  # Configure the Calico backend to use.
-  calico_backend: "bird"
-
-  # The CNI network configuration to install on each node.
-  cni_network_config: |-
-    {
-      "name": "k8s-pod-network",
-      "cniVersion": "0.3.0",
-      "plugins": [
-        {
-          "type": "calico",
-          "etcd_endpoints": "__ETCD_ENDPOINTS__",
-          "etcd_key_file": "__ETCD_KEY_FILE__",
-          "etcd_cert_file": "__ETCD_CERT_FILE__",
-          "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
-          "log_level": "info",
-          "mtu": 1500,
-          "ipam": {
-              "type": "calico-ipam"
-          },
-          "policy": {
-              "type": "k8s"
-          },
-          "kubernetes": {
-              "kubeconfig": "__KUBECONFIG_FILEPATH__"
-          }
-        },
-        {
-          "type": "portmap",
-          "snat": true,
-          "capabilities": {"portMappings": true}
-        }
-      ]
-    }
-
-  # If you're using TLS enabled etcd uncomment the following.
-  # You must also populate the Secret below with these files.
-  etcd_ca: "/calico-secrets/etcd-ca"
-  etcd_cert: "/calico-secrets/etcd-cert"
-  etcd_key: "/calico-secrets/etcd-key"
-
----
-
-# This manifest installs the calico/node container, as well
-# as the Calico CNI plugins and network config on
-# each master and worker node in a Kubernetes cluster.
-kind: DaemonSet
-apiVersion: extensions/v1beta1
-metadata:
-  name: calico-node
-  namespace: kube-system
-  labels:
-    k8s-app: calico-node
-spec:
-  selector:
-    matchLabels:
-      k8s-app: calico-node
-  updateStrategy:
-    type: RollingUpdate
-    rollingUpdate:
-      maxUnavailable: 1
-  template:
-    metadata:
-      labels:
-        k8s-app: calico-node
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
-    spec:
-{% if calico_image_credentials is defined %}
-      imagePullSecrets:
-        - name: calico-pull-secret
-{% endif %}
-      hostNetwork: true
-      tolerations:
-        # Make sure calico/node gets scheduled on all nodes.
-        - effect: NoSchedule
-          operator: Exists
-        # Mark the pod as a critical add-on for rescheduling.
-        - key: CriticalAddonsOnly
-          operator: Exists
-        - effect: NoExecute
-          operator: Exists
-      serviceAccountName: calico-node
-      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
-      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
-      terminationGracePeriodSeconds: 0
-      initContainers:
-        - name: migrate
-          image: {{ calico_upgrade_image }}
-          command: ['/bin/sh', '-c', '/node-init-container.sh']
-          env:
-            # The location of the Calico etcd cluster.
-            - name: CALICO_ETCD_ENDPOINTS
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_endpoints
-            - name: CALICO_ETCD_CA_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_ca
-            # Location of the client key for etcd.
-            - name: CALICO_ETCD_KEY_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_key
-            # Location of the client certificate for etcd.
-            - name: CALICO_ETCD_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_cert
-            # The location of the Calico etcd cluster.
-            - name: CALICO_APIV1_ETCD_ENDPOINTS
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_endpoints
-            - name: CALICO_APIV1_ETCD_CA_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_ca
-            # Location of the client key for etcd.
-            - name: CALICO_APIV1_ETCD_KEY_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_key
-            # Location of the client certificate for etcd.
-            - name: CALICO_APIV1_ETCD_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_cert
-            - name: CALICO_APIV1_DATASTORE_TYPE
-              value: "etcdv2"
-          volumeMounts:
-            - mountPath: /calico-secrets
-              name: etcd-certs
-      containers:
-        # Runs calico/node container on each Kubernetes node.  This
-        # container programs network policy and routes on each
-        # host.
-        - name: calico-node
-          image: {{ calico_node_image }}
-          env:
-            # The location of the Calico etcd cluster.
-            - name: ETCD_ENDPOINTS
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_endpoints
-            # Choose the backend to use.
-            - name: CALICO_NETWORKING_BACKEND
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: calico_backend
-            # Cluster type to identify the deployment type
-            - name: CLUSTER_TYPE
-              value: "origin,bgp"
-            # Disable file logging so 'kubectl logs' works.
-            - name: CALICO_DISABLE_FILE_LOGGING
-              value: "true"
-            # Set noderef for node controller.
-            - name: CALICO_K8S_NODE_REF
-              valueFrom:
-                fieldRef:
-                  fieldPath: spec.nodeName
-            # Set Felix endpoint to host default action to ACCEPT.
-            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
-              value: "ACCEPT"
-            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
-            # chosen from this range. Changing this value after installation will have
-            # no effect. This should fall within '--cluster-cidr'.
-            - name: CALICO_IPV4POOL_CIDR
-              value: "{{ openshift_cluster_network_cidr }}"
-            - name: CALICO_IPV4POOL_IPIP
-              value: "{{ calico_ipv4pool_ipip }}"
-            # Disable IPv6 on Kubernetes.
-            - name: FELIX_IPV6SUPPORT
-              value: "false"
-            # Set Felix logging to "info"
-            - name: FELIX_LOGSEVERITYSCREEN
-              value: "info"
-            # Set MTU for tunnel device used if ipip is enabled
-            - name: FELIX_IPINIPMTU
-              value: "1440"
-            - name: ETCD_ENDPOINTS
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_endpoints
-            # Location of the CA certificate for etcd.
-            - name: ETCD_CA_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_ca
-            # Location of the client key for etcd.
-            - name: ETCD_KEY_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_key
-            # Location of the client certificate for etcd.
-            - name: ETCD_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_cert
-            # Auto-detect the BGP IP address.
-            - name: IP
-              value: "autodetect"
-            - name: FELIX_HEALTHENABLED
-              value: "true"
-          securityContext:
-            privileged: true
-          resources:
-            requests:
-              cpu: 250m
-          livenessProbe:
-            httpGet:
-              path: /liveness
-              port: 9099
-{% if calico_binary_checks %}
-              host: localhost
-{% endif %}
-            periodSeconds: 10
-            initialDelaySeconds: 10
-            failureThreshold: 6
-          readinessProbe:
-{% if calico_binary_checks %}
-            exec:
-              command:
-              - /bin/calico-node
-              - -bird-ready
-              - -felix-ready
-{% else %}
-            httpGet:
-              path: /readiness
-              port: 9099
-{% endif %}
-            periodSeconds: 10
-          volumeMounts:
-            - mountPath: /lib/modules
-              name: lib-modules
-              readOnly: true
-            - mountPath: /var/run/calico
-              name: var-run-calico
-              readOnly: false
-            - mountPath: /var/lib/calico
-              name: var-lib-calico
-              readOnly: false
-            - mountPath: /calico-secrets
-              name: etcd-certs
-        # This container installs the Calico CNI binaries
-        # and CNI network config file on each node.
-        - name: install-cni
-          securityContext:
-            privileged: true
-          image: {{ calico_cni_image }}
-          command: ["/install-cni.sh"]
-          env:
-            # Name of the CNI config file to create.
-            - name: CNI_CONF_NAME
-              value: "10-calico.conflist"
-            # The location of the Calico etcd cluster.
-            - name: ETCD_ENDPOINTS
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_endpoints
-            # The CNI network config to install on each node.
-            - name: CNI_NETWORK_CONFIG
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: cni_network_config
-            # Location of the CA certificate for etcd.
-            - name: CNI_CONF_ETCD_CA
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_ca
-            # Location of the client key for etcd.
-            - name: CNI_CONF_ETCD_KEY
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_key
-            # Location of the client certificate for etcd.
-            - name: CNI_CONF_ETCD_CERT
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_cert
-          volumeMounts:
-            - mountPath: /host/opt/cni/bin
-              name: cni-bin-dir
-            - mountPath: /host/etc/cni/net.d
-              name: cni-net-dir
-            - mountPath: /calico-secrets
-              name: etcd-certs
-      volumes:
-        # Used by calico/node.
-        - name: lib-modules
-          hostPath:
-            path: /lib/modules
-        - name: var-run-calico
-          hostPath:
-            path: /var/run/calico
-        - name: var-lib-calico
-          hostPath:
-            path: /var/lib/calico
-        # Used to install CNI.
-        - name: cni-bin-dir
-          hostPath:
-            path: {{ cni_bin_dir }}
-        - name: cni-net-dir
-          hostPath:
-            path: {{ cni_conf_dir }}
-        # Mount in the etcd TLS secrets with mode 400.
-        # See https://kubernetes.io/docs/concepts/configuration/secret/
-        - name: etcd-certs
-          secret:
-            secretName: calico-etcd-secrets
-            defaultMode: 0400
-
----
-
-# This manifest deploys the Calico Kubernetes controllers.
-# See https://github.com/projectcalico/kube-controllers
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: calico-kube-controllers
-  namespace: kube-system
-  labels:
-    k8s-app: calico-kube-controllers
-  annotations:
-    scheduler.alpha.kubernetes.io/critical-pod: ''
-spec:
-  # The controllers can only have a single active instance.
-  replicas: 1
-  strategy:
-    type: Recreate
-  template:
-    metadata:
-      name: calico-kube-controllers
-      namespace: kube-system
-      labels:
-        k8s-app: calico-kube-controllers
-    spec:
-{% if calico_image_credentials is defined %}
-      imagePullSecrets:
-        - name: calico-pull-secret
-{% endif %}
-      # The controllers must run in the host network namespace so that
-      # it isn't governed by policy that would prevent it from working.
-      hostNetwork: true
-      tolerations:
-        # Mark the pod as a critical add-on for rescheduling.
-        - key: CriticalAddonsOnly
-          operator: Exists
-        - key: node-role.kubernetes.io/master
-          effect: NoSchedule
-      serviceAccountName: calico-kube-controllers
-      initContainers:
-        - name: migrate
-          image: {{ calico_upgrade_image }}
-          command: ['/bin/sh', '-c', '/controller-init.sh']
-          env:
-            # The location of the Calico etcd cluster.
-            - name: CALICO_ETCD_ENDPOINTS
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_endpoints
-            - name: CALICO_ETCD_CA_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_ca
-            # Location of the client key for etcd.
-            - name: CALICO_ETCD_KEY_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_key
-            # Location of the client certificate for etcd.
-            - name: CALICO_ETCD_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_cert
-            # The location of the Calico etcd cluster.
-            - name: CALICO_APIV1_ETCD_ENDPOINTS
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_endpoints
-            - name: CALICO_APIV1_ETCD_CA_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_ca
-            # Location of the client key for etcd.
-            - name: CALICO_APIV1_ETCD_KEY_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_key
-            # Location of the client certificate for etcd.
-            - name: CALICO_APIV1_ETCD_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_cert
-            - name: CALICO_APIV1_DATASTORE_TYPE
-              value: "etcdv2"
-          volumeMounts:
-            - mountPath: /calico-secrets
-              name: etcd-certs
-      containers:
-        - name: calico-kube-controllers
-          image: {{ calico_url_policy_controller }}
-          securityContext:
-            privileged: true
-          env:
-            # The location of the Calico etcd cluster.
-            - name: ETCD_ENDPOINTS
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_endpoints
-            # Location of the CA certificate for etcd.
-            - name: ETCD_CA_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_ca
-            # Location of the client key for etcd.
-            - name: ETCD_KEY_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_key
-            # Location of the client certificate for etcd.
-            - name: ETCD_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_cert
-            # Choose which controllers to run.
-            - name: ENABLED_CONTROLLERS
-              value: policy,profile,workloadendpoint,node
-          volumeMounts:
-            # Mount in the etcd TLS secrets.
-            - mountPath: /calico-secrets
-              name: etcd-certs
-      volumes:
-        # Mount in the etcd TLS secrets with mode 400.
-        # See https://kubernetes.io/docs/concepts/configuration/secret/
-        - name: etcd-certs
-          secret:
-            secretName: calico-etcd-secrets
-            defaultMode: 0400
-
----
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: complete-upgrade
-  namespace: kube-system
-spec:
-  template:
-    spec:
-      hostNetwork: true
-      serviceAccountName: calico-upgrade-job
-      restartPolicy: OnFailure
-      containers:
-        - name: migrate-completion
-          image: {{ calico_upgrade_image }}
-          command: ['/bin/sh', '-c', '/completion-job.sh']
-          env:
-            - name: EXPECTED_NODE_IMAGE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: node_image
-            # The location of the Calico etcd cluster.
-            - name: CALICO_ETCD_ENDPOINTS
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_endpoints
-            - name: CALICO_ETCD_CA_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_ca
-            # Location of the client key for etcd.
-            - name: CALICO_ETCD_KEY_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_key
-            # Location of the client certificate for etcd.
-            - name: CALICO_ETCD_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_cert
-            # The location of the Calico etcd cluster.
-            - name: CALICO_APIV1_ETCD_ENDPOINTS
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_endpoints
-            - name: CALICO_APIV1_ETCD_CA_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_ca
-            # Location of the client key for etcd.
-            - name: CALICO_APIV1_ETCD_KEY_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_key
-            # Location of the client certificate for etcd.
-            - name: CALICO_APIV1_ETCD_CERT_FILE
-              valueFrom:
-                configMapKeyRef:
-                  name: calico-config
-                  key: etcd_cert
-            - name: CALICO_APIV1_DATASTORE_TYPE
-              value: "etcdv2"
-          volumeMounts:
-            - mountPath: /calico-secrets
-              name: etcd-certs
-      volumes:
-        # Mount in the etcd TLS secrets with mode 400.
-        # See https://kubernetes.io/docs/concepts/configuration/secret/
-        - name: etcd-certs
-          secret:
-            secretName: calico-etcd-secrets
-            defaultMode: 0400
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: calico-upgrade-job
-  namespace: kube-system
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: calico-kube-controllers
-  namespace: kube-system
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: calico-node
-  namespace: kube-system

+ 0 - 3
roles/calico_node/README.md

@@ -1,3 +0,0 @@
-# Calico Node
-
-Please see [calico](../calico/README.md)

+ 0 - 2
roles/calico_node/files/calico.conf

@@ -1,2 +0,0 @@
-[keyfile]
-unmanaged-devices=interface-name:cali*;interface-name:tunl0

+ 0 - 16
roles/calico_node/meta/main.yml

@@ -1,16 +0,0 @@
----
-galaxy_info:
-  author: Dan Osborne
-  description: Calico networking
-  company: Tigera, Inc.
-  license: Apache License, Version 2.0
-  min_ansible_version: 2.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-  - system
-dependencies:
-- role: openshift_facts

+ 0 - 13
roles/calico_node/tasks/main.yml

@@ -1,13 +0,0 @@
----
-- name: Calico Node | Configure NetworkManager to ignore Calico interfaces
-  copy:
-    src: files/calico.conf
-    dest: /etc/NetworkManager/conf.d/
-  when: using_network_manager | default(true) | bool
-  register: nm
-
-- name: Calico Node | Restart NetworkManager
-  systemd:
-    name: NetworkManager
-    state: restarted
-  when: nm.changed

+ 0 - 20
roles/cockpit-ui/defaults/main.yml

@@ -1,20 +0,0 @@
----
-openshift_hosted_manage_registry_console: True
-l_os_cockpit_image_version_dict:
-  origin: 'latest'
-  openshift-enterprise: "{{ openshift_image_tag }}"
-l_os_cockpit_image_version: "{{ l_os_cockpit_image_version_dict[openshift_deployment_type] }}"
-
-l_os_cockpit_image_format: "{{ l_os_non_standard_reg_url | regex_replace('${version}' | regex_escape, l_os_cockpit_image_version) }}"
-
-l_openshift_cockit_search_dict:
-  origin: "openshift/origin-${component}"
-  openshift-enterprise: "ose-${component}"
-l_openshift_cockit_search: "{{ l_openshift_cockit_search_dict[openshift_deployment_type] }}"
-
-l_openshift_cockpit_replace_dict:
-  origin: "cockpit/kubernetes"
-  openshift-enterprise: "registry-console"
-l_openshift_cockpit_replace: "{{ l_openshift_cockpit_replace_dict[openshift_deployment_type] }}"
-
-openshift_cockpit_deployer_image: "{{ l_os_cockpit_image_format | regex_replace(l_openshift_cockit_search | regex_escape, l_openshift_cockpit_replace) }}"

+ 0 - 110
roles/cockpit-ui/files/registry-console.yaml

@@ -1,110 +0,0 @@
-kind: Template
-apiVersion: v1
-metadata:
-  name: "registry-console"
-  annotations:
-    description: "Template for deploying registry web console. Requires cluster-admin."
-    tags: infrastructure
-labels:
-  createdBy: "registry-console-template"
-objects:
-  - kind: DeploymentConfig
-    apiVersion: v1
-    metadata:
-      name: "registry-console"
-      labels:
-        name: "registry-console"
-    spec:
-      triggers:
-      - type: ConfigChange
-      replicas: 1
-      selector:
-        name: "registry-console"
-      template:
-        metadata:
-          labels:
-            name: "registry-console"
-        spec:
-          nodeSelector:
-            node-role.kubernetes.io/master: 'true'
-          containers:
-            - name: registry-console
-              image: ${IMAGE_NAME}
-              ports:
-                - containerPort: 9090
-                  protocol: TCP
-              livenessProbe:
-                failureThreshold: 3
-                httpGet:
-                  path: /ping
-                  port: 9090
-                  scheme: HTTP
-                initialDelaySeconds: 10
-                periodSeconds: 10
-                successThreshold: 1
-                timeoutSeconds: 5
-              readinessProbe:
-                failureThreshold: 3
-                httpGet:
-                  path: /ping
-                  port: 9090
-                  scheme: HTTP
-                periodSeconds: 10
-                successThreshold: 1
-                timeoutSeconds: 5
-              env:
-                - name: OPENSHIFT_OAUTH_PROVIDER_URL
-                  value: "${OPENSHIFT_OAUTH_PROVIDER_URL}"
-                - name: OPENSHIFT_OAUTH_CLIENT_ID
-                  value: "${OPENSHIFT_OAUTH_CLIENT_ID}"
-                - name: KUBERNETES_INSECURE
-                  value: "false"
-                - name: COCKPIT_KUBE_INSECURE
-                  value: "false"
-                - name: REGISTRY_ONLY
-                  value: "true"
-                - name: REGISTRY_HOST
-                  value: "${REGISTRY_HOST}"
-  - kind: Service
-    apiVersion: v1
-    metadata:
-     name: "registry-console"
-     labels:
-       name: "registry-console"
-    spec:
-      type: ClusterIP
-      ports:
-        - name: registry-console
-          protocol: TCP
-          port: 9000
-          targetPort: 9090
-      selector:
-        name: "registry-console"
-  - kind: OAuthClient
-    apiVersion: v1
-    metadata:
-      name: "${OPENSHIFT_OAUTH_CLIENT_ID}"
-      respondWithChallenges: false
-    secret: "${OPENSHIFT_OAUTH_CLIENT_SECRET}"
-    redirectURIs:
-      - "${COCKPIT_KUBE_URL}"
-parameters:
-  - description: 'Specify fully qualified image name and version; e.g. for "registry.redhat.io/openshift3/registry-console:v3.11"'
-    name: IMAGE_NAME
-    value: "openshift3/registry-console:latest"
-  - description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443"
-    name: OPENSHIFT_OAUTH_PROVIDER_URL
-    required: true
-  - description: "The registry console URL. This should be created beforehand using 'oc create route passthrough --service registry-console --port registry-console -n default', e.g. https://registry-console-default.example.com"
-    name: COCKPIT_KUBE_URL
-    required: true
-  - description: "Oauth client secret"
-    name: OPENSHIFT_OAUTH_CLIENT_SECRET
-    from: "user[a-zA-Z0-9]{64}"
-    generate: expression
-  - description: "Oauth client id"
-    name: OPENSHIFT_OAUTH_CLIENT_ID
-    value: "cockpit-oauth-client"
-  - description: "The integrated registry hostname exposed via route, e.g. registry.example.com"
-    name: REGISTRY_HOST
-    required: true

+ 0 - 17
roles/cockpit-ui/meta/main.yml

@@ -1,17 +0,0 @@
----
-galaxy_info:
-  author: Samuel Munilla
-  description: Deploy and Enable cockpit-ui
-  company: Red Hat, Inc.
-  license: Apache License, Version 2.0
-  min_ansible_version: 2.1
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-dependencies:
-- role: lib_utils
-- role: lib_openshift
-- role: openshift_facts

+ 0 - 70
roles/cockpit-ui/tasks/install.yml

@@ -1,70 +0,0 @@
----
-- name: Create local temp dir for registry-console template
-  command: mktemp -d /tmp/openshift-ansible-XXXXXXX
-  register: mktemp
-  # AUDIT:changed_when: not set here because this task actually
-  # creates something
-
-- name: Copy the admin client config(s)
-  command: >
-    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
-  changed_when: False
-
-- name: Copy registry-console template to tmp dir
-  copy:
-    src: "registry-console.yaml"
-    dest: "{{ mktemp.stdout }}/registry-console.yaml"
-
-- name: Create registry-console template
-  command: >
-    {{ openshift_client_binary }} apply
-    -f {{ mktemp.stdout }}/registry-console.yaml
-    --config={{ mktemp.stdout }}/admin.kubeconfig
-    -n openshift
-  register: oht_import_templates
-  failed_when: "'already exists' not in oht_import_templates.stderr and oht_import_templates.rc != 0"
-  changed_when: "'created' in oht_import_templates.stdout"
-
-# When openshift_hosted_manage_registry=true the openshift_hosted
-# role will create the appropriate route for the docker-registry.
-# When openshift_hosted_manage_registry=false then this code will
-# not be run.
-- name: fetch the docker-registry route
-  oc_route:
-    kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
-    name: docker-registry
-    namespace: default
-    state: list
-  register: docker_registry_route
-
-- name: Create passthrough route for registry-console
-  oc_route:
-    kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
-    name: registry-console
-    namespace: default
-    service_name: registry-console
-    state: present
-    tls_termination: passthrough
-  register: registry_console_cockpit_kube
-
-- name: Deploy registry-console
-  shell: >
-    {{ openshift_client_binary }} process openshift//registry-console
-    -p IMAGE_NAME="{{ openshift_cockpit_deployer_image }}"
-    -p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
-    -p REGISTRY_HOST="{{ docker_registry_route.results[0].spec.host }}"
-    -p COCKPIT_KUBE_URL="https://{{ registry_console_cockpit_kube.results.results[0].spec.host }}"
-    --config={{ mktemp.stdout }}/admin.kubeconfig
-    -n default
-    | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -
-  register: deploy_registry_console
-  changed_when: "'already exists' not in deploy_registry_console.stderr"
-  failed_when:
-  - "'already exists' not in deploy_registry_console.stderr"
-  - "deploy_registry_console.rc != 0"
-
-- name: Delete temp directory
-  file:
-    name: "{{ mktemp.stdout }}"
-    state: absent
-  changed_when: False

+ 0 - 2
roles/cockpit-ui/tasks/main.yml

@@ -1,2 +0,0 @@
----
-# This role is meant to be used with import_role and tasks_from.

+ 0 - 8
roles/cockpit/defaults/main.yml

@@ -1,8 +0,0 @@
----
-r_cockpit_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
-r_cockpit_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-
-r_cockpit_os_firewall_deny: []
-r_cockpit_os_firewall_allow:
-- service: cockpit-ws
-  port: 9090/tcp

+ 0 - 15
roles/cockpit/meta/main.yml

@@ -1,15 +0,0 @@
----
-galaxy_info:
-  author: Scott Dodson
-  description: Deploy and Enable cockpit-ws plus optional plugins
-  company: Red Hat, Inc.
-  license: Apache License, Version 2.0
-  min_ansible_version: 2.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-dependencies:
-- role: lib_utils

+ 0 - 40
roles/cockpit/tasks/firewall.yml

@@ -1,40 +0,0 @@
----
-- when: r_cockpit_firewall_enabled | bool and not r_cockpit_use_firewalld | bool
-  block:
-  - name: Add iptables allow rules
-    os_firewall_manage_iptables:
-      name: "{{ item.service }}"
-      action: add
-      protocol: "{{ item.port.split('/')[1] }}"
-      port: "{{ item.port.split('/')[0] }}"
-    when: item.cond | default(True)
-    with_items: "{{ r_cockpit_os_firewall_allow }}"
-
-  - name: Remove iptables rules
-    os_firewall_manage_iptables:
-      name: "{{ item.service }}"
-      action: remove
-      protocol: "{{ item.port.split('/')[1] }}"
-      port: "{{ item.port.split('/')[0] }}"
-    when: item.cond | default(True)
-    with_items: "{{ r_cockpit_os_firewall_deny }}"
-
-- when: r_cockpit_firewall_enabled | bool and r_cockpit_use_firewalld | bool
-  block:
-  - name: Add firewalld allow rules
-    firewalld:
-      port: "{{ item.port }}"
-      permanent: true
-      immediate: true
-      state: enabled
-    when: item.cond | default(True)
-    with_items: "{{ r_cockpit_os_firewall_allow }}"
-
-  - name: Remove firewalld allow rules
-    firewalld:
-      port: "{{ item.port }}"
-      permanent: true
-      immediate: true
-      state: disabled
-    when: item.cond | default(True)
-    with_items: "{{ r_cockpit_os_firewall_deny }}"

+ 0 - 23
roles/cockpit/tasks/main.yml

@@ -1,23 +0,0 @@
----
-- name: setup firewall
-  import_tasks: firewall.yml
-
-- name: Install cockpit-ws
-  package:
-    name: "{{ pkg_list | join(',') }}"
-    state: present
-  vars:
-    pkg_list:
-    - cockpit-ws
-    - cockpit-system
-    - cockpit-bridge
-    - cockpit-docker
-    - "{{ cockpit_plugins | join(',') }}"
-  register: result
-  until: result is succeeded
-
-- name: Enable cockpit-ws
-  systemd:
-    name: cockpit.socket
-    enabled: true
-    state: started

+ 0 - 39
roles/contiv/README.md

@@ -1,39 +0,0 @@
-## Contiv
-
-Install Contiv components (netmaster, netplugin, contiv_etcd) on Master and Minion nodes 
-
-## Requirements
-
-* Ansible 2.2
-* Centos/ RHEL
-
-## Current Contiv restrictions when used with Openshift
-
-* Openshift Origin only 
-* VLAN encap mode only (default for Openshift Ansible)
-* Bare metal deployments only
-* Requires additional network configuration on the external physical routers (ref. Openshift docs Contiv section)
-
-## Key Ansible inventory configuration parameters
-
-* ``openshift_use_contiv=True``
-* ``openshift_use_openshift_sdn=False``
-* ``os_sdn_network_plugin_name='cni'``
-* ``contiv_netmaster_interface=eth0``
-* ``contiv_netplugin_interface=eth1``
-* ref. Openshift docs Contiv section for more details
-
-## Example bare metal deployment of Openshift + Contiv 
-
-* Example bare metal deployment
-
-![Screenshot](roles/contiv/contiv-openshift-vlan-network.png)
-
-* contiv241 is a Master + minion node
-* contiv242 and contiv243 are minion nodes
-* VLANs 1001, 1002 used for contiv container networks
-* VLAN 10 used for cluster-internal host network 
-* VLANs added to isolated VRF on external physical switch 
-* Static routes added on external switch as shown to allow routing between host and container networks
-* External switch also used for public internet access 
-
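(For context on the removed README above: the inventory parameters it lists were typically set together. A minimal sketch of such an inventory, with the group name assumed rather than taken from this repo:

    [OSEv3:vars]
    openshift_use_contiv=True
    openshift_use_openshift_sdn=False
    os_sdn_network_plugin_name='cni'
    contiv_netmaster_interface=eth0
    contiv_netplugin_interface=eth1
)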

BIN
roles/contiv/contiv-openshift-vlan-network.png


+ 0 - 176
roles/contiv/defaults/main.yml

@@ -1,176 +0,0 @@
----
-# The version of Contiv binaries to use
-contiv_version: 1.2.0
-
-# The version of cni binaries
-contiv_cni_version: v0.4.0
-
-# If the node we are deploying to is to be a contiv master.
-contiv_master: false
-
-contiv_default_subnet: "10.128.0.0/16"
-contiv_default_gw: "10.128.254.254"
-
-# Ports netmaster listens on
-contiv_netmaster_port: 9999
-contiv_netmaster_port_proto: tcp
-contiv_ofnet_master_port: 9001
-contiv_ofnet_master_port_proto: tcp
-
-# Ports netplugin listens on
-contiv_netplugin_port: 6640
-contiv_netplugin_port_proto: tcp
-contiv_ofnet_vxlan_port: 9002
-contiv_ofnet_vxlan_port_proto: tcp
-contiv_ovs_port: 9003
-contiv_ovs_port_proto: tcp
-
-contiv_vxlan_port: 4789
-contiv_vxlan_port_proto: udp
-
-# Interface used by Netplugin for inter-host traffic when encap_mode is vlan.
-# The interface must support 802.1Q trunking.
-contiv_netplugin_interface: "eno16780032"
-
-# IP address of the interface used for control communication within the cluster
-# It needs to be reachable from all nodes in the cluster.
-contiv_netplugin_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netplugin_interface].ipv4.address }}"
-
-# IP used to terminate vxlan tunnels
-contiv_netplugin_vtep_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netplugin_interface].ipv4.address }}"
-
-# Interface used to bind Netmaster service
-contiv_netmaster_interface: "{{ contiv_netplugin_interface }}"
-
-# IP address of the interface used for control communication within the cluster
-# It needs to be reachable from all nodes in the cluster.
-contiv_netmaster_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address }}"
-
-# Path to the contiv binaries
-contiv_bin_dir: /usr/bin
-
-# Path to the contivk8s cni binary
-contiv_cni_bin_dir: /opt/cni/bin
-
-# Path to cni archive download directory
-contiv_cni_download_dir: /tmp
-
-# URL for cni binaries
-contiv_cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
-contiv_cni_bin_url: "{{ contiv_cni_bin_url_base }}/{{ contiv_cni_version }}/cni-{{ contiv_cni_version }}.tbz2"
-
-
-# Contiv config directory
-contiv_config_dir: /opt/contiv/config
-
-# Directory to store downloaded Contiv releases
-contiv_releases_directory: /opt/contiv
-contiv_current_release_directory: "{{ contiv_releases_directory }}/{{ contiv_version }}"
-
-#The default url to download the Contiv tar's from
-contiv_download_url_base: "https://github.com/contiv/netplugin/releases/download"
-contiv_download_url: "{{ contiv_download_url_base }}/{{ contiv_version }}/netplugin-{{ contiv_version }}.tar.bz2"
-
-# This is where kubelet looks for plugin files
-contiv_kube_plugin_dir: /usr/libexec/kubernetes/kubelet-plugins/net/exec
-
-# Specifies routed mode vs bridged mode for networking (bridge | routing)
-# if you are using an external router for all routing, you should select bridge here
-contiv_netplugin_fwd_mode: routing
-
-# Contiv fabric mode aci|default
-contiv_fabric_mode: default
-
-# Global VLAN range
-contiv_vlan_range: "2900-3000"
-
-# Encapsulation type vlan|vxlan to use for instantiating container networks
-contiv_encap_mode: vxlan
-
-# Backend used by Netplugin for instantiating container networks
-contiv_netplugin_driver: ovs
-
-# Create a default Contiv network for use by pods
-contiv_default_network: true
-
-# Statically configured tag for default network (if needed)
-contiv_default_network_tag: ""
-
-#SRFIXME (use the openshift variables)
-contiv_https_proxy: ""
-contiv_http_proxy: ""
-contiv_no_proxy: ""
-
-# The following are aci specific parameters when contiv_fabric_mode: aci is set.
-# Otherwise, you can ignore these.
-contiv_apic_url: ""
-contiv_apic_username: ""
-contiv_apic_password: ""
-contiv_apic_leaf_nodes: ""
-contiv_apic_phys_dom: ""
-contiv_apic_contracts_unrestricted_mode: no
-contiv_apic_epg_bridge_domain: not_specified
-apic_configure_default_policy: false
-contiv_apic_default_external_contract: "uni/tn-common/brc-default"
-contiv_apic_default_app_profile: "contiv-infra-app-profile"
-contiv_kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
-contiv_kube_ca_cert: "{{ contiv_kube_cert_dir }}/ca.crt"
-contiv_kube_key: "{{ contiv_kube_cert_dir }}/admin.key"
-contiv_kube_cert: "{{ contiv_kube_cert_dir }}/admin.crt"
-contiv_kube_master_api_port: 8443
-contiv_kube_master_api_port_proto: tcp
-
-# contivh1 default subnet and gateway
-contiv_h1_subnet_default: "10.129.0.0/16"
-contiv_h1_gw_default: "10.129.0.1"
-
-# contiv default private subnet for ext access
-contiv_private_ext_subnet: "10.130.0.0/16"
-
-contiv_openshift_docker_service_name: "docker"
-
-contiv_api_proxy_port: 10000
-contiv_api_proxy_port_proto: tcp
-contiv_api_proxy_image_repo: contiv/auth_proxy
-contiv_api_proxy_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address }}"
-
-contiv_etcd_system_user: contivetcd
-contiv_etcd_system_uid: 823
-contiv_etcd_system_group: contivetcd
-contiv_etcd_system_gid: 823
-contiv_etcd_port: 22379
-contiv_etcd_port_proto: tcp
-contiv_etcd_peer_port: 22380
-contiv_etcd_peer_port_proto: tcp
-contiv_etcd_url: "http://127.0.0.1:{{ contiv_etcd_port }}"
-contiv_etcd_init_image_repo: ferest/etcd-initer
-contiv_etcd_init_image_tag: latest
-contiv_etcd_image_repo: quay.io/coreos/etcd
-contiv_etcd_image_tag: v3.2.4
-contiv_etcd_conf_dir: /etc/contiv-etcd
-contiv_etcd_data_dir: /var/lib/contiv-etcd
-contiv_etcd_peers: |-
-  {% for host in groups.oo_masters_to_config -%}
-    {{ host }}=http://{{ hostvars[host]['ip'] | default(hostvars[host].ansible_default_ipv4['address']) }}:{{ contiv_etcd_peer_port }}{% if not loop.last %},{% endif %}
-  {%- endfor %}
-
-# List of port/protocol pairs to allow inbound access to on every host
-# netplugin runs on, from all host IPs in the cluster.
-contiv_netplugin_internal: [ "{{ contiv_ofnet_vxlan_port }}/{{ contiv_ofnet_vxlan_port_proto }}",
-                             "{{ contiv_ovs_port }}/{{ contiv_ovs_port_proto }}",
-                             "{{ contiv_vxlan_port }}/{{ contiv_vxlan_port_proto }}" ]
-# Allow all forwarded traffic in and out of these interfaces.
-contiv_netplugin_forward_interfaces: [ contivh0, contivh1 ]
-
-# List of port/protocol pairs to allow inbound access to on every host
-# netmaster runs on, from all host IPs in the cluster.  Note that every host
-# that runs netmaster also runs netplugin, so the above netplugin rules will
-# apply as well.
-contiv_netmaster_internal: [ "{{ contiv_ofnet_master_port }}/{{ contiv_ofnet_master_port_proto }}",
-                             "{{ contiv_netmaster_port }}/{{ contiv_netmaster_port_proto }}",
-                             "{{ contiv_etcd_port }}/{{ contiv_etcd_port_proto }}",
-                             "{{ contiv_etcd_peer_port }}/{{ contiv_etcd_peer_port_proto }}",
-                             "{{ contiv_kube_master_api_port }}/{{ contiv_kube_master_api_port_proto }}" ]
-# List of port/protocol pairs to allow inbound access to on every host
-# netmaster runs on, from any host anywhere.
-contiv_netmaster_external: [ "{{ contiv_api_proxy_port }}/{{ contiv_api_proxy_port_proto }}" ]

+ 0 - 5
roles/contiv/files/contiv_cni.conf

@@ -1,5 +0,0 @@
-{
-  "cniVersion": "0.1.0",
-  "name": "contiv-net",
-  "type": "contivk8s"
-}

+ 0 - 16
roles/contiv/handlers/main.yml

@@ -1,16 +0,0 @@
----
-- name: reload systemd
-  command: systemctl --system daemon-reload
-
-- name: restart netmaster
-  service:
-    name: netmaster
-    state: restarted
-
-- name: restart netplugin
-  service:
-    name: netplugin
-    state: restarted
-
-- name: Save iptables rules
-  command: service iptables save

+ 0 - 17
roles/contiv/meta/main.yml

@@ -1,17 +0,0 @@
----
-galaxy_info:
-  author: Cisco
-  description:
-  company: Cisco
-  license:
-  min_ansible_version: 2.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-  - system
-dependencies:
-- role: lib_utils
-- role: contiv_facts

+ 0 - 31
roles/contiv/tasks/aci.yml

@@ -1,31 +0,0 @@
----
-- name: ACI | Check aci-gw container image
-  command: "{{ openshift_container_cli }} images -q contiv/aci-gw"
-  register: docker_aci_image
-
-- name: ACI | Pull aci-gw container
-  command: "{{ openshift_container_cli }} pull contiv/aci-gw"
-  when: docker_aci_image.stdout_lines == []
-
-- name: ACI | Copy shell script used by aci-gw service
-  template:
-    src: aci_gw.j2
-    dest: "{{ contiv_bin_dir }}/aci_gw.sh"
-    mode: u=rwx,g=rx,o=rx
-
-- name: ACI | Copy systemd units for aci-gw
-  template:
-    src: aci-gw.service
-    dest: /etc/systemd/system/aci-gw.service
-  notify: reload systemd
-
-- name: ACI | Enable aci-gw service
-  service:
-    name: aci-gw
-    enabled: yes
-
-- name: ACI | Start aci-gw service
-  service:
-    name: aci-gw
-    state: started
-  register: aci-gw_started

+ 0 - 120
roles/contiv/tasks/api_proxy.yml

@@ -1,120 +0,0 @@
----
-- name: API proxy | Create contiv-api-proxy openshift user
-  oc_serviceaccount:
-    state: present
-    name: contiv-api-proxy
-    namespace: kube-system
-  run_once: true
-
-- name: API proxy | Set contiv-api-proxy openshift user permissions
-  oc_adm_policy_user:
-    user: system:serviceaccount:kube-system:contiv-api-proxy
-    resource_kind: scc
-    resource_name: hostnetwork
-    state: present
-  run_once: true
-
-- name: API proxy | Create temp directory for doing work
-  command: mktemp -d /tmp/openshift-contiv-XXXXXX
-  register: mktemp
-  changed_when: False
-  # For things that pass temp files between steps, we want to make sure they
-  # run on the same node.
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-- name: API proxy | Check for existing api proxy secret volume
-  oc_obj:
-    namespace: kube-system
-    kind: secret
-    state: list
-    selector: "name=contiv-api-proxy-secret"
-  register: existing_secret_volume
-  run_once: true
-
-- name: API proxy | Generate a self signed certificate for api proxy
-  command: openssl req -new -nodes -x509 -subj "/C=US/ST=/L=/O=/CN=localhost" -days 3650 -keyout "{{ mktemp.stdout }}/key.pem" -out "{{ mktemp.stdout }}/cert.pem" -extensions v3_ca
-  when: (contiv_api_proxy_cert is not defined or contiv_api_proxy_key is not defined)
-        and not existing_secret_volume.results.results[0]['items']
-  register: created_self_signed_cert
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-- name: API proxy | Read self signed certificate file
-  command: cat "{{ mktemp.stdout }}/cert.pem"
-  register: generated_cert
-  when: created_self_signed_cert.changed
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-- name: API proxy | Read self signed key file
-  command: cat "{{ mktemp.stdout }}/key.pem"
-  register: generated_key
-  when: created_self_signed_cert.changed
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-- name: API proxy | Create api-proxy-secrets.yml from template using generated cert
-  template:
-    src: api-proxy-secrets.yml.j2
-    dest: "{{ mktemp.stdout }}/api-proxy-secrets.yml"
-  vars:
-    key: "{{ generated_key.stdout }}"
-    cert: "{{ generated_cert.stdout }}"
-  when: created_self_signed_cert.changed
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-- name: API proxy | Create api-proxy-secrets.yml from template using user defined cert
-  template:
-    src: api-proxy-secrets.yml.j2
-    dest: "{{ mktemp.stdout }}/api-proxy-secrets.yml"
-  vars:
-    key: "{{ lookup('file', contiv_api_proxy_key) }}"
-    cert: "{{ lookup('file', contiv_api_proxy_cert) }}"
-  when: contiv_api_proxy_cert is defined and contiv_api_proxy_key is defined
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-- name: API proxy | Create secret certificate volume
-  oc_obj:
-    state: present
-    namespace: "kube-system"
-    kind: secret
-    name: contiv-api-proxy-secret
-    files:
-      - "{{ mktemp.stdout }}/api-proxy-secrets.yml"
-  when: (contiv_api_proxy_cert is defined and contiv_api_proxy_key is defined)
-        or created_self_signed_cert.changed
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-- name: API proxy | Create api-proxy-daemonset.yml from template
-  template:
-    src: api-proxy-daemonset.yml.j2
-    dest: "{{ mktemp.stdout }}/api-proxy-daemonset.yml"
-  vars:
-    etcd_host: "etcd://{{ groups.oo_etcd_to_config.0 }}:{{ contiv_etcd_port }}"
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-# Always "import" this file, k8s won't do anything if it matches exactly what
-# is already in the cluster.
-- name: API proxy | Add API proxy daemonset
-  oc_obj:
-    state: present
-    namespace: "kube-system"
-    kind: daemonset
-    name: contiv-api-proxy
-    files:
-      - "{{ mktemp.stdout }}/api-proxy-daemonset.yml"
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-- name: API proxy | Delete temp directory
-  file:
-    name: "{{ mktemp.stdout }}"
-    state: absent
-  changed_when: False
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
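
The tasks above only generate a self-signed certificate when neither contiv_api_proxy_cert nor contiv_api_proxy_key is supplied and no contiv-api-proxy-secret already exists. A minimal, optional sketch for inspecting what was generated (a hypothetical extra task, not part of the removed role, reusing the same temp directory) might look like:

- name: API proxy | Inspect the generated certificate (sketch)
  command: openssl x509 -in "{{ mktemp.stdout }}/cert.pem" -noout -subject -enddate
  register: api_proxy_cert_info
  changed_when: false
  when: created_self_signed_cert.changed
  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
  run_once: true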

+ 0 - 71
roles/contiv/tasks/default_network.yml

@@ -1,71 +0,0 @@
----
-- name: Default network | Wait for netmaster
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" tenant ls'
-  register: tenant_result
-  until: tenant_result.stdout.find("default") != -1
-  retries: 9
-  delay: 10
-
-- name: Default network | Set globals
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ contiv_netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}'
-  run_once: true
-
-- name: Default network | Set arp mode to flood if ACI
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" global set --arp-mode flood'
-  when: contiv_fabric_mode == "aci"
-  run_once: true
-
-- name: Default network | Check if default-net exists
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" net ls'
-  register: net_result
-  run_once: true
-
-- name: Default network | Create default-net
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net'
-  when: net_result.stdout.find("default-net") == -1
-  run_once: true
-
-- name: Default network | Create host access infra network for VxLan routing case
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1'
-  when: (contiv_encap_mode == "vxlan") and (contiv_netplugin_fwd_mode == "routing")
-  run_once: true
-
-#- name: Default network | Create an allow-all policy for the default-group
-#  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy create ose-allow-all-policy'
-#  when: contiv_fabric_mode == "aci"
-#  run_once: true
-
-- name: Default network | Set up aci external contract to consume default external contract
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" external-contracts create -c -a {{ contiv_apic_default_external_contract }} oseExtToConsume'
-  when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
-  run_once: true
-
-- name: Default network | Set up aci external contract to provide default external contract
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" external-contracts create -p -a {{ contiv_apic_default_external_contract }} oseExtToProvide'
-  when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
-  run_once: true
-
-- name: Default network | Create aci default-group
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" group create default-net default-group'
-  when: contiv_fabric_mode == "aci"
-  run_once: true
-
-- name: Default network | Add external contracts to the default-group
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group'
-  when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
-  run_once: true
-
-#- name: Default network | Add policy rule 1 for allow-all policy
-#  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1'
-#  when: contiv_fabric_mode == "aci"
-#  run_once: true
-
-#- name: Default network | Add policy rule 2 for allow-all policy
-#  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2'
-#  when: contiv_fabric_mode == "aci"
-#  run_once: true
-
-- name: Default network | Create default aci app profile
-  command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" app-profile create -g default-group {{ contiv_apic_default_app_profile }}'
-  when: contiv_fabric_mode == "aci"
-  run_once: true

+ 0 - 48
roles/contiv/tasks/download_bins.yml

@@ -1,48 +0,0 @@
----
-- name: Download Bins | Create directory for current Contiv release
-  file:
-    path: "{{ contiv_current_release_directory }}"
-    state: directory
-
-- name: Download Bins | Install bzip2
-  yum:
-    name: bzip2
-    state: installed
-  register: result
-  until: result is succeeded
-
-- name: Download Bins | Download Contiv tar file
-  get_url:
-    url: "{{ contiv_download_url }}"
-    dest: "{{ contiv_current_release_directory }}"
-    mode: 0755
-    validate_certs: False
-  environment:
-    http_proxy: "{{ contiv_http_proxy|default('') }}"
-    https_proxy: "{{ contiv_https_proxy|default('') }}"
-    no_proxy: "{{ contiv_no_proxy|default('') }}"
-
-- name: Download Bins | Extract Contiv tar file
-  unarchive:
-    src: "{{ contiv_current_release_directory }}/netplugin-{{ contiv_version }}.tar.bz2"
-    dest: "{{ contiv_current_release_directory }}"
-    copy: no
-
-- name: Download Bins | Download cni tar file
-  get_url:
-    url: "{{ contiv_cni_bin_url }}"
-    dest: "{{ contiv_cni_download_dir }}"
-    mode: 0755
-    validate_certs: False
-  environment:
-    http_proxy: "{{ contiv_http_proxy|default('') }}"
-    https_proxy: "{{ contiv_https_proxy|default('') }}"
-    no_proxy: "{{ contiv_no_proxy|default('') }}"
-  register: download_file
-
-- name: Download Bins | Extract cni tar file
-  unarchive:
-    src: "{{ download_file.dest }}"
-    dest: "{{ contiv_cni_download_dir }}"
-    copy: no
-  when: download_file.changed
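
The downloads above disable certificate validation and carry no integrity check. get_url does support a checksum parameter, so a hardened variant of the Contiv download task could be sketched as below; the digest shown is a placeholder, not a real release value:

- name: Download Bins | Download Contiv tar file with checksum guard (sketch)
  get_url:
    url: "{{ contiv_download_url }}"
    dest: "{{ contiv_current_release_directory }}"
    mode: 0755
    checksum: "sha256:0000000000000000000000000000000000000000000000000000000000000000"  # placeholder digest
  environment:
    http_proxy: "{{ contiv_http_proxy|default('') }}"
    https_proxy: "{{ contiv_https_proxy|default('') }}"
    no_proxy: "{{ contiv_no_proxy|default('') }}"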

+ 0 - 114
roles/contiv/tasks/etcd.yml

@@ -1,114 +0,0 @@
----
-# To run contiv-etcd in a container as non-root, we need to match the uid/gid
-# with the filesystem permissions on the host.
-- name: Contiv etcd | Create local unix group
-  group:
-    name: "{{ contiv_etcd_system_group }}"
-    gid: "{{ contiv_etcd_system_gid }}"
-    system: yes
-
-- name: Contiv etcd | Create local unix user
-  user:
-    name: "{{ contiv_etcd_system_user }}"
-    createhome: no
-    uid: "{{ contiv_etcd_system_uid }}"
-    group: "{{ contiv_etcd_system_group }}"
-    home: "{{ contiv_etcd_data_dir }}"
-    shell: /bin/false
-    system: yes
-
-- name: Contiv etcd | Create directories
-  file:
-    path: "{{ item }}"
-    state: directory
-    mode: g-rwx,o-rwx
-    owner: "{{ contiv_etcd_system_user }}"
-    group: "{{ contiv_etcd_system_group }}"
-    setype: svirt_sandbox_file_t
-    seuser: system_u
-    serole: object_r
-    selevel: s0
-    recurse: yes
-  with_items:
-    - "{{ contiv_etcd_data_dir }}"
-    - "{{ contiv_etcd_conf_dir }}"
-
-- name: Contiv etcd | Create contiv-etcd openshift user
-  oc_serviceaccount:
-    state: present
-    name: contiv-etcd
-    namespace: kube-system
-  run_once: true
-
-- name: Contiv etcd | Create temp directory for doing work
-  command: mktemp -d /tmp/openshift-contiv-XXXXXX
-  register: mktemp
-  changed_when: False
-  # For things that pass temp files between steps, we want to make sure they
-  # run on the same node.
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-- name: Contiv etcd | Create etcd-scc.yml from template
-  template:
-    src: etcd-scc.yml.j2
-    dest: "{{ mktemp.stdout }}/etcd-scc.yml"
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-- name: Contiv etcd | Create etcd.yml from template
-  template:
-    src: etcd-daemonset.yml.j2
-    dest: "{{ mktemp.stdout }}/etcd-daemonset.yml"
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-- name: Contiv etcd | Create etcd-proxy.yml from template
-  template:
-    src: etcd-proxy-daemonset.yml.j2
-    dest: "{{ mktemp.stdout }}/etcd-proxy-daemonset.yml"
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-- name: Contiv etcd | Add etcd scc
-  oc_obj:
-    state: present
-    namespace: "kube-system"
-    kind: SecurityContextConstraints
-    name: contiv-etcd
-    files:
-      - "{{ mktemp.stdout }}/etcd-scc.yml"
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-# Always "import" this file, k8s won't do anything if it matches exactly what
-# is already in the cluster.
-- name: Contiv etcd | Add etcd daemonset
-  oc_obj:
-    state: present
-    namespace: "kube-system"
-    kind: daemonset
-    name: contiv-etcd
-    files:
-      - "{{ mktemp.stdout }}/etcd-daemonset.yml"
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-- name: Contiv etcd | Add etcd-proxy daemonset
-  oc_obj:
-    state: present
-    namespace: "kube-system"
-    kind: daemonset
-    name: contiv-etcd-proxy
-    files:
-      - "{{ mktemp.stdout }}/etcd-proxy-daemonset.yml"
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
-
-- name: Contiv etcd | Delete temp directory
-  file:
-    name: "{{ mktemp.stdout }}"
-    state: absent
-  changed_when: False
-  delegate_to: "{{ groups.oo_masters_to_config.0 }}"
-  run_once: true
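
As the comment at the top of this file explains, the containerized contiv-etcd runs as a non-root uid/gid that must match the ownership on the host. A hypothetical spot-check (not part of the removed role) that the data directory ownership lines up before the daemonsets are applied could be:

- name: Contiv etcd | Verify data directory ownership matches the etcd uid/gid (sketch)
  command: stat -c '%u:%g' "{{ contiv_etcd_data_dir }}"
  register: etcd_dir_ownership
  changed_when: false
  failed_when: etcd_dir_ownership.stdout != (contiv_etcd_system_uid ~ ':' ~ contiv_etcd_system_gid)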

+ 0 - 15
roles/contiv/tasks/main.yml

@@ -1,15 +0,0 @@
----
-- include_tasks: old_version_cleanup.yml
-
-- name: Ensure contiv_bin_dir exists
-  file:
-    path: "{{ contiv_bin_dir }}"
-    recurse: yes
-    state: directory
-
-- include_tasks: download_bins.yml
-
-- include_tasks: netmaster.yml
-  when: contiv_master
-
-- include_tasks: netplugin.yml

+ 0 - 64
roles/contiv/tasks/netmaster.yml

@@ -1,64 +0,0 @@
----
-- include_tasks: netmaster_firewalld.yml
-  when: contiv_has_firewalld
-
-- include_tasks: netmaster_iptables.yml
-  when: not contiv_has_firewalld and contiv_has_iptables
-
-- include_tasks: etcd.yml
-
-- name: Netmaster | Create netmaster symlinks
-  file:
-    src: "{{ contiv_current_release_directory }}/{{ item }}"
-    dest: "{{ contiv_bin_dir }}/{{ item }}"
-    state: link
-    force: yes
-  with_items:
-    - netmaster
-    - netctl
-
-- name: Netmaster | Copy environment file for netmaster
-  template:
-    src: netmaster.j2
-    dest: /etc/default/netmaster
-    mode: 0644
-  notify: restart netmaster
-
-- name: Netmaster | Ensure contiv_config_dir exists
-  file:
-    path: "{{ contiv_config_dir }}"
-    recurse: yes
-    state: directory
-
-- name: Netmaster | Setup contiv.json config for the cni plugin
-  template:
-    src: contiv.cfg.master.j2
-    dest: "{{ contiv_config_dir }}/contiv.json"
-  notify: restart netmaster
-
-- name: Netmaster | Copy systemd units for netmaster
-  template:
-    src: netmaster.service
-    dest: /etc/systemd/system/netmaster.service
-  notify: reload systemd
-
-- name: Netmaster | Flush handlers
-  meta: flush_handlers
-
-- name: Netmaster | Enable Netmaster
-  service:
-    name: netmaster
-    enabled: yes
-
-- name: Netmaster | Start Netmaster
-  service:
-    name: netmaster
-    state: started
-
-- include_tasks: aci.yml
-  when: contiv_fabric_mode == "aci"
-
-- include_tasks: default_network.yml
-  when: contiv_default_network == true
-
-- include_tasks: api_proxy.yml

+ 0 - 17
roles/contiv/tasks/netmaster_firewalld.yml

@@ -1,17 +0,0 @@
----
-- name: Netmaster Firewalld | Add internal rules
-  firewalld:
-    immediate: true
-    permanent: true
-    port: "{{ item[0] }}"
-    source: "{{ item[1] }}"
-  with_nested:
-    - "{{ contiv_netmaster_internal }}"
-    - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"
-
-- name: Netmaster Firewalld | Add external rules
-  firewalld:
-    immediate: true
-    permanent: true
-    port: "{{ item }}"
-  with_items: "{{ contiv_netmaster_external }}"

+ 0 - 32
roles/contiv/tasks/netmaster_iptables.yml

@@ -1,32 +0,0 @@
----
-- name: Netmaster IPtables | Add internal rules
-  iptables:
-    action: insert
-    chain: INPUT
-    # Parsed from the contiv_netmaster_internal list, this will be tcp or udp.
-    protocol: "{{ item[0].split('/')[1] }}"
-    match: "{{ item[0].split('/')[1] }}"
-    # Parsed from the contiv_netmaster_internal list, this will be a port number.
-    destination_port: "{{ item[0].split('/')[0] }}"
-    # This is an IP address from a node in the cluster.
-    source: "{{ item[1] }}"
-    jump: ACCEPT
-    comment: contiv
-  with_nested:
-    - "{{ contiv_netmaster_internal }}"
-    - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"
-  notify: Save iptables rules
-
-- name: Netmaster IPtables | Add external rules
-  iptables:
-    action: insert
-    chain: INPUT
-    # Parsed from the contiv_netmaster_external list, this will be tcp or udp.
-    protocol: "{{ item.split('/')[1] }}"
-    match: "{{ item.split('/')[1] }}"
-    # Parsed from the contiv_netmaster_external list, this will be a port number.
-    destination_port: "{{ item.split('/')[0] }}"
-    jump: ACCEPT
-    comment: contiv
-  with_items: "{{ contiv_netmaster_external }}"
-  notify: Save iptables rules
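
Both rule tasks above assume every contiv_netmaster_internal/_external entry is a "port/protocol" string, as the inline comments note. A standalone sketch with a made-up entry shows exactly what the split filters extract:

- name: Netmaster IPtables | Show how a port/protocol entry is parsed (sketch)
  debug:
    msg: "destination_port={{ item.split('/')[0] }} protocol={{ item.split('/')[1] }}"
  with_items:
    - "9999/tcp"  # hypothetical entry, same shape as contiv_netmaster_external values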

+ 0 - 129
roles/contiv/tasks/netplugin.yml

@@ -1,129 +0,0 @@
----
-- include_tasks: netplugin_firewalld.yml
-  when: contiv_has_firewalld
-
-- include_tasks: netplugin_iptables.yml
-  when: not contiv_has_firewalld and contiv_has_iptables
-
-- name: Netplugin | Ensure localhost entry correct in /etc/hosts
-  lineinfile:
-    dest: /etc/hosts
-    regexp: '^127\.0\.0\.1.*'
-    line: '127.0.0.1 localhost {{ ansible_hostname }}'
-    state: present
-
-- name: Netplugin | Remove incorrect localhost entry in /etc/hosts
-  lineinfile:
-    dest: /etc/hosts
-    regexp: '^::1. localhost '
-    line: '::1 '
-    state: absent
-
-- include_tasks: ovs.yml
-  when: contiv_netplugin_driver == "ovs"
-
-- name: Netplugin | Create Netplugin bin symlink
-  file:
-    src: "{{ contiv_current_release_directory }}/netplugin"
-    dest: "{{ contiv_bin_dir }}/netplugin"
-    state: link
-    force: yes
-
-- name: Netplugin | Ensure contiv_cni_bin_dir exists
-  file:
-    path: "{{ contiv_cni_bin_dir }}"
-    recurse: yes
-    state: directory
-
-- name: Netplugin | Create CNI bin symlink
-  file:
-    src: "{{ contiv_current_release_directory }}/contivk8s"
-    dest: "{{ contiv_cni_bin_dir }}/contivk8s"
-    state: link
-    force: yes
-
-- name: Netplugin | Copy CNI loopback bin
-  copy:
-    src: "{{ contiv_cni_download_dir }}/loopback"
-    dest: "{{ contiv_cni_bin_dir }}/loopback"
-    remote_src: True
-    mode: 0755
-
-- name: Netplugin | Ensure contiv_kube_plugin_dir and cni/net.d directories exist
-  file:
-    path: "{{ item }}"
-    recurse: yes
-    state: directory
-  with_items:
-    - "{{ contiv_kube_plugin_dir }}"
-    - "/etc/cni/net.d"
-
-- name: Netplugin | Ensure contiv_config_dir exists
-  file:
-    path: "{{ contiv_config_dir }}"
-    recurse: yes
-    state: directory
-
-- name: Netplugin | Copy contiv_cni.conf file
-  copy:
-    src: contiv_cni.conf
-    dest: "{{ item }}"
-  with_items:
-    - "{{ contiv_kube_plugin_dir }}/contiv_cni.conf"
-    - "/etc/cni/net.d"
-# notify: restart kubelet
-
-- name: Netplugin | Setup contiv.json config for the cni plugin
-  template:
-    src: contiv.cfg.j2
-    dest: "{{ contiv_config_dir }}/contiv.json"
-  notify: restart netplugin
-
-- name: Netplugin | Copy environment file for netplugin
-  template:
-    src: netplugin.j2
-    dest: /etc/default/netplugin
-    mode: 0644
-  notify: restart netplugin
-
-- name: Netplugin | Make sure docker proxy setting exists
-  lineinfile:
-    dest: /etc/sysconfig/docker-network
-    regexp: '^https_proxy.*'
-    line: 'https_proxy={{ contiv_https_proxy }}'
-    state: present
-  register: docker_updated
-
-- name: Netplugin | Copy systemd unit for netplugin
-  template:
-    src: netplugin.service
-    dest: /etc/systemd/system/netplugin.service
-  notify: reload systemd
-
-- name: systemd reload
-  command: systemctl daemon-reload
-  when: docker_updated is changed
-
-- name: Netplugin | Flush handlers
-  meta: flush_handlers
-
-- name: Netplugin | Restart docker
-  service:
-    name: "{{ contiv_openshift_docker_service_name }}"
-    state: restarted
-  when: docker_updated is changed
-  register: l_docker_restart_docker_in_contiv_result
-  until: not (l_docker_restart_docker_in_contiv_result is failed)
-  retries: 3
-  delay: 30
-
-- name: Netplugin | Enable Netplugin
-  service:
-    name: netplugin
-    enabled: yes
-
-- name: Netplugin | Start Netplugin
-  service:
-    name: netplugin
-    state: started
-# notify: restart kubelet

+ 0 - 17
roles/contiv/tasks/netplugin_firewalld.yml

@@ -1,17 +0,0 @@
----
-- name: Netplugin Firewalld | Add internal rules
-  firewalld:
-    immediate: true
-    permanent: true
-    port: "{{ item[0] }}"
-    source: "{{ item[1] }}"
-  with_nested:
-    - "{{ contiv_netplugin_internal }}"
-    - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"
-
-- name: Netplugin Firewalld | Add dns rule
-  firewalld:
-    immediate: true
-    permanent: true
-    port: "53/udp"
-    interface: contivh0

+ 0 - 52
roles/contiv/tasks/netplugin_iptables.yml

@@ -1,52 +0,0 @@
----
-- name: Netplugin IPtables | Add internal rules
-  iptables:
-    action: insert
-    chain: INPUT
-    protocol: "{{ item[0].split('/')[1] }}"
-    match: "{{ item[0].split('/')[1] }}"
-    destination_port: "{{ item[0].split('/')[0] }}"
-    source: "{{ item[1] }}"
-    jump: ACCEPT
-    comment: contiv
-  with_nested:
-    - "{{ contiv_netplugin_internal }}"
-    - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"
-  notify: Save iptables rules
-
-- name: Netplugin IPtables | Add [in] forward rules
-  iptables:
-    action: insert
-    chain: FORWARD
-    in_interface: "{{ item }}"
-    jump: ACCEPT
-    comment: contiv
-  with_items: "{{ contiv_netplugin_forward_interfaces }}"
-  notify: Save iptables rules
-
-- name: Netplugin IPtables | Add [out] forward rules
-  iptables:
-    action: insert
-    chain: FORWARD
-    out_interface: "{{ item }}"
-    jump: ACCEPT
-    comment: contiv
-  with_items: "{{ contiv_netplugin_forward_interfaces }}"
-  notify: Save iptables rules
-
-- name: Netplugin IPtables | Add dns rule
-  iptables:
-    action: insert
-    chain: INPUT
-    protocol: udp
-    match: udp
-    destination_port: 53
-    in_interface: contivh0
-    jump: ACCEPT
-    comment: contiv
-  notify: Save iptables rules
-
-- name: Netplugin IPtables | Enable iptables at boot
-  service:
-    name: iptables
-    enabled: yes

+ 0 - 49
roles/contiv/tasks/old_version_cleanup.yml

@@ -1,49 +0,0 @@
----
-- name: Old version cleanup | Check if old auth proxy service exists
-  stat:
-    path: /etc/systemd/system/auth-proxy.service
-    get_checksum: false
-    get_attributes: false
-    get_mime: false
-  register: auth_proxy_stat
-
-- name: Old version cleanup | Stop old auth proxy
-  service:
-    name: auth-proxy
-    enabled: no
-    state: stopped
-  when: auth_proxy_stat.stat.exists
-
-# Note(NB): The new containerized contiv-etcd service uses the same data
-# directory on the host, so etcd data is not lost.
-- name: Old version cleanup | Check if old contiv-etcd service exists
-  stat:
-    path: /etc/systemd/system/contiv-etcd.service
-    get_checksum: false
-    get_attributes: false
-    get_mime: false
-  register: contiv_etcd_stat
-
-- name: Old version cleanup | Stop old contiv-etcd
-  service:
-    name: contiv-etcd
-    enabled: no
-    state: stopped
-  when: contiv_etcd_stat.stat.exists
-
-- name: Old version cleanup | Delete old files
-  file:
-    state: absent
-    path: "{{ item }}"
-  with_items:
-    - /etc/systemd/system/auth-proxy.service
-    - /var/contiv/certs
-    - /usr/bin/auth_proxy.sh
-    - /etc/systemd/system/contiv-etcd.service
-    - /etc/systemd/system/contiv-etcd.service.d
-
-- include_tasks: old_version_cleanup_iptables.yml
-  when: not contiv_has_firewalld and contiv_has_iptables
-
-- include_tasks: old_version_cleanup_firewalld.yml
-  when: contiv_has_firewalld
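
The note above says the containerized contiv-etcd reuses the same host data directory, so existing etcd data survives the cleanup. A hypothetical spot-check (not part of the removed role) could confirm the directory is still present after the old services are stopped:

- name: Old version cleanup | Confirm the etcd data directory is still present (sketch)
  stat:
    path: "{{ contiv_etcd_data_dir }}"
  register: old_etcd_data_dir

- name: Old version cleanup | Warn if it is missing (sketch)
  debug:
    msg: "{{ contiv_etcd_data_dir }} not found; existing etcd data may not carry over."
  when: not old_etcd_data_dir.stat.exists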

+ 0 - 11
roles/contiv/tasks/old_version_cleanup_firewalld.yml

@@ -1,11 +0,0 @@
----
-- name: Old version cleanup | Delete old firewalld rules
-  firewalld:
-    state: absent
-    immediate: true
-    permanent: true
-    port: "{{ item }}"
-  with_items:
-    - "9999/tcp"
-    - "6640/tcp"
-    - "8472/udp"

+ 0 - 44
roles/contiv/tasks/old_version_cleanup_iptables.yml

@@ -1,44 +0,0 @@
----
-- name: Old version cleanup | Delete old forward [in] iptables rules
-  iptables:
-    state: absent
-    chain: FORWARD
-    in_interface: "{{ item }}"
-    jump: ACCEPT
-    comment: "{{ item }} FORWARD input"
-  with_items:
-    - contivh0
-    - contivh1
-  notify: Save iptables rules
-
-- name: Old version cleanup | Delete old forward [out] iptables rules
-  iptables:
-    state: absent
-    chain: FORWARD
-    out_interface: "{{ item }}"
-    jump: ACCEPT
-    comment: "{{ item }} FORWARD output"
-  with_items:
-    - contivh0
-    - contivh1
-  notify: Save iptables rules
-
-- name: Old version cleanup | Delete old input iptables rules
-  iptables:
-    state: absent
-    chain: INPUT
-    protocol: "{{ item.split('/')[1] }}"
-    match: "{{ item.split('/')[1] }}"
-    destination_port: "{{ item.split('/')[0] }}"
-    comment: "{{ item.split('/')[2] }}"
-    jump: ACCEPT
-  with_items:
-    - "53/udp/contiv dns"
-    - "4789/udp/netplugin vxlan 4789"
-    - "8472/udp/netplugin vxlan 8472"
-    - "9003/tcp/contiv"
-    - "9002/tcp/contiv"
-    - "9001/tcp/contiv"
-    - "9999/tcp/contiv"
-    - "10000/tcp/Contiv auth proxy service (10000)"
-  notify: Save iptables rules

+ 0 - 28
roles/contiv/tasks/ovs.yml

@@ -1,28 +0,0 @@
----
-- include_tasks: packageManagerInstall.yml
-  when: contiv_source_type == "packageManager"
-  tags:
-    - binary-update
-
-- name: OVS | Configure selinux for ovs
-  command: "semanage permissive -a openvswitch_t"
-
-- name: OVS | Enable ovs
-  service:
-    name: openvswitch
-    enabled: yes
-
-- name: OVS | Start ovs
-  service:
-    name: openvswitch
-    state: started
-  register: ovs_started
-
-- name: OVS | Configure ovs
-  command: "ovs-vsctl set-manager {{ item }}"
-  with_items:
-    - "tcp:127.0.0.1:6640"
-    - "ptcp:6640"
-
-- name: OVS | Configure ovsdb-server
-  command: "ovs-appctl -t ovsdb-server ovsdb-server/add-remote ptcp:6640"

+ 0 - 12
roles/contiv/tasks/packageManagerInstall.yml

@@ -1,12 +0,0 @@
----
-- name: Package Manager | Init the did_install fact
-  set_fact:
-    did_install: false
-
-- include_tasks: pkgMgrInstallers/centos-install.yml
-  when: ansible_os_family == "RedHat"
-
-- name: Package Manager | Set fact saying we did CentOS package install
-  set_fact:
-    did_install: true
-  when: ansible_os_family == "RedHat"

+ 0 - 39
roles/contiv/tasks/pkgMgrInstallers/centos-install.yml

@@ -1,39 +0,0 @@
----
-- name: PkgMgr RHEL/CentOS | Install net-tools pkg for route
-  yum:
-    pkg: net-tools
-    state: latest
-  register: result
-  until: result is succeeded
-
-- name: PkgMgr RHEL/CentOS | Get openstack ocata rpm
-  get_url:
-    url: https://repos.fedorapeople.org/repos/openstack/openstack-ocata/rdo-release-ocata-2.noarch.rpm
-    dest: /tmp/rdo-release-ocata-2.noarch.rpm
-    validate_certs: False
-  environment:
-    http_proxy: "{{ contiv_http_proxy|default('') }}"
-    https_proxy: "{{ contiv_https_proxy|default('') }}"
-    no_proxy: "{{ contiv_no_proxy|default('') }}"
-  tags:
-    - ovs_install
-
-- name: PkgMgr RHEL/CentOS | Install openstack ocata rpm
-  yum: name=/tmp/rdo-release-ocata-2.noarch.rpm state=present
-  tags:
-    - ovs_install
-  register: result
-  until: result is succeeded
-
-- name: PkgMgr RHEL/CentOS | Install ovs
-  yum:
-    pkg: openvswitch
-    state: present
-  environment:
-    http_proxy: "{{ contiv_http_proxy|default('') }}"
-    https_proxy: "{{ contiv_https_proxy|default('') }}"
-    no_proxy: "{{ contiv_no_proxy|default('') }}"
-  tags:
-    - ovs_install
-  register: result
-  until: result is succeeded

+ 0 - 13
roles/contiv/templates/aci-gw.service

@@ -1,13 +0,0 @@
-[Unit]
-Description=Contiv ACI gw
-After=auditd.service systemd-user-sessions.service time-sync.target {{ contiv_openshift_docker_service_name }}.service
-
-[Service]
-ExecStart={{ contiv_bin_dir }}/aci_gw.sh start
-ExecStop={{ contiv_bin_dir }}/aci_gw.sh stop
-KillMode=control-group
-Restart=always
-RestartSec=10
-
-[Install]
-WantedBy=multi-user.target

+ 0 - 35
roles/contiv/templates/aci_gw.j2

@@ -1,35 +0,0 @@
-#!/bin/bash
-
-usage="$0 start"
-if [ $# -ne 1 ]; then
-    echo USAGE: $usage
-    exit 1
-fi
-
-case $1 in
-start)
-    set -e
-
-    docker run --net=host \
-    -e "APIC_URL={{ contiv_apic_url }}" \
-    -e "APIC_USERNAME={{ contiv_apic_username }}" \
-    -e "APIC_PASSWORD={{ contiv_apic_password }}" \
-    -e "APIC_LEAF_NODE={{ contiv_apic_leaf_nodes }}" \
-    -e "APIC_PHYS_DOMAIN={{ contiv_apic_phys_dom }}" \
-    -e "APIC_EPG_BRIDGE_DOMAIN={{ contiv_apic_epg_bridge_domain }}" \
-    -e "APIC_CONTRACTS_UNRESTRICTED_MODE={{ contiv_apic_contracts_unrestricted_mode }}" \
-    --name=contiv-aci-gw \
-    contiv/aci-gw
-    ;;
-
-stop)
-    # don't stop on error
-    docker stop contiv-aci-gw
-    docker rm contiv-aci-gw
-    ;;
-
-*)
-    echo USAGE: $usage
-    exit 1
-    ;;
-esac

+ 0 - 57
roles/contiv/templates/api-proxy-daemonset.yml.j2

@@ -1,57 +0,0 @@
----
-apiVersion: extensions/v1beta1
-kind: DaemonSet
-metadata:
-  name: contiv-api-proxy
-  namespace: kube-system
-spec:
-  updateStrategy:
-    type: RollingUpdate
-  selector:
-    matchLabels:
-      name: contiv-api-proxy
-  template:
-    metadata:
-      namespace: kube-system
-      labels:
-        name: contiv-api-proxy
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ""
-    spec:
-      serviceAccountName: contiv-api-proxy
-      hostNetwork: true
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: kubernetes.io/hostname
-                operator: In
-                values:
-{% for node in groups.oo_masters_to_config %}
-                  - "{{ node }}"
-{% endfor %}
-      tolerations:
-        - key: node-role.kubernetes.io/master
-          effect: NoSchedule
-      containers:
-        - name: contiv-api-proxy
-          image: "{{ contiv_api_proxy_image_repo }}:{{ contiv_version }}"
-          args:
-            - "--listen-address=0.0.0.0:{{ contiv_api_proxy_port }}"
-            - --tls-key-file=/var/contiv/api_proxy_key.pem
-            - --tls-certificate=/var/contiv/api_proxy_cert.pem
-            - "--data-store-address={{ etcd_host }}"
-            - --data-store-driver=etcd
-            - "--netmaster-address=127.0.0.1:{{ contiv_netmaster_port }}"
-          ports:
-            - containerPort: "{{ contiv_api_proxy_port }}"
-              hostPort: "{{ contiv_api_proxy_port }}"
-          volumeMounts:
-            - name: secret-volume
-              mountPath: /var/contiv
-              readOnly: true
-      volumes:
-        - name: secret-volume
-          secret:
-            secretName: contiv-api-proxy-secret

+ 0 - 12
roles/contiv/templates/api-proxy-secrets.yml.j2

@@ -1,12 +0,0 @@
----
-apiVersion: v1
-kind: Secret
-metadata:
-    name: contiv-api-proxy-secret
-    namespace: kube-system
-    labels:
-      name: contiv-api-proxy-secret
-# Use data+b64encode, because stringData doesn't preserve newlines.
-data:
-    api_proxy_key.pem: "{{ key | b64encode }}"
-    api_proxy_cert.pem: "{{ cert | b64encode }}"
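
As the comment notes, the PEM material is base64-encoded into data because stringData would not preserve newlines. One hedged way to read it back for troubleshooting (assuming the oc client is available and the secret exists; this is not part of the removed role) is sketched below:

- name: Read the stored certificate back out of the secret (sketch)
  command: >
    oc get secret contiv-api-proxy-secret -n kube-system
    -o jsonpath='{.data.api_proxy_cert\.pem}'
  register: stored_cert_b64
  changed_when: false

- name: Decode it for inspection (sketch)
  debug:
    msg: "{{ stored_cert_b64.stdout | b64decode }}"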

+ 0 - 7
roles/contiv/templates/contiv.cfg.j2

@@ -1,7 +0,0 @@
-{
-  "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + contiv_netmaster_interface].ipv4.address }}:{{ contiv_kube_master_api_port }}",
-  "K8S_CA": "{{ openshift.common.config_base }}/node/ca.crt",
-  "K8S_KEY": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.key",
-  "K8S_CERT": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.crt",
-  "SVC_SUBNET": "172.30.0.0/16"
-}

+ 0 - 7
roles/contiv/templates/contiv.cfg.master.j2

@@ -1,7 +0,0 @@
-{
-  "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + contiv_netmaster_interface].ipv4.address }}:{{ contiv_kube_master_api_port }}",
-  "K8S_CA": "{{ openshift.common.config_base }}/master/ca.crt",
-  "K8S_KEY": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.key",
-  "K8S_CERT": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.crt",
-  "SVC_SUBNET": "172.30.0.0/16"
-}

+ 0 - 83
roles/contiv/templates/etcd-daemonset.yml.j2

@@ -1,83 +0,0 @@
----
-apiVersion: extensions/v1beta1
-kind: DaemonSet
-metadata:
-  name: contiv-etcd
-  namespace: kube-system
-spec:
-  updateStrategy:
-    type: RollingUpdate
-  selector:
-    matchLabels:
-      name: contiv-etcd
-  template:
-    metadata:
-      namespace: kube-system
-      labels:
-        name: contiv-etcd
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ""
-    spec:
-      serviceAccountName: contiv-etcd
-      hostNetwork: true
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: kubernetes.io/hostname
-                operator: In
-                values:
-{% for node in groups.oo_masters_to_config %}
-                  - "{{ node }}"
-{% endfor %}
-      tolerations:
-        - key: node-role.kubernetes.io/master
-          effect: NoSchedule
-      initContainers:
-        - name: contiv-etcd-init
-          image: "{{ contiv_etcd_init_image_repo }}:{{ contiv_etcd_init_image_tag }}"
-          env:
-            - name: ETCD_INIT_ARGSFILE
-              value: "{{ contiv_etcd_conf_dir }}/contiv-etcd-args"
-            - name: ETCD_INIT_LISTEN_PORT
-              value: "{{ contiv_etcd_port }}"
-            - name: ETCD_INIT_PEER_PORT
-              value: "{{ contiv_etcd_peer_port }}"
-            - name: ETCD_INIT_CLUSTER
-              value: "{{ contiv_etcd_peers }}"
-            - name: ETCD_INIT_DATA_DIR
-              value: "{{ contiv_etcd_data_dir }}"
-          volumeMounts:
-            - name: contiv-etcd-conf-dir
-              mountPath: "{{ contiv_etcd_conf_dir }}"
-          securityContext:
-            runAsUser: "{{ contiv_etcd_system_uid }}"
-            fsGroup: "{{ contiv_etcd_system_gid }}"
-      containers:
-        - name: contiv-etcd
-          image: "{{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}"
-          command:
-            - sh
-            - -c
-            - 'exec etcd $(cat "$ETCD_INIT_ARGSFILE")'
-          env:
-            - name: ETCD_INIT_ARGSFILE
-              value: "{{ contiv_etcd_conf_dir }}/contiv-etcd-args"
-          volumeMounts:
-            - name: contiv-etcd-conf-dir
-              mountPath: "{{ contiv_etcd_conf_dir }}"
-            - name: contiv-etcd-data-dir
-              mountPath: "{{ contiv_etcd_data_dir }}"
-          securityContext:
-            runAsUser: "{{ contiv_etcd_system_uid }}"
-            fsGroup: "{{ contiv_etcd_system_gid }}"
-      volumes:
-        - name: contiv-etcd-data-dir
-          hostPath:
-            type: DirectoryOrCreate
-            path: "{{ contiv_etcd_data_dir }}"
-        - name: contiv-etcd-conf-dir
-          hostPath:
-            type: DirectoryOrCreate
-            path: "{{ contiv_etcd_conf_dir }}"

+ 0 - 55
roles/contiv/templates/etcd-proxy-daemonset.yml.j2

@@ -1,55 +0,0 @@
----
-apiVersion: extensions/v1beta1
-kind: DaemonSet
-metadata:
-  name: contiv-etcd-proxy
-  namespace: kube-system
-spec:
-  updateStrategy:
-    type: RollingUpdate
-  selector:
-    matchLabels:
-      name: contiv-etcd-proxy
-  template:
-    metadata:
-      namespace: kube-system
-      labels:
-        name: contiv-etcd-proxy
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ""
-    spec:
-      serviceAccountName: contiv-etcd
-      hostNetwork: true
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: kubernetes.io/hostname
-                operator: NotIn
-                values:
-{% for node in groups.oo_masters_to_config %}
-                  - "{{ node }}"
-{% endfor %}
-      tolerations:
-        - key: node-role.kubernetes.io/master
-          effect: NoSchedule
-      containers:
-        - name: contiv-etcd-proxy
-          image: "{{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}"
-          command:
-            - etcd
-            - "--proxy=on"
-            - "--listen-client-urls=http://127.0.0.1:{{ contiv_etcd_port }}"
-            - "--advertise-client-urls=http://127.0.0.1:{{ contiv_etcd_port }}"
-            - "--initial-cluster={{ contiv_etcd_peers }}"
-            - "--data-dir={{ contiv_etcd_data_dir }}"
-          volumeMounts:
-            - name: contiv-etcd-data-dir
-              mountPath: "{{ contiv_etcd_data_dir }}"
-          securityContext:
-            runAsUser: "{{ contiv_etcd_system_uid }}"
-            fsGroup: "{{ contiv_etcd_system_gid }}"
-      volumes:
-        - name: contiv-etcd-data-dir
-          emptyDir: {}

+ 0 - 42
roles/contiv/templates/etcd-scc.yml.j2

@@ -1,42 +0,0 @@
-allowHostDirVolumePlugin: true
-allowHostIPC: false
-allowHostNetwork: true
-allowHostPID: false
-allowHostPorts: false
-allowPrivilegedContainer: false
-allowedCapabilities: []
-allowedFlexVolumes: []
-apiVersion: v1
-defaultAddCapabilities: []
-fsGroup:
-  ranges:
-  - max: "{{ contiv_etcd_system_gid }}"
-    min: "{{ contiv_etcd_system_gid }}"
-  type: MustRunAs
-groups: []
-kind: SecurityContextConstraints
-metadata:
-  annotations:
-    kubernetes.io/description: 'For contiv-etcd only.'
-  creationTimestamp: null
-  name: contiv-etcd
-priority: null
-readOnlyRootFilesystem: true
-requiredDropCapabilities:
-- KILL
-- MKNOD
-- SETUID
-- SETGID
-runAsUser:
-  type: MustRunAs
-  uid: "{{ contiv_etcd_system_uid }}"
-seLinuxContext:
-  type: MustRunAs
-supplementalGroups:
-  type: MustRunAs
-users:
-- system:serviceaccount:kube-system:contiv-etcd
-volumes:
-- emptyDir
-- hostPath
-- secret

+ 0 - 1
roles/contiv/templates/netmaster.j2

@@ -1 +0,0 @@
-NETMASTER_ARGS='--etcd={{ contiv_etcd_url }} --listen-url=127.0.0.1:{{ contiv_netmaster_port }} --fwdmode={{ contiv_netplugin_fwd_mode }} --infra={{ contiv_fabric_mode }} --control-url={{ contiv_netmaster_ctrl_ip }}:{{ contiv_netmaster_port }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}'

+ 0 - 13
roles/contiv/templates/netmaster.service

@@ -1,13 +0,0 @@
-[Unit]
-Description=Netmaster
-After=auditd.service systemd-user-sessions.service contiv-etcd.service
-
-[Service]
-EnvironmentFile=/etc/default/netmaster
-ExecStart={{ contiv_bin_dir }}/netmaster $NETMASTER_ARGS
-KillMode=control-group
-Restart=always
-RestartSec=10
-
-[Install]
-WantedBy=multi-user.target

+ 0 - 6
roles/contiv/templates/netplugin.j2

@@ -1,6 +0,0 @@
-{% if contiv_encap_mode == "vlan" %}
-NETPLUGIN_ARGS='--vlan-if={{ contiv_netplugin_interface }} --ctrl-ip={{ contiv_netplugin_ctrl_ip }} --etcd={{ contiv_etcd_url }} --fwdmode={{ contiv_netplugin_fwd_mode }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}'
-{% endif %}
-{% if contiv_encap_mode == "vxlan" %}
-NETPLUGIN_ARGS='--vtep-ip={{ contiv_netplugin_ctrl_ip }} --vxlan-port={{ contiv_vxlan_port }} --ctrl-ip={{ contiv_netplugin_ctrl_ip }} --etcd={{ contiv_etcd_url }} --fwdmode={{ contiv_netplugin_fwd_mode }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}'
-{% endif %}

+ 0 - 13
roles/contiv/templates/netplugin.service

@@ -1,13 +0,0 @@
-[Unit]
-Description=Netplugin
-After=auditd.service systemd-user-sessions.service contiv-etcd.service
-
-[Service]
-EnvironmentFile=/etc/default/netplugin
-ExecStart={{ contiv_bin_dir }}/netplugin $NETPLUGIN_ARGS
-KillMode=control-group
-Restart=always
-RestartSec=10
-
-[Install]
-WantedBy=multi-user.target

+ 0 - 10
roles/contiv_facts/defaults/main.yaml

@@ -1,10 +0,0 @@
----
-# The directory where binaries are stored on Ansible
-# managed systems.
-contiv_bin_dir: /usr/bin
-
-# The directory used by Ansible to temporarily store
-# files on Ansible managed systems.
-contiv_ansible_temp_dir: /tmp/.ansible/files
-
-contiv_source_type: packageManager

+ 0 - 3
roles/contiv_facts/handlers/main.yml

@@ -1,3 +0,0 @@
----
-- name: reload systemd
-  command: systemctl --system daemon-reload

+ 0 - 26
roles/contiv_facts/tasks/fedora-install.yml

@@ -1,26 +0,0 @@
----
-- name: Install dnf
-  yum:
-    name: dnf
-    state: installed
-  register: result
-  until: result is succeeded
-
-- name: Update repo cache
-  command: dnf update -y
-  retries: 5
-  delay: 10
-  environment:
-    https_proxy: "{{ contiv_https_proxy }}"
-    http_proxy: "{{ contiv_http_proxy }}"
-    no_proxy: "{{ contiv_no_proxy }}"
-
-- name: Install libselinux-python
-  command: dnf install {{ item }} -y
-  with_items:
-    - python-dnf
-    - libselinux-python
-  environment:
-    https_proxy: "{{ contiv_https_proxy }}"
-    http_proxy: "{{ contiv_http_proxy }}"
-    no_proxy: "{{ contiv_no_proxy }}"

+ 0 - 63
roles/contiv_facts/tasks/main.yml

@@ -1,63 +0,0 @@
----
-- name: Determine if CoreOS
-  raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'"
-  register: distro
-  check_mode: no
-
-- name: Init the contiv_is_coreos fact
-  set_fact:
-    contiv_is_coreos: false
-
-- name: Set the contiv_is_coreos fact
-  set_fact:
-    contiv_is_coreos: true
-  when: "'CoreOS' in distro.stdout"
-
-- name: Set the bin directory path for CoreOS
-  set_fact:
-    contiv_bin_dir: "/opt/bin"
-  when: contiv_is_coreos
-
-- name: Create the directory used to store binaries
-  file:
-    path: "{{ contiv_bin_dir }}"
-    state: directory
-
-- name: Create Ansible temp directory
-  file:
-    path: "{{ contiv_ansible_temp_dir }}"
-    state: directory
-
-- name: Determine if has rpm
-  stat:
-    path: /usr/bin/rpm
-    get_checksum: false
-    get_attributes: false
-    get_mime: false
-  register: s
-  changed_when: false
-  check_mode: no
-
-- name: Init the contiv_has_rpm fact
-  set_fact:
-    contiv_has_rpm: false
-
-- name: Set the contiv_has_rpm fact
-  set_fact:
-    contiv_has_rpm: true
-  when: s.stat.exists
-
-- name: Init the contiv_has_firewalld fact
-  set_fact:
-    contiv_has_firewalld: false
-
-- name: Init the contiv_has_iptables fact
-  set_fact:
-    contiv_has_iptables: false
-
-# collect information about what packages are installed
-- include_tasks: rpm.yml
-  when: contiv_has_rpm
-
-- include_tasks: fedora-install.yml
-  when: ansible_distribution == "Fedora"

+ 0 - 30
roles/contiv_facts/tasks/rpm.yml

@@ -1,30 +0,0 @@
----
-- name: RPM | Determine if firewalld installed
-  command: "rpm -q firewalld"
-  register: s
-  changed_when: false
-  failed_when: false
-  check_mode: no
-
-- name: RPM | Determine if firewalld enabled
-  systemd:
-    name: "firewalld"
-  ignore_errors: true
-  register: ss
-
-- name: Set the contiv_has_firewalld fact
-  set_fact:
-    contiv_has_firewalld: true
-  when: s.rc == 0 and ss.status.ActiveState == 'active'
-
-- name: Determine if iptables-services installed
-  command: "rpm -q iptables-services"
-  register: s
-  changed_when: false
-  failed_when: false
-  check_mode: no
-
-- name: Set the contiv_has_iptables fact
-  set_fact:
-    contiv_has_iptables: true
-  when: s.rc == 0

+ 0 - 40
roles/etcd/README.md

@@ -1,40 +0,0 @@
-Role Name
-=========
-
-Configures an etcd cluster for an arbitrary number of hosts
-
-Requirements
-------------
-
-* Ansible 2.2
-* This role assumes it's being deployed on a RHEL/Fedora-based host with a package
-named 'etcd' available via yum or dnf (conditionally).
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-etcd-common
-
-Example Playbook
-----------------
-
-    - hosts: etcd
-      roles:
-         - { role: etcd }
-
-License
--------
-
-MIT
-
-Author Information
-------------------
-
-Scott Dodson <sdodson@redhat.com>
-Adapted from https://github.com/retr0h/ansible-etcd for use on RHEL/Fedora. We
-should at some point submit a PR to merge this with that module.

+ 0 - 114
roles/etcd/defaults/main.yaml

@@ -1,114 +0,0 @@
----
-r_etcd_common_backup_tag: ''
-r_etcd_common_backup_sufix_name: ''
-
-l_etcd_static_pod: "{{ (inventory_hostname in groups['oo_masters']) | bool }}"
-
-# runc, docker, static pod, host
-r_etcd_common_etcd_runtime: "{{ 'static_pod' if l_etcd_static_pod  else 'host' }}"
-
-r_etcd_default_version: "3.2.22"
-# lib_utils_oo_oreg_image is a custom filter defined in roles/lib_utils/filter_plugins/oo_filters.py
-# This filter attempts to combine oreg_url host with project/component from etcd_image_dict.
-# "oreg.example.com/openshift3/ose-${component}:${version}"
-# becomes "oreg.example.com/rhel7/etcd:{{ r_etcd_upgrade_version | default(r_etcd_default_version) }}"
-osm_etcd_image: "{{ etcd_image_dict[openshift_deployment_type] | lib_utils_oo_oreg_image((oreg_url | default('None'))) }}"
-etcd_image_dict:
-  origin: "quay.io/coreos/etcd:v{{ r_etcd_upgrade_version | default(r_etcd_default_version) }}"
-  openshift-enterprise: "registry.redhat.io/rhel7/etcd:{{ r_etcd_upgrade_version | default(r_etcd_default_version) }}"
-etcd_image: "{{ osm_etcd_image }}"
-
-# etcd run on a host => use etcdctl command directly
-etcdctl_dict:
-  host: 'etcdctl'
-  static_pod: '/usr/local/bin/master-exec etcd etcd etcdctl'
-r_etcd_common_etcdctl_command: "{{ etcdctl_dict[r_etcd_common_etcd_runtime] }}"
-
-# etcd server vars
-etcd_conf_dir: '/etc/etcd'
-etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
-etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
-etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
-etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}"
-etcd_key_file: "{{ etcd_conf_dir }}/server.key"
-etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt"
-etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt"
-etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key"
-
-# etcd ca vars
-etcd_ca_dir: "{{ etcd_conf_dir}}/ca"
-etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs"
-etcd_cert_prefix: ''
-etcd_cert_config_dir: "/etc/etcd"
-etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt"
-etcd_ca_key: "{{ etcd_ca_dir }}/ca.key"
-etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf"
-etcd_ca_name: etcd_ca
-etcd_req_ext: etcd_v3_req
-etcd_ca_exts_peer: etcd_v3_ca_peer
-etcd_ca_exts_server: etcd_v3_ca_server
-etcd_ca_exts_self: etcd_v3_ca_self
-etcd_ca_exts_client: etcd_v3_ca_client
-etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl"
-etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs"
-etcd_ca_db: "{{ etcd_ca_dir }}/index.txt"
-etcd_ca_serial: "{{ etcd_ca_dir }}/serial"
-etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber"
-etcd_ca_default_days: 1825
-
-r_etcd_common_master_peer_cert_file: /etc/origin/master/master.etcd-client.crt
-r_etcd_common_master_peer_key_file: /etc/origin/master/master.etcd-client.key
-r_etcd_common_master_peer_ca_file: /etc/origin/master/master.etcd-ca.crt
-
-# etcd server & certificate vars
-etcd_hostname: "{{ openshift.common.hostname }}"
-etcd_ip: "{{ openshift.common.ip }}"
-etcd_is_thirdparty: False
-
-# etcd dir vars
-etcd_data_dir: "/var/lib/etcd/"
-
-# etcd ports and protocols
-etcd_client_port: 2379
-etcd_peer_port: 2380
-etcd_url_scheme: https
-etcd_peer_url_scheme: https
-
-etcd_initial_cluster_state: new
-etcd_initial_cluster_token: etcd-cluster-1
-
-etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
-etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
-etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
-etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
-
-etcd_peer: "{{ openshift.common.hostname }}"
-etcdctlv2: "{{ r_etcd_common_etcdctl_command }} --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoints {{ etcd_peer_url_scheme }}://{{ etcd_peer }}:{{ etcd_client_port }}"
-
-etcd_service: etcd
-# Location of the service file is fixed and not meant to be changed
-etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service"
-
-r_etcd_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
-r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-
-etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d"
-r_etcd_os_firewall_deny: []
-r_etcd_os_firewall_allow:
-- service: etcd
-  port: "{{etcd_client_port}}/tcp"
-- service: etcd peering
-  port: "{{ etcd_peer_port }}/tcp"
-
-# set the backend quota to 4GB by default
-etcd_quota_backend_bytes: 4294967296
-
-openshift_docker_service_name: "docker"
-
-etcd_ca_host: "{{ groups['oo_etcd_to_config'].0 }}"
-
-l_etcd_restart_command: "{{ l_etcd_static_pod | ternary('/usr/local/bin/master-restart etcd', 'systemctl restart etcd') }}"
-
-etcd_static_pod_location: "{{ openshift_control_plane_static_pod_location | default('/etc/origin/node/pods/') }}"
-
-etcd_cipher_suites: ""
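
The comment near the top of this file describes how the custom lib_utils_oo_oreg_image filter rewrites the registry host while keeping the project/component from etcd_image_dict. A throwaway debug sketch (assuming the lib_utils filter plugins are loaded, as the role's meta dependencies require) would simply echo the expression already defined for osm_etcd_image:

- name: Show the effective etcd image for this deployment type (sketch)
  debug:
    msg: "{{ etcd_image_dict[openshift_deployment_type] | lib_utils_oo_oreg_image((oreg_url | default('None'))) }}"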

+ 0 - 43
roles/etcd/files/etcd.yaml

@@ -1,43 +0,0 @@
-kind: Pod
-apiVersion: v1
-metadata:
-  name: master-etcd
-  namespace: kube-system
-  labels:
-    openshift.io/control-plane: "true"
-    openshift.io/component: etcd
-  annotations:
-    scheduler.alpha.kubernetes.io/critical-pod: ''
-spec:
-  restartPolicy: Always
-  hostNetwork: true
-  priorityClassName: system-node-critical
-  containers:
-  - name: etcd
-    image: quay.io/coreos/etcd:v3.3
-    workingDir: /var/lib/etcd
-    command: ["/bin/sh", "-c"]
-    args:
-    - |
-      #!/bin/sh
-      set -o allexport
-      source /etc/etcd/etcd.conf
-      exec etcd
-    securityContext:
-      privileged: true
-    volumeMounts:
-     - mountPath: /etc/etcd/
-       name: master-config
-       readOnly: true
-     - mountPath: /var/lib/etcd/
-       name: master-data
-    livenessProbe:
-      exec:
-      initialDelaySeconds: 45
-  volumes:
-  - name: master-config
-    hostPath:
-      path: /etc/etcd/
-  - name: master-data
-    hostPath:
-      path: /var/lib/etcd

+ 0 - 3
roles/etcd/handlers/main.yml

@@ -1,3 +0,0 @@
----
-- name: restart etcd
-  command: "{{ l_etcd_restart_command }}"

+ 0 - 21
roles/etcd/meta/main.yml

@@ -1,21 +0,0 @@
----
-# This module is based on https://github.com/retr0h/ansible-etcd with most
-# changes centered around installing from a pre-existing rpm
-# TODO: Extend https://github.com/retr0h/ansible-etcd rather than forking
-galaxy_info:
-  author: Scott Dodson
-  description: etcd management
-  company: Red Hat, Inc.
-  license: Apache License, Version 2.0
-  min_ansible_version: 2.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-  - system
-dependencies:
-- role: lib_openshift
-- role: lib_utils
-- role: openshift_facts

+ 0 - 11
roles/etcd/tasks/add_new_member.yml

@@ -1,11 +0,0 @@
----
-# Set some facts to reference from hostvars
-- import_tasks: set_facts.yml
-
-- name: Add new etcd members to cluster
-  command: "{{ hostvars[etcd_ca_host].etcdctlv2 }} member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
-  delegate_to: "{{ etcd_ca_host }}"
-  register: etcd_add_check
-  retries: 3
-  delay: 10
-  until: etcd_add_check.rc == 0

+ 0 - 2
roles/etcd/tasks/backup.yml

@@ -1,2 +0,0 @@
----
-- include_tasks: backup/backup.yml

+ 0 - 5
roles/etcd/tasks/backup/archive.yml

@@ -1,5 +0,0 @@
----
-- name: Archive backup
-  archive:
-    path: "{{ l_etcd_backup_dir }}"
-    dest: "{{ l_etcd_backup_dir }}.tgz"

+ 0 - 73
roles/etcd/tasks/backup/backup.yml

@@ -1,73 +0,0 @@
----
-- include_tasks: vars.yml
-
-# TODO: replace shell module with command and update later checks
-- name: Check available disk space for etcd backup
-  shell: df --output=avail -k {{ etcd_data_dir }} | tail -n 1
-  register: l_avail_disk
-  # AUDIT:changed_when: `false` because we are only inspecting
-  # state, not manipulating anything
-  changed_when: false
-
-# TODO: replace shell module with command and update later checks
-- name: Check current etcd disk usage
-  shell: du --exclude='*openshift-backup*' -k {{ etcd_data_dir }} | tail -n 1 | cut -f1
-  register: l_etcd_disk_usage
-  # AUDIT:changed_when: `false` because we are only inspecting
-  # state, not manipulating anything
-  changed_when: false
-
-- name: Abort if insufficient disk space for etcd backup
-  fail:
-    msg: >
-      {{ l_etcd_disk_usage.stdout|int*2 }} Kb disk space required for etcd backup,
-      {{ l_avail_disk.stdout }} Kb available.
-  when: l_etcd_disk_usage.stdout|int*2 > l_avail_disk.stdout|int
-
-# For non containerized we should have the correct version of
-# etcd installed already. So don't do anything.
-#
-# For static pod installs we now exec into etcd_container
-
-- name: Check selinux label of '{{ etcd_data_dir }}'
-  command: >
-    stat -c '%C' {{ etcd_data_dir }}
-  register: l_etcd_selinux_labels
-
-- debug:
-    msg: "{{ l_etcd_selinux_labels }}"
-
-- name: Make sure the '{{ etcd_data_dir }}' has the proper label
-  command: >
-    chcon -t svirt_sandbox_file_t  "{{ etcd_data_dir }}"
-  when:
-  - l_etcd_selinux_labels.rc == 0
-  - "'svirt_sandbox_file_t' not in l_etcd_selinux_labels.stdout"
-
-- name: Generate etcd backup
-  command: >
-    {{ r_etcd_common_etcdctl_command }} backup --data-dir={{ l_etcd_incontainer_data_dir }}
-    --backup-dir={{ l_etcd_incontainer_backup_dir }}
-
-# According to the docs change you can simply copy snap/db
-# https://github.com/openshift/openshift-docs/commit/b38042de02d9780842dce95cfa0ef45d53b58bc6
-- name: Check for v3 data store
-  stat:
-    path: "{{ etcd_data_dir }}/member/snap/db"
-    get_checksum: false
-    get_attributes: false
-    get_mime: false
-  register: l_v3_db
-
-- name: Copy etcd v3 data store
-  command: >
-    cp -a {{ etcd_data_dir }}/member/snap/db
-    {{ l_etcd_backup_dir }}/member/snap/
-  when: l_v3_db.stat.exists
-
-- set_fact:
-    r_etcd_common_backup_complete: True
-
-- name: Display location of etcd backup
-  debug:
-    msg: "Etcd backup created in {{ l_etcd_backup_dir }}"

+ 0 - 5
roles/etcd/tasks/backup/copy.yml

@@ -1,5 +0,0 @@
----
-- name: Copy etcd backup
-  copy:
-    src: "{{ etcd_backup_sync_directory }}/{{ l_backup_dir_name }}.tgz"
-    dest: "{{ etcd_data_dir }}"

+ 0 - 8
roles/etcd/tasks/backup/fetch.yml

@@ -1,8 +0,0 @@
----
-- name: Fetch etcd backup
-  fetch:
-    src: "{{ l_etcd_backup_dir }}.tgz"
-    dest: "{{ etcd_backup_sync_directory }}/"
-    flat: yes
-    fail_on_missing: yes
-    validate_checksum: yes

+ 0 - 14
roles/etcd/tasks/backup/unarchive.yml

@@ -1,14 +0,0 @@
----
-- shell: ls /var/lib/etcd
-  register: output
-
-- debug:
-    msg: "output: {{ output }}"
-
-- name: Unarchive backup
-  # can't use unarchive https://github.com/ansible/ansible/issues/30821
-  # unarchive:
-  #   src: "{{ l_etcd_backup_dir }}.tgz"
-  #   dest: "{{ l_etcd_backup_dir }}"
-  command: >
-    tar -xf "{{ l_etcd_backup_dir }}.tgz" -C "{{ etcd_data_dir }}"

+ 0 - 15
roles/etcd/tasks/backup/vars.yml

@@ -1,15 +0,0 @@
----
-# Set the etcd backup directory name here once, because the tag or suffix may contain a dynamic value that changes over time.
-# e.g. openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} changes every second, so if the date changed
-# between setting the l_etcd_incontainer_backup_dir and l_etcd_backup_dir facts, the two backup directory names would differ.
-- set_fact:
-    l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
-
-- set_fact:
-    l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}"
-
-- set_fact:
-    l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}"
-
-- set_fact:
-    l_etcd_backup_dir: "{{ etcd_data_dir }}/{{ l_backup_dir_name }}"

+ 0 - 2
roles/etcd/tasks/backup_ca_certificates.yml

@@ -1,2 +0,0 @@
----
-- include_tasks: certificates/backup_ca_certificates.yml

+ 0 - 2
roles/etcd/tasks/backup_generated_certificates.yml

@@ -1,2 +0,0 @@
----
-- include_tasks: certificates/backup_generated_certificates.yml

+ 0 - 2
roles/etcd/tasks/backup_server_certificates.yml

@@ -1,2 +0,0 @@
----
-- include_tasks: certificates/backup_server_certificates.yml

+ 0 - 0
roles/etcd/tasks/ca.yml


Some files were not shown because too many files changed in this diff