Browse Source

Add external provisioners playbook starting with aws efs

Matthew Wong 8 years ago
parent
commit
f2f58c7cda

+ 5 - 0
playbooks/common/openshift-cluster/openshift_provisioners.yml

@@ -0,0 +1,5 @@
+---
+# Entry point: runs the openshift_provisioners role on the first master to
+# install or uninstall external dynamic provisioners (currently AWS EFS).
+- name: OpenShift Provisioners
+  hosts: oo_first_master
+  roles:
+  - openshift_provisioners

+ 29 - 0
roles/openshift_provisioners/README.md

@@ -0,0 +1,29 @@
+# OpenShift External Dynamic Provisioners
+
+## Required Vars
+* `openshift_provisioners_install_provisioners`: When `True` the openshift_provisioners role will install provisioners that have their "master" var (e.g. `openshift_provisioners_efs`) set `True`. When `False` will uninstall provisioners that have their var set `True`.
+
+## Optional Vars
+* `openshift_provisioners_image_prefix`: The prefix for the provisioner images to use. Defaults to 'docker.io/openshift/origin-'.
+* `openshift_provisioners_image_version`: The image version for the provisioner images to use. Defaults to 'latest'.
+* `openshift_provisioners_project`: The namespace that provisioners will be installed in. Defaults to 'openshift-infra'.
+
+## AWS EFS
+
+### Prerequisites
+* An IAM user assigned the AmazonElasticFileSystemReadOnlyAccess policy (or better)
+* An EFS file system in your cluster's region
+* [Mount targets](http://docs.aws.amazon.com/efs/latest/ug/accessing-fs.html) and [security groups](http://docs.aws.amazon.com/efs/latest/ug/accessing-fs-create-security-groups.html) such that any node (in any zone in the cluster's region) can mount the EFS file system by its [File system DNS name](http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html)
+
+### Required Vars
+* `openshift_provisioners_efs_fsid`: The [File system ID](http://docs.aws.amazon.com/efs/latest/ug/gs-step-two-create-efs-resources.html) of the EFS file system, e.g. fs-47a2c22e.
+* `openshift_provisioners_efs_region`: The Amazon EC2 region of the EFS file system.
+* `openshift_provisioners_efs_aws_access_key_id`: The AWS access key of the IAM user, used to check that the EFS file system specified actually exists.
+* `openshift_provisioners_efs_aws_secret_access_key`: The AWS secret access key of the IAM user, used to check that the EFS file system specified actually exists.
+
+### Optional Vars
+* `openshift_provisioners_efs`: When `True` the AWS EFS provisioner will be installed or uninstalled according to whether `openshift_provisioners_install_provisioners` is `True` or `False`, respectively. Defaults to `False`.
+* `openshift_provisioners_efs_path`: The path of the directory in the EFS file system in which the EFS provisioner will create a directory to back each PV it creates. It must exist and be mountable by the EFS provisioner. Defaults to '/persistentvolumes'.
+* `openshift_provisioners_efs_name`: The `provisioner` name that `StorageClasses` specify. Defaults to 'openshift.org/aws-efs'.
+* `openshift_provisioners_efs_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land.
+* `openshift_provisioners_efs_supplementalgroup`: The supplemental group to give the pod in case it is needed for permission to write to the EFS file system. Defaults to '65534'.

+ 12 - 0
roles/openshift_provisioners/defaults/main.yaml

@@ -0,0 +1,12 @@
+---
+# When true, install the provisioners whose per-provisioner flag (below) is
+# set; when false, uninstall them instead.
+openshift_provisioners_install_provisioners: true
+openshift_provisioners_image_prefix: docker.io/openshift/origin-
+openshift_provisioners_image_version: latest
+
+# AWS EFS provisioner; requires the efs_fsid/region/credential vars (README).
+openshift_provisioners_efs: false
+openshift_provisioners_efs_path: /persistentvolumes
+openshift_provisioners_efs_name: openshift.org/aws-efs
+# Map of node labels used as the pod's nodeSelector; "" disables it.
+openshift_provisioners_efs_nodeselector: ""
+openshift_provisioners_efs_supplementalgroup: '65534'
+
+openshift_provisioners_project: openshift-infra

+ 16 - 0
roles/openshift_provisioners/meta/main.yaml

@@ -0,0 +1,16 @@
+---
+galaxy_info:
+  author: OpenShift Red Hat
+  description: OpenShift Provisioners
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 2.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+# lib_openshift supplies the oc_* modules (oc_obj, oc_scale, oc_serviceaccount)
+# this role's tasks use; openshift_facts populates the openshift.common facts.
+dependencies:
+- role: lib_openshift
+- role: openshift_facts

+ 19 - 0
roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml

@@ -0,0 +1,19 @@
+---
+# Render one ClusterRoleBinding per provisioner, binding its service account
+# to the system:persistent-volume-provisioner cluster role.
+- name: Generate ClusterRoleBindings
+  template:
+    src: clusterrolebinding.j2
+    dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-clusterrolebinding.yaml"
+  vars:
+    acct_name: provisioners-{{ item }}
+    obj_name: run-provisioners-{{ item }}
+    labels:
+      provisioners-infra: support
+    crb_usernames: ["system:serviceaccount:{{ openshift_provisioners_project }}:{{ acct_name }}"]
+    subjects:
+      - kind: ServiceAccount
+        name: "{{ acct_name }}"
+        namespace: "{{ openshift_provisioners_project }}"
+    cr_name: "system:persistent-volume-provisioner"
+  with_items:
+    # TODO: add future provisioners here
+    - efs
+  check_mode: no
+  changed_when: no

+ 14 - 0
roles/openshift_provisioners/tasks/generate_secrets.yaml

@@ -0,0 +1,14 @@
+---
+# Render the Secret that carries the AWS credentials the EFS provisioner
+# uses to verify the file system exists.
+- name: Generate secret for efs
+  template:
+    src: secret.j2
+    dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-secret.yaml"
+  vars:
+    name: efs
+    obj_name: "provisioners-efs"
+    labels:
+      provisioners-infra: support
+    secrets:
+      - {key: aws-access-key-id, value: "{{ openshift_provisioners_efs_aws_access_key_id }}"}
+      - {key: aws-secret-access-key, value: "{{ openshift_provisioners_efs_aws_secret_access_key }}"}
+  check_mode: no
+  changed_when: no
+  when: openshift_provisioners_efs | bool

+ 12 - 0
roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml

@@ -0,0 +1,12 @@
+---
+# Render one ServiceAccount per provisioner; generate_clusterrolebindings.yaml
+# binds the matching account to the provisioner cluster role.
+- name: Generating serviceaccounts
+  template:
+    src: serviceaccount.j2
+    dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-sa.yaml"
+  vars:
+    obj_name: provisioners-{{ item }}
+    labels:
+      provisioners-infra: support
+  with_items:
+  # TODO: add future provisioners here
+  - efs
+  check_mode: no
+  changed_when: no

+ 70 - 0
roles/openshift_provisioners/tasks/install_efs.yaml

@@ -0,0 +1,70 @@
+---
+# Render everything the EFS provisioner needs — a PV/PVC pair backed by the
+# EFS file system (mounted through the NFS volume plugin) and the
+# provisioner's DeploymentConfig — then grant its service account anyuid.
+- name: Check efs current replica count
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs
+    -o jsonpath='{.spec.replicas}' -n {{ openshift_provisioners_project }}
+  register: efs_replica_count
+  when: not ansible_check_mode
+  ignore_errors: yes
+  changed_when: no
+
+- name: Generate efs PersistentVolumeClaim
+  template:
+    src: pvc.j2
+    dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-pvc.yaml"
+  vars:
+    obj_name: "provisioners-efs"
+    size: "1Mi"
+    access_modes:
+      - "ReadWriteMany"
+    pv_selector:
+      provisioners-efs: efs
+  check_mode: no
+  changed_when: no
+
+- name: Generate efs PersistentVolume
+  template:
+    src: pv.j2
+    dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-pv.yaml"
+  vars:
+    obj_name: "provisioners-efs"
+    size: "1Mi"
+    access_modes:
+      - "ReadWriteMany"
+    labels:
+      provisioners-efs: efs
+    volume_plugin: "nfs"
+    volume_source:
+      - {key: "server", value: "{{ openshift_provisioners_efs_fsid }}.efs.{{ openshift_provisioners_efs_region }}.amazonaws.com"}
+      - {key: "path", value: "{{ openshift_provisioners_efs_path }}"}
+    claim_name: "provisioners-efs"
+  check_mode: no
+  changed_when: no
+
+- name: Generate efs DeploymentConfig
+  template:
+    src: efs.j2
+    dest: "{{ mktemp.stdout }}/templates/{{ deploy_name }}-dc.yaml"
+  vars:
+    name: efs
+    deploy_name: "provisioners-efs"
+    deploy_serviceAccount: "provisioners-efs"
+    replica_count: "{{ efs_replica_count.stdout | default(0) }}"
+    node_selector: "{{ openshift_provisioners_efs_nodeselector | default('') }}"
+    claim_name: "provisioners-efs"
+  check_mode: no
+  changed_when: false
+
+# anyuid in order to run as root & chgrp shares with allocated gids
+- name: "Check efs anyuid permissions"
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    get scc/anyuid -o jsonpath='{.users}'
+  register: efs_anyuid
+  check_mode: no
+  changed_when: no
+
+- name: "Set anyuid permissions for efs"
+  command: >
+    {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+    add-scc-to-user anyuid system:serviceaccount:{{ openshift_provisioners_project }}:provisioners-efs
+  register: efs_output
+  failed_when: "efs_output.rc == 1 and 'exists' not in efs_output.stderr"
+  check_mode: no
+  # 'when' is already a Jinja context — no nested {{ }} templating inside it
+  when: '("system:serviceaccount:" ~ openshift_provisioners_project ~ ":provisioners-efs") not in efs_anyuid.stdout'

+ 55 - 0
roles/openshift_provisioners/tasks/install_provisioners.yaml

@@ -0,0 +1,55 @@
+---
+# Validate required EFS vars, render all object templates into the temp
+# directory, oc-apply each rendered file, then scale the provisioners up.
+- name: Check that EFS File System ID is set
+  fail:
+    msg: the openshift_provisioners_efs_fsid variable is required
+  when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_fsid is not defined
+
+- name: Check that EFS region is set
+  fail:
+    msg: the openshift_provisioners_efs_region variable is required
+  when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_region is not defined
+
+- name: Check that EFS AWS access key id is set
+  fail:
+    msg: the openshift_provisioners_efs_aws_access_key_id variable is required
+  when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_access_key_id is not defined
+
+- name: Check that EFS AWS secret access key is set
+  fail:
+    msg: the openshift_provisioners_efs_aws_secret_access_key variable is required
+  when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_secret_access_key is not defined
+
+- name: Install support
+  include: install_support.yaml
+
+- name: Install EFS
+  include: install_efs.yaml
+  when: openshift_provisioners_efs | bool
+
+- name: Find generated object definitions
+  find:
+    paths: "{{ mktemp.stdout }}/templates"
+    patterns: "*.yaml"
+  register: object_def_files
+  changed_when: no
+
+- name: Read generated object definitions
+  slurp:
+    src: "{{ item }}"
+  register: object_defs
+  with_items: "{{ object_def_files.files | map(attribute='path') | list | sort }}"
+  changed_when: no
+
+- name: Create objects
+  include: oc_apply.yaml
+  vars:
+    kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+    namespace: "{{ openshift_provisioners_project }}"
+    file_name: "{{ file.source }}"
+    file_content: "{{ file.content | b64decode | from_yaml }}"
+  with_items: "{{ object_defs.results }}"
+  loop_control:
+    loop_var: file
+  when: not ansible_check_mode
+
+- name: Printing out objects to create
+  debug:
+    msg: "{{ file.content | b64decode }}"
+  with_items: "{{ object_defs.results }}"
+  loop_control:
+    loop_var: file
+  when: ansible_check_mode
+
+- name: Scaling up cluster
+  include: start_cluster.yaml
+  when: start_cluster | default(true) | bool

+ 24 - 0
roles/openshift_provisioners/tasks/install_support.yaml

@@ -0,0 +1,24 @@
+---
+# Ensure the target project and the temp template directory exist, then
+# render the secrets, cluster role bindings, and service accounts.
+- name: Check for provisioners project already exists
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{ openshift_provisioners_project }} --no-headers
+  register: provisioners_project_result
+  ignore_errors: yes
+  when: not ansible_check_mode
+  changed_when: no
+
+- name: Create provisioners project
+  command: >
+    {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{ openshift_provisioners_project }}
+  when: not ansible_check_mode and "not found" in provisioners_project_result.stderr
+
+- name: Create temp directory for all our templates
+  file:
+    path: "{{ mktemp.stdout }}/templates"
+    state: directory
+    mode: "0755"
+  changed_when: False
+  check_mode: no
+
+- include: generate_secrets.yaml
+
+- include: generate_clusterrolebindings.yaml
+
+- include: generate_serviceaccounts.yaml

+ 27 - 0
roles/openshift_provisioners/tasks/main.yaml

@@ -0,0 +1,27 @@
+---
+# Role entry point: stage a temp work dir holding the admin kubeconfig,
+# install or uninstall provisioners based on
+# openshift_provisioners_install_provisioners, then clean the dir up.
+- name: Create temp directory for doing work in
+  command: mktemp -td openshift-provisioners-ansible-XXXXXX
+  register: mktemp
+  changed_when: False
+  check_mode: no
+
+- name: Copy the admin client config(s)
+  command: >
+    cp {{ openshift.common.config_base}}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+  changed_when: False
+  check_mode: no
+  tags: provisioners_init
+
+- include: "{{ role_path }}/tasks/install_provisioners.yaml"
+  when: openshift_provisioners_install_provisioners | default(false) | bool
+
+- include: "{{ role_path }}/tasks/uninstall_provisioners.yaml"
+  when: not openshift_provisioners_install_provisioners | default(false) | bool
+
+- name: Delete temp directory
+  file:
+    name: "{{ mktemp.stdout }}"
+    state: absent
+  tags: provisioners_cleanup
+  changed_when: False
+  check_mode: no

+ 51 - 0
roles/openshift_provisioners/tasks/oc_apply.yaml

@@ -0,0 +1,51 @@
+---
+# Idempotently "oc apply" one rendered object definition. Change is
+# detected by comparing .metadata.resourceVersion before and after the
+# apply; if the apply fails outright (rc != 0, e.g. an immutable field),
+# the object is deleted and recreated instead.
+- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
+  command: >
+    {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+    get {{file_content.kind}} {{file_content.metadata.name}}
+    -o jsonpath='{.metadata.resourceVersion}'
+    -n {{namespace}}
+  register: generation_init
+  # 'not found' is expected on first creation; any other empty result is an error
+  failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
+  changed_when: no
+
+- name: Applying {{file_name}}
+  command: >
+    {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+    apply -f {{ file_name }}
+    -n {{ namespace }}
+  register: generation_apply
+  failed_when: "'error' in generation_apply.stderr"
+  changed_when: no
+
+- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
+  command: >
+    {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+    get {{file_content.kind}} {{file_content.metadata.name}}
+    -o jsonpath='{.metadata.resourceVersion}'
+    -n {{namespace}}
+  register: generation_changed
+  failed_when: "'not found' not in generation_changed.stderr and generation_changed.stdout == ''"
+  changed_when: generation_changed.stdout | default (0) | int  > generation_init.stdout | default(0) | int
+  when:
+    - "'field is immutable' not in generation_apply.stderr"
+
+# fall back to delete + recreate when the apply above failed
+- name: Removing previous {{file_name}}
+  command: >
+    {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+    delete -f {{ file_name }}
+    -n {{ namespace }}
+  register: generation_delete
+  failed_when: "'error' in generation_delete.stderr"
+  changed_when: generation_delete.rc == 0
+  when: generation_apply.rc != 0
+
+- name: Recreating {{file_name}}
+  command: >
+    {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+    apply -f {{ file_name }}
+    -n {{ namespace }}
+  register: generation_apply
+  failed_when: "'error' in generation_apply.stderr"
+  changed_when: generation_apply.rc == 0
+  when: generation_apply.rc != 0

+ 20 - 0
roles/openshift_provisioners/tasks/start_cluster.yaml

@@ -0,0 +1,20 @@
+---
+# Scale every DeploymentConfig labeled provisioners-infra=efs up to one replica.
+- name: Retrieve efs
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "provisioners-infra=efs"
+    namespace: "{{ openshift_provisioners_project }}"
+  register: efs_dc
+  when: openshift_provisioners_efs | bool
+
+- name: start efs
+  oc_scale:
+    kind: dc
+    name: "{{ dc_name }}"
+    namespace: "{{ openshift_provisioners_project }}"
+    replicas: 1
+  with_items: "{{ efs_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+  loop_control:
+    loop_var: dc_name
+  when: openshift_provisioners_efs | bool

+ 20 - 0
roles/openshift_provisioners/tasks/stop_cluster.yaml

@@ -0,0 +1,20 @@
+---
+# Scale every DeploymentConfig labeled provisioners-infra=efs down to zero.
+- name: Retrieve efs
+  oc_obj:
+    state: list
+    kind: dc
+    selector: "provisioners-infra=efs"
+    namespace: "{{ openshift_provisioners_project }}"
+  register: efs_dc
+  when: openshift_provisioners_efs | bool
+
+- name: stop efs
+  oc_scale:
+    kind: dc
+    name: "{{ dc_name }}"
+    namespace: "{{ openshift_provisioners_project }}"
+    replicas: 0
+  with_items: "{{ efs_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+  loop_control:
+    loop_var: dc_name
+  when: openshift_provisioners_efs | bool

+ 43 - 0
roles/openshift_provisioners/tasks/uninstall_provisioners.yaml

@@ -0,0 +1,43 @@
+---
+# Tear down everything install_provisioners created: scale provisioners to
+# zero, then delete their deployment objects, secrets, cluster role
+# bindings, and service accounts.
+- name: stop provisioners
+  include: stop_cluster.yaml
+
+# delete the deployment objects that we had created
+- name: delete provisioner api objects
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    delete {{ item }} --selector provisioners-infra -n {{ openshift_provisioners_project }} --ignore-not-found=true
+  with_items:
+    - dc
+  register: delete_result
+  changed_when: "'deleted' in delete_result.stdout and delete_result.rc == 0"
+
+# delete our old secrets
+- name: delete provisioner secrets
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    delete secret {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true
+  with_items:
+    - provisioners-efs
+  ignore_errors: true
+  register: delete_result
+  changed_when: "'deleted' in delete_result.stdout and delete_result.rc == 0"
+
+# delete cluster role bindings
+- name: delete cluster role bindings
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    delete clusterrolebindings {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true
+  with_items:
+    - run-provisioners-efs
+  register: delete_result
+  changed_when: "'deleted' in delete_result.stdout and delete_result.rc == 0"
+
+# delete our service accounts
+- name: delete service accounts
+  oc_serviceaccount:
+    name: "{{ item }}"
+    namespace: "{{ openshift_provisioners_project }}"
+    state: absent
+  with_items:
+    - provisioners-efs

+ 30 - 0
roles/openshift_provisioners/templates/clusterrolebinding.j2

@@ -0,0 +1,30 @@
+{# ClusterRoleBinding template: binds cr_name to the given usernames,
+   groupnames, and subjects. labels/crb_groupnames are optional. -#}
+apiVersion: v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{obj_name}}
+{% if labels is defined%}
+  labels:
+{% for key, value in labels.items() %}
+    {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+{% if crb_usernames is defined %}
+userNames:
+{% for name in crb_usernames %}
+  - {{ name }}
+{% endfor %}
+{% endif %}
+{% if crb_groupnames is defined %}
+groupNames:
+{% for name in crb_groupnames %}
+  - {{ name }}
+{% endfor %}
+{% endif %}
+subjects:
+{% for sub in subjects %}
+  - kind: {{ sub.kind }}
+    name: {{ sub.name }}
+    namespace: {{sub.namespace}}
+{% endfor %}
+roleRef:
+  name: {{cr_name}}

+ 58 - 0
roles/openshift_provisioners/templates/efs.j2

@@ -0,0 +1,58 @@
+{# DeploymentConfig for the EFS provisioner pod; replica count, node
+   selector, and PVC name are supplied by install_efs.yaml. -#}
+kind: DeploymentConfig
+apiVersion: v1
+metadata:
+  name: "{{deploy_name}}"
+  labels:
+    provisioners-infra: "{{name}}"
+    name: "{{name}}"
+spec:
+  replicas: {{replica_count}}
+  selector:
+    provisioners-infra: "{{name}}"
+    name: "{{name}}"
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      name: "{{deploy_name}}"
+      labels:
+        provisioners-infra: "{{name}}"
+        name: "{{name}}"
+    spec:
+      serviceAccountName: "{{deploy_serviceAccount}}"
+{% if node_selector is iterable and node_selector | length > 0 %}
+      nodeSelector:
+{% for key, value in node_selector.items() %}
+        {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+      containers:
+        - name: efs-provisioner
+          image: {{openshift_provisioners_image_prefix}}efs-provisioner:{{openshift_provisioners_image_version}}
+          env:
+            - name: AWS_ACCESS_KEY_ID
+              valueFrom:
+                secretKeyRef:
+                  name: provisioners-efs
+                  key: aws-access-key-id
+            - name: AWS_SECRET_ACCESS_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: provisioners-efs
+                  key: aws-secret-access-key
+            - name: FILE_SYSTEM_ID
+              value: "{{openshift_provisioners_efs_fsid}}"
+            - name: AWS_REGION
+              value: "{{openshift_provisioners_efs_region}}"
+            - name: PROVISIONER_NAME
+              value: "{{openshift_provisioners_efs_name}}"
+          volumeMounts:
+            - name: pv-volume
+              mountPath: /persistentvolumes
+      securityContext:
+        supplementalGroups:
+          - {{openshift_provisioners_efs_supplementalgroup}}
+      volumes:
+        - name: pv-volume
+          persistentVolumeClaim:
+            claimName: "{{claim_name}}"

+ 32 - 0
roles/openshift_provisioners/templates/pv.j2

@@ -0,0 +1,32 @@
+{# PersistentVolume template: volume_plugin/volume_source define the backing
+   store (nfs for EFS); annotations, labels, and claim_name are optional. -#}
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: {{obj_name}}
+{% if annotations is defined %}
+  annotations:
+{% for key,value in annotations.items() %}
+    {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+{% if labels is defined%}
+  labels:
+{% for key, value in labels.items() %}
+    {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+  capacity:
+    storage: {{size}}
+  accessModes:
+{% for mode in access_modes %}
+    - {{mode}}
+{% endfor %}
+  {{volume_plugin}}:
+{% for s in volume_source %}
+    {{s.key}}: {{s.value}}
+{% endfor %}
+{% if claim_name is defined%}
+  claimRef:
+    name: {{claim_name}}
+    namespace: {{openshift_provisioners_project}}
+{% endif %}

+ 26 - 0
roles/openshift_provisioners/templates/pvc.j2

@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: {{obj_name}}
+{% if annotations is defined %}
+  annotations:
+{% for key,value in annotations.iteritems() %}
+    {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+  selector:
+    matchLabels:
+{% for key,value in pv_selector.iteritems() %}
+      {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+  accessModes:
+{% for mode in access_modes %}
+    - {{mode}}
+{% endfor %}
+  resources:
+    requests:
+      storage: {{size}}
+

+ 15 - 0
roles/openshift_provisioners/templates/secret.j2

@@ -0,0 +1,15 @@
+{# Opaque Secret template: each entry of 'secrets' is {key, value}; the
+   value is base64-encoded here as the Secret 'data' field requires. -#}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{obj_name}}
+{% if labels is defined%}
+  labels:
+{% for key, value in labels.items() %}
+    {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+type: Opaque
+data:
+{% for s in secrets %}
+  "{{s.key}}": "{{s.value | b64encode}}"
+{% endfor %}

+ 16 - 0
roles/openshift_provisioners/templates/serviceaccount.j2

@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{obj_name}}
+{% if labels is defined%}
+  labels:
+{% for key, value in labels.iteritems() %}
+    {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+{% if secrets is defined %}
+secrets:
+{% for name in secrets %}
+- name: {{ name }}
+{% endfor %}
+{% endif %}