---
openshift_aws_create_s3: True
openshift_aws_create_iam_cert: True
openshift_aws_delete_iam_cert: "{{ openshift_aws_create_iam_cert }}"
openshift_aws_create_iam_role: False
openshift_aws_create_security_groups: True
openshift_aws_create_launch_config: True
openshift_aws_create_scale_group: True
openshift_aws_node_group_upgrade: False
openshift_aws_wait_for_ssh: True

openshift_aws_clusterid: default
openshift_aws_region: us-east-1
openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"

openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"

openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
openshift_aws_iam_cert_path: ''
openshift_aws_iam_cert_key_path: ''

openshift_aws_iam_role_name: "openshift_node_describe_instances_{{ openshift_aws_clusterid }}"
openshift_aws_iam_role_policy_json: "{{ lookup('file', 'describeinstances.json') }}"
openshift_aws_iam_role_policy_name: "describe_instances_{{ openshift_aws_clusterid }}"

openshift_aws_iam_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"

openshift_aws_ami: ''
openshift_aws_ami_copy_wait: False
openshift_aws_ami_encrypt: False
openshift_aws_ami_copy_src_region: "{{ openshift_aws_region }}"
openshift_aws_ami_name: openshift-gi
openshift_aws_base_ami_name: ami_base

openshift_aws_instance_type: m4.xlarge

openshift_aws_launch_config_bootstrap_token: ''

openshift_aws_users: []

openshift_aws_copy_base_ami_tags: False
openshift_aws_ami_tags:
  bootstrap: "true"
  openshift-created: "true"
  parent: "{{ openshift_aws_base_ami | default('unknown') }}"

openshift_aws_s3_mode: create
openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"

openshift_aws_vpc_tags:
  Name: "{{ openshift_aws_vpc_name }}"

openshift_aws_vpc:
  name: "{{ openshift_aws_vpc_name }}"
  cidr: 172.31.0.0/16
  subnets:
    us-east-1:
    - cidr: 172.31.48.0/20
      az: "us-east-1c"
    # Uncomment to enable use of multiple availability zones.
    # Greenfield installs only! Single-az to multi-az migration is not supported!
    # - cidr: 172.31.32.0/20
    #   az: "us-east-1e"
    # - cidr: 172.31.16.0/20
    #   az: "us-east-1a"

openshift_aws_create_dns: False
openshift_aws_dns_provider: "route53"
# openshift_aws_dns_zone: ""
# e.g. openshift_aws_dns_zone: "{{ openshift_aws_clusterid }}.example.com"

# ELB names we want to query to support dns record creation.
# You don't need to adjust this unless you have modified openshift_aws_elb_dict.
openshift_aws_elb_names:
- "{{ openshift_aws_elb_master_internal_name }}"
- "{{ openshift_aws_elb_master_external_name }}"
- "{{ openshift_aws_elb_infra_name }}"

# l_openshift_aws_elb_facts is created by querying ec2 for all elb names in
# openshift_aws_elb_names via tasks/build_elb_dict.yml
openshift_aws_dns_records:
  # Pertains to inventory file key: openshift_master_cluster_public_hostname
  'api':
    type: 'CNAME'
    # A public or private vpc-attached Route53 zone will be created based on
    # the private_zone boolean. Split-tier dns is supported.
    private_zone: False
    value: "{{ l_openshift_aws_elb_facts[openshift_aws_elb_master_external_name].dns_name }}"
  # Pertains to inventory file key: openshift_master_cluster_hostname
  'internal.api':
    type: 'CNAME'
    private_zone: False
    value: "{{ l_openshift_aws_elb_facts[openshift_aws_elb_master_internal_name].dns_name }}"
  # Pertains to inventory file key: openshift_master_default_subdomain
  '*.apps':
    type: "CNAME"
    private_zone: False
    value: "{{ l_openshift_aws_elb_facts[openshift_aws_elb_infra_name].dns_name }}"
  'logs':
    type: "CNAME"
    private_zone: False
    value: "{{ l_openshift_aws_elb_facts[openshift_aws_elb_infra_name].dns_name }}"
  'metrics':
    type: "CNAME"
    private_zone: False
    value: "{{ l_openshift_aws_elb_facts[openshift_aws_elb_infra_name].dns_name }}"
  'registry':
    type: "CNAME"
    private_zone: False
    value: "{{ l_openshift_aws_elb_facts[openshift_aws_elb_infra_name].dns_name }}"

# Allows users to add records and recursively override the defaults above:
# https://docs.ansible.com/ansible/2.5/user_guide/playbooks_filters.html#combining-hashes-dictionaries
openshift_aws_dns_records_override: {}

l_openshift_aws_dns_records: "{{ openshift_aws_dns_records | combine(openshift_aws_dns_records_override, recursive=True) }}"
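# Because the combine above is recursive, an override only needs to carry the
# keys it changes. A hypothetical example (not a default) that would serve the
# wildcard apps record from a private zone while keeping its type and value:
# openshift_aws_dns_records_override:
#   '*.apps':
#     private_zone: True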

openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}"
openshift_aws_elb_master_external_name: "{{ openshift_aws_elb_basename }}-master-external"
openshift_aws_elb_master_internal_name: "{{ openshift_aws_elb_basename }}-master-internal"
openshift_aws_elb_infra_name: "{{ openshift_aws_elb_basename }}-infra"
openshift_aws_elb_cert_arn: ''

openshift_aws_elb_dict:
  master:
    external:
      cross_az_load_balancing: False
      health_check:
        ping_protocol: tcp
        ping_port: "{{ openshift_master_api_port }}"
        response_timeout: 5
        interval: 30
        unhealthy_threshold: 2
        healthy_threshold: 2
      idle_timeout: 400
      listeners:
      - protocol: tcp
        load_balancer_port: 80
        instance_protocol: ssl
        instance_port: "{{ openshift_master_api_port }}"
      - protocol: ssl
        load_balancer_port: "{{ openshift_master_api_port }}"
        instance_protocol: ssl
        instance_port: "{{ openshift_master_api_port }}"
        ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
      name: "{{ openshift_aws_elb_master_external_name }}"
      # Set scheme [internal|internet-facing]
      scheme: internet-facing
      tags: "{{ openshift_aws_kube_tags }}"
    internal:
      cross_az_load_balancing: False
      health_check:
        ping_protocol: tcp
        ping_port: "{{ openshift_master_api_port }}"
        response_timeout: 5
        interval: 30
        unhealthy_threshold: 2
        healthy_threshold: 2
      idle_timeout: 400
      listeners:
      - protocol: tcp
        load_balancer_port: 80
        instance_protocol: tcp
        instance_port: 80
      - protocol: tcp
        load_balancer_port: "{{ openshift_master_api_port }}"
        instance_protocol: tcp
        instance_port: "{{ openshift_master_api_port }}"
      name: "{{ openshift_aws_elb_master_internal_name }}"
      # Set scheme [internal|internet-facing]
      scheme: internal
      tags: "{{ openshift_aws_kube_tags }}"
  infra:
    external:
      cross_az_load_balancing: False
      health_check:
        ping_protocol: tcp
        ping_port: 443
        response_timeout: 5
        interval: 30
        unhealthy_threshold: 2
        healthy_threshold: 2
      idle_timeout: 400
      listeners:
      - protocol: tcp
        load_balancer_port: 80
        instance_protocol: tcp
        instance_port: 80
        proxy_protocol: True
      - protocol: tcp
        load_balancer_port: 443
        instance_protocol: tcp
        instance_port: 443
        proxy_protocol: True
      name: "{{ openshift_aws_elb_infra_name }}"
      # Set scheme [internal|internet-facing]
      scheme: internet-facing
      tags: "{{ openshift_aws_kube_tags }}"

openshift_aws_master_volumes:
- device_name: /dev/sda1
  volume_size: 100
  volume_type: gp2
  delete_on_termination: False
- device_name: /dev/sdb
  volume_size: 100
  volume_type: gp2
  delete_on_termination: False

openshift_aws_node_group_config_node_volumes:
- device_name: /dev/sda1
  volume_size: 100
  volume_type: gp2
  delete_on_termination: True
- device_name: /dev/sdb
  volume_size: 100
  volume_type: gp2
  delete_on_termination: True

# build_instance_tags is a custom filter in role lib_utils
openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
openshift_aws_node_group_termination_policy: Default
openshift_aws_node_group_replace_instances: []
openshift_aws_node_group_replace_all_instances: False

openshift_aws_ami_map:
  master: "{{ openshift_aws_ami }}"
  infra: "{{ openshift_aws_ami }}"
  compute: "{{ openshift_aws_ami }}"

openshift_aws_master_group:
- name: "{{ openshift_aws_clusterid }} master group"
  group: master
  node_group_config: node-config-master
  tags:
    host-type: master
    sub-host-type: default
    runtime: docker

openshift_aws_node_groups:
- name: "{{ openshift_aws_clusterid }} compute group"
  group: compute
  node_group_config: node-config-compute
  tags:
    host-type: node
    sub-host-type: compute
    runtime: docker
- name: "{{ openshift_aws_clusterid }} infra group"
  group: infra
  node_group_config: node-config-infra
  tags:
    host-type: node
    sub-host-type: infra
    runtime: docker

openshift_aws_created_asgs: []
openshift_aws_current_asgs: []

openshift_aws_scale_group_health_check:
  period: 60
  type: EC2

# these will be used during upgrade
openshift_aws_master_instance_config:
  instance_type: "{{ openshift_aws_master_group_instance_type | default(openshift_aws_instance_type) }}"
  volumes: "{{ openshift_aws_master_volumes }}"
  health_check: "{{ openshift_aws_scale_group_health_check }}"
  exact_count: "{{ openshift_aws_master_group_desired_size | default(3) }}"
  termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
  iam_role: "{{ openshift_aws_iam_master_role_name | default(openshift_aws_iam_role_name) }}"
  policy_name: "{{ openshift_aws_iam_master_role_policy_name | default(openshift_aws_iam_role_policy_name) }}"
  policy_json: "{{ openshift_aws_iam_master_role_policy_json | default(openshift_aws_iam_role_policy_json) }}"
  elbs: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}"
  groups:
  - "{{ openshift_aws_clusterid }}"  # default sg
  - "{{ openshift_aws_clusterid }}_master"  # node type sg
  - "{{ openshift_aws_clusterid }}_master_k8s"  # node type sg k8s
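
# For reference: json_query('master.[*][0][*].name') flattens the master
# entries of openshift_aws_elb_dict into their names, so with the defaults
# above (openshift_aws_clusterid: default) elbs should resolve to something
# like ["default-master-external", "default-master-internal"].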

openshift_aws_node_group_config:
  # The 'compute' key is always required here.
  compute:
    instance_type: "{{ openshift_aws_compute_group_instance_type | default(openshift_aws_instance_type) }}"
    volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
    health_check: "{{ openshift_aws_scale_group_health_check }}"
    min_size: "{{ openshift_aws_compute_group_min_size | default(3) }}"
    max_size: "{{ openshift_aws_compute_group_max_size | default(100) }}"
    desired_size: "{{ openshift_aws_compute_group_desired_size | default(3) }}"
    termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
    replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
    iam_role: "{{ openshift_aws_iam_node_role_name | default(openshift_aws_iam_role_name) }}"
    policy_name: "{{ openshift_aws_iam_node_role_policy_name | default(openshift_aws_iam_role_policy_name) }}"
    policy_json: "{{ openshift_aws_iam_node_role_policy_json | default(openshift_aws_iam_role_policy_json) }}"
  # The 'infra' key is always required here.
  infra:
    instance_type: "{{ openshift_aws_infra_group_instance_type | default(openshift_aws_instance_type) }}"
    volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
    health_check: "{{ openshift_aws_scale_group_health_check }}"
    min_size: "{{ openshift_aws_infra_group_min_size | default(2) }}"
    max_size: "{{ openshift_aws_infra_group_max_size | default(20) }}"
    desired_size: "{{ openshift_aws_infra_group_desired_size | default(2) }}"
    termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
    replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
    iam_role: "{{ openshift_aws_iam_node_role_name | default(openshift_aws_iam_role_name) }}"
    policy_name: "{{ openshift_aws_iam_node_role_policy_name | default(openshift_aws_iam_role_policy_name) }}"
    policy_json: "{{ openshift_aws_iam_node_role_policy_json | default(openshift_aws_iam_role_policy_json) }}"
    elbs: "{{ openshift_aws_elb_dict | json_query('infra.[*][0][*].name') }}"
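
# Each setting above falls back through default(), so group sizing and
# instance types can be overridden from inventory without redefining this
# whole dict. A hypothetical inventory snippet:
# openshift_aws_compute_group_instance_type: m4.2xlarge
# openshift_aws_compute_group_min_size: 5
# openshift_aws_compute_group_desired_size: 5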

# build_instance_tags is a custom filter in role lib_utils
openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"

openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}"

openshift_aws_launch_config_security_groups:
  master:
  - "{{ openshift_aws_clusterid }}"  # default sg
  - "{{ openshift_aws_clusterid }}_master"  # node type sg
  - "{{ openshift_aws_clusterid }}_master_k8s"  # node type sg k8s
  compute:
  - "{{ openshift_aws_clusterid }}"  # default sg
  - "{{ openshift_aws_clusterid }}_compute"  # node type sg
  - "{{ openshift_aws_clusterid }}_compute_k8s"  # node type sg k8s
  infra:
  - "{{ openshift_aws_clusterid }}"  # default sg
  - "{{ openshift_aws_clusterid }}_infra"  # node type sg
  - "{{ openshift_aws_clusterid }}_infra_k8s"  # node type sg k8s

openshift_aws_security_groups_tags: "{{ openshift_aws_kube_tags }}"

openshift_aws_node_security_groups:
  default:
    name: "{{ openshift_aws_clusterid }}"
    desc: "{{ openshift_aws_clusterid }} default"
    rules:
    - proto: tcp
      from_port: 22
      to_port: 22
      cidr_ip: 0.0.0.0/0
    - proto: all
      from_port: 1
      to_port: 65535
      group_name: "{{ openshift_aws_clusterid }}"
  master:
    name: "{{ openshift_aws_clusterid }}_master"
    desc: "{{ openshift_aws_clusterid }} master instances"
    rules:
    - proto: tcp
      from_port: 80
      to_port: 80
      cidr_ip: 0.0.0.0/0
    - proto: tcp
      from_port: "{{ openshift_master_api_port }}"
      to_port: "{{ openshift_master_api_port }}"
      cidr_ip: 0.0.0.0/0
  compute:
    name: "{{ openshift_aws_clusterid }}_compute"
    desc: "{{ openshift_aws_clusterid }} compute node instances"
  infra:
    name: "{{ openshift_aws_clusterid }}_infra"
    desc: "{{ openshift_aws_clusterid }} infra node instances"
    rules:
    - proto: tcp
      from_port: 80
      to_port: 80
      cidr_ip: 0.0.0.0/0
    - proto: tcp
      from_port: "{{ openshift_master_api_port }}"
      to_port: "{{ openshift_master_api_port }}"
      cidr_ip: 0.0.0.0/0
    - proto: tcp
      from_port: 30000
      to_port: 32000
      cidr_ip: 0.0.0.0/0
  etcd:
    name: "{{ openshift_aws_clusterid }}_etcd"
    desc: "{{ openshift_aws_clusterid }} etcd instances"
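
# The rules lists above appear to follow the shape consumed by Ansible's
# ec2_group module (proto/from_port/to_port plus cidr_ip or group_name).
# A hypothetical extra rule allowing ICMP to the infra nodes would be added
# under infra.rules like so:
# - proto: icmp
#   from_port: -1
#   to_port: -1
#   cidr_ip: 0.0.0.0/0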

openshift_aws_node_run_bootstrap_startup: True
openshift_aws_node_user_data: ''
openshift_aws_node_config_namespace: openshift-node

openshift_aws_masters_groups: masters,etcd,nodes

# By default, don't delete things like the shared IAM instance
# profile and uploaded ssh keys.
openshift_aws_enable_uninstall_shared_objects: False
# S3 bucket names are global by default and can take minutes/hours for the
# name to become available for re-use (assuming someone doesn't take the
# name in the meantime). Default to just emptying the contents of the S3
# bucket when we've been asked to delete it.
openshift_aws_really_delete_s3_bucket: False