# hosts.example — OpenShift-Ansible example host inventory
  1. # This is an example of an OpenShift-Ansible host inventory that provides the
  2. # minimum recommended configuration for production use. This includes 3 masters,
  3. # two infra nodes, two compute nodes, and an haproxy load balancer to load
  4. # balance traffic to the API servers. For a truly production environment you
  5. # should use an external load balancing solution that itself is highly available.
  6. [masters]
  7. ose3-master[1:3].test.example.com
  8. [etcd]
  9. ose3-master[1:3].test.example.com
  10. [nodes]
  11. ose3-master[1:3].test.example.com
  12. ose3-infra[1:2].test.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
  13. ose3-node[1:2].test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
  14. [nfs]
  15. ose3-master1.test.example.com
  16. [lb]
  17. ose3-lb.test.example.com
  18. # Create an OSEv3 group that contains the masters and nodes groups
  19. [OSEv3:children]
  20. masters
  21. nodes
  22. etcd
  23. lb
  24. nfs
  25. [OSEv3:vars]
  26. ###############################################################################
  27. # Common / Required configuration variables follow #
  28. ###############################################################################
  29. # SSH user, this user should allow ssh based auth without requiring a
  30. # password. If using ssh key based auth, then the key should be managed by an
  31. # ssh agent.
  32. ansible_user=root
  33. # If ansible_user is not root, ansible_become must be set to true and the
  34. # user must be configured for passwordless sudo
  35. #ansible_become=yes
  36. # Specify the deployment type. Valid values are origin and openshift-enterprise.
  37. openshift_deployment_type=origin
  38. #openshift_deployment_type=openshift-enterprise
  39. # Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
  40. # rely on the version running on the first master. Works best for containerized installs where we can usually
  41. # use this to lookup the latest exact version of the container images, which is the tag actually used to configure
  42. # the cluster. For RPM installations we just verify the version detected in your configured repos matches this
  43. # release.
  44. openshift_release=v3.9
  45. # default subdomain to use for exposed routes, you should have wildcard dns
  46. # for *.apps.test.example.com that points at your infra nodes which will run
  47. # your router
  48. openshift_master_default_subdomain=apps.test.example.com
  49. # Set cluster_hostname to point at your load balancer
  50. openshift_master_cluster_hostname=ose3-lb.test.example.com
  51. ###############################################################################
  52. # Additional configuration variables follow #
  53. ###############################################################################
  54. # Debug level for all OpenShift components (Defaults to 2)
  55. debug_level=2
  56. # Specify an exact container image tag to install or configure.
  57. # WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
  58. # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
  59. #openshift_image_tag=v3.9.0
  60. # Specify an exact rpm version to install or configure.
  61. # WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
  62. # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
  63. #openshift_pkg_version=-3.9.0
  64. # This enables all the system containers except for docker:
  65. #openshift_use_system_containers=False
  66. #
  67. # But you can choose separately each component that must be a
  68. # system container:
  69. #
  70. #openshift_use_openvswitch_system_container=False
  71. #openshift_use_node_system_container=False
  72. #openshift_use_master_system_container=False
  73. #openshift_use_etcd_system_container=False
  74. #
  75. # In either case, system_images_registry must be specified to be able to find the system images
  76. #system_images_registry="docker.io"
  77. # when openshift_deployment_type=='openshift-enterprise'
  78. #system_images_registry="registry.access.redhat.com"
  79. # Manage openshift example imagestreams and templates during install and upgrade
  80. #openshift_install_examples=true
  81. # Configure logoutURL in the master config for console customization
  82. # See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
  83. #openshift_master_logout_url=http://example.com
  84. # Configure extensionScripts in the master config for console customization
  85. # See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
  86. #openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
  87. # Configure extensionStylesheets in the master config for console customization
  88. # See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
  89. #openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
  90. # Configure extensions in the master config for console customization
  91. # See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
  92. #openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
  93. # Configure custom login templates in the master config for console customization
  94. # See: https://docs.openshift.org/latest/install_config/web_console_customization.html#customizing-the-login-page
  95. #openshift_master_oauth_templates={'login': '/path/to/login-template.html'}
  97. # openshift_master_oauth_template is deprecated. Use openshift_master_oauth_templates instead.
  98. #openshift_master_oauth_template=/path/to/login-template.html
  99. # Configure imagePolicyConfig in the master config
  100. # See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
  101. #openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
  102. # Configure master API rate limits for external clients
  103. #openshift_master_external_ratelimit_qps=200
  104. #openshift_master_external_ratelimit_burst=400
  105. # Configure master API rate limits for loopback clients
  106. #openshift_master_loopback_ratelimit_qps=300
  107. #openshift_master_loopback_ratelimit_burst=600
  108. # Docker Configuration
  109. # Add additional, insecure, and blocked registries to global docker configuration
  110. # For enterprise deployment types we ensure that registry.access.redhat.com is
  111. # included if you do not include it
  112. #openshift_docker_additional_registries=registry.example.com
  113. #openshift_docker_insecure_registries=registry.example.com
  114. #openshift_docker_blocked_registries=registry.hacker.com
  115. # Disable pushing to dockerhub
  116. #openshift_docker_disable_push_dockerhub=True
  117. # Use Docker inside a System Container. Note that this is a tech preview and should
  118. # not be used to upgrade!
  119. # The following options for docker are ignored:
  120. # - docker_version
  121. # - docker_upgrade
  122. # The following options must not be used
  123. # - openshift_docker_options
  124. #openshift_docker_use_system_container=False
  125. # Install and run cri-o. By default this will install cri-o as a system container.
  126. #openshift_use_crio=False
  127. # You can install cri-o as an rpm by setting the following variable:
  128. #openshift_crio_use_rpm=False
  129. # NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override
  130. # just as container-engine does.
  131. # Force the registry to use for the container-engine/crio system container. By default the registry
  132. # will be built off of the deployment type and ansible_distribution. Only
  133. # use this option if you are sure you know what you are doing!
  134. #openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
  135. #openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
  136. # The following two variables are used when openshift_use_crio is True
  137. # and cleans up after builds that pass through docker. When openshift_use_crio is True
  138. # these variables are set to the defaults shown. You may override them here.
  139. # NOTE: You will still need to tag crio nodes with your given label(s)!
  140. # Enable docker garbage collection when using cri-o
  141. #openshift_crio_enable_docker_gc=True
  142. # Node Selectors to run the garbage collection
  143. #openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}
  144. # Items added, as is, to end of /etc/sysconfig/docker OPTIONS
  145. # Default value: "--log-driver=journald"
  146. #openshift_docker_options="-l warn --ipv6=false"
  147. # Specify exact version of Docker to configure or upgrade to.
  148. # Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
  149. # docker_version="1.12.1"
  150. # Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
  151. # Uncomment below to disable; for example if your kernel does not support the
  152. # Docker overlay/overlay2 storage drivers with SELinux enabled.
  153. #openshift_docker_selinux_enabled=False
  154. # Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
  155. # docker_upgrade=False
  156. # Specify exact version of etcd to configure or upgrade to.
  157. # etcd_version="3.1.0"
  158. # Enable etcd debug logging, defaults to false
  159. # etcd_debug=true
  160. # Set etcd log levels by package
  161. # etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
  162. # Upgrade Hooks
  163. #
  164. # Hooks are available to run custom tasks at various points during a cluster
  165. # upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using
  166. # absolute paths, if not the path will be treated as relative to the file where the
  167. # hook is actually used.
  168. #
  169. # Tasks to run before each master is upgraded.
  170. # openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
  171. #
  172. # Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
  173. # upgrade steps, but before we restart system/services.
  174. # openshift_master_upgrade_hook=/usr/share/custom/master.yml
  175. #
  176. # Tasks to run after each master is upgraded and system/services have been restarted.
  177. # openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
  178. # Alternate image format string, useful if you've got your own registry mirror
  179. # Configure this setting just on node or master
  180. #oreg_url_master=example.com/openshift3/ose-${component}:${version}
  181. #oreg_url_node=example.com/openshift3/ose-${component}:${version}
  182. # For setting the configuration globally
  183. #oreg_url=example.com/openshift3/ose-${component}:${version}
  184. # If oreg_url points to a registry other than registry.access.redhat.com we can
  185. # modify image streams to point at that registry by setting the following to true
  186. #openshift_examples_modify_imagestreams=true
  187. # If oreg_url points to a registry requiring authentication, provide the following:
  188. #oreg_auth_user=some_user
  189. #oreg_auth_password='my-pass'
  190. # NOTE: oreg_url must be defined by the user for oreg_auth_* to have any effect.
  191. # oreg_auth_password should be generated from running docker login.
  192. # To update registry auth credentials, uncomment the following:
  193. #oreg_auth_credentials_replace=True
  194. # OpenShift repository configuration
  195. #openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
  196. #openshift_repos_enable_testing=false
  197. # If the image for etcd needs to be pulled from anywhere else than registry.access.redhat.com, e.g. in
  198. # a disconnected and containerized installation, use osm_etcd_image to specify the image to use:
  199. #osm_etcd_image=rhel7/etcd
  200. # htpasswd auth
  201. #openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
  202. # Defining htpasswd users
  203. #openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
  204. # or
  205. #openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
  206. # Allow all auth
  207. #openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
  208. # LDAP auth
  209. #openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
  210. #
  211. # Configure LDAP CA certificate
  212. # Specify either the ASCII contents of the certificate or the path to
  213. # the local file that will be copied to the remote host. CA
  214. # certificate contents will be copied to master systems and saved
  215. # within /etc/origin/master/ with a filename matching the "ca" key set
  216. # within the LDAPPasswordIdentityProvider.
  217. #
  218. #openshift_master_ldap_ca=<ca text>
  219. # or
  220. #openshift_master_ldap_ca_file=<path to local ca file to use>
  221. # OpenID auth
  222. #openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
  223. #
  224. # Configure OpenID CA certificate
  225. # Specify either the ASCII contents of the certificate or the path to
  226. # the local file that will be copied to the remote host. CA
  227. # certificate contents will be copied to master systems and saved
  228. # within /etc/origin/master/ with a filename matching the "ca" key set
  229. # within the OpenIDIdentityProvider.
  230. #
  231. #openshift_master_openid_ca=<ca text>
  232. # or
  233. #openshift_master_openid_ca_file=<path to local ca file to use>
  234. # Request header auth
  235. #openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
  236. #
  237. # Configure request header CA certificate
  238. # Specify either the ASCII contents of the certificate or the path to
  239. # the local file that will be copied to the remote host. CA
  240. # certificate contents will be copied to master systems and saved
  241. # within /etc/origin/master/ with a filename matching the "clientCA"
  242. # key set within the RequestHeaderIdentityProvider.
  243. #
  244. #openshift_master_request_header_ca=<ca text>
  245. # or
  246. #openshift_master_request_header_ca_file=<path to local ca file to use>
  247. # CloudForms Management Engine (ManageIQ) App Install
  248. #
  249. # Enables installation of MIQ server. Recommended for dedicated
  250. # clusters only. See roles/openshift_management/README.md for instructions
  251. # and requirements.
  252. #openshift_management_install_management=False
  253. # Cloud Provider Configuration
  254. #
  255. # Note: You may make use of environment variables rather than store
  256. # sensitive configuration within the ansible inventory.
  257. # For example:
  258. #openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
  259. #openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
  260. #
  261. # AWS
  262. #openshift_cloudprovider_kind=aws
  263. # Note: IAM profiles may be used instead of storing API credentials on disk.
  264. #openshift_cloudprovider_aws_access_key=aws_access_key_id
  265. #openshift_cloudprovider_aws_secret_key=aws_secret_access_key
  266. #
  267. # Openstack
  268. #openshift_cloudprovider_kind=openstack
  269. #openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
  270. #openshift_cloudprovider_openstack_username=username
  271. #openshift_cloudprovider_openstack_password=password
  272. #openshift_cloudprovider_openstack_domain_id=domain_id
  273. #openshift_cloudprovider_openstack_domain_name=domain_name
  274. #openshift_cloudprovider_openstack_tenant_id=tenant_id
  275. #openshift_cloudprovider_openstack_tenant_name=tenant_name
  276. #openshift_cloudprovider_openstack_region=region
  277. #openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
  278. #
  279. # Note: If you're getting a "BS API version autodetection failed" when provisioning cinder volumes you may need this setting
  280. #openshift_cloudprovider_openstack_blockstorage_version=v2
  281. #
  282. # GCE
  283. #openshift_cloudprovider_kind=gce
  284. # Note: When using GCE, openshift_gcp_project and openshift_gcp_prefix must be
  285. # defined.
  286. # openshift_gcp_project is the project-id
  287. #openshift_gcp_project=
  288. # openshift_gcp_prefix is a unique string to identify each openshift cluster.
  289. #openshift_gcp_prefix=
  290. #openshift_gcp_multizone=False
  291. #
  292. # vSphere
  293. #openshift_cloudprovider_kind=vsphere
  294. #openshift_cloudprovider_vsphere_username=username
  295. #openshift_cloudprovider_vsphere_password=password
  296. #openshift_cloudprovider_vsphere_host=vcenter_host or vsphere_host
  297. #openshift_cloudprovider_vsphere_datacenter=datacenter
  298. #openshift_cloudprovider_vsphere_datastore=datastore
  299. #openshift_cloudprovider_vsphere_folder=optional_folder_name
  300. # Project Configuration
  301. #osm_project_request_message=''
  302. #osm_project_request_template=''
  303. #osm_mcs_allocator_range='s0:/2'
  304. #osm_mcs_labels_per_project=5
  305. #osm_uid_allocator_range='1000000000-1999999999/10000'
  306. # Configure additional projects
  307. #openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}}
  308. # Enable cockpit
  309. #osm_use_cockpit=true
  310. #
  311. # Set cockpit plugins
  312. #osm_cockpit_plugins=['cockpit-kubernetes']
  313. # Native high availability (default cluster method)
  314. # If no lb group is defined, the installer assumes that a load balancer has
  315. # been preconfigured. For installation the value of
  316. # openshift_master_cluster_hostname must resolve to the load balancer
  317. # or to one or all of the masters defined in the inventory if no load
  318. # balancer is present.
  319. #openshift_master_cluster_hostname=openshift-ansible.test.example.com
  320. # If an external load balancer is used public hostname should resolve to
  321. # external load balancer address
  322. #openshift_master_cluster_public_hostname=openshift-ansible.public.example.com
  323. # Configure controller arguments
  324. #osm_controller_args={'resource-quota-sync-period': ['10s']}
  325. # Configure api server arguments
  326. #osm_api_server_args={'max-requests-inflight': ['400']}
  327. # additional cors origins
  328. #osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
  329. # default project node selector
  330. #osm_default_node_selector='region=primary'
  331. # Override the default pod eviction timeout
  332. #openshift_master_pod_eviction_timeout=5m
  333. # Override the default oauth tokenConfig settings:
  334. # openshift_master_access_token_max_seconds=86400
  335. # openshift_master_auth_token_max_seconds=500
  336. # Override master servingInfo.maxRequestsInFlight
  337. #openshift_master_max_requests_inflight=500
  338. # Override master and node servingInfo.minTLSVersion and .cipherSuites
  339. # valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
  340. # example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
  341. #openshift_master_min_tls_version=VersionTLS12
  342. #openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
  343. #
  344. #openshift_node_min_tls_version=VersionTLS12
  345. #openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
  346. # default storage plugin dependencies to install, by default the ceph and
  347. # glusterfs plugin dependencies will be installed, if available.
  348. #osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
  349. # OpenShift Router Options
  350. #
  351. # An OpenShift router will be created during install if there are
  352. # nodes present with labels matching the default router selector,
  353. # "region=infra". Set openshift_node_labels per node as needed in
  354. # order to label nodes.
  355. #
  356. # Example:
  357. # [nodes]
  358. # node.example.com openshift_node_labels="{'region': 'infra'}"
  359. #
  360. # Router selector (optional)
  361. # Router will only be created if nodes matching this label are present.
  362. # Default value: 'region=infra'
  363. #openshift_hosted_router_selector='region=infra'
  364. #
  365. # Router replicas (optional)
  366. # Unless specified, openshift-ansible will calculate the replica count
  367. # based on the number of nodes matching the openshift router selector.
  368. #openshift_hosted_router_replicas=2
  369. #
  370. # Router force subdomain (optional)
  371. # A router path format to force on all routes used by this router
  372. # (will ignore the route host value)
  373. #openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
  374. #
  375. # Router certificate (optional)
  376. # Provide local certificate paths which will be configured as the
  377. # router's default certificate.
  378. #openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
  379. #
  380. # Manage the OpenShift Router (optional)
  381. #openshift_hosted_manage_router=true
  382. #
  383. # Router sharding support has been added and can be achieved by supplying the correct
  384. # data to the inventory. The variable to house the data is openshift_hosted_routers
  385. # and is in the form of a list. If no data is passed then a default router will be
  386. # created. There are multiple combinations of router sharding. The one described
  387. # below supports routers on separate nodes.
  388. #
  389. #openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
  390. # OpenShift Registry Console Options
  391. # Override the console image prefix:
  392. # origin default is "cockpit/", enterprise default is "openshift3/"
  393. #openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
  394. # origin default is "kubernetes", enterprise default is "registry-console"
  395. #openshift_cockpit_deployer_basename=my-console
  396. # Override image version, defaults to latest for origin, vX.Y product version for enterprise
  397. #openshift_cockpit_deployer_version=1.4.1
  398. # Openshift Registry Options
  399. #
  400. # An OpenShift registry will be created during install if there are
  401. # nodes present with labels matching the default registry selector,
  402. # "region=infra". Set openshift_node_labels per node as needed in
  403. # order to label nodes.
  404. #
  405. # Example:
  406. # [nodes]
  407. # node.example.com openshift_node_labels="{'region': 'infra'}"
  408. #
  409. # Registry selector (optional)
  410. # Registry will only be created if nodes matching this label are present.
  411. # Default value: 'region=infra'
  412. #openshift_hosted_registry_selector='region=infra'
  413. #
  414. # Registry replicas (optional)
  415. # Unless specified, openshift-ansible will calculate the replica count
  416. # based on the number of nodes matching the openshift registry selector.
  417. #openshift_hosted_registry_replicas=2
  418. #
  419. # Validity of the auto-generated certificate in days (optional)
  420. #openshift_hosted_registry_cert_expire_days=730
  421. #
  422. # Manage the OpenShift Registry (optional)
  423. #openshift_hosted_manage_registry=true
  424. # Manage the OpenShift Registry Console (optional)
  425. #openshift_hosted_manage_registry_console=true
  426. #
  427. # Registry Storage Options
  428. #
  429. # NFS Host Group
  430. # An NFS volume will be created with path "nfs_directory/volume_name"
  431. # on the host within the [nfs] host group. For example, the volume
  432. # path using these options would be "/exports/registry". "exports" is
  433. # the name of the export served by the nfs server. "registry" is
  434. # the name of a directory inside of "/exports".
  435. #openshift_hosted_registry_storage_kind=nfs
  436. #openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
  437. # nfs_directory must conform to DNS-1123 subdomain: it must consist of lower case
  438. # alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
  439. #openshift_hosted_registry_storage_nfs_directory=/exports
  440. #openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
  441. #openshift_hosted_registry_storage_volume_name=registry
  442. #openshift_hosted_registry_storage_volume_size=10Gi
  443. #
  444. # External NFS Host
  445. # NFS volume must already exist with path "nfs_directory/volume_name" on
  446. # the storage_host. For example, the remote volume path using these
  447. # options would be "nfs.example.com:/exports/registry". "exports" is
  448. # the name of the export served by the nfs server. "registry" is
  449. # the name of a directory inside of "/exports".
  450. #openshift_hosted_registry_storage_kind=nfs
  451. #openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
  452. #openshift_hosted_registry_storage_host=nfs.example.com
  453. # nfs_directory must conform to DNS-1123 subdomain: it must consist of lower case
  454. # alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
  455. #openshift_hosted_registry_storage_nfs_directory=/exports
  456. #openshift_hosted_registry_storage_volume_name=registry
  457. #openshift_hosted_registry_storage_volume_size=10Gi
  458. #
  459. # Openstack
  460. # Volume must already exist.
  461. #openshift_hosted_registry_storage_kind=openstack
  462. #openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
  463. #openshift_hosted_registry_storage_openstack_filesystem=ext4
  464. #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
  465. #openshift_hosted_registry_storage_volume_size=10Gi
  466. #
  467. # AWS S3
  468. # S3 bucket must already exist.
  469. #openshift_hosted_registry_storage_kind=object
  470. #openshift_hosted_registry_storage_provider=s3
  471. #openshift_hosted_registry_storage_s3_encrypt=false
  472. #openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
  473. #openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
  474. #openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
  475. #openshift_hosted_registry_storage_s3_bucket=bucket_name
  476. #openshift_hosted_registry_storage_s3_region=bucket_region
  477. #openshift_hosted_registry_storage_s3_chunksize=26214400
  478. #openshift_hosted_registry_storage_s3_rootdirectory=/registry
  479. #openshift_hosted_registry_pullthrough=true
  480. #openshift_hosted_registry_acceptschema2=true
  481. #openshift_hosted_registry_enforcequota=true
  482. #
  483. # Any S3 service (Minio, ExoScale, ...): Basically the same as above
  484. # but with regionendpoint configured
  485. # S3 bucket must already exist.
  486. #openshift_hosted_registry_storage_kind=object
  487. #openshift_hosted_registry_storage_provider=s3
  488. #openshift_hosted_registry_storage_s3_accesskey=access_key_id
  489. #openshift_hosted_registry_storage_s3_secretkey=secret_access_key
  490. #openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
  491. #openshift_hosted_registry_storage_s3_bucket=bucket_name
  492. #openshift_hosted_registry_storage_s3_region=bucket_region
  493. #openshift_hosted_registry_storage_s3_chunksize=26214400
  494. #openshift_hosted_registry_storage_s3_rootdirectory=/registry
  495. #openshift_hosted_registry_pullthrough=true
  496. #openshift_hosted_registry_acceptschema2=true
  497. #openshift_hosted_registry_enforcequota=true
  498. #
  499. # Additional CloudFront Options. When using CloudFront all three
  500. # of the following variables must be defined.
  501. #openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
  502. #openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
  503. #openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
  504. #
  505. # GCS Storage Bucket
  506. #openshift_hosted_registry_storage_provider=gcs
  507. #openshift_hosted_registry_storage_gcs_bucket=bucket01
  508. #openshift_hosted_registry_storage_gcs_keyfile=test.key
  509. #openshift_hosted_registry_storage_gcs_rootdirectory=/registry
  510. # Metrics deployment
  511. # See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
  512. #
  513. # By default metrics are not automatically deployed, set this to enable them
  514. #openshift_metrics_install_metrics=true
  515. #
  516. # Storage Options
  517. # If openshift_metrics_storage_kind is unset then metrics will be stored
  518. # in an EmptyDir volume and will be deleted when the cassandra pod terminates.
  519. # Storage options A & B currently support only one cassandra pod which is
  520. # generally enough for up to 1000 pods. Additional volumes can be created
  521. # manually after the fact and metrics scaled per the docs.
  522. #
  523. # Option A - NFS Host Group
  524. # An NFS volume will be created with path "nfs_directory/volume_name"
  525. # on the host within the [nfs] host group. For example, the volume
  526. # path using these options would be "/exports/metrics". "exports"
  527. # is the name of the export served by the nfs server. "metrics" is
  528. # the name of a directory inside of "/exports".
  529. #openshift_metrics_storage_kind=nfs
  530. #openshift_metrics_storage_access_modes=['ReadWriteOnce']
  531. #openshift_metrics_storage_nfs_directory=/exports
  532. #openshift_metrics_storage_nfs_options='*(rw,root_squash)'
  533. #openshift_metrics_storage_volume_name=metrics
  534. #openshift_metrics_storage_volume_size=10Gi
  535. #openshift_metrics_storage_labels={'storage': 'metrics'}
  536. #
  537. # Option B - External NFS Host
  538. # NFS volume must already exist with path "nfs_directory/_volume_name" on
  539. # the storage_host. For example, the remote volume path using these
  540. # options would be "nfs.example.com:/exports/metrics". "exports"
  541. # is the name of the export served by the nfs server. "metrics" is
  542. # the name of a directory inside of "/exports".
  543. #openshift_metrics_storage_kind=nfs
  544. #openshift_metrics_storage_access_modes=['ReadWriteOnce']
  545. #openshift_metrics_storage_host=nfs.example.com
  546. #openshift_metrics_storage_nfs_directory=/exports
  547. #openshift_metrics_storage_volume_name=metrics
  548. #openshift_metrics_storage_volume_size=10Gi
  549. #openshift_metrics_storage_labels={'storage': 'metrics'}
  550. #
  551. # Option C - Dynamic -- If openshift supports dynamic volume provisioning for
  552. # your cloud platform use this.
  553. #openshift_metrics_storage_kind=dynamic
  554. #
  555. # Other Metrics Options -- Common items you may wish to reconfigure, for the complete
  556. # list of options please see roles/openshift_metrics/README.md
  557. #
  558. # Override metricsPublicURL in the master config for cluster metrics
  559. # Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
  560. # Currently, you may only alter the hostname portion of the url; altering the
  561. # `/hawkular/metrics` path will break installation of metrics.
  562. #openshift_metrics_hawkular_hostname=hawkular-metrics.example.com
  563. # Configure the prefix and version for the component images
  564. #openshift_metrics_image_prefix=docker.io/openshift/origin-
  565. #openshift_metrics_image_version=v3.9
  566. # when openshift_deployment_type=='openshift-enterprise'
  567. #openshift_metrics_image_prefix=registry.access.redhat.com/openshift3/
  568. #openshift_metrics_image_version=v3.9
  569. #
  570. # StorageClass
  571. # openshift_storageclass_name=gp2
  572. # openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
  573. #
  574. # Logging deployment
  575. #
  576. # Currently logging deployment is disabled by default, enable it by setting this
  577. #openshift_logging_install_logging=true
  578. #
  579. # Logging storage config
  580. # Option A - NFS Host Group
  581. # An NFS volume will be created with path "nfs_directory/volume_name"
  582. # on the host within the [nfs] host group. For example, the volume
  583. # path using these options would be "/exports/logging". "exports"
  584. # is the name of the export served by the nfs server. "logging" is
  585. # the name of a directory inside of "/exports".
  586. #openshift_logging_storage_kind=nfs
  587. #openshift_logging_storage_access_modes=['ReadWriteOnce']
  588. #openshift_logging_storage_nfs_directory=/exports
  589. #openshift_logging_storage_nfs_options='*(rw,root_squash)'
  590. #openshift_logging_storage_volume_name=logging
  591. #openshift_logging_storage_volume_size=10Gi
  592. #openshift_logging_storage_labels={'storage': 'logging'}
  593. #
  594. # Option B - External NFS Host
  595. # NFS volume must already exist with path "nfs_directory/_volume_name" on
  596. # the storage_host. For example, the remote volume path using these
  597. # options would be "nfs.example.com:/exports/logging". "exports"
  598. # is the name of the export served by the nfs server. "logging" is
  599. # the name of a directory inside of "/exports".
  600. #openshift_logging_storage_kind=nfs
  601. #openshift_logging_storage_access_modes=['ReadWriteOnce']
  602. #openshift_logging_storage_host=nfs.example.com
  603. #openshift_logging_storage_nfs_directory=/exports
  604. #openshift_logging_storage_volume_name=logging
  605. #openshift_logging_storage_volume_size=10Gi
  606. #openshift_logging_storage_labels={'storage': 'logging'}
  607. #
  608. # Option C - Dynamic -- If openshift supports dynamic volume provisioning for
  609. # your cloud platform use this.
  610. #openshift_logging_storage_kind=dynamic
  611. #
  612. # Option D - none -- Logging will use emptydir volumes which are destroyed when
  613. # pods are deleted
  614. #
  615. # Other Logging Options -- Common items you may wish to reconfigure, for the complete
  616. # list of options please see roles/openshift_logging/README.md
  617. #
  618. # Configure loggingPublicURL in the master config for aggregate logging, defaults
  619. # to kibana.{{ openshift_master_default_subdomain }}
  620. #openshift_logging_kibana_hostname=logging.apps.example.com
  621. # Configure the number of elastic search nodes, unless you're using dynamic provisioning
  622. # this value must be 1
  623. #openshift_logging_es_cluster_size=1
  624. # Configure the prefix and version for the component images
  625. #openshift_logging_image_prefix=docker.io/openshift/origin-
  626. #openshift_logging_image_version=v3.9.0
  627. # when openshift_deployment_type=='openshift-enterprise'
  628. #openshift_logging_image_prefix=registry.access.redhat.com/openshift3/
  629. #openshift_logging_image_version=3.9.0
  630. # Prometheus deployment
  631. #
  632. # Currently prometheus deployment is disabled by default, enable it by setting this
  633. #openshift_hosted_prometheus_deploy=true
  634. #
  635. # Prometheus storage config
  636. # Option A - NFS Host Group
  637. # An NFS volume will be created with path "nfs_directory/volume_name"
  638. # on the host within the [nfs] host group. For example, the volume
  639. # path using these options would be "/exports/prometheus"
  640. #openshift_prometheus_storage_kind=nfs
  641. #openshift_prometheus_storage_access_modes=['ReadWriteOnce']
  642. #openshift_prometheus_storage_nfs_directory=/exports
  643. #openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
  644. #openshift_prometheus_storage_volume_name=prometheus
  645. #openshift_prometheus_storage_volume_size=10Gi
  646. #openshift_prometheus_storage_labels={'storage': 'prometheus'}
  647. #openshift_prometheus_storage_type='pvc'
  648. #openshift_prometheus_storage_class=glusterfs-storage
  649. # For prometheus-alertmanager
  650. #openshift_prometheus_alertmanager_storage_kind=nfs
  651. #openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
  652. #openshift_prometheus_alertmanager_storage_nfs_directory=/exports
  653. #openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
  654. #openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
  655. #openshift_prometheus_alertmanager_storage_volume_size=10Gi
  656. #openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
  657. #openshift_prometheus_alertmanager_storage_type='pvc'
  658. #openshift_prometheus_alertmanager_storage_class=glusterfs-storage
  659. # For prometheus-alertbuffer
  660. #openshift_prometheus_alertbuffer_storage_kind=nfs
  661. #openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
  662. #openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
  663. #openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
  664. #openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
  665. #openshift_prometheus_alertbuffer_storage_volume_size=10Gi
  666. #openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
  667. #openshift_prometheus_alertbuffer_storage_type='pvc'
  668. #openshift_prometheus_alertbuffer_storage_class=glusterfs-storage
  669. #
  670. # Option B - External NFS Host
  671. # NFS volume must already exist with path "nfs_directory/_volume_name" on
  672. # the storage_host. For example, the remote volume path using these
  673. # options would be "nfs.example.com:/exports/prometheus"
  674. #openshift_prometheus_storage_kind=nfs
  675. #openshift_prometheus_storage_access_modes=['ReadWriteOnce']
  676. #openshift_prometheus_storage_host=nfs.example.com
  677. #openshift_prometheus_storage_nfs_directory=/exports
  678. #openshift_prometheus_storage_volume_name=prometheus
  679. #openshift_prometheus_storage_volume_size=10Gi
  680. #openshift_prometheus_storage_labels={'storage': 'prometheus'}
  681. #openshift_prometheus_storage_type='pvc'
  682. #openshift_prometheus_storage_class=glusterfs-storage
  683. # For prometheus-alertmanager
  684. #openshift_prometheus_alertmanager_storage_kind=nfs
  685. #openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
  686. #openshift_prometheus_alertmanager_storage_host=nfs.example.com
  687. #openshift_prometheus_alertmanager_storage_nfs_directory=/exports
  688. #openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
  689. #openshift_prometheus_alertmanager_storage_volume_size=10Gi
  690. #openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
  691. #openshift_prometheus_alertmanager_storage_type='pvc'
  692. #openshift_prometheus_alertmanager_storage_class=glusterfs-storage
  693. # For prometheus-alertbuffer
  694. #openshift_prometheus_alertbuffer_storage_kind=nfs
  695. #openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
  696. #openshift_prometheus_alertbuffer_storage_host=nfs.example.com
  697. #openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
  698. #openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
  699. #openshift_prometheus_alertbuffer_storage_volume_size=10Gi
  700. #openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
  701. #openshift_prometheus_alertbuffer_storage_type='pvc'
  702. #openshift_prometheus_alertbuffer_storage_class=glusterfs-storage
  703. #
  704. # Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes
  705. # which are destroyed when pods are deleted
  706. # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
  707. # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
  708. # Disable the OpenShift SDN plugin
  709. # openshift_use_openshift_sdn=False
  710. # Configure SDN cluster network and kubernetes service CIDR blocks. These
  711. # network blocks should be private and should not conflict with network blocks
  712. # in your infrastructure that pods may require access to. Can not be changed
  713. # after deployment.
  714. #
  715. # WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of
  716. # 172.17.0.0/16. Your installation will fail and/or your configuration change will
  717. # cause the Pod SDN or Cluster SDN to fail.
  718. #
  719. # WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
  720. # docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
  721. # environment variable located in /etc/sysconfig/docker-network.
  722. # When upgrading or scaling up the following must match what's in your master config!
  723. # Inventory: master yaml field
  724. # osm_cluster_network_cidr: clusterNetworkCIDR
  725. # openshift_portal_net: serviceNetworkCIDR
  726. # When installing osm_cluster_network_cidr and openshift_portal_net must be set.
  727. # Sane examples are provided below.
  728. #osm_cluster_network_cidr=10.128.0.0/14
  729. #openshift_portal_net=172.30.0.0/16
  730. # ExternalIPNetworkCIDRs controls what values are acceptable for the
  731. # service external IP field. If empty, no externalIP may be set. It
  732. # may contain a list of CIDRs which are checked for access. If a CIDR
  733. # is prefixed with !, IPs in that CIDR will be rejected. Rejections
  734. # will be applied first, then the IP checked against one of the
  735. # allowed CIDRs. You should ensure this range does not overlap with
  736. # your nodes, pods, or service CIDRs for security reasons.
  737. #openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
  738. # IngressIPNetworkCIDR controls the range to assign ingress IPs from for
  739. # services of type LoadBalancer on bare metal. If empty, ingress IPs will not
  740. # be assigned. It may contain a single CIDR that will be allocated from. For
  741. # security reasons, you should ensure that this range does not overlap with
  742. # the CIDRs reserved for external IPs, nodes, pods, or services.
  743. #openshift_master_ingress_ip_network_cidr=172.46.0.0/16
  744. # Configure number of bits to allocate to each host's subnet e.g. 9
  745. # would mean a /23 network on the host.
  746. # When upgrading or scaling up the following must match what's in your master config!
  747. # Inventory: master yaml field
  748. # osm_host_subnet_length: hostSubnetLength
  749. # When installing osm_host_subnet_length must be set. A sane example is provided below.
  750. #osm_host_subnet_length=9
  751. # Configure master API and console ports.
  752. #openshift_master_api_port=8443
  753. #openshift_master_console_port=8443
  754. # set exact RPM version (include - prefix)
  755. #openshift_pkg_version=-3.9.0
  756. # you may also specify version and release, ie:
  757. #openshift_pkg_version=-3.9.0-0.126.0.git.0.9351aae.el7
  758. # Configure custom ca certificate
  759. #openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
  760. #
  761. # NOTE: CA certificate will not be replaced with existing clusters.
  762. # This option may only be specified when creating a new cluster or
  763. # when redeploying cluster certificates with the redeploy-certificates
  764. # playbook.
  765. # Configure custom named certificates (SNI certificates)
  766. #
  767. # https://docs.openshift.org/latest/install_config/certificate_customization.html
  768. # https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
  769. #
  770. # NOTE: openshift_master_named_certificates is cached on masters and is an
  771. # additive fact, meaning that each run with a different set of certificates
  772. # will add the newly provided certificates to the cached set of certificates.
  773. #
  774. # An optional CA may be specified for each named certificate. CAs will
  775. # be added to the OpenShift CA bundle which allows for the named
  776. # certificate to be served for internal cluster communication.
  777. #
  778. # If you would like openshift_master_named_certificates to be overwritten with
  779. # the provided value, specify openshift_master_overwrite_named_certificates.
  780. #openshift_master_overwrite_named_certificates=true
  781. #
  782. # Provide local certificate paths which will be deployed to masters
  783. #openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
  784. #
  785. # Detected names may be overridden by specifying the "names" key
  786. #openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
  787. # Session options
  788. #openshift_master_session_name=ssn
  789. #openshift_master_session_max_seconds=3600
  790. # An authentication and encryption secret will be generated if secrets
  791. # are not provided. If provided, openshift_master_session_auth_secrets
  792. # and openshift_master_encryption_secrets must be equal length.
  793. #
  794. # Signing secrets, used to authenticate sessions using
  795. # HMAC. Recommended to use secrets with 32 or 64 bytes.
  796. #openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
  797. #
  798. # Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
  799. # characters long, to select AES-128, AES-192, or AES-256.
  800. #openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
  801. # configure how often node iptables rules are refreshed
  802. #openshift_node_iptables_sync_period=5s
  803. # Configure nodeIP in the node config
  804. # This is needed in cases where node traffic is desired to go over an
  805. # interface other than the default network interface.
  806. #openshift_set_node_ip=True
  807. # Configure dnsIP in the node config.
  808. # This setting overrides the bind IP address used by each node's dnsmasq.
  809. # By default, this value is set to the IP which ansible uses to connect to the node.
  810. # Only update this variable if you need to bind dnsmasq on a different interface
  811. #
  812. # Example:
  813. # [nodes]
  814. # node.example.com openshift_dns_ip=172.30.0.1
  815. # Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
  816. #openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
  817. # Configure logrotate scripts
  818. # See: https://github.com/nickhammond/ansible-logrotate
  819. #logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
  820. # The OpenShift-Ansible installer will fail when it detects that the
  821. # value of openshift_hostname resolves to an IP address not bound to any local
  822. # interfaces. This mis-configuration is problematic for any pod leveraging host
  823. # networking and liveness or readiness probes.
  824. # Setting this variable to false will override that check.
  825. #openshift_hostname_check=true
  826. # openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
  827. # in versions >= 3.6
  828. #openshift_use_dnsmasq=False
  829. # Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
  830. # This is useful for POC environments where DNS may not actually be available yet or to set
  831. # options like 'strict-order' to alter dnsmasq configuration.
  832. #openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
  833. # Global Proxy Configuration
  834. # These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
  835. # variables for docker and master services.
  836. #
  837. # Hosts in the openshift_no_proxy list will NOT use any globally
  838. # configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
  839. # (.example.com), hosts (example.com), and IP addresses.
  840. #openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
  841. #openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
  842. #openshift_no_proxy='.hosts.example.com,some-host.com'
  843. #
  844. # Most environments don't require a proxy between openshift masters, nodes, and
  845. # etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
  846. # If all of your hosts share a common domain you may wish to disable this and
  847. # specify that domain above instead.
  848. #
  849. # For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and
  850. # n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy
  851. # variable (above) and set this value to False
  852. #openshift_generate_no_proxy_hosts=True
  853. #
  854. # These options configure the BuildDefaults admission controller which injects
  855. # configuration into Builds. Proxy related values will default to the global proxy
  856. # config values. You only need to set these if they differ from the global proxy settings.
  857. # See BuildDefaults documentation at
  858. # https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
  859. #openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
  860. #openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
  861. #openshift_builddefaults_no_proxy=mycorp.com
  862. #openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
  863. #openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
  864. #openshift_builddefaults_git_no_proxy=mycorp.com
  865. #openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
  866. #openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
  867. #openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
  868. #openshift_builddefaults_resources_requests_cpu=100m
  869. #openshift_builddefaults_resources_requests_memory=256Mi
  870. #openshift_builddefaults_resources_limits_cpu=1000m
  871. #openshift_builddefaults_resources_limits_memory=512Mi
  872. # Or you may optionally define your own build defaults configuration serialized as json
  873. #openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
  874. # These options configure the BuildOverrides admission controller which injects
  875. # configuration into Builds.
  876. # See BuildOverrides documentation at
  877. # https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
  878. #openshift_buildoverrides_force_pull=true
  879. #openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
  880. #openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
  881. #openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
  882. #openshift_buildoverrides_tolerations=[{'key':'mykey1','value':'myvalue1','effect':'NoSchedule','operator':'Equal'}]
  883. # Or you may optionally define your own build overrides configuration serialized as json
  884. #openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
  885. # Enable service catalog
  886. #openshift_enable_service_catalog=true
  887. # Enable template service broker (requires service catalog to be enabled, above)
  888. #template_service_broker_install=true
  889. # Force a specific prefix (IE: registry) to use when pulling the service catalog image
  890. # NOTE: The registry all the way up to the start of the image name must be provided. Two examples
  891. # below are provided.
  892. #openshift_service_catalog_image_prefix=docker.io/openshift/origin-
  893. #openshift_service_catalog_image_prefix=registry.access.redhat.com/openshift3/ose-
  894. # Force a specific image version to use when pulling the service catalog image
  895. #openshift_service_catalog_image_version=v3.9
  896. # TSB image tag
  897. #template_service_broker_version='v3.9'
  898. # Configure one or more namespaces whose templates will be served by the TSB
  899. #openshift_template_service_broker_namespaces=['openshift']
  900. # masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
  901. #openshift_master_dynamic_provisioning_enabled=False
  902. # Admission plugin config
  903. #openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
  904. # Configure usage of openshift_clock role.
  905. #openshift_clock_enabled=true
  906. # OpenShift Per-Service Environment Variables
  907. # Environment variables are added to /etc/sysconfig files for
  908. # each OpenShift service: node, master (api and controllers).
  909. # API and controllers environment variables are merged in single
  910. # master environments.
  911. #openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
  912. #openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
  913. #openshift_node_env_vars={"ENABLE_HTTP2": "true"}
  914. # Enable API service auditing
  915. #openshift_master_audit_config={"enabled": true}
  916. #
  917. # In case you want more advanced setup for the auditlog you can
  918. # use this line.
  919. # The directory in "auditFilePath" will be created if it does not
  920. # exist.
  921. #openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
  922. # Enable origin repos that point at Centos PAAS SIG, defaults to true, only used
  923. # by openshift_deployment_type=origin
  924. #openshift_enable_origin_repo=false
  925. # Validity of the auto-generated OpenShift certificates in days.
  926. # See also openshift_hosted_registry_cert_expire_days above.
  927. #
  928. #openshift_ca_cert_expire_days=1825
  929. #openshift_node_cert_expire_days=730
  930. #openshift_master_cert_expire_days=730
  931. # Validity of the auto-generated external etcd certificates in days.
  932. # Controls validity for etcd CA, peer, server and client certificates.
  933. #
  934. #etcd_ca_default_days=1825
  935. #
  936. # ServiceAccountConfig:LimitSecretRefences rejects pods that reference secrets their service accounts do not reference
  937. # openshift_master_saconfig_limitsecretreferences=false
  938. # Upgrade Control
  939. #
  940. # By default nodes are upgraded in a serial manner one at a time and all failures
  941. # are fatal, one set of variables for normal nodes, one set of variables for
  942. # nodes that are part of control plane as the number of hosts may be different
  943. # in those two groups.
  944. #openshift_upgrade_nodes_serial=1
  945. #openshift_upgrade_nodes_max_fail_percentage=0
  946. #openshift_upgrade_control_plane_nodes_serial=1
  947. #openshift_upgrade_control_plane_nodes_max_fail_percentage=0
  948. #
  949. # You can specify the number of nodes to upgrade at once. We do not currently
  950. # attempt to verify that you have capacity to drain this many nodes at once
  951. # so please be careful when specifying these values. You should also verify that
  952. # the expected number of nodes are all schedulable and ready before starting an
  953. # upgrade. If it's not possible to drain the requested nodes the upgrade will
  954. # stall indefinitely until the drain is successful.
  955. #
  956. # If you're upgrading more than one node at a time you can specify the maximum
  957. # percentage of failure within the batch before the upgrade is aborted. Any
  958. # nodes that do fail are ignored for the rest of the playbook run and you should
  959. # take care to investigate the failure and return the node to service so that
  960. # your cluster can return to full capacity.
  961. #
  962. # The percentage of failed nodes must exceed the configured value; this would fail on two failures
  963. # openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
  964. # where as this would not
  965. # openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
  966. #
  967. # A timeout to wait for nodes to drain pods can be specified to ensure that the
  968. # upgrade continues even if nodes fail to drain pods in the allowed time. The
  969. # default value of 0 will wait indefinitely allowing the admin to investigate
  970. # the root cause and ensuring that disruption budgets are respected. If the
  971. # a timeout of 0 is used there will also be one attempt to re-try draining the
  972. # node. If a non zero timeout is specified there will be no attempt to retry.
  973. #openshift_upgrade_nodes_drain_timeout=0
  974. #
  975. # Multiple data migrations take place and if they fail they will fail the upgrade
  976. # You may wish to disable these or make them non fatal
  977. #
  978. # openshift_upgrade_pre_storage_migration_enabled=true
  979. # openshift_upgrade_pre_storage_migration_fatal=true
  980. # openshift_upgrade_post_storage_migration_enabled=true
  981. # openshift_upgrade_post_storage_migration_fatal=false
  982. ######################################################################
  983. # CloudForms/ManageIQ (CFME/MIQ) Configuration
  984. # See the readme for full descriptions and getting started
  985. # instructions: ../../roles/openshift_management/README.md or go directly to
  986. # their definitions: ../../roles/openshift_management/defaults/main.yml
  987. # ../../roles/openshift_management/vars/main.yml
  988. #
  989. # Namespace for the CFME project
  990. #openshift_management_project: openshift-management
  991. # Namespace/project description
  992. #openshift_management_project_description: CloudForms Management Engine
  993. # Choose 'miq-template' for a podified database install
  994. # Choose 'miq-template-ext-db' for an external database install
  995. #
  996. # If you are using the miq-template-ext-db template then you must add
  997. # the required database parameters to the
  998. # openshift_management_template_parameters variable.
  999. #openshift_management_app_template: miq-template
  1000. # Allowed options: nfs, nfs_external, preconfigured, cloudprovider.
  1001. #openshift_management_storage_class: nfs
  1002. # [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a
  1003. # netapp appliance, then you must set the hostname here. Leave the
  1004. # value as 'false' if you are not using external NFS.
  1005. #openshift_management_storage_nfs_external_hostname: false
  1006. # [OPTIONAL] - If you are using external NFS then you must set the base
  1007. # path to the exports location here.
  1008. #
  1009. # Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports
  1010. # that will back the application PV and optionally the database
  1011. # pv. Export path definitions, relative to
  1012. # {{ openshift_management_storage_nfs_base_dir }}
  1013. #
  1014. # LOCAL NFS NOTE:
  1015. #
  1016. # You may may also change this value if you want to change the default
  1017. # path used for local NFS exports.
  1018. #openshift_management_storage_nfs_base_dir: /exports
  1019. # LOCAL NFS NOTE:
  1020. #
  1021. # You may override the automatically selected LOCAL NFS server by
  1022. # setting this variable. Useful for testing specific task files.
  1023. #openshift_management_storage_nfs_local_hostname: false
  1024. # These are the default values for the username and password of the
  1025. # management app. Changing these values in your inventory will not
  1026. # change your username or password. You should only need to change
  1027. # these values in your inventory if you already changed the actual
  1028. # name and password AND are trying to use integration scripts.
  1029. #
  1030. # For example, adding this cluster as a container provider,
  1031. # playbooks/openshift-management/add_container_provider.yml
  1032. #openshift_management_username: admin
  1033. #openshift_management_password: smartvm
  1034. # A hash of parameters you want to override or set in the
  1035. # miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
  1036. # your inventory file as a simple hash. Acceptable values are defined
  1037. # under the .parameters list in files/miq-template{-ext-db}.yaml
  1038. # Example:
  1039. #
  1040. # openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}
  1041. #openshift_management_template_parameters: {}
  1042. # Firewall configuration
  1043. # You can open additional firewall ports by defining them as a list. of service
  1044. # names and ports/port ranges for either masters or nodes.
  1045. #openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}]
  1046. #openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}]
  1047. # Enable unsupported configurations, things that will yield a partially
  1048. # functioning cluster but would not be supported for production use
  1049. #openshift_enable_unsupported_configurations=false