hosts.example

# This is an example of a bring your own (byo) host inventory

# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
masters
nodes
etcd

# Set variables common for all OSEv3 hosts
[OSEv3:vars]
# SSH user. This user should be able to SSH to the hosts without being prompted
# for a password. If using SSH key based auth, the key should be managed by an
# ssh-agent.
ansible_ssh_user=root

# If ansible_ssh_user is not root, ansible_sudo must be set to true and the
# user must be configured for passwordless sudo
#ansible_sudo=true
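# As an illustration only (the "cloud-user" account name is just a placeholder),
# a non-root connection would combine both settings:
#ansible_ssh_user=cloud-user
#ansible_sudo=true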
# deployment type: valid values are origin, online, atomic-enterprise, and enterprise
deployment_type=atomic-enterprise

# Enable cluster metrics
#use_cluster_metrics=true

# Pre-release registry URL
#oreg_url=example.com/openshift3/ose-${component}:${version}

# Pre-release Dev puddle repo
#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]

# Pre-release Errata puddle repo
#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]

# Origin copr repo
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]

# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}]
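# If the htpasswd file referenced above does not already exist, one way to
# create it on the master is with the htpasswd utility from the httpd-tools
# package (sketch only; "admin" is a placeholder user name):
#   htpasswd -c /etc/openshift/htpasswd admin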
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]

# Project Configuration
#osm_project_request_message=''
#osm_project_request_template=''
#osm_mcs_allocator_range='s0:/2'
#osm_mcs_labels_per_project=5
#osm_uid_allocator_range='1000000000-1999999999/10000'

# Configure Fluentd
#use_fluentd=true

# Enable cockpit
#osm_use_cockpit=true
#
# Set cockpit plugins
#osm_cockpit_plugins=['cockpit-kubernetes']

# master cluster ha variables using pacemaker or RHEL HA
#openshift_master_cluster_password=openshift_cluster
#openshift_master_cluster_vip=192.168.133.25
#openshift_master_cluster_public_vip=192.168.133.25
#openshift_master_cluster_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com

# master cluster ha variables when using a different HA solution
# For installation, the value of openshift_master_cluster_hostname must resolve
# to the first master defined in the inventory.
# The HA solution must be manually configured after installation and must ensure
# that the master is running on a single master host.
#openshift_master_cluster_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_defer_ha=True

# default subdomain to use for exposed routes
#osm_default_subdomain=apps.test.example.com

# additional cors origins
#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']

# default project node selector
#osm_default_node_selector='region=primary'
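# Note: a selector such as 'region=primary' only matches nodes that carry the
# corresponding label, set via openshift_node_labels in the [nodes] group below;
# the same applies to the router and registry selectors that follow.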
# default storage plugin dependencies to install. By default the ceph and
# glusterfs plugin dependencies will be installed, if available.
#osn_storage_plugin_deps=['ceph','glusterfs']

# default selectors for router and registry services
# openshift_router_selector='region=infra'
# openshift_registry_selector='region=infra'

# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'

# Disable the OpenShift SDN plugin
# openshift_use_openshift_sdn=False

# set RPM version for debugging purposes
#openshift_pkg_version=-3.0.0.0

# Configure custom master certificates
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
# Detected names may be overridden by specifying the "names" key
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]

# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com

[etcd]
ose3-etcd[1:3]-ansible.test.example.com
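# The bracketed ranges above use Ansible's inclusive host range syntax, so
# ose3-master[1:3]-ansible.test.example.com expands to ose3-master1-, ose3-master2-,
# and ose3-master3-ansible.test.example.com.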
# NOTE: Currently we require that masters be part of the SDN, which requires that they also be nodes.
# However, in order to ensure that your masters are not burdened with running pods, you should
# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
[nodes]
ose3-master[1:3]-ansible.test.example.com
ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
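# As a sketch of the NOTE above (assuming openshift_schedulable is honored as a
# per-host variable), the master entries could instead be listed as:
#ose3-master[1:3]-ansible.test.example.com openshift_schedulable=False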