# This is an example of a bring your own (byo) host inventory
# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
masters
nodes
etcd
lb
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
# SSH user, this user should allow ssh based auth without requiring a
# password. If using ssh key based auth, then the key should be managed by an
# ssh agent.
ansible_ssh_user=root
# If ansible_ssh_user is not root, ansible_sudo must be set to true and the
# user must be configured for passwordless sudo
#ansible_sudo=true
# deployment type valid values are origin, online and enterprise
deployment_type=atomic-enterprise
# Pre-release registry URL
#oreg_url=example.com/openshift3/ose-${component}:${version}
# Pre-release Dev puddle repo
#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
# Pre-release Errata puddle repo
#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
# Origin copr repo
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}]
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
# Project Configuration
#osm_project_request_message=''
#osm_project_request_template=''
#osm_mcs_allocator_range='s0:/2'
#osm_mcs_labels_per_project=5
#osm_uid_allocator_range='1000000000-1999999999/10000'
# Configure Fluentd
#use_fluentd=true
# Enable cockpit
#osm_use_cockpit=true
#
# Set cockpit plugins
#osm_cockpit_plugins=['cockpit-kubernetes']
# master cluster ha variables using pacemaker or RHEL HA
#openshift_master_cluster_password=openshift_cluster
#openshift_master_cluster_vip=192.168.133.25
#openshift_master_cluster_public_vip=192.168.133.25
#openshift_master_cluster_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
# master cluster ha variables when using a different HA solution
# For installation the value of openshift_master_cluster_hostname must resolve
# to the first master defined in the inventory.
# The HA solution must be manually configured after installation and must ensure
# that the master is running on a single master host.
#openshift_master_cluster_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_defer_ha=True
# Native clustering with haproxy as an optional load balancer
#openshift_master_cluster_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_vip=192.168.133.25
#openshift_master_cluster_public_vip=192.168.133.25
# Override the default controller lease ttl
#osm_controller_lease_ttl=30
# default subdomain to use for exposed routes
#osm_default_subdomain=apps.test.example.com
# additional cors origins
#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
# default project node selector
#osm_default_node_selector='region=primary'
# default storage plugin dependencies to install, by default the ceph and
# glusterfs plugin dependencies will be installed, if available.
#osn_storage_plugin_deps=['ceph','glusterfs']
# default selectors for router and registry services
# openshift_router_selector='region=infra'
# openshift_registry_selector='region=infra'
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
# Disable the OpenShift SDN plugin
# openshift_use_openshift_sdn=False
# set RPM version for debugging purposes
#openshift_pkg_version=-3.0.0.0
# Configure custom master certificates
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
# Detected names may be overridden by specifying the "names" key
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
[etcd]
ose3-etcd[1:3]-ansible.test.example.com
[lb]
ose3-lb-ansible.test.example.com
# host group for nodes
[nodes]
ose3-master[1:3]-ansible.test.example.com
ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"