Browse Source

GCE-support (more information in PR, README_GCE.md)

Chengcheng Mu 9 years ago
parent
commit
a22fbd327a

+ 14 - 3
README_GCE.md

@@ -39,6 +39,13 @@ Create a gce.ini file for GCE
 * gce_service_account_pem_file_path - Full path from previous steps
 * gce_project_id - Found in "Projects", it lists all the GCE projects you are associated with.  The page lists their "Project Name" and "Project ID".  You want the "Project ID"
 
+Mandatory customization variables (check that the values match your tenant):
+* zone = europe-west1-d
+* network = default
+* gce_machine_type = n1-standard-2
+* gce_machine_image = preinstalled-slave-50g-v5
+
+
 1. vi ~/.gce/gce.ini
 1. make the contents look like this:
 ```
@@ -46,11 +53,15 @@ Create a gce.ini file for GCE
 gce_service_account_email_address = long...@developer.gserviceaccount.com
 gce_service_account_pem_file_path = /full/path/to/project_id-gce_key_hash.pem
 gce_project_id = project_id
+zone = europe-west1-d
+network = default
+gce_machine_type = n1-standard-2
+gce_machine_image = preinstalled-slave-50g-v5
+
 ```
-1. Setup a sym link so that gce.py will pick it up (link must be in same dir as gce.py)
+1. Define the environment variable GCE_INI_PATH so gce.py can pick it up and bin/cluster can also read it
 ```
-  cd openshift-ansible/inventory/gce
-  ln -s ~/.gce/gce.ini gce.ini
+export GCE_INI_PATH=~/.gce/gce.ini
 ```
 
 

+ 8 - 4
bin/cluster

@@ -142,10 +142,14 @@ class Cluster(object):
         """
         config = ConfigParser.ConfigParser()
         if 'gce' == provider:
-            config.readfp(open('inventory/gce/hosts/gce.ini'))
+            gce_ini_default_path = os.path.join(
+                'inventory/gce/hosts/gce.ini')
+            gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
+            if os.path.exists(gce_ini_path): 
+                config.readfp(open(gce_ini_path))
 
-            for key in config.options('gce'):
-                os.environ[key] = config.get('gce', key)
+                for key in config.options('gce'):
+                    os.environ[key] = config.get('gce', key)
 
             inventory = '-i inventory/gce/hosts'
         elif 'aws' == provider:
@@ -193,7 +197,7 @@ class Cluster(object):
         if args.option:
             for opt in args.option:
                 k, v = opt.split('=', 1)
-                env['cli_' + k] = v
+                env[k] = v
 
         ansible_env = '-e \'{}\''.format(
             ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()])

+ 11 - 3
inventory/gce/hosts/gce.py

@@ -120,6 +120,8 @@ class GceInventory(object):
             os.path.dirname(os.path.realpath(__file__)), "gce.ini")
         gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
 
+	print "GCE INI PATH :: "+gce_ini_path
+
         # Create a ConfigParser.
         # This provides empty defaults to each key, so that environment
         # variable configuration (as opposed to INI configuration) is able
@@ -173,6 +175,10 @@ class GceInventory(object):
         args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
         kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
 
+	sys.stderr.write("GCE_EMAIL : "+args[0]+"\n")
+	sys.stderr.write("GCE_PEM_FILE_PATH : "+args[1]+"\n")
+	sys.stderr.write("GCE_PROJECT : "+kwargs['project']+"\n")
+
         # Retrieve and return the GCE driver.
         gce = get_driver(Provider.GCE)(*args, **kwargs)
         gce.connection.user_agent_append(
@@ -211,7 +217,8 @@ class GceInventory(object):
             'gce_image': inst.image,
             'gce_machine_type': inst.size,
             'gce_private_ip': inst.private_ips[0],
-            'gce_public_ip': inst.public_ips[0],
+            # Hosts don't always have a public IP name
+            #'gce_public_ip': inst.public_ips[0],
             'gce_name': inst.name,
             'gce_description': inst.extra['description'],
             'gce_status': inst.extra['status'],
@@ -219,8 +226,8 @@ class GceInventory(object):
             'gce_tags': inst.extra['tags'],
             'gce_metadata': md,
             'gce_network': net,
-            # Hosts don't have a public name, so we add an IP
-            'ansible_ssh_host': inst.public_ips[0]
+            # Hosts don't always have a public IP name
+            #'ansible_ssh_host': inst.public_ips[0]
         }
 
     def get_instance(self, instance_name):
@@ -284,4 +291,5 @@ class GceInventory(object):
 
 
 # Run the script
+print "Hello world"
 GceInventory()

+ 1 - 1
inventory/openstack/hosts/nova.py

@@ -34,7 +34,7 @@ except ImportError:
 # executed with no parameters, return the list of
 # all groups and hosts
 
-NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini",
+NOVA_CONFIG_FILES = [os.path.join(os.path.dirname(os.path.realpath(__file__)), "nova.ini"),
                      os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")),
                      "/etc/ansible/nova.ini"]
 

+ 15 - 0
playbooks/common/openshift-cluster/set_infra_launch_facts_tasks.yml

@@ -0,0 +1,15 @@
+---
+- set_fact: k8s_type=infra
+- set_fact: sub_host_type="{{ type }}"
+- set_fact: number_infra="{{ count }}"
+
+- name: Generate infra  instance names(s)
+  set_fact:
+    scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
+  register: infra_names_output
+  with_sequence: count={{ number_infra }}
+
+- set_fact:
+    infra_names: "{{ infra_names_output.results | default([])
+                    | oo_collect('ansible_facts')
+                    | oo_collect('scratch_name') }}"

+ 4 - 0
playbooks/gce/openshift-cluster/config.yml

@@ -10,6 +10,8 @@
   - set_fact:
       g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
       g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
+      use_sdn: "{{ do_we_use_openshift_sdn }}"
+      sdn_plugin: "{{ sdn_network_plugin }}"
 
 - include: ../../common/openshift-cluster/config.yml
   vars:
@@ -22,3 +24,5 @@
     openshift_debug_level: 2
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ gce_private_ip }}"
+    openshift_use_openshift_sdn: "{{ hostvars.localhost.use_sdn  }}"
+    os_sdn_network_plugin_name: "{{ hostvars.localhost.sdn_plugin }}"

+ 64 - 0
playbooks/gce/openshift-cluster/join_node.yml

@@ -0,0 +1,64 @@
+---
+- name: Populate oo_hosts_to_update group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_hosts_to_update
+    add_host:
+      name: "{{ node_ip }}"
+      groups: oo_hosts_to_update
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- name: Populate oo_masters_to_config host group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ node_ip }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_nodes_to_config
+
+  - name: Add to preemptible group if needed
+    add_host:
+      name: "{{ node_ip }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_preemptible_nodes
+    when: preemptible is defined and preemptible == "true"
+  
+  - name: Add to not preemptible group if needed
+    add_host:
+      name: "{{ node_ip }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_non_preemptible_nodes
+    when: preemptible is defined and  preemptible == "false"
+  
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_first_master
+    when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+#- include: config.yml
+- include: ../../common/openshift-node/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ ansible_default_ipv4.address }}"
+    openshift_use_openshift_sdn: true
+    os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
+    osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"

+ 1 - 1
playbooks/gce/openshift-cluster/launch.yml

@@ -28,7 +28,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/set_infra_launch_facts_tasks.yml
     vars:
       type: "infra"
       count: "{{ num_infra }}"

+ 2 - 2
playbooks/gce/openshift-cluster/list.yml

@@ -14,11 +14,11 @@
       groups: oo_list_hosts
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated | default([]))
 
 - name: List instance(s)
   hosts: oo_list_hosts
   gather_facts: no
   tasks:
   - debug:
-      msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
+      msg: "private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"

+ 9 - 5
playbooks/gce/openshift-cluster/tasks/launch_instances.yml

@@ -10,18 +10,22 @@
     service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
     pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
     project_id: "{{ lookup('env', 'gce_project_id') }}"
+    zone: "{{ lookup('env', 'zone') }}"
+    network: "{{ lookup('env', 'network') }}"
+# unsupported in 1.9.+
+    #service_account_permissions: "datastore,logging-write"
     tags:
       - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
       - env-{{ cluster }}
       - host-type-{{ type }}
-      - sub-host-type-{{ sub_host_type }}
+      - sub-host-type-{{ g_sub_host_type }}
       - env-host-type-{{ cluster }}-openshift-{{ type }}
   register: gce
 
 - name: Add new instances to groups and set variables needed
   add_host:
     hostname: "{{ item.name }}"
-    ansible_ssh_host: "{{ item.public_ip }}"
+    ansible_ssh_host: "{{ item.name }}"
     ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
     ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
     groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
@@ -30,13 +34,13 @@
   with_items: gce.instance_data
 
 - name: Wait for ssh
-  wait_for: port=22 host={{ item.public_ip }}
+  wait_for: port=22 host={{ item.name }}
   with_items: gce.instance_data
 
 - name: Wait for user setup
   command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
   register: result
   until: result.rc == 0
-  retries: 20
-  delay: 10
+  retries: 30
+  delay: 5
   with_items: gce.instance_data

+ 34 - 21
playbooks/gce/openshift-cluster/terminate.yml

@@ -1,25 +1,18 @@
 ---
 - name: Terminate instance(s)
   hosts: localhost
+  connection: local
   gather_facts: no
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
+  - set_fact: scratch_group=tag_env-{{ cluster_id }}
   - add_host:
       name: "{{ item }}"
-      groups: oo_hosts_to_terminate, oo_nodes_to_terminate
+      groups: oo_hosts_to_terminate
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
-
-  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
-  - add_host:
-      name: "{{ item }}"
-      groups: oo_hosts_to_terminate, oo_masters_to_terminate
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated | default([]))
 
 - name: Unsubscribe VMs
   hosts: oo_hosts_to_terminate
@@ -32,14 +25,34 @@
           lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
             default('no', True) | lower in ['no', 'false']
 
-- include: ../openshift-node/terminate.yml
-  vars:
-    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+- name: Terminate instances(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+
+    - name: Terminate instances that were previously launched
+      local_action:
+        module: gce
+        state: 'absent'
+        name: "{{ item }}"
+        service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+        pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+        project_id: "{{ lookup('env', 'gce_project_id') }}"
+        zone: "{{ lookup('env', 'zone') }}"
+      with_items: groups['oo_hosts_to_terminate'] | default([])
+      when: item is defined
 
-- include: ../openshift-master/terminate.yml
-  vars:
-    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#- include: ../openshift-node/terminate.yml
+#  vars:
+#    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+#    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+#    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#
+#- include: ../openshift-master/terminate.yml
+#  vars:
+#    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+#    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+#    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"

+ 5 - 3
playbooks/gce/openshift-cluster/vars.yml

@@ -1,8 +1,11 @@
 ---
+do_we_use_openshift_sdn: true
+sdn_network_plugin: redhat/openshift-ovs-subnet 
+# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation
 deployment_vars:
   origin:
-    image: centos-7
-    ssh_user:
+    image: preinstalled-slave-50g-v5
+    ssh_user: root
     sudo: yes
   online:
     image: libra-rhel7
@@ -12,4 +15,3 @@ deployment_vars:
     image: rhel-7
     ssh_user:
     sudo: yes
-

+ 17 - 3
playbooks/openstack/openshift-cluster/files/heat_stack.yaml

@@ -88,6 +88,12 @@ parameters:
     label: Infra flavor
     description: Flavor of the infra node servers
 
+  key_pair:
+    type: string
+    label: Key name
+    description: Name of the key
+
+
 outputs:
 
   master_names:
@@ -250,6 +256,14 @@ resources:
           port_range_max: 10250
           remote_mode: remote_group_id
           remote_group_id: { get_resource: master-secgrp }
+        - direction: ingress
+          protocol: tcp
+          port_range_min: 30001 
+          port_range_max: 30001
+        - direction: ingress
+          protocol: tcp
+          port_range_min: 30850 
+          port_range_max: 30850
 
   infra-secgrp:
     type: OS::Neutron::SecurityGroup
@@ -291,7 +305,7 @@ resources:
           type:       master
           image:      { get_param: master_image }
           flavor:     { get_param: master_flavor }
-          key_name:   { get_resource: keypair }
+          key_name:   { get_param: key_pair }
           net:        { get_resource: net }
           subnet:     { get_resource: subnet }
           secgrp:
@@ -323,7 +337,7 @@ resources:
           subtype:    compute
           image:      { get_param: node_image }
           flavor:     { get_param: node_flavor }
-          key_name:   { get_resource: keypair }
+          key_name:   { get_param: key_pair }
           net:        { get_resource: net }
           subnet:     { get_resource: subnet }
           secgrp:
@@ -355,7 +369,7 @@ resources:
           subtype:    infra
           image:      { get_param: infra_image }
           flavor:     { get_param: infra_flavor }
-          key_name:   { get_resource: keypair }
+          key_name:   { get_param: key_pair }
           net:        { get_resource: net }
           subnet:     { get_resource: subnet }
           secgrp:

+ 30 - 5
playbooks/openstack/openshift-cluster/launch.yml

@@ -19,15 +19,32 @@
     changed_when: false
     failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr
 
-  - set_fact:
-      heat_stack_action: 'stack-create'
+  - name: Create OpenStack Stack
+    command: 'heat stack-create -f {{ openstack_infra_heat_stack }}
+             -P key_pair={{ openstack_ssh_keypair }}
+             -P cluster_id={{ cluster_id }}
+             -P dns_nameservers={{ openstack_network_dns | join(",") }}
+             -P cidr={{ openstack_network_cidr }}
+             -P ssh_incoming={{ openstack_ssh_access_from }}
+             -P num_masters={{ num_masters }}
+             -P num_nodes={{ num_nodes }}
+             -P num_infra={{ num_infra }}
+             -P master_image={{ deployment_vars[deployment_type].image }}
+             -P node_image={{ deployment_vars[deployment_type].image }}
+             -P infra_image={{ deployment_vars[deployment_type].image }}
+             -P master_flavor={{ openstack_flavor["master"] }}
+             -P node_flavor={{ openstack_flavor["node"] }}
+             -P infra_flavor={{ openstack_flavor["infra"] }}
+             -P ssh_public_key="{{ openstack_ssh_public_key }}"
+             openshift-ansible-{{ cluster_id }}-stack'
     when: stack_show_result.rc == 1
   - set_fact:
       heat_stack_action: 'stack-update'
     when: stack_show_result.rc == 0
 
-  - name: Create or Update OpenStack Stack
-    command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
+  - name: Update OpenStack Stack
+    command: 'heat stack-update -f {{ openstack_infra_heat_stack }}
+             -P key_pair={{ openstack_ssh_keypair }}
              -P cluster_id={{ cluster_id }}
              -P cidr={{ openstack_network_cidr }}
              -P dns_nameservers={{ openstack_network_dns | join(",") }}
@@ -50,7 +67,7 @@
     shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
     register: stack_show_status_result
     until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']
-    retries: 30
+    retries: 300
     delay: 1
     failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
 
@@ -119,4 +136,12 @@
 
 - include: update.yml
 
+# Fix icmp reject iptables rules
+# It should be solved in openshift-sdn but unfortunately it's not the case
+# Mysterious
+- name: Configuring Nodes for RBox
+  hosts: oo_nodes_to_config
+  roles:
+    - rbox-node
+
 - include: list.yml

+ 1 - 1
roles/openshift_facts/tasks/main.yml

@@ -1,5 +1,5 @@
 ---
-- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0
+- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0 and not 1.9.0.1
   assert:
     that:
     - ansible_version | version_compare('1.8.0', 'ge')

+ 1 - 1
roles/openshift_manage_node/tasks/main.yml

@@ -3,7 +3,7 @@
       {{ openshift.common.client_binary }} get node {{ item }}
   register: omd_get_node
   until: omd_get_node.rc == 0
-  retries: 10
+  retries: 20
   delay: 5
   with_items: openshift_nodes
 

+ 10 - 1
roles/openshift_master/tasks/main.yml

@@ -8,6 +8,15 @@
     - openshift_master_oauth_grant_method in openshift_master_valid_grant_methods
   when: openshift_master_oauth_grant_method is defined
 
+- name: Displaying openshift_master_ha
+  debug: var=openshift_master_ha
+
+- name: openshift_master_cluster_password
+  debug: var=openshift_master_cluster_password
+
+- name: openshift.master.cluster_defer_ha
+  debug: var=openshift.master.cluster_defer_ha
+
 - fail:
     msg: "openshift_master_cluster_password must be set for multi-master installations"
   when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined
@@ -23,7 +32,7 @@
       api_port: "{{ openshift_master_api_port | default(None) }}"
       api_url: "{{ openshift_master_api_url | default(None) }}"
       api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
-      public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+      public_api_url: "{{ openshift_master_public_api_url | default('https://' ~ openshift.common.public_ip ~ ':8443') }}"
       console_path: "{{ openshift_master_console_path | default(None) }}"
       console_port: "{{ openshift_master_console_port | default(None) }}"
       console_url: "{{ openshift_master_console_url | default(None) }}"

+ 7 - 1
roles/openshift_node/tasks/main.yml

@@ -22,7 +22,7 @@
       deployment_type: "{{ openshift_deployment_type }}"
   - role: node
     local_facts:
-      labels: "{{ openshift_node_labels | default(none) }}"
+      labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default() ) }}"
       annotations: "{{ openshift_node_annotations | default(none) }}"
       registry_url: "{{ oreg_url | default(none) }}"
       debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
@@ -73,6 +73,12 @@
     dest: /etc/sysconfig/docker
     regexp: '^OPTIONS=.*$'
     line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }} \
+--insecure-registry=dockerhub.rnd.amadeus.net:5000 \
+--insecure-registry=dockerhub.rnd.amadeus.net:5001 \
+--insecure-registry=dockerhub.rnd.amadeus.net:5002 \
+--add-registry=dockerhub.rnd.amadeus.net:5000 \
+--add-registry=dockerhub.rnd.amadeus.net:5001 \
+--add-registry=dockerhub.rnd.amadeus.net:5002 \
 {% if ansible_selinux and ansible_selinux.status == '''enabled''' %}--selinux-enabled{% endif %}'"
   when: docker_check.stat.isreg
   notify: