Преглед на файлове

Upgrade improvements

- Push config dir logic out of module and use host variables instead.
- Backup master config with ansible utility.
- Add error handling for the upgrade config module.
- Add verbose option to installer.
- Return details on what we changed when upgrading config.
- Clean up use of first master.
- Don't install upgrade RPMs to check what version we'll upgrade to.
Devan Goodwin преди 9 години
родител
ревизия
fe4e9a4ca7

+ 25 - 28
playbooks/adhoc/upgrades/library/openshift_upgrade_config.py

@@ -5,11 +5,8 @@
 """Ansible module for modifying OpenShift configs during an upgrade"""
 """Ansible module for modifying OpenShift configs during an upgrade"""
 
 
 import os
 import os
-import shutil
 import yaml
 import yaml
 
 
-from datetime import datetime
-
 DOCUMENTATION = '''
 DOCUMENTATION = '''
 ---
 ---
 module: openshift_upgrade_config
 module: openshift_upgrade_config
@@ -20,21 +17,14 @@ requirements: [ ]
 EXAMPLES = '''
 EXAMPLES = '''
 '''
 '''
 
 
-def get_cfg_dir():
-    """Return the correct config directory to use."""
-    cfg_path = '/etc/origin/'
-    if not os.path.exists(cfg_path):
-        cfg_path = '/etc/openshift/'
-    return cfg_path
-
 
 
-def upgrade_master_3_0_to_3_1(backup):
+def upgrade_master_3_0_to_3_1(module, config_base, backup):
     """Main upgrade method for 3.0 to 3.1."""
     """Main upgrade method for 3.0 to 3.1."""
-    changed = False
+    changes = []
 
 
     # Facts do not get transferred to the hosts where custom modules run,
     # Facts do not get transferred to the hosts where custom modules run,
     # need to make some assumptions here.
     # need to make some assumptions here.
-    master_config = os.path.join(get_cfg_dir(), 'master/master-config.yaml')
+    master_config = os.path.join(config_base, 'master/master-config.yaml')
 
 
     master_cfg_file = open(master_config, 'r')
     master_cfg_file = open(master_config, 'r')
     config = yaml.safe_load(master_cfg_file.read())
     config = yaml.safe_load(master_cfg_file.read())
@@ -45,6 +35,7 @@ def upgrade_master_3_0_to_3_1(backup):
         'v1beta3' in config['apiLevels']:
         'v1beta3' in config['apiLevels']:
         config['apiLevels'].remove('v1beta3')
         config['apiLevels'].remove('v1beta3')
         changed = True
         changed = True
+        changes.append("master-config.yaml: removed v1beta3 from apiLevels")
     if 'apiLevels' in config['kubernetesMasterConfig'] and \
     if 'apiLevels' in config['kubernetesMasterConfig'] and \
         'v1beta3' in config['kubernetesMasterConfig']['apiLevels']:
         'v1beta3' in config['kubernetesMasterConfig']['apiLevels']:
         config['kubernetesMasterConfig']['apiLevels'].remove('v1beta3')
         config['kubernetesMasterConfig']['apiLevels'].remove('v1beta3')
@@ -57,27 +48,26 @@ def upgrade_master_3_0_to_3_1(backup):
 #            'certFile': 'master.proxy-client.crt',
 #            'certFile': 'master.proxy-client.crt',
 #            'keyFile': 'master.proxy-client.key'
 #            'keyFile': 'master.proxy-client.key'
 #       }
 #       }
+#        changes.append("master-config.yaml: added proxyClientInfo")
 
 
-    if changed:
+    if len(changes) > 0:
         if backup:
         if backup:
-            timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
-            basedir = os.path.split(master_config)[0]
-            backup_file = os.path.join(basedir, 'master-config.yaml.bak-%s'
-                                       % timestamp)
-            shutil.copyfile(master_config, backup_file)
+            # TODO: Check success:
+            module.backup_local(master_config)
+
         # Write the modified config:
         # Write the modified config:
         out_file = open(master_config, 'w')
         out_file = open(master_config, 'w')
         out_file.write(yaml.safe_dump(config, default_flow_style=False))
         out_file.write(yaml.safe_dump(config, default_flow_style=False))
         out_file.close()
         out_file.close()
 
 
-    return changed
+    return changes
 
 
 
 
-def upgrade_master(from_version, to_version, backup):
+def upgrade_master(module, config_base, from_version, to_version, backup):
     """Upgrade entry point."""
     """Upgrade entry point."""
     if from_version == '3.0':
     if from_version == '3.0':
         if to_version == '3.1':
         if to_version == '3.1':
-            return upgrade_master_3_0_to_3_1(backup)
+            return upgrade_master_3_0_to_3_1(module, config_base, backup)
 
 
 
 
 def main():
 def main():
@@ -89,6 +79,7 @@ def main():
 
 
     module = AnsibleModule(
     module = AnsibleModule(
         argument_spec=dict(
         argument_spec=dict(
+            config_base=dict(required=True),
             from_version=dict(required=True, choices=['3.0']),
             from_version=dict(required=True, choices=['3.0']),
             to_version=dict(required=True, choices=['3.1']),
             to_version=dict(required=True, choices=['3.1']),
             role=dict(required=True, choices=['master']),
             role=dict(required=True, choices=['master']),
@@ -101,12 +92,18 @@ def main():
     to_version = module.params['to_version']
     to_version = module.params['to_version']
     role = module.params['role']
     role = module.params['role']
     backup = module.params['backup']
     backup = module.params['backup']
-
-    changed = False
-    if role == 'master':
-        changed = upgrade_master(from_version, to_version, backup)
-
-    return module.exit_json(changed=changed)
+    config_base = module.params['config_base']
+
+    try:
+        changes = []
+        if role == 'master':
+            changes = upgrade_master(module, config_base, from_version,
+                to_version, backup)
+
+        changed = len(changes) > 0
+        return module.exit_json(changed=changed, changes=changes)
+    except Exception, e:
+        return module.fail_json(msg=str(e))
 
 
 # ignore pylint errors related to the module_utils import
 # ignore pylint errors related to the module_utils import
 # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
 # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import

+ 31 - 47
playbooks/adhoc/upgrades/upgrade.yml

@@ -1,4 +1,12 @@
 ---
 ---
+- name: Verify upgrade can proceed
+  hosts: masters
+  tasks:
+  # Checking the global deployment type rather than host facts, this is about
+  # what the user is requesting.
+    - fail: msg="Deployment type enterprise not supported for upgrade"
+      when: deployment_type == "enterprise"
+
 - name: Update deployment type
 - name: Update deployment type
   hosts: OSEv3
   hosts: OSEv3
   roles:
   roles:
@@ -9,14 +17,6 @@
       local_facts:
       local_facts:
         deployment_type: "{{ deployment_type }}"
         deployment_type: "{{ deployment_type }}"
 
 
-- name: Verify upgrade can proceed
-  hosts: masters
-  tasks:
-  # Checking the global deployment type rather than host facts, this is about
-  # what the user is requesting.
-    - fail: msg="Deployment type enterprise not supported for upgrade"
-      when: deployment_type == "enterprise"
-
 - name: Backup etcd
 - name: Backup etcd
   hosts: masters
   hosts: masters
   vars:
   vars:
@@ -52,48 +52,35 @@
   - name: Display location of etcd backup
   - name: Display location of etcd backup
     debug: msg="Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
     debug: msg="Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
 
 
-- name: Upgrade base package on masters
-  hosts: masters
-  roles:
-  - openshift_facts
-  vars:
-    openshift_version: "{{ openshift_pkg_version | default('') }}"
-  tasks:
-    - name: Upgrade base package
-      yum: pkg={{ openshift.common.service_type }}{{ openshift_version  }} state=latest
-
-- name: Evaluate oo_first_master
-  hosts: localhost
-  vars:
-    g_masters_group: "{{ 'masters' }}"
-  tasks:
-    - name: Evaluate oo_first_master
-      add_host:
-        name: "{{ groups[g_masters_group][0] }}"
-        groups: oo_first_master
-        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-        ansible_sudo: "{{ g_sudo | default(omit) }}"
-      when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
-
-# TODO: ideally we would check the new version, without installing it. (some
-# kind of yum repoquery? would need to handle openshift -> atomic-openshift
-# package rename)
 - name: Perform upgrade version checking
 - name: Perform upgrade version checking
-  hosts: oo_first_master
+  hosts: masters[0]
   tasks:
   tasks:
-    - name: Determine new version
+    - name: Determine available version
+      shell: >
+        yum list available {{ openshift.common.service_type }} | tail -n 1 | cut -f 2 -d " " | cut -f 1 -d "-"
+      register: _new_version
+    - debug: var=_new_version
+    # The above check will return nothing if the package is already installed,
+    # and we may be re-running upgrade due to a failure.
+    - name: Determine installed version
       command: >
       command: >
         rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}
         rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}
       register: _new_version
       register: _new_version
+      when: _new_version.stdout == ""
+    # Fail if we still don't know:
+    - debug: var=_new_version
+    - name: Verify upgrade version
+      fail: Unable to determine upgrade version for {{ openshift.common.service_type }}
+      when: _new_version.stdout == ""
 
 
 - name: Ensure AOS 3.0.2 or Origin 1.0.6
 - name: Ensure AOS 3.0.2 or Origin 1.0.6
-  hosts: oo_first_master
+  hosts: masters[0]
   tasks:
   tasks:
     fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
     fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
     when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') )
     when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') )
 
 
 - name: Verify upgrade can proceed
 - name: Verify upgrade can proceed
-  hosts: oo_first_master
+  hosts: masters[0]
   tasks:
   tasks:
   # Checking the global deployment type rather than host facts, this is about
   # Checking the global deployment type rather than host facts, this is about
   # what the user is requesting.
   # what the user is requesting.
@@ -107,13 +94,10 @@
   tasks:
   tasks:
     - name: Upgrade to latest available kernel
     - name: Upgrade to latest available kernel
       yum: pkg=kernel state=latest
       yum: pkg=kernel state=latest
-    - name: display just the deployment_type variable for the current host
-      debug:
-        var: hostvars[inventory_hostname]
     - name: Upgrade master packages
     - name: Upgrade master packages
       command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
       command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
     - name: Upgrade master configuration.
     - name: Upgrade master configuration.
-      openshift_upgrade_config: from_version=3.0 to_version=3.1 role=master
+      openshift_upgrade_config: from_version=3.0 to_version=3.1 role=master config_base={{ hostvars[inventory_hostname].openshift.common.config_base }}
     - name: Restart master services
     - name: Restart master services
       service: name="{{ openshift.common.service_type}}-master" state=restarted
       service: name="{{ openshift.common.service_type}}-master" state=restarted
 
 
@@ -130,7 +114,7 @@
       service: name="{{ openshift.common.service_type }}-node" state=restarted
       service: name="{{ openshift.common.service_type }}-node" state=restarted
 
 
 - name: Update cluster policy
 - name: Update cluster policy
-  hosts: oo_first_master
+  hosts: masters[0]
   tasks:
   tasks:
     - name: oadm policy reconcile-cluster-roles --confirm
     - name: oadm policy reconcile-cluster-roles --confirm
       command: >
       command: >
@@ -138,7 +122,7 @@
         policy reconcile-cluster-roles --confirm
         policy reconcile-cluster-roles --confirm
 
 
 - name: Update cluster policy bindings
 - name: Update cluster policy bindings
-  hosts: oo_first_master
+  hosts: masters[0]
   tasks:
   tasks:
     - name: oadm policy reconcile-cluster-role-bindings --confirm
     - name: oadm policy reconcile-cluster-role-bindings --confirm
       command: >
       command: >
@@ -151,7 +135,7 @@
       when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
       when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
 
 
 - name: Upgrade default router
 - name: Upgrade default router
-  hosts: oo_first_master
+  hosts: masters[0]
   vars:
   vars:
     - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
     - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
     - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
     - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
@@ -189,7 +173,7 @@
         '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
         '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
 
 
 - name: Upgrade default
 - name: Upgrade default
-  hosts: oo_first_master
+  hosts: masters[0]
   vars:
   vars:
     - registry_image: "{{  openshift.master.registry_url | replace( '${component}', 'docker-registry' )  | replace ( '${version}', 'v' + _new_version.stdout  ) }}"
     - registry_image: "{{  openshift.master.registry_url | replace( '${component}', 'docker-registry' )  | replace ( '${version}', 'v' + _new_version.stdout  ) }}"
     - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
     - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
@@ -207,7 +191,7 @@
         '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
         '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
 
 
 - name: Update image streams and templates
 - name: Update image streams and templates
-  hosts: oo_first_master
+  hosts: masters[0]
   vars:
   vars:
     openshift_examples_import_command: "update"
     openshift_examples_import_command: "update"
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_deployment_type: "{{ deployment_type }}"

+ 15 - 7
utils/src/ooinstall/cli_installer.py

@@ -323,7 +323,7 @@ def get_installed_hosts(hosts, callback_facts):
             installed_hosts.append(host)
             installed_hosts.append(host)
     return installed_hosts
     return installed_hosts
 
 
-def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
+def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
 
 
     # Copy the list of existing hosts so we can remove any already installed nodes.
     # Copy the list of existing hosts so we can remove any already installed nodes.
     hosts_to_run_on = list(oo_cfg.hosts)
     hosts_to_run_on = list(oo_cfg.hosts)
@@ -424,9 +424,11 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
         writable=True,
         writable=True,
         readable=True),
         readable=True),
     default="/tmp/ansible.log")
     default="/tmp/ansible.log")
+@click.option('-v', '--verbose',
+    is_flag=True, default=False)
 #pylint: disable=too-many-arguments
 #pylint: disable=too-many-arguments
 # Main CLI entrypoint, not much we can do about too many arguments.
 # Main CLI entrypoint, not much we can do about too many arguments.
-def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path):
+def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
     """
     """
     The main click CLI module. Responsible for handling most common CLI options,
     The main click CLI module. Responsible for handling most common CLI options,
     assigning any defaults and adding to the context for the sub-commands.
     assigning any defaults and adding to the context for the sub-commands.
@@ -436,6 +438,7 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
     ctx.obj['configuration'] = configuration
     ctx.obj['configuration'] = configuration
     ctx.obj['ansible_config'] = ansible_config
     ctx.obj['ansible_config'] = ansible_config
     ctx.obj['ansible_log_path'] = ansible_log_path
     ctx.obj['ansible_log_path'] = ansible_log_path
+    ctx.obj['verbose'] = verbose
 
 
     oo_cfg = OOConfig(ctx.obj['configuration'])
     oo_cfg = OOConfig(ctx.obj['configuration'])
 
 
@@ -466,6 +469,7 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
 @click.pass_context
 @click.pass_context
 def uninstall(ctx):
 def uninstall(ctx):
     oo_cfg = ctx.obj['oo_cfg']
     oo_cfg = ctx.obj['oo_cfg']
+    verbose = ctx.obj['verbose']
 
 
     if len(oo_cfg.hosts) == 0:
     if len(oo_cfg.hosts) == 0:
         click.echo("No hosts defined in: %s" % oo_cfg['configuration'])
         click.echo("No hosts defined in: %s" % oo_cfg['configuration'])
@@ -481,13 +485,14 @@ def uninstall(ctx):
             click.echo("Uninstall cancelled.")
             click.echo("Uninstall cancelled.")
             sys.exit(0)
             sys.exit(0)
 
 
-    openshift_ansible.run_uninstall_playbook()
+    openshift_ansible.run_uninstall_playbook(verbose)
 
 
 
 
 @click.command()
 @click.command()
 @click.pass_context
 @click.pass_context
 def upgrade(ctx):
 def upgrade(ctx):
     oo_cfg = ctx.obj['oo_cfg']
     oo_cfg = ctx.obj['oo_cfg']
+    verbose = ctx.obj['verbose']
 
 
     if len(oo_cfg.hosts) == 0:
     if len(oo_cfg.hosts) == 0:
         click.echo("No hosts defined in: %s" % oo_cfg['configuration'])
         click.echo("No hosts defined in: %s" % oo_cfg['configuration'])
@@ -514,7 +519,7 @@ def upgrade(ctx):
             click.echo("Upgrade cancelled.")
             click.echo("Upgrade cancelled.")
             sys.exit(0)
             sys.exit(0)
 
 
-    retcode = openshift_ansible.run_upgrade_playbook()
+    retcode = openshift_ansible.run_upgrade_playbook(verbose)
     if retcode > 0:
     if retcode > 0:
         click.echo("Errors encountered during upgrade, please check %s." %
         click.echo("Errors encountered during upgrade, please check %s." %
             oo_cfg.settings['ansible_log_path'])
             oo_cfg.settings['ansible_log_path'])
@@ -527,6 +532,7 @@ def upgrade(ctx):
 @click.pass_context
 @click.pass_context
 def install(ctx, force):
 def install(ctx, force):
     oo_cfg = ctx.obj['oo_cfg']
     oo_cfg = ctx.obj['oo_cfg']
+    verbose = ctx.obj['verbose']
 
 
     if ctx.obj['unattended']:
     if ctx.obj['unattended']:
         error_if_missing_info(oo_cfg)
         error_if_missing_info(oo_cfg)
@@ -534,13 +540,15 @@ def install(ctx, force):
         oo_cfg = get_missing_info_from_user(oo_cfg)
         oo_cfg = get_missing_info_from_user(oo_cfg)
 
 
     click.echo('Gathering information from hosts...')
     click.echo('Gathering information from hosts...')
-    callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts)
+    callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
+        verbose)
     if error:
     if error:
         click.echo("There was a problem fetching the required information. " \
         click.echo("There was a problem fetching the required information. " \
                    "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
                    "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
         sys.exit(1)
         sys.exit(1)
 
 
-    hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg, callback_facts, ctx.obj['unattended'], force)
+    hosts_to_run_on, callback_facts = get_hosts_to_run_on(
+        oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose)
 
 
     click.echo('Writing config to: %s' % oo_cfg.config_path)
     click.echo('Writing config to: %s' % oo_cfg.config_path)
 
 
@@ -562,7 +570,7 @@ If changes are needed to the values recorded by the installer please update {}.
         confirm_continue(message)
         confirm_continue(message)
 
 
     error = openshift_ansible.run_main_playbook(oo_cfg.hosts,
     error = openshift_ansible.run_main_playbook(oo_cfg.hosts,
-                                                   hosts_to_run_on)
+                                                   hosts_to_run_on, verbose)
     if error:
     if error:
         # The bootstrap script will print out the log location.
         # The bootstrap script will print out the log location.
         message = """
         message = """

+ 22 - 19
utils/src/ooinstall/openshift_ansible.py

@@ -91,16 +91,17 @@ def write_host(host, inventory, scheduleable=True):
     inventory.write('{} {}\n'.format(host, facts))
     inventory.write('{} {}\n'.format(host, facts))
 
 
 
 
-def load_system_facts(inventory_file, os_facts_path, env_vars):
+def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
     """
     """
     Retrieves system facts from the remote systems.
     Retrieves system facts from the remote systems.
     """
     """
     FNULL = open(os.devnull, 'w')
     FNULL = open(os.devnull, 'w')
-    status = subprocess.call(['ansible-playbook',
-                     '--inventory-file={}'.format(inventory_file),
-                     os_facts_path],
-                     env=env_vars,
-                     stdout=FNULL)
+    args = ['ansible-playbook', '-v'] if verbose \
+        else ['ansible-playbook']
+    args.extend([
+        '--inventory-file={}'.format(inventory_file),
+        os_facts_path])
+    status = subprocess.call(args, env=env_vars, stdout=FNULL)
     if not status == 0:
     if not status == 0:
         return [], 1
         return [], 1
     callback_facts_file = open(CFG.settings['ansible_callback_facts_yaml'], 'r')
     callback_facts_file = open(CFG.settings['ansible_callback_facts_yaml'], 'r')
@@ -109,7 +110,7 @@ def load_system_facts(inventory_file, os_facts_path, env_vars):
     return callback_facts, 0
     return callback_facts, 0
 
 
 
 
-def default_facts(hosts):
+def default_facts(hosts, verbose=False):
     global CFG
     global CFG
     inventory_file = generate_inventory(hosts)
     inventory_file = generate_inventory(hosts)
     os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory)
     os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory)
@@ -121,10 +122,10 @@ def default_facts(hosts):
         facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
         facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
     if 'ansible_config' in CFG.settings:
     if 'ansible_config' in CFG.settings:
         facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
         facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
-    return load_system_facts(inventory_file, os_facts_path, facts_env)
+    return load_system_facts(inventory_file, os_facts_path, facts_env, verbose)
 
 
 
 
-def run_main_playbook(hosts, hosts_to_run_on):
+def run_main_playbook(hosts, hosts_to_run_on, verbose=False):
     global CFG
     global CFG
     inventory_file = generate_inventory(hosts)
     inventory_file = generate_inventory(hosts)
     if len(hosts_to_run_on) != len(hosts):
     if len(hosts_to_run_on) != len(hosts):
@@ -138,17 +139,19 @@ def run_main_playbook(hosts, hosts_to_run_on):
         facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
         facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
     if 'ansible_config' in CFG.settings:
     if 'ansible_config' in CFG.settings:
         facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
         facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
-    return run_ansible(main_playbook_path, inventory_file, facts_env)
+    return run_ansible(main_playbook_path, inventory_file, facts_env, verbose)
 
 
 
 
-def run_ansible(playbook, inventory, env_vars):
-    return subprocess.call(['ansible-playbook',
-                             '--inventory-file={}'.format(inventory),
-                             playbook],
-                             env=env_vars)
+def run_ansible(playbook, inventory, env_vars, verbose=False):
+    args = ['ansible-playbook', '-v'] if verbose \
+        else ['ansible-playbook']
+    args.extend([
+        '--inventory-file={}'.format(inventory),
+        playbook])
+    return subprocess.call(args, env=env_vars)
 
 
 
 
-def run_uninstall_playbook():
+def run_uninstall_playbook(verbose=False):
     playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
     playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
         'playbooks/adhoc/uninstall.yml')
         'playbooks/adhoc/uninstall.yml')
     inventory_file = generate_inventory(CFG.hosts)
     inventory_file = generate_inventory(CFG.hosts)
@@ -157,10 +160,10 @@ def run_uninstall_playbook():
         facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
         facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
     if 'ansible_config' in CFG.settings:
     if 'ansible_config' in CFG.settings:
         facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
         facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
-    return run_ansible(playbook, inventory_file, facts_env)
+    return run_ansible(playbook, inventory_file, facts_env, verbose)
 
 
 
 
-def run_upgrade_playbook():
+def run_upgrade_playbook(verbose=False):
     playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
     playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
         'playbooks/adhoc/upgrades/upgrade.yml')
         'playbooks/adhoc/upgrades/upgrade.yml')
     # TODO: Upgrade inventory for upgrade?
     # TODO: Upgrade inventory for upgrade?
@@ -170,5 +173,5 @@ def run_upgrade_playbook():
         facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
         facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
     if 'ansible_config' in CFG.settings:
     if 'ansible_config' in CFG.settings:
         facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
         facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
-    return run_ansible(playbook, inventory_file, facts_env)
+    return run_ansible(playbook, inventory_file, facts_env, verbose)