run_support.py

from future.utils import viewitems
from . import bash, functional
from .functional import cfg_tobool
from .io import NativeIO
from .util.system import (make_fofn_abs, make_dirs, cd)
from configparser import ConfigParser
import functools
import json
import logging
import logging.config
import os
import re
import io
import sys
import tempfile
import time
import uuid

logger = logging.getLogger(__name__)


def _prepend_env_paths(content, names):
    """
    E.g.
      names = ['PATH', 'PYTHONPATH']
      content =
        echo hi
    =>
      export PATH=current:path:${PATH}
      export PYTHONPATH=current:path:${PYTHONPATH}
      echo hi
    """
    export_env_vars = ['export %(k)s=%(v)s:${%(k)s}' % dict(
        k=name, v=os.environ.get(name, '')) for name in names]
    return '\n'.join(export_env_vars + [content])


def update_env_in_script(fn, names):
    """Modify fn in place, via _prepend_env_paths().
    """
    with open(fn) as ifs:
        content = ifs.read()
    content = _prepend_env_paths(content, names)
    with open(fn, 'w') as ofs:
        ofs.write(content)
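
# Illustrative sketch (the file name is hypothetical): update_env_in_script()
# rewrites a script in place so the caller's environment is exported first.
#   update_env_in_script('run.sh', ['PATH', 'PYTHONPATH'])
# turns a script containing "echo hi" into roughly:
#   export PATH=<current $PATH>:${PATH}
#   export PYTHONPATH=<current $PYTHONPATH>:${PYTHONPATH}
#   echo hi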


def use_tmpdir_for_files(basenames, src_dir, link_dir):
    """NOT USED. Kept only for reference. This will be done in pypeFLOW.

    Generate script to copy db files to tmpdir (for speed).
    - Choose tmp_dir, based on src_dir name.
    - rsync basenames into tmp_dir  # after 'flock', per file
    - symlink from link_dir into tmp_dir.
    Return list of script lines, sans linefeed.
    """
    # Note: link_dir is unused here; the commands use bare basenames, so the
    # generated script is apparently meant to be run from within link_dir.
    script = list()
    unique = os.path.abspath(src_dir).replace('/', '_')
    root = tempfile.gettempdir()
    tmp_dir = os.path.join(root, 'falcon', unique)
    script.append('mkdir -p %s' % tmp_dir)
    for basename in basenames:
        src = os.path.join(src_dir, basename)
        dst = os.path.join(tmp_dir, basename)
        rm_cmd = 'rm -f %s' % basename
        # Wait on lock for up to 10 minutes, in case of very large files.
        rsync_cmd = "flock -w 600 %s.lock -c 'rsync -av %s %s'" % (
            dst, src, dst)
        ln_cmd = 'ln -sf %s %s' % (dst, basename)
        script.extend([rm_cmd, rsync_cmd, ln_cmd])
    return script
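
# For reference (paths are hypothetical; the root comes from
# tempfile.gettempdir()): use_tmpdir_for_files(['raw_reads.db'],
# '/wd/0-rawreads', '/wd/job') yields script lines roughly like:
#   mkdir -p /tmp/falcon/_wd_0-rawreads
#   rm -f raw_reads.db
#   flock -w 600 /tmp/falcon/_wd_0-rawreads/raw_reads.db.lock -c 'rsync -av /wd/0-rawreads/raw_reads.db /tmp/falcon/_wd_0-rawreads/raw_reads.db'
#   ln -sf /tmp/falcon/_wd_0-rawreads/raw_reads.db raw_reads.db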


def make_job_data(url, script_fn):
    """Choose defaults.
    Run in same directory as script_fn.
    Base job_name on script_fn.
    """
    wd = os.path.dirname(script_fn)
    job_name = '{0}-{1}-{2}'.format(
        os.path.basename(script_fn),
        url.split("/")[-1],
        str(uuid.uuid4())[:8],
    )
    job_data = {"job_name": job_name,
                "cwd": wd,
                "script_fn": script_fn}
    return job_data
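
# Example shape (url and path are hypothetical):
#   make_job_data('sge://host/queue', '/wd/job_0000/run.sh')
# returns something like
#   {'job_name': 'run.sh-queue-1a2b3c4d',
#    'cwd': '/wd/job_0000',
#    'script_fn': '/wd/job_0000/run.sh'}
# where the trailing 8 hex chars come from a fresh uuid4.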


def check_HPCdaligner_option(option):
    msg = ''
    if '-dal' in option:
        msg += 'HPC.daligner option "-dal" has changed to "-B".\n'
    if '-deg' in option:
        msg += 'HPC.daligner option "-deg" has changed to "-D".\n'
    if msg:
        raise Exception(msg)


def clean_falcon_options(fc):
    """Update some values in fc.
    Replace _ with - in a couple places.
    """
    keys = ('falcon_sense_option', 'overlap_filtering_setting',
            'fc_ovlp_to_graph_option')
    for key in keys:
        update_dash_flags(fc, key)
    for dk in ('pa_HPCdaligner_option', 'ovlp_HPCdaligner_option'):
        if dk in fc:
            check_HPCdaligner_option(fc[dk])


def get_config(config):
    """
    This is only for the call from pbsmrtpipe:
        support.get_config(support.parse_config(fn))
    We have changed parse_config() to return a dict.
    So this is a no-op.
    """
    cfg = dict(config)  # already a dict now
    return cfg


def dict2config(jdict, section):
    config = ConfigParser()
    if not config.has_section(section):
        config.add_section(section)
    for (k, v) in viewitems(jdict):
        config.set(section, k, str(v))
    return config
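
# Minimal sketch of the round-trip: dict2config({'njobs': 8}, 'job.defaults')
# produces a ConfigParser where
#   config.get('job.defaults', 'njobs') == '8'
# (values are stringified by the str(v) in config.set above).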


def parse_config(config_fn):
    """Deprecated.
    Called from pbsmrtpipe, for now.
    """
    return parse_cfg_file(config_fn)


def parse_cfg_file(config_fn):
    """Return as dict.
    """
    with open(config_fn) as stream:
        ext = os.path.splitext(config_fn)[1]
        if ext in ('.json', '.js'):
            config = json.loads(stream.read())
        else:
            # Parse sections (and case-sensitively), into sub-dicts.
            config = parse_cfg_with_sections(stream)
    update_defaults(config['General'])
    # Copy General section to top, for now.
    #for key, val in config['General'].items():
    #    config[key] = val
    ##cfg.update(config.get('General', {}))
    check_config_sections(config)  # Ensure that the right sections exist.
    update_job_sections(config)
    return config


def process_job_defaults(job_defaults):
    key = 'use_tmpdir'
    use_tmpdir = job_defaults.get(key, '')
    if '/' in use_tmpdir:
        tempfile.tempdir = use_tmpdir
        os.environ['TMPDIR'] = use_tmpdir
    else:
        if use_tmpdir.lower().startswith('t'):
            use_tmpdir = tempfile.gettempdir()
        else:
            use_tmpdir = False
        job_defaults[key] = use_tmpdir
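
# The three interpretations of use_tmpdir, for reference:
#   use_tmpdir = /scratch/tmp  -> kept as-is; tempfile.tempdir and $TMPDIR point there
#   use_tmpdir = true          -> normalized to tempfile.gettempdir()
#   anything else (e.g. false) -> normalized to False (no tmpdir staging)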


def update_job_defaults_section(config):
    """For backwards compatibility with stuff from 'General' section.
    """
    General = config['General']
    job_defaults = config['job.defaults']

    if 'njobs' in General:
        logger.warning('"njobs" belongs in the [job.defaults] section.')
    if 'pwatcher_type' in General:
        logger.warning('Please specify "pwatcher_type" only in the [job.defaults] section, not in [General].')
    if 'job_type' in General:
        logger.warning('Please specify "job_type" only in the [job.defaults] section, not in [General].')
    if 'stop_all_jobs_on_failure' in General:
        logger.warning('Please specify "stop_all_jobs_on_failure" only in the [job.defaults] section, not in [General].')
    if 'use_tmpdir' in General:
        logger.warning('Please specify "use_tmpdir" only in the [job.defaults] section, not in [General].')
    if 'job_name_style' in General:
        logger.warning('Please specify "job_name_style" only in the [job.defaults] section, not in [General].')
    if 'job_queue' in General:
        logger.warning('Please specify "JOB_QUEUE" only in the [job.defaults] section, not as "job_queue" in [General].')
    if 'sge_option' in General:
        logger.warning('Please specify "JOB_OPTS" in the [job.defaults] section, not as "sge_option" in [General].')

    pwatcher_type = General.get('pwatcher_type', 'fs_based')  #, config.get('pwatcher_type')))
    job_type = job_defaults.get('job_type', General.get('job_type', '')).lower()
    job_queue = General.get('job_queue', '')
    sge_option = General.get('sge_option', '')
    if 'pwatcher_type' not in job_defaults:
        job_defaults['pwatcher_type'] = pwatcher_type
    else:
        pwatcher_type = job_defaults['pwatcher_type']
    if 'submit' not in config['job.defaults']:
        if 'blocking' == pwatcher_type:
            if not job_queue or ' ' not in job_queue:
                raise Exception('pwatcher_type=blocking, but "submit" is not in [job.defaults] section.')
            config['job.defaults']['submit'] = job_queue
            logger.warning('Please set "submit" in [job.defaults] section. (For now, we will use "job_queue" from [General], which was a hack.)')
        elif 'fs_based' == pwatcher_type or 'network_based' == pwatcher_type:
            if not job_type:
                logger.error('job.defaults.submit is not set; pwatcher_type={}; but job_type is not set. Maybe try "job_type=local" first.'.format(pwatcher_type))
                job_type = 'local'
            job_defaults['job_type'] = job_type
            allowed_job_types = ['sge', 'pbs', 'torque', 'slurm', 'lsf', 'local']
            assert job_type in allowed_job_types, 'job_type={} not in {}'.format(
                job_type, allowed_job_types)
            if job_queue and 'JOB_QUEUE' not in config['job.defaults']:
                job_defaults['JOB_QUEUE'] = job_queue
        else:
            raise Exception('Unknown pwatcher_type={}'.format(pwatcher_type))
    #assert 'submit' in config['job.defaults'], repr(config)
    if sge_option and 'JOB_OPTS' not in config['job.defaults']:
        job_defaults['JOB_OPTS'] = sge_option
    if 'njobs' not in job_defaults:
        config['job.defaults']['njobs'] = int(General.get('default_concurrent_jobs', 8))  # GLOBAL DEFAULT CONCURRENCY
        msg = 'Please supply a default for "njobs" (aka concurrency) in section [job.defaults]. For now, we will use {}'.format(
            config['job.defaults']['njobs'])
        logger.warning(msg)

    def update_if_if(key):
        if key not in job_defaults:
            if key in General:
                job_defaults[key] = General[key]
                logger.warning('Found "{}" in [General] section; it should be in [job.defaults] instead.'.format(key))
    update_if_if('job_name_style')
    update_if_if('stop_all_jobs_on_failure')
    update_if_if('use_tmpdir')

    legacy_names = [
        'pwatcher_type', 'pwatcher_directory',
        'job_type', 'job_queue', 'job_name_style',
        'use_tmpdir',
    ]

    def update_if_missing(name, sub_dict):
        if General.get(name) and name not in sub_dict:
            sub_dict[name] = General[name]
    for name in legacy_names:
        update_if_missing(name, config['job.defaults'])
    process_job_defaults(job_defaults)


def update_job_sections(config):
    """More for backwards compatibility with stuff from 'General' section.
    """
    update_job_defaults_section(config)
    General = config['General']

    # Update a few where the names change and the section is non-default.
    def update_step_job_opts(name):
        if General.get('sge_option_' + name) and 'JOB_OPTS' not in config['job.step.' + name]:
            config['job.step.' + name]['JOB_OPTS'] = General['sge_option_' + name]

    def update_step_njobs(name):
        if General.get(name + '_concurrent_jobs') and 'njobs' not in config['job.step.' + name]:
            config['job.step.' + name]['njobs'] = int(General[name + '_concurrent_jobs'])

    for name in ['bd', 'da', 'la', 'pda', 'pla', 'cns', 'fc', 'asm']:
        update_step_job_opts(name)
        update_step_njobs(name)

    # Prefer 'asm' to 'fc'.
    asm = dict(config['job.step.asm'])
    config['job.step.asm'] = config['job.step.fc']
    del config['job.step.fc']
    config['job.step.asm'].update(asm)
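
# Legacy-mapping summary, derived from the code above (example values are
# hypothetical): a [General] entry "sge_option_da = -pe smp 8" becomes
# JOB_OPTS in [job.step.da], and "da_concurrent_jobs = 32" becomes njobs
# there, unless those keys were already set explicitly.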


def parse_cfg_with_sections(stream):
    """Return as dict of dict of ...

    Experimental:
    ConfigParser sections become sub-sub sections when separated by dots.
        [foo.bar]
        baz = 42
    is equivalent to JSON
        {"foo": {"bar": {"baz": 42}}}
    (The current implementation keeps dotted section names flat, e.g.
    result['foo.bar'] == {'baz': '42'}.)
    """
    content = stream.read()
    result = dict()
    try:
        return json.loads(content)
    except ValueError:
        pass  # Not JSON; fall through to INI parsing.
    config = ConfigParser(strict=False)
    config.optionxform = str  # stay case-sensitive
    config.read_file(NativeIO(content))
    for sec in config.sections():
        result[sec] = dict(config.items(sec))
    return result
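
# Example: an INI stream such as
#   [General]
#   genome_size = 4600000
#   [job.defaults]
#   njobs = 8
# parses to
#   {'General': {'genome_size': '4600000'}, 'job.defaults': {'njobs': '8'}}
# while a stream containing a JSON object is returned directly via json.loads().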


def check_config_sections(cfg):
    """Disallow unexpected sections, and ensure all allowed sections exist.
    """
    allowed_sections = set([
        'General',
        'job.step.dust',
        'job.step.da', 'job.step.pda',
        'job.step.la', 'job.step.pla',
        'job.step.cns', 'job.step.fc',
        'job.step.asm',
        'job.defaults',
    ])
    all_sections = set(k for k, v in list(cfg.items()) if isinstance(v, dict))
    unexpected = all_sections - allowed_sections
    if unexpected:
        msg = 'You have {} unexpected cfg sections: {}'.format(
            len(unexpected), unexpected)
        raise Exception(msg)
    # Guarantee they all exist.
    for sec in allowed_sections:
        if sec not in cfg:
            cfg[sec] = dict()


def update_dash_flags(cfg, key):
    if key not in cfg:
        return
    val = cfg[key]
    cfg[key] = new_val = functional.dash_flags(cfg[key])
    if val != new_val:
        msg = '''\
Option contains flags with "_":
 "{key}={val}". Those should be "-", as in
 "{key}={new_val}". Auto-replaced.'''.format(**locals())
        logger.warning(msg)
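
# Example: given falcon_sense_option = '--min_idt 0.70', update_dash_flags()
# rewrites it to '--min-idt 0.70' (via functional.dash_flags) and logs the
# warning above about the auto-replacement.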


TEXT_FILE_BUSY = 'avoid_text_file_busy'


def update_defaults(cfg):
    """cfg is probably the General sub-dict.
    """
    def set_default(key, val):
        if key not in cfg:
            cfg[key] = val
    set_default('input_type', 'raw')
    set_default('overlap_filtering_setting', '--max-diff 1000 --max-cov 1000 --min-cov 2')
    #set_default('pa_daligner_option', '-e.70 -s100 -t16') # TODO: -t is a dumb default
    #set_default('ovlp_daligner_option', '-e.96 -s1000 -h60 -t32') # TODO: -t is a dumb default
    set_default('pa_HPCdaligner_option', '-v -D24')
    set_default('ovlp_HPCdaligner_option', '-v -D24 -l500')
    set_default('pa_HPCTANmask_option', '-l500')  # daligner defaults to -l1000
    #set_default('ovlp_HPCTANmask_option', '-l500')
    set_default('pa_REPmask_code', '0,300/0,300/0,300')
    set_default('pa_DBsplit_option', '-x500 -s200 -a')
    set_default('skip_checks', False)
    set_default('pa_DBdust_option', '')  # Gene recommends the defaults. I have tried -w128 -t2.5 -m20
    set_default('pa_fasta_filter_option', 'streamed-internal-median')
    set_default('pa_subsample_coverage', 0)
    set_default('pa_subsample_strategy', 'random')
    set_default('pa_subsample_random_seed', 12345)
    set_default('dazcon', False)
    set_default('pa_dazcon_option', '-j 4 -x -l 500')
    set_default('ovlp_DBdust_option', '')
    set_default('ovlp_DBsplit_option', '-x500 -s200 -a')
    set_default('falcon_sense_option', '--output-multi --min-idt 0.70 --min-cov 2 --max-n-read 1800')
    set_default('falcon_sense_skip_contained', False)
    set_default('falcon_sense_greedy', False)
    set_default('LA4Falcon_preload', '')
    set_default('fc_ovlp_to_graph_option', '')
    set_default('genome_size', 0)
    set_default('seed_coverage', 20)
    set_default('length_cutoff', -1)
    set_default('length_cutoff_pr', 0)
    set_default('bestn', 12)
    set_default('target', 'assembly')
    set_default(TEXT_FILE_BUSY, bash.BUG_avoid_Text_file_busy)

    for bool_key in ('skip_checks', 'dazcon', 'falcon_sense_skip_contained', 'falcon_sense_greedy', 'LA4Falcon_preload', TEXT_FILE_BUSY):
        cfg[bool_key] = functional.cfg_tobool(cfg.get(bool_key, False))

    if 'dust' in cfg:
        logger.warning(
            "The 'dust' option is deprecated and ignored. We always run DBdust now. Use ovlp_/pa_DBdust_option to override DBdust default arguments.")

    bash.BUG_avoid_Text_file_busy = cfg[TEXT_FILE_BUSY]

    clean_falcon_options(cfg)

    falcon_sense_option = cfg['falcon_sense_option']
    if 'local_match_count' in falcon_sense_option or 'output_dformat' in falcon_sense_option:
        raise Exception('Please remove obsolete "--local_match_count_*" or "--output_dformat"' +
                        ' from "falcon_sense_option" in your cfg: %s' % repr(falcon_sense_option))

    genome_size = int(cfg['genome_size'])
    length_cutoff = int(cfg['length_cutoff'])
    if length_cutoff < 0 and genome_size < 1:
        raise Exception(
            'Must specify either length_cutoff>0 or genome_size>0')

    pa_subsample_strategy = cfg['pa_subsample_strategy']
    pa_subsample_random_seed = int(cfg['pa_subsample_random_seed'])
    pa_subsample_coverage = int(cfg['pa_subsample_coverage'])
    if pa_subsample_coverage > 0:
        if genome_size < 1:
            raise Exception(
                'Must specify genome_size > 0 for subsampling.')

    # This one depends on length_cutoff_pr for its default.
    fc_ovlp_to_graph_option = cfg['fc_ovlp_to_graph_option']
    if '--min_len' not in fc_ovlp_to_graph_option and '--min-len' not in fc_ovlp_to_graph_option:
        length_cutoff_pr = cfg['length_cutoff_pr']
        fc_ovlp_to_graph_option += ' --min-len {}'.format(length_cutoff_pr)
        cfg['fc_ovlp_to_graph_option'] = fc_ovlp_to_graph_option

    target = cfg['target']
    if target not in ["overlapping", "pre-assembly", "assembly"]:
        msg = """Target has to be "overlapping", "pre-assembly" or "assembly" in this version. You have an unknown target {!r} in the configuration file.""".format(target)
        raise Exception(msg)

    possible_extra_keys = [
        'sge_option', 'default_concurrent_jobs',
        'pwatcher_type', 'pwatcher_directory',
        'job_type', 'job_queue', 'job_name_style',
        'use_tmpdir',
    ]
    for step in ['dust', 'da', 'la', 'pda', 'pla', 'fc', 'cns', 'asm']:
        sge_option_key = 'sge_option_' + step
        possible_extra_keys.append(sge_option_key)
        concurrent_jobs_key = step + '_concurrent_jobs'
        possible_extra_keys.append(concurrent_jobs_key)
    extra = list()
    for key in possible_extra_keys:
        if key in cfg:
            extra.append(key)
    if extra:
        extra.sort()
        msg = 'You have several old-style options. These should be provided in the `[job.defaults]` or `[job.step.*]` sections, and possibly renamed. See https://github.com/PacificBiosciences/FALCON/wiki/Configuration\n {}'.format(extra)
        logger.warning(msg)
    check_unexpected_keys(cfg)
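
# A minimal valid General sub-dict, for reference: after update_defaults(),
#   {'genome_size': 4600000, 'seed_coverage': 20}
# passes validation (the default length_cutoff of -1 is allowed because
# genome_size > 0), and fc_ovlp_to_graph_option gains ' --min-len 0' from
# the default length_cutoff_pr of 0.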


def check_unexpected_keys(cfg):
    # Warn on unused variables.
    expected = (
        TEXT_FILE_BUSY,
        'input_fofn',
        'input_type',
        'genome_size',
        'seed_coverage',
        'length_cutoff',
        'length_cutoff_pr',
        'dazcon',
        'pa_dazcon_option',
        'pa_DBdust_option',
        'pa_fasta_filter_option',
        'pa_subsample_coverage',
        'pa_subsample_strategy',
        'pa_subsample_random_seed',
        'pa_DBsplit_option',
        'pa_HPCTANmask_option',
        'pa_HPCREPmask_option',
        'pa_REPmask_code',
        'pa_daligner_option',
        'pa_HPCdaligner_option',
        'ovlp_DBdust_option',
        'ovlp_DBsplit_option',
        #'ovlp_HPCTANmask_option',
        'ovlp_daligner_option',
        'ovlp_HPCdaligner_option',
        'skip_checks',
        'falcon_sense_option',
        'falcon_sense_skip_contained',
        'falcon_sense_greedy',
        'LA4Falcon_preload',
        'LA4Falcon_pre',  # hidden
        'LA4Falcon_post',  # hidden
        'LA4Falcon_dbdir',  # hidden
        'overlap_filtering_setting',
        'fc_ovlp_to_graph_option',
        'bestn',
        'target',
    )
    unused = set(cfg.keys()) - set(expected)
    if unused:
        logger.warning("Unexpected keys in input config: {}".format(unused))


default_logging_config = """
[loggers]
keys=root

[handlers]
keys=stream,file_all

[formatters]
keys=form01,form02

[logger_root]
level=NOTSET
handlers=stream,file_all

[handler_stream]
class=StreamHandler
level=INFO
formatter=form02
args=(sys.stderr,)

[handler_file_all]
class=FileHandler
level=DEBUG
formatter=form01
args=('all.log', 'w')

[formatter_form01]
format=%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s

[formatter_form02]
format=[%(levelname)s]%(message)s
"""


def _setup_logging(logging_config_fn):
    """See https://docs.python.org/2/library/logging.config.html
    """
    logging.Formatter.converter = time.gmtime  # cannot be done in .ini
    if logging_config_fn:
        if logging_config_fn.endswith('.json'):
            logging.config.dictConfig(
                json.loads(open(logging_config_fn).read()))
            # print repr(logging.Logger.manager.loggerDict) # to debug
            return
        logger_fileobj = open(logging_config_fn)
    else:
        logger_fileobj = NativeIO(default_logging_config)
    defaults = {}
    logging.config.fileConfig(
        logger_fileobj, defaults=defaults, disable_existing_loggers=False)


def setup_logger(logging_config_fn):
    global logger
    try:
        _setup_logging(logging_config_fn)
        logger = logging.getLogger("fc_run")
        logger.info('Setup logging from file "{}".'.format(logging_config_fn))
    except Exception:
        logging.basicConfig()
        logger = logging.getLogger()
        logger.exception(
            'Failed to setup logging from file "{}". Using basicConfig().'.format(logging_config_fn))
    try:
        import logging_tree
        logger.info(logging_tree.format.build_description())
    except ImportError:
        pass
    return logger


def get_length_cutoff(length_cutoff, fn):
    if length_cutoff < 0:
        length_cutoff = int(open(fn).read().strip())
    logger.info('length_cutoff=%d from %r' % (length_cutoff, fn))
    return length_cutoff  # possibly updated
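
# Example: with a cutoff file containing "5000\n",
#   get_length_cutoff(-1, fn)   -> 5000  (auto-computed cutoff read from fn)
#   get_length_cutoff(2000, fn) -> 2000  (fn is ignored when the cutoff is >= 0)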


def logger_record(func):
    """Decorator: log entry, arguments, and elapsed wall time of func."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        logger.info("====>Begin executing function: {}".format(func.__name__))
        if args:
            logger.info("====>args={}".format(args))
        if kwargs:
            logger.info("====>kwargs={}".format(kwargs))
        # function execution
        t_start = time.time()
        result = func(*args, **kwargs)
        t_end = time.time()
        logger.info("====>End executing function: {}, time cost: {} seconds."
                    .format(func.__name__, (t_end - t_start)))
        return result  # propagate the wrapped function's return value
    return wrapper
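
# Usage sketch (run_assembly is a hypothetical function name):
#
#   @logger_record
#   def run_assembly(config_fn):
#       ...
#
# Each call then logs begin/end markers, the arguments, and the elapsed
# wall-clock time around run_assembly(), and returns its result.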